/* Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */

#include <config.h>

#include "ofproto/ofproto-dpif-xlate.h"

#include "bfd.h"
#include "bitmap.h"
#include "bond.h"
#include "bundle.h"
#include "byte-order.h"
#include "cfm.h"
#include "connmgr.h"
#include "coverage.h"
#include "dpif.h"
#include "dynamic-string.h"
#include "in-band.h"
#include "lacp.h"
#include "learn.h"
#include "list.h"
#include "mac-learning.h"
#include "meta-flow.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "netlink.h"
#include "nx-match.h"
#include "odp-execute.h"
#include "ofp-actions.h"
#include "ofproto/ofproto-dpif-ipfix.h"
#include "ofproto/ofproto-dpif-mirror.h"
#include "ofproto/ofproto-dpif-sflow.h"
#include "ofproto/ofproto-dpif.h"
#include "tunnel.h"
#include "vlog.h"

COVERAGE_DEFINE(xlate_actions);

VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);

/* Maximum depth of flow table recursion (due to resubmit actions) in a
 * flow translation. */
#define MAX_RESUBMIT_RECURSION 64

struct xbridge {
    struct hmap_node hmap_node;   /* Node in global 'xbridges' map. */
    struct ofproto_dpif *ofproto; /* Key in global 'xbridges' map. */

    struct list xbundles;         /* Owned xbundles. */
    struct hmap xports;           /* Indexed by ofp_port. */

    char *name;                   /* Name used in log messages. */
    struct mac_learning *ml;      /* Mac learning handle. */
    struct mbridge *mbridge;      /* Mirroring. */
    struct dpif_sflow *sflow;     /* SFlow handle, or null. */
    struct dpif_ipfix *ipfix;     /* Ipfix handle, or null. */

    enum ofp_config_flags frag;   /* Fragmentation handling. */
    bool has_stp;                 /* Bridge runs stp? */
    bool has_netflow;             /* Bridge runs netflow? */
    bool has_in_band;             /* Bridge has in band control? */
    bool forward_bpdu;            /* Bridge forwards STP BPDUs? */
};

struct xbundle {
    struct hmap_node hmap_node;    /* In global 'xbundles' map. */
    struct ofbundle *ofbundle;     /* Key in global 'xbundles' map. */

    struct list list_node;         /* In parent 'xbridges' list. */
    struct xbridge *xbridge;       /* Parent xbridge. */

    struct list xports;            /* Contains "struct xport"s. */

    char *name;                    /* Name used in log messages. */
    struct bond *bond;             /* Nonnull iff more than one port. */
    struct lacp *lacp;             /* LACP handle or null. */

    enum port_vlan_mode vlan_mode; /* VLAN mode. */
    int vlan;                      /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;         /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                    * NULL if all VLANs are trunked. */
    bool use_priority_tags;        /* Use 802.1p tag for frames in VLAN 0? */
    bool floodable;                /* No port has OFPUTIL_PC_NO_FLOOD set? */
};

struct xport {
    struct hmap_node hmap_node;      /* Node in global 'xports' map. */
    struct ofport_dpif *ofport;      /* Key in global 'xports' map. */

    struct hmap_node ofp_node;       /* Node in parent xbridge 'xports' map. */
    ofp_port_t ofp_port;             /* Key in parent xbridge 'xports' map. */

    odp_port_t odp_port;             /* Datapath port number or ODPP_NONE. */

    struct list bundle_node;         /* In parent xbundle (if it exists). */
    struct xbundle *xbundle;         /* Parent xbundle or null. */

    struct netdev *netdev;           /* 'ofport''s netdev. */

    struct xbridge *xbridge;         /* Parent bridge. */
    struct xport *peer;              /* Patch port peer or null. */

    enum ofputil_port_config config; /* OpenFlow port configuration. */
    enum stp_state stp_state;        /* STP_DISABLED if STP not in use. */

    bool may_enable;                 /* May be enabled in bonds. */
    bool is_tunnel;                  /* Is a tunnel port. */

    struct cfm *cfm;                 /* CFM handle or null. */
    struct bfd *bfd;                 /* BFD handle or null. */
};

struct xlate_ctx {
    struct xlate_in *xin;
    struct xlate_out *xout;

    const struct xbridge *xbridge;

    /* Flow at the last commit. */
    struct flow base_flow;

    /* Tunnel IP destination address as received.  This is stored separately
     * as the base_flow.tunnel is cleared on init to reflect the datapath
     * behavior.  Used to make sure not to send tunneled output to ourselves,
     * which might lead to an infinite loop.  This could happen easily
     * if a tunnel is marked as 'remote_ip=flow', and the flow does not
     * actually set the tun_dst field. */
    ovs_be32 orig_tunnel_ip_dst;

    /* Stack for the push and pop actions.  Each stack element is of type
     * "union mf_subvalue". */
    union mf_subvalue init_stack[1024 / sizeof(union mf_subvalue)];
    struct ofpbuf stack;

    /* The rule that we are currently translating, or NULL. */
    struct rule_dpif *rule;

    int recurse;                 /* Recursion level, via xlate_table_action. */
    bool max_resubmit_trigger;   /* Recursed too deeply during translation. */
    uint32_t orig_skb_priority;  /* Priority when packet arrived. */
    uint8_t table_id;            /* OpenFlow table ID where flow was found. */
    uint32_t sflow_n_outputs;    /* Number of output ports. */
    odp_port_t sflow_odp_port;   /* Output port for composing sFlow action. */
    uint16_t user_cookie_offset; /* Used for user_action_cookie fixup. */
    bool exit;                   /* No further actions should be processed. */
};

/* A controller may use OFPP_NONE as the ingress port to indicate that
 * it did not arrive on a "real" port.  'ofpp_none_bundle' exists for
 * when an input bundle is needed for validation (e.g., mirroring or
 * OFPP_NORMAL processing).  It is not connected to an 'ofproto', nor does
 * it have any 'port' structs, so care must be taken when dealing with it. */
static struct xbundle ofpp_none_bundle = {
    .name = "OFPP_NONE",
    .vlan_mode = PORT_VLAN_TRUNK
};

static struct hmap xbridges = HMAP_INITIALIZER(&xbridges);
static struct hmap xbundles = HMAP_INITIALIZER(&xbundles);
static struct hmap xports = HMAP_INITIALIZER(&xports);

static bool may_receive(const struct xport *, struct xlate_ctx *);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
                             struct xlate_ctx *);
static void xlate_normal(struct xlate_ctx *);
static void xlate_report(struct xlate_ctx *, const char *);
static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
                               uint8_t table_id, bool may_packet_in);
static bool input_vid_is_valid(uint16_t vid, struct xbundle *, bool warn);
static uint16_t input_vid_to_vlan(const struct xbundle *, uint16_t vid);
static void output_normal(struct xlate_ctx *, const struct xbundle *,
                          uint16_t vlan);
static void compose_output_action(struct xlate_ctx *, ofp_port_t ofp_port);

static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

static struct xbridge *xbridge_lookup(const struct ofproto_dpif *);
static struct xbundle *xbundle_lookup(const struct ofbundle *);
static struct xport *xport_lookup(struct ofport_dpif *);
static struct xport *get_ofp_port(const struct xbridge *, ofp_port_t ofp_port);

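/* Creates the xbridge for 'ofproto' if it does not already exist and brings
 * the xlate module's copy of its configuration up to date, taking new
 * references to the supplied MAC learning, mirroring, sFlow, and IPFIX
 * handles and releasing any previously held ones. */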
void
xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
                  const struct mac_learning *ml, const struct mbridge *mbridge,
                  const struct dpif_sflow *sflow,
                  const struct dpif_ipfix *ipfix, enum ofp_config_flags frag,
                  bool forward_bpdu, bool has_in_band, bool has_netflow,
                  bool has_stp)
{
    struct xbridge *xbridge = xbridge_lookup(ofproto);

    if (!xbridge) {
        xbridge = xzalloc(sizeof *xbridge);
        xbridge->ofproto = ofproto;

        hmap_insert(&xbridges, &xbridge->hmap_node, hash_pointer(ofproto, 0));
        hmap_init(&xbridge->xports);
        list_init(&xbridge->xbundles);
    }

    if (xbridge->ml != ml) {
        mac_learning_unref(xbridge->ml);
        xbridge->ml = mac_learning_ref(ml);
    }

    if (xbridge->mbridge != mbridge) {
        mbridge_unref(xbridge->mbridge);
        xbridge->mbridge = mbridge_ref(mbridge);
    }

    if (xbridge->sflow != sflow) {
        dpif_sflow_unref(xbridge->sflow);
        xbridge->sflow = dpif_sflow_ref(sflow);
    }

    if (xbridge->ipfix != ipfix) {
        dpif_ipfix_unref(xbridge->ipfix);
        xbridge->ipfix = dpif_ipfix_ref(ipfix);
    }

    free(xbridge->name);
    xbridge->name = xstrdup(name);

    xbridge->forward_bpdu = forward_bpdu;
    xbridge->has_in_band = has_in_band;
    xbridge->has_netflow = has_netflow;
    xbridge->has_stp = has_stp;
    xbridge->frag = frag;
}

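/* Removes 'ofproto' from the xlate module, first removing all of its
 * remaining xports and xbundles. */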
void
xlate_remove_ofproto(struct ofproto_dpif *ofproto)
{
    struct xbridge *xbridge = xbridge_lookup(ofproto);
    struct xbundle *xbundle, *next_xbundle;
    struct xport *xport, *next_xport;

    if (!xbridge) {
        return;
    }

    HMAP_FOR_EACH_SAFE (xport, next_xport, ofp_node, &xbridge->xports) {
        xlate_ofport_remove(xport->ofport);
    }

    LIST_FOR_EACH_SAFE (xbundle, next_xbundle, list_node, &xbridge->xbundles) {
        xlate_bundle_remove(xbundle->ofbundle);
    }

    hmap_remove(&xbridges, &xbridge->hmap_node);
    free(xbridge->name);
    free(xbridge);
}

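/* Creates the xbundle for 'ofbundle' if it does not already exist and copies
 * in the bundle's current VLAN configuration, taking new references to
 * 'bond' and 'lacp' as necessary. */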
void
xlate_bundle_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
                 const char *name, enum port_vlan_mode vlan_mode, int vlan,
                 unsigned long *trunks, bool use_priority_tags,
                 const struct bond *bond, const struct lacp *lacp,
                 bool floodable)
{
    struct xbundle *xbundle = xbundle_lookup(ofbundle);

    if (!xbundle) {
        xbundle = xzalloc(sizeof *xbundle);
        xbundle->ofbundle = ofbundle;
        xbundle->xbridge = xbridge_lookup(ofproto);

        hmap_insert(&xbundles, &xbundle->hmap_node, hash_pointer(ofbundle, 0));
        list_insert(&xbundle->xbridge->xbundles, &xbundle->list_node);
        list_init(&xbundle->xports);
    }

    ovs_assert(xbundle->xbridge);

    free(xbundle->name);
    xbundle->name = xstrdup(name);

    xbundle->vlan_mode = vlan_mode;
    xbundle->vlan = vlan;
    xbundle->trunks = trunks;
    xbundle->use_priority_tags = use_priority_tags;
    xbundle->floodable = floodable;

    if (xbundle->bond != bond) {
        bond_unref(xbundle->bond);
        xbundle->bond = bond_ref(bond);
    }

    if (xbundle->lacp != lacp) {
        lacp_unref(xbundle->lacp);
        xbundle->lacp = lacp_ref(lacp);
    }
}

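/* Removes 'ofbundle' from the xlate module, detaching any xports that still
 * belong to it. */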
void
xlate_bundle_remove(struct ofbundle *ofbundle)
{
    struct xbundle *xbundle = xbundle_lookup(ofbundle);
    struct xport *xport, *next;

    if (!xbundle) {
        return;
    }

    LIST_FOR_EACH_SAFE (xport, next, bundle_node, &xbundle->xports) {
        list_remove(&xport->bundle_node);
        xport->xbundle = NULL;
    }

    hmap_remove(&xbundles, &xbundle->hmap_node);
    list_remove(&xbundle->list_node);
    bond_unref(xbundle->bond);
    lacp_unref(xbundle->lacp);
    free(xbundle->name);
    free(xbundle);
}

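/* Creates the xport for 'ofport' if it does not already exist and copies in
 * the port's current configuration, re-linking it to its patch-port peer and
 * its parent bundle. */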
void
xlate_ofport_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
                 struct ofport_dpif *ofport, ofp_port_t ofp_port,
                 odp_port_t odp_port, const struct netdev *netdev,
                 const struct cfm *cfm, const struct bfd *bfd,
                 struct ofport_dpif *peer, enum ofputil_port_config config,
                 enum stp_state stp_state, bool is_tunnel, bool may_enable)
{
    struct xport *xport = xport_lookup(ofport);

    if (!xport) {
        xport = xzalloc(sizeof *xport);
        xport->ofport = ofport;
        xport->xbridge = xbridge_lookup(ofproto);
        xport->ofp_port = ofp_port;

        hmap_insert(&xports, &xport->hmap_node, hash_pointer(ofport, 0));
        hmap_insert(&xport->xbridge->xports, &xport->ofp_node,
                    hash_ofp_port(xport->ofp_port));
    }

    ovs_assert(xport->ofp_port == ofp_port);

    xport->config = config;
    xport->stp_state = stp_state;
    xport->is_tunnel = is_tunnel;
    xport->may_enable = may_enable;
    xport->odp_port = odp_port;

    if (xport->netdev != netdev) {
        netdev_close(xport->netdev);
        xport->netdev = netdev_ref(netdev);
    }

    if (xport->cfm != cfm) {
        cfm_unref(xport->cfm);
        xport->cfm = cfm_ref(cfm);
    }

    if (xport->bfd != bfd) {
        bfd_unref(xport->bfd);
        xport->bfd = bfd_ref(bfd);
    }

    if (xport->peer) {
        xport->peer->peer = NULL;
    }
    xport->peer = peer ? xport_lookup(peer) : NULL;
    if (xport->peer) {
        xport->peer->peer = xport;
    }

    if (xport->xbundle) {
        list_remove(&xport->bundle_node);
    }
    xport->xbundle = ofbundle ? xbundle_lookup(ofbundle) : NULL;
    if (xport->xbundle) {
        list_insert(&xport->xbundle->xports, &xport->bundle_node);
    }
}

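/* Removes 'ofport' from the xlate module, unlinking it from its patch-port
 * peer and releasing the references it holds. */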
void
xlate_ofport_remove(struct ofport_dpif *ofport)
{
    struct xport *xport = xport_lookup(ofport);

    if (!xport) {
        return;
    }

    if (xport->peer) {
        xport->peer->peer = NULL;
        xport->peer = NULL;
    }

    list_remove(&xport->bundle_node);
    hmap_remove(&xports, &xport->hmap_node);
    hmap_remove(&xport->xbridge->xports, &xport->ofp_node);

    netdev_close(xport->netdev);
    cfm_unref(xport->cfm);
    bfd_unref(xport->bfd);
    free(xport);
}

static struct xbridge *
xbridge_lookup(const struct ofproto_dpif *ofproto)
{
    struct xbridge *xbridge;

    HMAP_FOR_EACH_IN_BUCKET (xbridge, hmap_node, hash_pointer(ofproto, 0),
                             &xbridges) {
        if (xbridge->ofproto == ofproto) {
            return xbridge;
        }
    }
    return NULL;
}

static struct xbundle *
xbundle_lookup(const struct ofbundle *ofbundle)
{
    struct xbundle *xbundle;

    HMAP_FOR_EACH_IN_BUCKET (xbundle, hmap_node, hash_pointer(ofbundle, 0),
                             &xbundles) {
        if (xbundle->ofbundle == ofbundle) {
            return xbundle;
        }
    }
    return NULL;
}

static struct xport *
xport_lookup(struct ofport_dpif *ofport)
{
    struct xport *xport;

    HMAP_FOR_EACH_IN_BUCKET (xport, hmap_node, hash_pointer(ofport, 0),
                             &xports) {
        if (xport->ofport == ofport) {
            return xport;
        }
    }
    return NULL;
}

static struct xport *
get_ofp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
{
    struct xport *xport;

    HMAP_FOR_EACH_IN_BUCKET (xport, ofp_node, hash_ofp_port(ofp_port),
                             &xbridge->xports) {
        if (xport->ofp_port == ofp_port) {
            return xport;
        }
    }
    return NULL;
}

static odp_port_t
ofp_port_to_odp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
{
    const struct xport *xport = get_ofp_port(xbridge, ofp_port);
    return xport ? xport->odp_port : ODPP_NONE;
}

static bool
xbundle_trunks_vlan(const struct xbundle *bundle, uint16_t vlan)
{
    return (bundle->vlan_mode != PORT_VLAN_ACCESS
            && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
}

static bool
xbundle_includes_vlan(const struct xbundle *xbundle, uint16_t vlan)
{
    return vlan == xbundle->vlan || xbundle_trunks_vlan(xbundle, vlan);
}

static mirror_mask_t
xbundle_mirror_out(const struct xbridge *xbridge, struct xbundle *xbundle)
{
    return xbundle != &ofpp_none_bundle
        ? mirror_bundle_out(xbridge->mbridge, xbundle->ofbundle)
        : 0;
}

static mirror_mask_t
xbundle_mirror_src(const struct xbridge *xbridge, struct xbundle *xbundle)
{
    return xbundle != &ofpp_none_bundle
        ? mirror_bundle_src(xbridge->mbridge, xbundle->ofbundle)
        : 0;
}

static mirror_mask_t
xbundle_mirror_dst(const struct xbridge *xbridge, struct xbundle *xbundle)
{
    return xbundle != &ofpp_none_bundle
        ? mirror_bundle_dst(xbridge->mbridge, xbundle->ofbundle)
        : 0;
}

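/* Returns the bundle that a packet received on 'in_port' should be
 * considered to have arrived on: the port's own bundle, 'ofpp_none_bundle'
 * for OFPP_NONE, or NULL (logging a rate-limited warning if 'warn' is true)
 * when the port is unknown or not part of a bundle.  If 'in_xportp' is
 * nonnull, also stores the xport there. */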
static struct xbundle *
lookup_input_bundle(const struct xbridge *xbridge, ofp_port_t in_port,
                    bool warn, struct xport **in_xportp)
{
    struct xport *xport;

    /* Find the port and bundle for the received packet. */
    xport = get_ofp_port(xbridge, in_port);
    if (in_xportp) {
        *in_xportp = xport;
    }
    if (xport && xport->xbundle) {
        return xport->xbundle;
    }

    /* Special-case OFPP_NONE, which a controller may use as the ingress
     * port for traffic that it is sourcing. */
    if (in_port == OFPP_NONE) {
        return &ofpp_none_bundle;
    }

    /* Odd.  A few possible reasons here:
     *
     * - We deleted a port but there are still a few packets queued up
     *   from it.
     *
     * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
     *   we don't know about.
     *
     * - The ofproto client didn't configure the port as part of a bundle.
     *   This is particularly likely to happen if a packet was received on the
     *   port after it was created, but before the client had a chance to
     *   configure its bundle.
     */
    if (warn) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
                     "port %"PRIu16, xbridge->name, in_port);
    }
    return NULL;
}

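/* Composes output actions for each mirror that should see the packet in
 * 'orig_flow': the input bundle's source mirrors plus any output mirrors
 * accumulated so far.  Each selected mirror's copy is sent to the mirror's
 * output bundle if it has one, otherwise flooded onto its output VLAN. */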
static void
add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow)
{
    const struct xbridge *xbridge = ctx->xbridge;
    mirror_mask_t mirrors;
    struct xbundle *in_xbundle;
    uint16_t vlan;
    uint16_t vid;

    mirrors = ctx->xout->mirrors;
    ctx->xout->mirrors = 0;

    in_xbundle = lookup_input_bundle(xbridge, orig_flow->in_port.ofp_port,
                                     ctx->xin->packet != NULL, NULL);
    if (!in_xbundle) {
        return;
    }
    mirrors |= xbundle_mirror_src(xbridge, in_xbundle);

    /* Drop frames on bundles reserved for mirroring. */
    if (xbundle_mirror_out(xbridge, in_xbundle)) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ctx->xbridge->name, in_xbundle->name);
        }
        ofpbuf_clear(&ctx->xout->odp_actions);
        return;
    }

    /* Check VLAN. */
    vid = vlan_tci_to_vid(orig_flow->vlan_tci);
    if (!input_vid_is_valid(vid, in_xbundle, ctx->xin->packet != NULL)) {
        return;
    }
    vlan = input_vid_to_vlan(in_xbundle, vid);

    if (!mirrors) {
        return;
    }

    /* Restore the original packet before adding the mirror actions. */
    ctx->xin->flow = *orig_flow;

    while (mirrors) {
        mirror_mask_t dup_mirrors;
        struct ofbundle *out;
        unsigned long *vlans;
        bool vlan_mirrored;
        bool has_mirror;
        int out_vlan;

        has_mirror = mirror_get(xbridge->mbridge, mirror_mask_ffs(mirrors) - 1,
                                &vlans, &dup_mirrors, &out, &out_vlan);
        ovs_assert(has_mirror);

        if (vlans) {
            ctx->xout->wc.masks.vlan_tci |= htons(VLAN_CFI | VLAN_VID_MASK);
        }
        vlan_mirrored = !vlans || bitmap_is_set(vlans, vlan);
        free(vlans);

        if (!vlan_mirrored) {
            mirrors = zero_rightmost_1bit(mirrors);
            continue;
        }

        mirrors &= ~dup_mirrors;
        ctx->xout->mirrors |= dup_mirrors;
        if (out) {
            struct xbundle *out_xbundle = xbundle_lookup(out);
            if (out_xbundle) {
                output_normal(ctx, out_xbundle, vlan);
            }
        } else if (vlan != out_vlan
                   && !eth_addr_is_reserved(orig_flow->dl_dst)) {
            struct xbundle *xbundle;

            LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
                if (xbundle_includes_vlan(xbundle, out_vlan)
                    && !xbundle_mirror_out(xbridge, xbundle)) {
                    output_normal(ctx, xbundle, out_vlan);
                }
            }
        }
    }
}

/* Given 'vid', the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), and
 * 'in_xbundle', the bundle on which the packet was received, returns the
 * VLAN to which the packet belongs.
 *
 * Both 'vid' and the return value are in the range 0...4095. */
static uint16_t
input_vid_to_vlan(const struct xbundle *in_xbundle, uint16_t vid)
{
    switch (in_xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return in_xbundle->vlan;
        break;

    case PORT_VLAN_TRUNK:
        return vid;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        return vid ? vid : in_xbundle->vlan;

    default:
        NOT_REACHED();
    }
}

/* Checks whether a packet with the given 'vid' may ingress on 'in_xbundle'.
 * If so, returns true.  Otherwise, returns false and, if 'warn' is true, logs
 * a warning.
 *
 * 'vid' should be the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), in the range
 * 0...4095. */
static bool
input_vid_is_valid(uint16_t vid, struct xbundle *in_xbundle, bool warn)
{
    /* Allow any VID on the OFPP_NONE port. */
    if (in_xbundle == &ofpp_none_bundle) {
        return true;
    }

    switch (in_xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        if (vid) {
            if (warn) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "dropping VLAN %"PRIu16" tagged "
                             "packet received on port %s configured as VLAN "
                             "%"PRIu16" access port", vid, in_xbundle->name,
                             in_xbundle->vlan);
            }
            return false;
        }
        return true;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        if (!vid) {
            /* Port must always carry its native VLAN. */
            return true;
        }
        /* Fall through. */
    case PORT_VLAN_TRUNK:
        if (!xbundle_includes_vlan(in_xbundle, vid)) {
            if (warn) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "dropping VLAN %"PRIu16" packet "
                             "received on port %s not configured for trunking "
                             "VLAN %"PRIu16, vid, in_xbundle->name, vid);
            }
            return false;
        }
        return true;

    default:
        NOT_REACHED();
    }
}

/* Given 'vlan', the VLAN that a packet belongs to, and
 * 'out_xbundle', a bundle on which the packet is to be output, returns the VID
 * that should be included in the 802.1Q header.  (If the return value is 0,
 * then the 802.1Q header should only be included in the packet if there is a
 * nonzero PCP.)
 *
 * Both 'vlan' and the return value are in the range 0...4095. */
static uint16_t
output_vlan_to_vid(const struct xbundle *out_xbundle, uint16_t vlan)
{
    switch (out_xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return 0;

    case PORT_VLAN_TRUNK:
    case PORT_VLAN_NATIVE_TAGGED:
        return vlan;

    case PORT_VLAN_NATIVE_UNTAGGED:
        return vlan == out_xbundle->vlan ? 0 : vlan;

    default:
        NOT_REACHED();
    }
}

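/* Outputs the packet on 'out_xbundle' within VLAN 'vlan', rewriting the
 * 802.1Q header as needed.  On a bonded bundle the bond chooses the output
 * slave; the packet is dropped if the bundle has no enabled slave. */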
static void
output_normal(struct xlate_ctx *ctx, const struct xbundle *out_xbundle,
              uint16_t vlan)
{
    ovs_be16 *flow_tci = &ctx->xin->flow.vlan_tci;
    uint16_t vid;
    ovs_be16 tci, old_tci;
    struct xport *xport;

    vid = output_vlan_to_vid(out_xbundle, vlan);
    if (list_is_empty(&out_xbundle->xports)) {
        /* Partially configured bundle with no slaves.  Drop the packet. */
        return;
    } else if (!out_xbundle->bond) {
        xport = CONTAINER_OF(list_front(&out_xbundle->xports), struct xport,
                             bundle_node);
    } else {
        struct ofport_dpif *ofport;

        ofport = bond_choose_output_slave(out_xbundle->bond, &ctx->xin->flow,
                                          &ctx->xout->wc, vid,
                                          &ctx->xout->tags);
        xport = ofport ? xport_lookup(ofport) : NULL;

        if (!xport) {
            /* No slaves enabled, so drop packet. */
            return;
        }
    }

    old_tci = *flow_tci;
    tci = htons(vid);
    if (tci || out_xbundle->use_priority_tags) {
        tci |= *flow_tci & htons(VLAN_PCP_MASK);
        if (tci) {
            tci |= htons(VLAN_CFI);
        }
    }
    *flow_tci = tci;

    compose_output_action(ctx, xport->ofp_port);
    *flow_tci = old_tci;
}

/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
 * migration.  Older Citrix-patched Linux DomU used gratuitous ARP replies to
 * indicate this; newer upstream kernels use gratuitous ARP requests. */
static bool
is_gratuitous_arp(const struct flow *flow, struct flow_wildcards *wc)
{
    if (flow->dl_type != htons(ETH_TYPE_ARP)) {
        return false;
    }

    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
    if (!eth_addr_is_broadcast(flow->dl_dst)) {
        return false;
    }

    memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
    if (flow->nw_proto == ARP_OP_REPLY) {
        return true;
    } else if (flow->nw_proto == ARP_OP_REQUEST) {
        memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
        memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);

        return flow->nw_src == flow->nw_dst;
    } else {
        return false;
    }
}

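/* Updates the MAC learning table for the source MAC of 'flow', as received
 * on 'in_xbundle' in 'vlan'.  Gratuitous ARPs get special treatment so that
 * copies reflected back over bond slaves are not learned from. */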
static void
update_learning_table(const struct xbridge *xbridge,
                      const struct flow *flow, struct flow_wildcards *wc,
                      int vlan, struct xbundle *in_xbundle)
{
    struct mac_entry *mac;

    /* Don't learn the OFPP_NONE port. */
    if (in_xbundle == &ofpp_none_bundle) {
        return;
    }

    if (!mac_learning_may_learn(xbridge->ml, flow->dl_src, vlan)) {
        return;
    }

    mac = mac_learning_insert(xbridge->ml, flow->dl_src, vlan);
    if (is_gratuitous_arp(flow, wc)) {
        /* We don't want to learn from gratuitous ARP packets that are
         * reflected back over bond slaves so we lock the learning table. */
        if (!in_xbundle->bond) {
            mac_entry_set_grat_arp_lock(mac);
        } else if (mac_entry_is_grat_arp_locked(mac)) {
            return;
        }
    }

    if (mac_entry_is_new(mac) || mac->port.p != in_xbundle->ofbundle) {
        /* The log messages here could actually be useful in debugging,
         * so keep the rate limit relatively high. */
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
        VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
                    "on port %s in VLAN %d",
                    xbridge->name, ETH_ADDR_ARGS(flow->dl_src),
                    in_xbundle->name, vlan);

        mac->port.p = in_xbundle->ofbundle;
        mac_learning_changed(xbridge->ml, mac);
    }
}

/* Determines whether packets in 'flow' within 'xbridge' should be forwarded or
 * dropped.  Returns true if they may be forwarded, false if they should be
 * dropped.
 *
 * 'in_port' must be the xport that corresponds to flow->in_port.
 * 'in_port' must be part of a bundle (e.g. in_port->xbundle must be nonnull).
 *
 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
 * returned by input_vid_to_vlan().  It must be a valid VLAN for 'in_port', as
 * checked by input_vid_is_valid().
 *
 * May also add tags to 'ctx->xout->tags', although the current implementation
 * only does so in one special case.
 */
static bool
is_admissible(struct xlate_ctx *ctx, struct xport *in_port,
              uint16_t vlan)
{
    struct xbundle *in_xbundle = in_port->xbundle;
    const struct xbridge *xbridge = ctx->xbridge;
    struct flow *flow = &ctx->xin->flow;

    /* Drop frames for reserved multicast addresses
     * only if forward_bpdu option is absent. */
    if (!xbridge->forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
        xlate_report(ctx, "packet has reserved destination MAC, dropping");
        return false;
    }

    if (in_xbundle->bond) {
        struct mac_entry *mac;

        switch (bond_check_admissibility(in_xbundle->bond, in_port->ofport,
                                         flow->dl_dst, &ctx->xout->tags)) {
        case BV_ACCEPT:
            break;

        case BV_DROP:
            xlate_report(ctx, "bonding refused admissibility, dropping");
            return false;

        case BV_DROP_IF_MOVED:
            mac = mac_learning_lookup(xbridge->ml, flow->dl_src, vlan, NULL);
            if (mac && mac->port.p != in_xbundle->ofbundle &&
                (!is_gratuitous_arp(flow, &ctx->xout->wc)
                 || mac_entry_is_grat_arp_locked(mac))) {
                xlate_report(ctx, "SLB bond thinks this packet looped back, "
                             "dropping");
                return false;
            }
            break;
        }
    }

    return true;
}

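/* Implements the OFPP_NORMAL action: learning-switch behavior.  Validates
 * the input bundle and VLAN, learns the source MAC, and outputs to the
 * learned destination bundle, or floods the VLAN when the destination is
 * unknown. */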
static void
xlate_normal(struct xlate_ctx *ctx)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;
    struct xbundle *in_xbundle;
    struct xport *in_port;
    struct mac_entry *mac;
    uint16_t vlan;
    uint16_t vid;

    ctx->xout->has_normal = true;

    memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
    wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);

    in_xbundle = lookup_input_bundle(ctx->xbridge, flow->in_port.ofp_port,
                                     ctx->xin->packet != NULL, &in_port);
    if (!in_xbundle) {
        xlate_report(ctx, "no input bundle, dropping");
        return;
    }

    /* Drop malformed frames. */
    if (flow->dl_type == htons(ETH_TYPE_VLAN) &&
        !(flow->vlan_tci & htons(VLAN_CFI))) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet with partial "
                         "VLAN tag received on port %s",
                         ctx->xbridge->name, in_xbundle->name);
        }
        xlate_report(ctx, "partial VLAN tag, dropping");
        return;
    }

    /* Drop frames on bundles reserved for mirroring. */
    if (xbundle_mirror_out(ctx->xbridge, in_xbundle)) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ctx->xbridge->name, in_xbundle->name);
        }
        xlate_report(ctx, "input port is mirror output port, dropping");
        return;
    }

    /* Check VLAN. */
    vid = vlan_tci_to_vid(flow->vlan_tci);
    if (!input_vid_is_valid(vid, in_xbundle, ctx->xin->packet != NULL)) {
        xlate_report(ctx, "disallowed VLAN VID for this input port, dropping");
        return;
    }
    vlan = input_vid_to_vlan(in_xbundle, vid);

    /* Check other admissibility requirements. */
    if (in_port && !is_admissible(ctx, in_port, vlan)) {
        return;
    }

    /* Learn source MAC. */
    if (ctx->xin->may_learn) {
        update_learning_table(ctx->xbridge, flow, wc, vlan, in_xbundle);
    }

    /* Determine output bundle. */
    mac = mac_learning_lookup(ctx->xbridge->ml, flow->dl_dst, vlan,
                              &ctx->xout->tags);
    if (mac) {
        struct xbundle *mac_xbundle = xbundle_lookup(mac->port.p);
        if (mac_xbundle && mac_xbundle != in_xbundle) {
            xlate_report(ctx, "forwarding to learned port");
            output_normal(ctx, mac_xbundle, vlan);
        } else if (!mac_xbundle) {
            xlate_report(ctx, "learned port is unknown, dropping");
        } else {
            xlate_report(ctx, "learned port is input port, dropping");
        }
    } else {
        struct xbundle *xbundle;

        xlate_report(ctx, "no learned MAC for destination, flooding");
        LIST_FOR_EACH (xbundle, list_node, &ctx->xbridge->xbundles) {
            if (xbundle != in_xbundle
                && xbundle_includes_vlan(xbundle, vlan)
                && xbundle->floodable
                && !xbundle_mirror_out(ctx->xbridge, xbundle)) {
                output_normal(ctx, xbundle, vlan);
            }
        }
        ctx->xout->nf_output_iface = NF_OUT_FLOOD;
    }
}

/* Compose SAMPLE action for sFlow or IPFIX.  The given probability is
 * the number of packets out of UINT32_MAX to sample.  The given
 * cookie is passed back in the callback for each sampled packet.
 */
static size_t
compose_sample_action(const struct xbridge *xbridge,
                      struct ofpbuf *odp_actions,
                      const struct flow *flow,
                      const uint32_t probability,
                      const union user_action_cookie *cookie,
                      const size_t cookie_size)
{
    size_t sample_offset, actions_offset;
    int cookie_offset;

    sample_offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SAMPLE);

    nl_msg_put_u32(odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);

    actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);
    cookie_offset = put_userspace_action(xbridge->ofproto, odp_actions, flow,
                                         cookie, cookie_size);

    nl_msg_end_nested(odp_actions, actions_offset);
    nl_msg_end_nested(odp_actions, sample_offset);
    return cookie_offset;
}

static void
compose_sflow_cookie(const struct xbridge *xbridge, ovs_be16 vlan_tci,
                     odp_port_t odp_port, unsigned int n_outputs,
                     union user_action_cookie *cookie)
{
    int ifindex;

    cookie->type = USER_ACTION_COOKIE_SFLOW;
    cookie->sflow.vlan_tci = vlan_tci;

    /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
     * port information") for the interpretation of cookie->output. */
    switch (n_outputs) {
    case 0:
        /* 0x40000000 | 256 means "packet dropped for unknown reason". */
        cookie->sflow.output = 0x40000000 | 256;
        break;

    case 1:
        ifindex = dpif_sflow_odp_port_to_ifindex(xbridge->sflow, odp_port);
        if (ifindex) {
            cookie->sflow.output = ifindex;
            break;
        }
        /* Fall through. */
    default:
        /* 0x80000000 means "multiple output ports". */
        cookie->sflow.output = 0x80000000 | n_outputs;
        break;
    }
}

/* Compose SAMPLE action for sFlow bridge sampling. */
static size_t
compose_sflow_action(const struct xbridge *xbridge,
                     struct ofpbuf *odp_actions,
                     const struct flow *flow,
                     odp_port_t odp_port)
{
    uint32_t probability;
    union user_action_cookie cookie;

    if (!xbridge->sflow || flow->in_port.ofp_port == OFPP_NONE) {
        return 0;
    }

    probability = dpif_sflow_get_probability(xbridge->sflow);
    compose_sflow_cookie(xbridge, htons(0), odp_port,
                         odp_port == ODPP_NONE ? 0 : 1, &cookie);

    return compose_sample_action(xbridge, odp_actions, flow, probability,
                                 &cookie, sizeof cookie.sflow);
}

static void
compose_flow_sample_cookie(uint16_t probability, uint32_t collector_set_id,
                           uint32_t obs_domain_id, uint32_t obs_point_id,
                           union user_action_cookie *cookie)
{
    cookie->type = USER_ACTION_COOKIE_FLOW_SAMPLE;
    cookie->flow_sample.probability = probability;
    cookie->flow_sample.collector_set_id = collector_set_id;
    cookie->flow_sample.obs_domain_id = obs_domain_id;
    cookie->flow_sample.obs_point_id = obs_point_id;
}

static void
compose_ipfix_cookie(union user_action_cookie *cookie)
{
    cookie->type = USER_ACTION_COOKIE_IPFIX;
}

/* Compose SAMPLE action for IPFIX bridge sampling. */
static void
compose_ipfix_action(const struct xbridge *xbridge,
                     struct ofpbuf *odp_actions,
                     const struct flow *flow)
{
    uint32_t probability;
    union user_action_cookie cookie;

    if (!xbridge->ipfix || flow->in_port.ofp_port == OFPP_NONE) {
        return;
    }

    probability = dpif_ipfix_get_bridge_exporter_probability(xbridge->ipfix);
    compose_ipfix_cookie(&cookie);

    compose_sample_action(xbridge, odp_actions, flow, probability,
                          &cookie, sizeof cookie.ipfix);
}

/* SAMPLE action for sFlow must be the first action in any given list of
 * actions.  At this point we do not have all the information required to
 * build it, so we build the sample action as completely as possible here and
 * patch it up later in fix_sflow_action(). */
static void
add_sflow_action(struct xlate_ctx *ctx)
{
    ctx->user_cookie_offset = compose_sflow_action(ctx->xbridge,
                                                   &ctx->xout->odp_actions,
                                                   &ctx->xin->flow, ODPP_NONE);
    ctx->sflow_odp_port = 0;
    ctx->sflow_n_outputs = 0;
}

/* SAMPLE action for IPFIX must be 1st or 2nd action in any given list
 * of actions, possibly after the SAMPLE action for sFlow. */
static void
add_ipfix_action(struct xlate_ctx *ctx)
{
    compose_ipfix_action(ctx->xbridge, &ctx->xout->odp_actions,
                         &ctx->xin->flow);
}

/* Fix SAMPLE action according to data collected while composing ODP actions.
 * We need to fix the SAMPLE action's OVS_SAMPLE_ATTR_ACTIONS attribute, i.e.
 * the nested USERSPACE action's user-cookie, which is required for sFlow. */
static void
fix_sflow_action(struct xlate_ctx *ctx)
{
    const struct flow *base = &ctx->base_flow;
    union user_action_cookie *cookie;

    if (!ctx->user_cookie_offset) {
        return;
    }

    cookie = ofpbuf_at(&ctx->xout->odp_actions, ctx->user_cookie_offset,
                       sizeof cookie->sflow);
    ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);

    compose_sflow_cookie(ctx->xbridge, base->vlan_tci,
                         ctx->sflow_odp_port, ctx->sflow_n_outputs, cookie);
}

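/* Checks whether 'flow' on 'xport' belongs to one of the connectivity
 * protocols handled outside the flow table (CFM, BFD, LACP, STP).  If so,
 * hands 'packet' (when nonnull) to the protocol implementation and returns
 * the corresponding slow-path reason; otherwise returns 0. */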
static enum slow_path_reason
process_special(struct xlate_ctx *ctx, const struct flow *flow,
                const struct xport *xport, const struct ofpbuf *packet)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    const struct xbridge *xbridge = ctx->xbridge;

    if (!xport) {
        return 0;
    } else if (xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc)) {
        if (packet) {
            cfm_process_heartbeat(xport->cfm, packet);
        }
        return SLOW_CFM;
    } else if (xport->bfd && bfd_should_process_flow(xport->bfd, flow, wc)) {
        if (packet) {
            bfd_process_packet(xport->bfd, flow, packet);
        }
        return SLOW_BFD;
    } else if (xport->xbundle && xport->xbundle->lacp
               && flow->dl_type == htons(ETH_TYPE_LACP)) {
        if (packet) {
            lacp_process_packet(xport->xbundle->lacp, xport->ofport, packet);
        }
        return SLOW_LACP;
    } else if (xbridge->has_stp && stp_should_process_flow(flow, wc)) {
        if (packet) {
            stp_process_packet(xport->ofport, packet);
        }
        return SLOW_STP;
    } else {
        return 0;
    }
}

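/* Composes the datapath actions needed to output to 'ofp_port', handling the
 * special cases: patch ports are translated by resubmitting the flow into
 * the peer bridge, tunnel ports consult tnl_port_send(), and VLAN splinters
 * may redirect the output to a VLAN device.  If 'check_stp' is true, output
 * is skipped on ports that are not in an STP forwarding state. */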
static void
compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
                        bool check_stp)
{
    const struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;
    ovs_be16 flow_vlan_tci;
    uint32_t flow_skb_mark;
    uint8_t flow_nw_tos;
    odp_port_t out_port, odp_port;
    uint8_t dscp;

    /* If 'struct flow' gets additional metadata, we'll need to zero it out
     * before traversing a patch port. */
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 20);

    if (!xport) {
        xlate_report(ctx, "Nonexistent output port");
        return;
    } else if (xport->config & OFPUTIL_PC_NO_FWD) {
        xlate_report(ctx, "OFPPC_NO_FWD set, skipping output");
        return;
    } else if (check_stp && !stp_forward_in_state(xport->stp_state)) {
        xlate_report(ctx, "STP not in forwarding state, skipping output");
        return;
    }

    if (mbridge_has_mirrors(ctx->xbridge->mbridge) && xport->xbundle) {
        ctx->xout->mirrors |= xbundle_mirror_dst(xport->xbundle->xbridge,
                                                 xport->xbundle);
    }

    if (xport->peer) {
        const struct xport *peer = xport->peer;
        struct flow old_flow = ctx->xin->flow;
        enum slow_path_reason special;

        ctx->xbridge = peer->xbridge;
        flow->in_port.ofp_port = peer->ofp_port;
        flow->metadata = htonll(0);
        memset(&flow->tunnel, 0, sizeof flow->tunnel);
        memset(flow->regs, 0, sizeof flow->regs);

        special = process_special(ctx, &ctx->xin->flow, peer,
                                  ctx->xin->packet);
        if (special) {
            ctx->xout->slow = special;
        } else if (may_receive(peer, ctx)) {
            if (stp_forward_in_state(peer->stp_state)) {
                xlate_table_action(ctx, flow->in_port.ofp_port, 0, true);
            } else {
                /* Forwarding is disabled by STP.  Let OFPP_NORMAL and the
                 * learning action look at the packet, then drop it. */
                struct flow old_base_flow = ctx->base_flow;
                size_t old_size = ctx->xout->odp_actions.size;
                mirror_mask_t old_mirrors = ctx->xout->mirrors;
                xlate_table_action(ctx, flow->in_port.ofp_port, 0, true);
                ctx->xout->mirrors = old_mirrors;
                ctx->base_flow = old_base_flow;
                ctx->xout->odp_actions.size = old_size;
            }
        }

        ctx->xin->flow = old_flow;
        ctx->xbridge = xport->xbundle->xbridge;

        if (ctx->xin->resubmit_stats) {
            netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
            netdev_vport_inc_rx(peer->netdev, ctx->xin->resubmit_stats);
        }

        return;
    }

    flow_vlan_tci = flow->vlan_tci;
    flow_skb_mark = flow->skb_mark;
    flow_nw_tos = flow->nw_tos;

    if (ofproto_dpif_dscp_from_priority(xport->ofport, flow->skb_priority,
                                        &dscp)) {
        wc->masks.nw_tos |= IP_ECN_MASK;
        flow->nw_tos &= ~IP_DSCP_MASK;
        flow->nw_tos |= dscp;
    }

    if (xport->is_tunnel) {
        /* Save tunnel metadata so that changes made due to
         * the Logical (tunnel) Port are not visible for any further
         * matches, while explicit set actions on tunnel metadata are.
         */
        struct flow_tnl flow_tnl = flow->tunnel;
        odp_port = tnl_port_send(xport->ofport, flow, &ctx->xout->wc);
        if (odp_port == ODPP_NONE) {
            xlate_report(ctx, "Tunneling decided against output");
            goto out; /* restore flow_nw_tos */
        }
        if (flow->tunnel.ip_dst == ctx->orig_tunnel_ip_dst) {
            xlate_report(ctx, "Not tunneling to our own address");
            goto out; /* restore flow_nw_tos */
        }
        if (ctx->xin->resubmit_stats) {
            netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
        }
        out_port = odp_port;
        commit_odp_tunnel_action(flow, &ctx->base_flow,
                                 &ctx->xout->odp_actions);
        flow->tunnel = flow_tnl; /* Restore tunnel metadata */
    } else {
        ofp_port_t vlandev_port;

        odp_port = xport->odp_port;
        if (ofproto_has_vlan_splinters(ctx->xbridge->ofproto)) {
            wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
        }
        vlandev_port = vsp_realdev_to_vlandev(ctx->xbridge->ofproto, ofp_port,
                                              flow->vlan_tci);
        if (vlandev_port == ofp_port) {
            out_port = odp_port;
        } else {
            out_port = ofp_port_to_odp_port(ctx->xbridge, vlandev_port);
            flow->vlan_tci = htons(0);
        }
        flow->skb_mark &= ~IPSEC_MARK;
    }

    if (out_port != ODPP_NONE) {
        commit_odp_actions(flow, &ctx->base_flow,
                           &ctx->xout->odp_actions, &ctx->xout->wc);
        nl_msg_put_odp_port(&ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT,
                            out_port);

        ctx->sflow_odp_port = odp_port;
        ctx->sflow_n_outputs++;
        ctx->xout->nf_output_iface = ofp_port;
    }

 out:
    /* Restore flow */
    flow->vlan_tci = flow_vlan_tci;
    flow->skb_mark = flow_skb_mark;
    flow->nw_tos = flow_nw_tos;
}

static void
compose_output_action(struct xlate_ctx *ctx, ofp_port_t ofp_port)
{
    compose_output_action__(ctx, ofp_port, true);
}

/* Common rule processing in one place to avoid duplicating code. */
static struct rule_dpif *
ctx_rule_hooks(struct xlate_ctx *ctx, struct rule_dpif *rule,
               bool may_packet_in)
{
    if (ctx->xin->resubmit_hook) {
        ctx->xin->resubmit_hook(ctx->xin, rule, ctx->recurse);
    }
    if (rule == NULL && may_packet_in) {
        /* XXX
         * check if table configuration flags
         * OFPTC_TABLE_MISS_CONTROLLER, default.
         * OFPTC_TABLE_MISS_CONTINUE,
         * OFPTC_TABLE_MISS_DROP
         * When OF1.0, OFPTC_TABLE_MISS_CONTINUE is used. What to do?
         */
        rule = rule_dpif_miss_rule(ctx->xbridge->ofproto, &ctx->xin->flow);
    }
    if (rule && ctx->xin->resubmit_stats) {
        rule_credit_stats(rule, ctx->xin->resubmit_stats);
    }
    return rule;
}

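/* Looks up 'ctx->xin->flow' in OpenFlow table 'table_id', temporarily
 * substituting 'in_port' as the input port, and translates the actions of
 * the matching rule (or of the miss rule, if appropriate), recursing no
 * more than MAX_RESUBMIT_RECURSION levels deep. */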
static void
xlate_table_action(struct xlate_ctx *ctx,
                   ofp_port_t in_port, uint8_t table_id, bool may_packet_in)
{
    if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
        struct rule_dpif *rule;
        ofp_port_t old_in_port = ctx->xin->flow.in_port.ofp_port;
        uint8_t old_table_id = ctx->table_id;

        ctx->table_id = table_id;

        /* Look up a flow with 'in_port' as the input port. */
        ctx->xin->flow.in_port.ofp_port = in_port;
        rule = rule_dpif_lookup_in_table(ctx->xbridge->ofproto,
                                         &ctx->xin->flow, &ctx->xout->wc,
                                         table_id);

        ctx->xout->tags |= calculate_flow_tag(ctx->xbridge->ofproto,
                                              &ctx->xin->flow, ctx->table_id,
                                              rule);

        /* Restore the original input port.  Otherwise OFPP_NORMAL and
         * OFPP_IN_PORT will have surprising behavior. */
        ctx->xin->flow.in_port.ofp_port = old_in_port;

        rule = ctx_rule_hooks(ctx, rule, may_packet_in);

        if (rule) {
            struct rule_dpif *old_rule = ctx->rule;

            ctx->recurse++;
            ctx->rule = rule;
            do_xlate_actions(rule->up.ofpacts, rule->up.ofpacts_len, ctx);
            ctx->rule = old_rule;
            ctx->recurse--;
        }

        ctx->table_id = old_table_id;
    } else {
        static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1);

        VLOG_ERR_RL(&recurse_rl, "resubmit actions recursed over %d times",
                    MAX_RESUBMIT_RECURSION);
        ctx->max_resubmit_trigger = true;
    }
}

static void
xlate_ofpact_resubmit(struct xlate_ctx *ctx,
                      const struct ofpact_resubmit *resubmit)
{
    ofp_port_t in_port;
    uint8_t table_id;

    in_port = resubmit->in_port;
    if (in_port == OFPP_IN_PORT) {
        in_port = ctx->xin->flow.in_port.ofp_port;
    }

    table_id = resubmit->table_id;
    if (table_id == 255) {
        table_id = ctx->table_id;
    }

    xlate_table_action(ctx, in_port, table_id, false);
}

static void
flood_packets(struct xlate_ctx *ctx, bool all)
{
    const struct xport *xport;

    HMAP_FOR_EACH (xport, ofp_node, &ctx->xbridge->xports) {
        if (xport->ofp_port == ctx->xin->flow.in_port.ofp_port) {
            continue;
        }

        if (all) {
            compose_output_action__(ctx, xport->ofp_port, false);
        } else if (!(xport->config & OFPUTIL_PC_NO_FLOOD)) {
            compose_output_action(ctx, xport->ofp_port);
        }
    }

    ctx->xout->nf_output_iface = NF_OUT_FLOOD;
}

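/* Marks the translation as slow-pathed to the controller.  When an actual
 * packet is being translated, also executes the datapath actions composed so
 * far on a clone of the packet and sends the result to the controller as a
 * packet-in with the given 'reason' and 'controller_id'. */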
static void
execute_controller_action(struct xlate_ctx *ctx, int len,
                          enum ofp_packet_in_reason reason,
                          uint16_t controller_id)
{
    struct ofputil_packet_in pin;
    struct ofpbuf *packet;
    struct flow key;

    ovs_assert(!ctx->xout->slow || ctx->xout->slow == SLOW_CONTROLLER);
    ctx->xout->slow = SLOW_CONTROLLER;
    if (!ctx->xin->packet) {
        return;
    }

    packet = ofpbuf_clone(ctx->xin->packet);

    key.skb_priority = 0;
    key.skb_mark = 0;
    memset(&key.tunnel, 0, sizeof key.tunnel);

    commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
                       &ctx->xout->odp_actions, &ctx->xout->wc);

    odp_execute_actions(NULL, packet, &key, ctx->xout->odp_actions.data,
                        ctx->xout->odp_actions.size, NULL, NULL);

    pin.packet = packet->data;
    pin.packet_len = packet->size;
    pin.reason = reason;
    pin.controller_id = controller_id;
    pin.table_id = ctx->table_id;
    pin.cookie = ctx->rule ? ctx->rule->up.flow_cookie : 0;

    pin.send_len = len;
    flow_get_metadata(&ctx->xin->flow, &pin.fmd);

    ofproto_dpif_send_packet_in(ctx->xbridge->ofproto, &pin);
    ofpbuf_delete(packet);
}

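/* Pushes an MPLS label stack entry with ethertype 'eth_type' onto the flow:
 * if the flow already carries MPLS, the tracked LSE has its bottom-of-stack
 * bit cleared and the depth is incremented; otherwise an explicit-null LSE
 * is synthesized from the IP TOS and TTL fields. */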
static void
compose_mpls_push_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;

    ovs_assert(eth_type_mpls(eth_type));

    memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
    memset(&wc->masks.mpls_depth, 0xff, sizeof wc->masks.mpls_depth);

    if (flow->mpls_depth) {
        flow->mpls_lse &= ~htonl(MPLS_BOS_MASK);
        flow->mpls_depth++;
    } else {
        ovs_be32 label;
        uint8_t tc, ttl;

        if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
            label = htonl(0x2); /* IPV6 Explicit Null. */
        } else {
            label = htonl(0x0); /* IPV4 Explicit Null. */
        }
        wc->masks.nw_tos |= IP_DSCP_MASK;
        wc->masks.nw_ttl = 0xff;
        tc = (flow->nw_tos & IP_DSCP_MASK) >> 2;
        ttl = flow->nw_ttl ? flow->nw_ttl : 0x40;
        flow->mpls_lse = set_mpls_lse_values(ttl, tc, 1, label);
        flow->mpls_depth = 1;
    }
    flow->dl_type = eth_type;
}

static void
compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;

    ovs_assert(eth_type_mpls(ctx->xin->flow.dl_type));
    ovs_assert(!eth_type_mpls(eth_type));

    memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
    memset(&wc->masks.mpls_depth, 0xff, sizeof wc->masks.mpls_depth);

    if (flow->mpls_depth) {
        flow->mpls_depth--;
        flow->mpls_lse = htonl(0);
        if (!flow->mpls_depth) {
            flow->dl_type = eth_type;
        }
    }
}

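/* Decrements the IP TTL of the flow.  Returns false if translation should
 * continue.  When the TTL would expire, sends a packet-in to each controller
 * listed in 'ids' and returns true so that processing of the current table
 * stops. */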
static bool
compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
{
    struct flow *flow = &ctx->xin->flow;

    if (!is_ip_any(flow)) {
        return false;
    }

    ctx->xout->wc.masks.nw_ttl = 0xff;
    if (flow->nw_ttl > 1) {
        flow->nw_ttl--;
        return false;
    } else {
        size_t i;

        for (i = 0; i < ids->n_controllers; i++) {
            execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
                                      ids->cnt_ids[i]);
        }

        /* Stop processing for current table. */
        return true;
    }
}

static bool
compose_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
{
    if (!eth_type_mpls(ctx->xin->flow.dl_type)) {
        return true;
    }

    set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse, ttl);
    return false;
}

static bool
compose_dec_mpls_ttl_action(struct xlate_ctx *ctx)
{
    struct flow *flow = &ctx->xin->flow;
    uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse);
    struct flow_wildcards *wc = &ctx->xout->wc;

    memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);

    if (!eth_type_mpls(flow->dl_type)) {
        return false;
    }

    if (ttl > 1) {
        ttl--;
        set_mpls_lse_ttl(&flow->mpls_lse, ttl);
        return false;
    } else {
        execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0);

        /* Stop processing for current table. */
        return true;
    }
}

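/* Translates an OpenFlow "output" action to 'port', dispatching the reserved
 * ports (OFPP_IN_PORT, OFPP_TABLE, OFPP_NORMAL, OFPP_FLOOD, OFPP_ALL,
 * OFPP_CONTROLLER, OFPP_NONE) to the appropriate helpers, then updates the
 * NetFlow output interface. */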
static void
xlate_output_action(struct xlate_ctx *ctx,
                    ofp_port_t port, uint16_t max_len, bool may_packet_in)
{
    ofp_port_t prev_nf_output_iface = ctx->xout->nf_output_iface;

    ctx->xout->nf_output_iface = NF_OUT_DROP;

    switch (port) {
    case OFPP_IN_PORT:
        compose_output_action(ctx, ctx->xin->flow.in_port.ofp_port);
        break;
    case OFPP_TABLE:
        xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
                           0, may_packet_in);
        break;
    case OFPP_NORMAL:
        xlate_normal(ctx);
        break;
    case OFPP_FLOOD:
        flood_packets(ctx, false);
        break;
    case OFPP_ALL:
        flood_packets(ctx, true);
        break;
    case OFPP_CONTROLLER:
        execute_controller_action(ctx, max_len, OFPR_ACTION, 0);
        break;
    case OFPP_NONE:
        break;
    case OFPP_LOCAL:
    default:
        if (port != ctx->xin->flow.in_port.ofp_port) {
            compose_output_action(ctx, port);
        } else {
            xlate_report(ctx, "skipping output to input port");
        }
        break;
    }

    if (prev_nf_output_iface == NF_OUT_FLOOD) {
        ctx->xout->nf_output_iface = NF_OUT_FLOOD;
    } else if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
        ctx->xout->nf_output_iface = prev_nf_output_iface;
    } else if (prev_nf_output_iface != NF_OUT_DROP &&
               ctx->xout->nf_output_iface != NF_OUT_FLOOD) {
        ctx->xout->nf_output_iface = NF_OUT_MULTI;
    }
}

static void
xlate_output_reg_action(struct xlate_ctx *ctx,
                        const struct ofpact_output_reg *or)
{
    uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow);
    if (port <= UINT16_MAX) {
        union mf_subvalue value;

        memset(&value, 0xff, sizeof value);
        mf_write_subfield_flow(&or->src, &value, &ctx->xout->wc.masks);
        xlate_output_action(ctx, u16_to_ofp(port),
                            or->max_len, false);
    }
}

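/* Implements the OpenFlow "enqueue" action: maps 'enqueue->queue' to a
 * datapath priority, composes the output with that skb_priority temporarily
 * in place, and updates the NetFlow output interface.  Falls back to a
 * plain output action if the queue cannot be mapped. */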
static void
xlate_enqueue_action(struct xlate_ctx *ctx,
                     const struct ofpact_enqueue *enqueue)
{
    ofp_port_t ofp_port = enqueue->port;
    uint32_t queue_id = enqueue->queue;
    uint32_t flow_priority, priority;
    int error;

    /* Translate queue to priority. */
    error = ofproto_dpif_queue_to_priority(ctx->xbridge->ofproto, queue_id,
                                           &priority);
    if (error) {
        /* Fall back to ordinary output action. */
        xlate_output_action(ctx, enqueue->port, 0, false);
        return;
    }

    /* Check output port. */
    if (ofp_port == OFPP_IN_PORT) {
        ofp_port = ctx->xin->flow.in_port.ofp_port;
    } else if (ofp_port == ctx->xin->flow.in_port.ofp_port) {
        return;
    }

    /* Add datapath actions. */
    flow_priority = ctx->xin->flow.skb_priority;
    ctx->xin->flow.skb_priority = priority;
    compose_output_action(ctx, ofp_port);
    ctx->xin->flow.skb_priority = flow_priority;

    /* Update NetFlow output port. */
    if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
        ctx->xout->nf_output_iface = ofp_port;
    } else if (ctx->xout->nf_output_iface != NF_OUT_FLOOD) {
        ctx->xout->nf_output_iface = NF_OUT_MULTI;
    }
}

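/* Implements the "set_queue" action by translating 'queue_id' into a
 * datapath priority and storing it as the flow's skb_priority. */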
static void
xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id)
{
    uint32_t skb_priority;

    if (!ofproto_dpif_queue_to_priority(ctx->xbridge->ofproto, queue_id,
                                        &skb_priority)) {
        ctx->xin->flow.skb_priority = skb_priority;
    } else {
        /* Couldn't translate queue to a priority.  Nothing to do.  A warning
         * has already been logged. */
    }
}

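/* Callback for bundle_execute(): reports whether 'ofp_port' may be chosen
 * as a bundle slave on the bridge 'xbridge_'.  Reserved ports other than
 * OFPP_CONTROLLER are always considered enabled. */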
static bool
slave_enabled_cb(ofp_port_t ofp_port, void *xbridge_)
{
    const struct xbridge *xbridge = xbridge_;
    struct xport *port;

    switch (ofp_port) {
    case OFPP_IN_PORT:
    case OFPP_TABLE:
    case OFPP_NORMAL:
    case OFPP_FLOOD:
    case OFPP_ALL:
    case OFPP_NONE:
        return true;
    case OFPP_CONTROLLER: /* Not supported by the bundle action. */
        return false;
    default:
        port = get_ofp_port(xbridge, ofp_port);
        return port ? port->may_enable : false;
    }
}

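/* Implements the "bundle" action: selects a slave port with
 * bundle_execute() and either loads it into the destination register or
 * outputs to it directly. */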
static void
xlate_bundle_action(struct xlate_ctx *ctx,
                    const struct ofpact_bundle *bundle)
{
    ofp_port_t port;

    port = bundle_execute(bundle, &ctx->xin->flow, &ctx->xout->wc,
                          slave_enabled_cb,
                          CONST_CAST(struct xbridge *, ctx->xbridge));
    if (bundle->dst.field) {
        nxm_reg_load(&bundle->dst, ofp_to_u16(port), &ctx->xin->flow);
    } else {
        xlate_output_action(ctx, port, 0, false);
    }
}

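/* Implements the "learn" action: builds a flow_mod from the current flow
 * and submits it to the bridge's flow table, but only when
 * 'ctx->xin->may_learn' is set (normally only when an actual packet is
 * being processed). */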
static void
xlate_learn_action(struct xlate_ctx *ctx,
                   const struct ofpact_learn *learn)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
    struct ofputil_flow_mod fm;
    uint64_t ofpacts_stub[1024 / 8];
    struct ofpbuf ofpacts;
    int error;

    ctx->xout->has_learn = true;

    learn_mask(learn, &ctx->xout->wc);

    if (!ctx->xin->may_learn) {
        return;
    }

    ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
    learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);

    error = ofproto_dpif_flow_mod(ctx->xbridge->ofproto, &fm);
    if (error && !VLOG_DROP_WARN(&rl)) {
        VLOG_WARN("learning action failed to modify flow table (%s)",
                  ofperr_get_name(error));
    }

    ofpbuf_uninit(&ofpacts);
}

/* Reduces '*timeout' to no more than 'max'.  A value of zero in either case
 * means "infinite". */
static void
reduce_timeout(uint16_t max, uint16_t *timeout)
{
    if (max && (!*timeout || *timeout > max)) {
        *timeout = max;
    }
}

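/* Implements the "fin_timeout" action: when the packet carries a TCP FIN
 * or RST flag, shrinks the originating rule's idle and hard timeouts so
 * that completed connections expire promptly. */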
static void
xlate_fin_timeout(struct xlate_ctx *ctx,
                  const struct ofpact_fin_timeout *oft)
{
    if (ctx->xin->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
        struct rule_dpif *rule = ctx->rule;

        reduce_timeout(oft->fin_idle_timeout, &rule->up.idle_timeout);
        reduce_timeout(oft->fin_hard_timeout, &rule->up.hard_timeout);
    }
}

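/* Implements the "sample" action.  Note on the probability scaling below:
 * multiplying the 16-bit probability by 0x10001, i.e. (p << 16) | p, yields
 * exactly the same fraction of UINT32_MAX that 'p' is of UINT16_MAX,
 * because UINT32_MAX == UINT16_MAX * 0x10001.  For example, 0x8000 (about
 * 50%) becomes 0x80008000. */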
static void
xlate_sample_action(struct xlate_ctx *ctx,
                    const struct ofpact_sample *os)
{
    union user_action_cookie cookie;
    /* Scale the probability from 16-bit to 32-bit while representing
     * the same percentage. */
    uint32_t probability = (os->probability << 16) | os->probability;

    commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
                       &ctx->xout->odp_actions, &ctx->xout->wc);

    compose_flow_sample_cookie(os->probability, os->collector_set_id,
                               os->obs_domain_id, os->obs_point_id, &cookie);
    compose_sample_action(ctx->xbridge, &ctx->xout->odp_actions,
                          &ctx->xin->flow, probability, &cookie,
                          sizeof cookie.flow_sample);
}

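/* Returns true if the packet may be received on 'xport': the port must not
 * be configured with OFPUTIL_PC_NO_RECV (or OFPUTIL_PC_NO_RECV_STP for STP
 * packets), and its STP state must allow at least learning. */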
static bool
may_receive(const struct xport *xport, struct xlate_ctx *ctx)
{
    if (xport->config & (eth_addr_equals(ctx->xin->flow.dl_dst, eth_addr_stp)
                         ? OFPUTIL_PC_NO_RECV_STP
                         : OFPUTIL_PC_NO_RECV)) {
        return false;
    }

    /* Only drop packets here if both forwarding and learning are
     * disabled.  If just learning is enabled, we need to have
     * OFPP_NORMAL and the learning action have a look at the packet
     * before we can drop it. */
    if (!stp_forward_in_state(xport->stp_state)
        && !stp_learn_in_state(xport->stp_state)) {
        return false;
    }

    return true;
}

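/* Checks ECN for packets arriving over a tunnel: if the outer header is
 * marked congestion experienced (CE), the mark is propagated into an
 * ECN-capable inner IP header; if the inner header is not ECN capable, the
 * packet must be dropped instead, so this returns false (cf. RFC 6040). */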
static bool
tunnel_ecn_ok(struct xlate_ctx *ctx)
{
    if (is_ip_any(&ctx->base_flow)
        && (ctx->xin->flow.tunnel.ip_tos & IP_ECN_MASK) == IP_ECN_CE) {
        if ((ctx->base_flow.nw_tos & IP_ECN_MASK) == IP_ECN_NOT_ECT) {
            VLOG_WARN_RL(&rl, "dropping tunnel packet marked ECN CE"
                         " but is not ECN capable");
            return false;
        } else {
            /* Set the ECN CE value in the tunneled packet. */
            ctx->xin->flow.nw_tos |= IP_ECN_CE;
        }
    }

    return true;
}

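/* Translates each ofpact in the 'ofpacts_len' bytes starting at 'ofpacts'
 * in the context 'ctx'.  The rule being translated is marked unevictable
 * for the duration.  goto-table is handled by looking up the rule in the
 * target table and continuing with its actions in place of a recursive
 * call ("tail recursion removal" below). */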
static void
do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
                 struct xlate_ctx *ctx)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;
    bool was_evictable = true;
    const struct ofpact *a;

    if (ctx->rule) {
        /* Don't let the rule we're working on get evicted underneath us. */
        was_evictable = ctx->rule->up.evictable;
        ctx->rule->up.evictable = false;
    }

 do_xlate_actions_again:
    OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
        struct ofpact_controller *controller;
        const struct ofpact_metadata *metadata;

        if (ctx->exit) {
            break;
        }

        switch (a->type) {
        case OFPACT_OUTPUT:
            xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
                                ofpact_get_OUTPUT(a)->max_len, true);
            break;

        case OFPACT_CONTROLLER:
            controller = ofpact_get_CONTROLLER(a);
            execute_controller_action(ctx, controller->max_len,
                                      controller->reason,
                                      controller->controller_id);
            break;

        case OFPACT_ENQUEUE:
            xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a));
            break;

        case OFPACT_SET_VLAN_VID:
            flow->vlan_tci &= ~htons(VLAN_VID_MASK);
            flow->vlan_tci |= (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid)
                               | htons(VLAN_CFI));
            break;

        case OFPACT_SET_VLAN_PCP:
            flow->vlan_tci &= ~htons(VLAN_PCP_MASK);
            flow->vlan_tci |=
                htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp << VLAN_PCP_SHIFT)
                      | VLAN_CFI);
            break;

        case OFPACT_STRIP_VLAN:
            flow->vlan_tci = htons(0);
            break;

        case OFPACT_PUSH_VLAN:
            /* XXX 802.1AD(QinQ) */
            flow->vlan_tci = htons(VLAN_CFI);
            break;

        case OFPACT_SET_ETH_SRC:
            memcpy(flow->dl_src, ofpact_get_SET_ETH_SRC(a)->mac, ETH_ADDR_LEN);
            break;

        case OFPACT_SET_ETH_DST:
            memcpy(flow->dl_dst, ofpact_get_SET_ETH_DST(a)->mac, ETH_ADDR_LEN);
            break;

        case OFPACT_SET_IPV4_SRC:
            if (flow->dl_type == htons(ETH_TYPE_IP)) {
                flow->nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
            }
            break;

        case OFPACT_SET_IPV4_DST:
            if (flow->dl_type == htons(ETH_TYPE_IP)) {
                flow->nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
            }
            break;

        case OFPACT_SET_IPV4_DSCP:
            /* OpenFlow 1.0 only supports IPv4. */
            if (flow->dl_type == htons(ETH_TYPE_IP)) {
                flow->nw_tos &= ~IP_DSCP_MASK;
                flow->nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp;
            }
            break;

        case OFPACT_SET_L4_SRC_PORT:
            memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
            if (is_ip_any(flow)) {
                flow->tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
            }
            break;

        case OFPACT_SET_L4_DST_PORT:
            memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
            if (is_ip_any(flow)) {
                flow->tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
            }
            break;

        case OFPACT_RESUBMIT:
            xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a));
            break;

        case OFPACT_SET_TUNNEL:
            flow->tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
            break;

        case OFPACT_SET_QUEUE:
            xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);
            break;

        case OFPACT_POP_QUEUE:
            flow->skb_priority = ctx->orig_skb_priority;
            break;

        case OFPACT_REG_MOVE:
            nxm_execute_reg_move(ofpact_get_REG_MOVE(a), flow, wc);
            break;

        case OFPACT_REG_LOAD:
            nxm_execute_reg_load(ofpact_get_REG_LOAD(a), flow);
            break;

        case OFPACT_STACK_PUSH:
            nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), flow, wc,
                                   &ctx->stack);
            break;

        case OFPACT_STACK_POP:
            nxm_execute_stack_pop(ofpact_get_STACK_POP(a), flow, &ctx->stack);
            break;

        case OFPACT_PUSH_MPLS:
            compose_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a)->ethertype);
            break;

        case OFPACT_POP_MPLS:
            compose_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
            break;

        case OFPACT_SET_MPLS_TTL:
            if (compose_set_mpls_ttl_action(ctx,
                                            ofpact_get_SET_MPLS_TTL(a)->ttl)) {
                goto out;
            }
            break;

        case OFPACT_DEC_MPLS_TTL:
            if (compose_dec_mpls_ttl_action(ctx)) {
                goto out;
            }
            break;

        case OFPACT_DEC_TTL:
            if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
                goto out;
            }
            break;

        case OFPACT_NOTE:
            /* Nothing to do. */
            break;

        case OFPACT_MULTIPATH:
            multipath_execute(ofpact_get_MULTIPATH(a), flow, wc);
            break;

        case OFPACT_BUNDLE:
            xlate_bundle_action(ctx, ofpact_get_BUNDLE(a));
            break;

        case OFPACT_OUTPUT_REG:
            xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a));
            break;

        case OFPACT_LEARN:
            xlate_learn_action(ctx, ofpact_get_LEARN(a));
            break;

        case OFPACT_EXIT:
            ctx->exit = true;
            break;

        case OFPACT_FIN_TIMEOUT:
            memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
            ctx->xout->has_fin_timeout = true;
            xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
            break;

        case OFPACT_CLEAR_ACTIONS:
            /* XXX
             * Nothing to do because write-actions is not supported for now.
             * When write-actions is supported, clear-actions also must
             * be supported at the same time.
             */
            break;

        case OFPACT_WRITE_METADATA:
            metadata = ofpact_get_WRITE_METADATA(a);
            flow->metadata &= ~metadata->mask;
            flow->metadata |= metadata->metadata & metadata->mask;
            break;

        case OFPACT_METER:
            /* Not implemented yet. */
            break;

        case OFPACT_GOTO_TABLE: {
            /* It is assumed that goto-table is the last action. */
            struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);
            struct rule_dpif *rule;

            ovs_assert(ctx->table_id < ogt->table_id);

            ctx->table_id = ogt->table_id;

            /* Look up a flow from the new table. */
            rule = rule_dpif_lookup_in_table(ctx->xbridge->ofproto, flow, wc,
                                             ctx->table_id);

            ctx->xout->tags |= calculate_flow_tag(ctx->xbridge->ofproto,
                                                  &ctx->xin->flow,
                                                  ctx->table_id, rule);

            rule = ctx_rule_hooks(ctx, rule, true);

            if (rule) {
                if (ctx->rule) {
                    ctx->rule->up.evictable = was_evictable;
                }
                ctx->rule = rule;
                was_evictable = rule->up.evictable;
                rule->up.evictable = false;

                /* Tail recursion removal. */
                ofpacts = rule->up.ofpacts;
                ofpacts_len = rule->up.ofpacts_len;
                goto do_xlate_actions_again;
            }
            break;
        }

        case OFPACT_SAMPLE:
            xlate_sample_action(ctx, ofpact_get_SAMPLE(a));
            break;
        }
    }

out:
    if (ctx->rule) {
        ctx->rule->up.evictable = was_evictable;
    }
}

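/* Initializes 'xin' for translating 'flow' as it arrived on 'ofproto',
 * optionally on behalf of 'rule' and 'packet'.  Learning is enabled by
 * default only when an actual packet is being processed. */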
void
xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
              const struct flow *flow, struct rule_dpif *rule,
              uint8_t tcp_flags, const struct ofpbuf *packet)
{
    xin->ofproto = ofproto;
    xin->flow = *flow;
    xin->packet = packet;
    xin->may_learn = packet != NULL;
    xin->rule = rule;
    xin->ofpacts = NULL;
    xin->ofpacts_len = 0;
    xin->tcp_flags = tcp_flags;
    xin->resubmit_hook = NULL;
    xin->report_hook = NULL;
    xin->resubmit_stats = NULL;
}

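/* Frees the resources held by 'xout', currently just its datapath action
 * buffer.  A null 'xout' is allowed and is a no-op. */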
void
xlate_out_uninit(struct xlate_out *xout)
{
    if (xout) {
        ofpbuf_uninit(&xout->odp_actions);
    }
}

/* Translates the flow and actions in 'xin' into datapath actions, as
 * xlate_actions() does, and then discards the datapath actions, so that
 * only their side effects (such as flow learning) take place. */
void
xlate_actions_for_side_effects(struct xlate_in *xin)
{
    struct xlate_out xout;

    xlate_actions(xin, &xout);
    xlate_out_uninit(&xout);
}

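/* Passes the description 's' and the current resubmit depth to the
 * caller-provided report hook, if any (used, e.g., to build up
 * "ofproto/trace" output). */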
static void
xlate_report(struct xlate_ctx *ctx, const char *s)
{
    if (ctx->xin->report_hook) {
        ctx->xin->report_hook(ctx->xin, s, ctx->recurse);
    }
}

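/* Copies 'src' into 'dst', pointing 'dst''s action buffer at its own stub
 * before duplicating the datapath actions, so that 'dst' never aliases
 * 'src''s buffer. */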
void
xlate_out_copy(struct xlate_out *dst, const struct xlate_out *src)
{
    dst->wc = src->wc;
    dst->tags = src->tags;
    dst->slow = src->slow;
    dst->has_learn = src->has_learn;
    dst->has_normal = src->has_normal;
    dst->has_fin_timeout = src->has_fin_timeout;
    dst->nf_output_iface = src->nf_output_iface;
    dst->mirrors = src->mirrors;

    ofpbuf_use_stub(&dst->odp_actions, dst->odp_actions_stub,
                    sizeof dst->odp_actions_stub);
    ofpbuf_put(&dst->odp_actions, src->odp_actions.data,
               src->odp_actions.size);
}

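/* Returns true if the datapath actions composed so far already output to
 * the datapath port that corresponds to OFPP_LOCAL.  Used below to decide
 * whether in-band control traffic still needs an explicit output to the
 * local port. */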
static bool
actions_output_to_local_port(const struct xlate_ctx *ctx)
{
    odp_port_t local_odp_port = ofp_port_to_odp_port(ctx->xbridge, OFPP_LOCAL);
    const struct nlattr *a;
    unsigned int left;

    NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->xout->odp_actions.data,
                             ctx->xout->odp_actions.size) {
        if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT
            && nl_attr_get_odp_port(a) == local_odp_port) {
            return true;
        }
    }
    return false;
}

/* Translates the flow and actions in 'xin' into datapath actions in
 * 'xout->odp_actions', storing the wildcards that the translation depends
 * on in 'xout->wc'. */
void
xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
{
    /* Normally false.  Set to true if we ever hit MAX_RESUBMIT_RECURSION, so
     * that in the future we always keep a copy of the original flow for
     * tracing purposes. */
    static bool hit_resubmit_limit;

    struct flow_wildcards *wc = &xout->wc;
    struct flow *flow = &xin->flow;

    enum slow_path_reason special;
    const struct ofpact *ofpacts;
    struct xport *in_port;
    struct flow orig_flow;
    struct xlate_ctx ctx;
    size_t ofpacts_len;

    COVERAGE_INC(xlate_actions);

    /* Flow initialization rules:
     * - 'base_flow' must match the kernel's view of the packet at the
     *   time that action processing starts.  'flow' represents any
     *   transformations we wish to make through actions.
     * - By default 'base_flow' and 'flow' are the same since the input
     *   packet matches the output before any actions are applied.
     * - When using VLAN splinters, 'base_flow''s VLAN is set to the value
     *   of the received packet as seen by the kernel.  If we later output
     *   to another device without any modifications this will cause us to
     *   insert a new tag since the original one was stripped off by the
     *   VLAN device.
     * - Tunnel metadata as received is retained in 'flow'.  This allows
     *   tunnel metadata matching also in later tables.
     *   Since a kernel action for setting the tunnel metadata will only be
     *   generated with actual tunnel output, changing the tunnel metadata
     *   values in 'flow' (such as tun_id) will only have effect with a later
     *   tunnel output action.
     * - Tunnel 'base_flow' is completely cleared since that is what the
     *   kernel does.  If we wish to maintain the original values an action
     *   needs to be generated. */

    ctx.xin = xin;
    ctx.xout = xout;
    ctx.xout->tags = 0;
    ctx.xout->slow = 0;
    ctx.xout->has_learn = false;
    ctx.xout->has_normal = false;
    ctx.xout->has_fin_timeout = false;
    ctx.xout->nf_output_iface = NF_OUT_DROP;
    ctx.xout->mirrors = 0;
    ofpbuf_use_stub(&ctx.xout->odp_actions, ctx.xout->odp_actions_stub,
                    sizeof ctx.xout->odp_actions_stub);
    ofpbuf_reserve(&ctx.xout->odp_actions, NL_A_U32_SIZE);

    ctx.xbridge = xbridge_lookup(xin->ofproto);
    if (!ctx.xbridge) {
        return;
    }

    ctx.rule = xin->rule;

    ctx.base_flow = *flow;
    memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
    ctx.orig_tunnel_ip_dst = flow->tunnel.ip_dst;

    flow_wildcards_init_catchall(wc);
    memset(&wc->masks.in_port, 0xff, sizeof wc->masks.in_port);
    memset(&wc->masks.skb_priority, 0xff, sizeof wc->masks.skb_priority);
    memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
    wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;

    if (tnl_port_should_receive(&ctx.xin->flow)) {
        memset(&wc->masks.tunnel, 0xff, sizeof wc->masks.tunnel);
    }
    if (ctx.xbridge->has_netflow) {
        netflow_mask_wc(flow, wc);
    }

    ctx.recurse = 0;
    ctx.max_resubmit_trigger = false;
    ctx.orig_skb_priority = flow->skb_priority;
    ctx.table_id = 0;
    ctx.exit = false;

    if (xin->ofpacts) {
        ofpacts = xin->ofpacts;
        ofpacts_len = xin->ofpacts_len;
    } else if (xin->rule) {
        ofpacts = xin->rule->up.ofpacts;
        ofpacts_len = xin->rule->up.ofpacts_len;
    } else {
        NOT_REACHED();
    }

    ofpbuf_use_stub(&ctx.stack, ctx.init_stack, sizeof ctx.init_stack);

    if (mbridge_has_mirrors(ctx.xbridge->mbridge) || hit_resubmit_limit) {
        /* Do this conditionally because the copy is expensive enough that it
         * shows up in profiles. */
        orig_flow = *flow;
    }

    if (flow->nw_frag & FLOW_NW_FRAG_ANY) {
        switch (ctx.xbridge->frag) {
        case OFPC_FRAG_NORMAL:
            /* We must pretend that transport ports are unavailable. */
            flow->tp_src = ctx.base_flow.tp_src = htons(0);
            flow->tp_dst = ctx.base_flow.tp_dst = htons(0);
            break;

        case OFPC_FRAG_DROP:
            return;

        case OFPC_FRAG_REASM:
            NOT_REACHED();

        case OFPC_FRAG_NX_MATCH:
            /* Nothing to do. */
            break;

        case OFPC_INVALID_TTL_TO_CONTROLLER:
            NOT_REACHED();
        }
    }

    in_port = get_ofp_port(ctx.xbridge, flow->in_port.ofp_port);
    special = process_special(&ctx, flow, in_port, ctx.xin->packet);
    if (special) {
        ctx.xout->slow = special;
    } else {
        static struct vlog_rate_limit trace_rl = VLOG_RATE_LIMIT_INIT(1, 1);
        size_t sample_actions_len;

        if (flow->in_port.ofp_port
            != vsp_realdev_to_vlandev(ctx.xbridge->ofproto,
                                      flow->in_port.ofp_port,
                                      flow->vlan_tci)) {
            ctx.base_flow.vlan_tci = 0;
        }

        add_sflow_action(&ctx);
        add_ipfix_action(&ctx);
        sample_actions_len = ctx.xout->odp_actions.size;

        if (tunnel_ecn_ok(&ctx) && (!in_port || may_receive(in_port, &ctx))) {
            do_xlate_actions(ofpacts, ofpacts_len, &ctx);

            /* We've let OFPP_NORMAL and the learning action look at the
             * packet, so drop it now if forwarding is disabled. */
            if (in_port && !stp_forward_in_state(in_port->stp_state)) {
                ctx.xout->odp_actions.size = sample_actions_len;
            }
        }

        if (ctx.max_resubmit_trigger && !ctx.xin->resubmit_hook) {
            if (!hit_resubmit_limit) {
                /* We didn't record the original flow.  Make sure we do from
                 * now on. */
                hit_resubmit_limit = true;
            } else if (!VLOG_DROP_ERR(&trace_rl)) {
                struct ds ds = DS_EMPTY_INITIALIZER;

                ofproto_trace(ctx.xbridge->ofproto, &orig_flow,
                              ctx.xin->packet, &ds);
                VLOG_ERR("Trace triggered by excessive resubmit "
                         "recursion:\n%s", ds_cstr(&ds));
                ds_destroy(&ds);
            }
        }

        if (ctx.xbridge->has_in_band
            && in_band_must_output_to_local_port(flow)
            && !actions_output_to_local_port(&ctx)) {
            compose_output_action(&ctx, OFPP_LOCAL);
        }

        fix_sflow_action(&ctx);

        if (mbridge_has_mirrors(ctx.xbridge->mbridge)) {
            add_mirror_actions(&ctx, &orig_flow);
        }
    }

    ofpbuf_uninit(&ctx.stack);

    /* Clear the metadata and register wildcard masks, because we won't
     * use non-header fields as part of the cache. */
    memset(&wc->masks.metadata, 0, sizeof wc->masks.metadata);
    memset(&wc->masks.regs, 0, sizeof wc->masks.regs);
}