/* Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */

#include <config.h>

#include "ofproto/ofproto-dpif-xlate.h"

#include "bfd.h"
#include "bitmap.h"
#include "bond.h"
#include "bundle.h"
#include "byte-order.h"
#include "cfm.h"
#include "connmgr.h"
#include "coverage.h"
#include "dpif.h"
#include "dynamic-string.h"
#include "in-band.h"
#include "lacp.h"
#include "learn.h"
#include "list.h"
#include "mac-learning.h"
#include "meta-flow.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "netlink.h"
#include "nx-match.h"
#include "odp-execute.h"
#include "ofp-actions.h"
#include "ofproto/ofproto-dpif-ipfix.h"
#include "ofproto/ofproto-dpif-mirror.h"
#include "ofproto/ofproto-dpif-sflow.h"
#include "ofproto/ofproto-dpif.h"
#include "tunnel.h"
#include "vlog.h"

COVERAGE_DEFINE(xlate_actions);

VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);

/* Maximum depth of flow table recursion (due to resubmit actions) in a
 * flow translation. */
#define MAX_RESUBMIT_RECURSION 64

struct xbridge {
    struct hmap_node hmap_node;   /* Node in global 'xbridges' map. */
    struct ofproto_dpif *ofproto; /* Key in global 'xbridges' map. */

    struct list xbundles;         /* Owned xbundles. */
    struct hmap xports;           /* Indexed by ofp_port. */

    char *name;                   /* Name used in log messages. */
    struct mac_learning *ml;      /* Mac learning handle. */
    struct mbridge *mbridge;      /* Mirroring. */
    struct dpif_sflow *sflow;     /* SFlow handle, or null. */
    struct dpif_ipfix *ipfix;     /* Ipfix handle, or null. */

    enum ofp_config_flags frag;   /* Fragmentation handling. */
    bool has_stp;                 /* Bridge runs stp? */
    bool has_netflow;             /* Bridge runs netflow? */
    bool has_in_band;             /* Bridge has in band control? */
    bool forward_bpdu;            /* Bridge forwards STP BPDUs? */
};

struct xbundle {
    struct hmap_node hmap_node;    /* In global 'xbundles' map. */
    struct ofbundle *ofbundle;     /* Key in global 'xbundles' map. */

    struct list list_node;         /* In parent 'xbridges' list. */
    struct xbridge *xbridge;       /* Parent xbridge. */

    struct list xports;            /* Contains "struct xport"s. */

    char *name;                    /* Name used in log messages. */
    struct bond *bond;             /* Nonnull iff more than one port. */
    struct lacp *lacp;             /* LACP handle or null. */

    enum port_vlan_mode vlan_mode; /* VLAN mode. */
    int vlan;                      /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;         /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                    * NULL if all VLANs are trunked. */
    bool use_priority_tags;        /* Use 802.1p tag for frames in VLAN 0? */
    bool floodable;                /* No port has OFPUTIL_PC_NO_FLOOD set? */
};

struct xport {
    struct hmap_node hmap_node;      /* Node in global 'xports' map. */
    struct ofport_dpif *ofport;      /* Key in global 'xports' map. */

    struct hmap_node ofp_node;       /* Node in parent xbridge 'xports' map. */
    ofp_port_t ofp_port;             /* Key in parent xbridge 'xports' map. */

    odp_port_t odp_port;             /* Datapath port number or ODPP_NONE. */

    struct list bundle_node;         /* In parent xbundle (if it exists). */
    struct xbundle *xbundle;         /* Parent xbundle or null. */

    struct netdev *netdev;           /* 'ofport''s netdev. */

    struct xbridge *xbridge;         /* Parent bridge. */
    struct xport *peer;              /* Patch port peer or null. */

    enum ofputil_port_config config; /* OpenFlow port configuration. */
    enum stp_state stp_state;        /* STP_DISABLED if STP not in use. */

    bool may_enable;                 /* May be enabled in bonds. */
    bool is_tunnel;                  /* Is a tunnel port. */

    struct cfm *cfm;                 /* CFM handle or null. */
    struct bfd *bfd;                 /* BFD handle or null. */
};

struct xlate_ctx {
    struct xlate_in *xin;
    struct xlate_out *xout;

    const struct xbridge *xbridge;

    /* Flow at the last commit. */
    struct flow base_flow;

    /* Tunnel IP destination address as received.  This is stored separately
     * as the base_flow.tunnel is cleared on init to reflect the datapath
     * behavior.  Used to make sure not to send tunneled output to ourselves,
     * which might lead to an infinite loop.  This could happen easily if a
     * tunnel is marked as 'remote_ip=flow', and the flow does not actually
     * set the tun_dst field. */
    ovs_be32 orig_tunnel_ip_dst;

    /* Stack for the push and pop actions.  Each stack element is of type
     * "union mf_subvalue". */
    union mf_subvalue init_stack[1024 / sizeof(union mf_subvalue)];
    struct ofpbuf stack;

    /* The rule that we are currently translating, or NULL. */
    struct rule_dpif *rule;

    int recurse;                /* Recursion level, via xlate_table_action. */
    bool max_resubmit_trigger;  /* Recursed too deeply during translation. */
    uint32_t orig_skb_priority; /* Priority when packet arrived. */
    uint8_t table_id;           /* OpenFlow table ID where flow was found. */
    uint32_t sflow_n_outputs;   /* Number of output ports. */
    odp_port_t sflow_odp_port;  /* Output port for composing sFlow action. */
    uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
    bool exit;                  /* No further actions should be processed. */
};

/* A controller may use OFPP_NONE as the ingress port to indicate that
 * it did not arrive on a "real" port.  'ofpp_none_bundle' exists for
 * when an input bundle is needed for validation (e.g., mirroring or
 * OFPP_NORMAL processing).  It is not connected to an 'ofproto', nor does
 * it have any 'port' structs, so care must be taken when dealing with it.
 * The bundle's name and vlan mode are initialized in lookup_input_bundle(). */
static struct xbundle ofpp_none_bundle;

static struct hmap xbridges = HMAP_INITIALIZER(&xbridges);
static struct hmap xbundles = HMAP_INITIALIZER(&xbundles);
static struct hmap xports = HMAP_INITIALIZER(&xports);

static bool may_receive(const struct xport *, struct xlate_ctx *);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
                             struct xlate_ctx *);
static void xlate_normal(struct xlate_ctx *);
static void xlate_report(struct xlate_ctx *, const char *);
static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
                               uint8_t table_id, bool may_packet_in);
static bool input_vid_is_valid(uint16_t vid, struct xbundle *, bool warn);
static uint16_t input_vid_to_vlan(const struct xbundle *, uint16_t vid);
static void output_normal(struct xlate_ctx *, const struct xbundle *,
                          uint16_t vlan);
static void compose_output_action(struct xlate_ctx *, ofp_port_t ofp_port);

static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

static struct xbridge *xbridge_lookup(const struct ofproto_dpif *);
static struct xbundle *xbundle_lookup(const struct ofbundle *);
static struct xport *xport_lookup(struct ofport_dpif *);
static struct xport *get_ofp_port(const struct xbridge *, ofp_port_t ofp_port);

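/* Updates the xlate module's copy of 'ofproto''s bridge-level state: its
 * name, MAC learning, mirroring, sFlow and IPFIX handles, and assorted
 * flags.  Creates a new xbridge for 'ofproto' on the first call. */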
void
xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
                  const struct mac_learning *ml, const struct mbridge *mbridge,
                  const struct dpif_sflow *sflow,
                  const struct dpif_ipfix *ipfix, enum ofp_config_flags frag,
                  bool forward_bpdu, bool has_in_band, bool has_netflow,
                  bool has_stp)
{
    struct xbridge *xbridge = xbridge_lookup(ofproto);

    if (!xbridge) {
        xbridge = xzalloc(sizeof *xbridge);
        xbridge->ofproto = ofproto;

        hmap_insert(&xbridges, &xbridge->hmap_node, hash_pointer(ofproto, 0));
        hmap_init(&xbridge->xports);
        list_init(&xbridge->xbundles);
    }

    if (xbridge->ml != ml) {
        mac_learning_unref(xbridge->ml);
        xbridge->ml = mac_learning_ref(ml);
    }

    if (xbridge->mbridge != mbridge) {
        mbridge_unref(xbridge->mbridge);
        xbridge->mbridge = mbridge_ref(mbridge);
    }

    if (xbridge->sflow != sflow) {
        dpif_sflow_unref(xbridge->sflow);
        xbridge->sflow = dpif_sflow_ref(sflow);
    }

    if (xbridge->ipfix != ipfix) {
        dpif_ipfix_unref(xbridge->ipfix);
        xbridge->ipfix = dpif_ipfix_ref(ipfix);
    }

    free(xbridge->name);
    xbridge->name = xstrdup(name);

    xbridge->forward_bpdu = forward_bpdu;
    xbridge->has_in_band = has_in_band;
    xbridge->has_netflow = has_netflow;
    xbridge->has_stp = has_stp;
    xbridge->frag = frag;
}

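/* Removes 'ofproto''s xbridge, first removing all of its xports and
 * xbundles. */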
void
xlate_remove_ofproto(struct ofproto_dpif *ofproto)
{
    struct xbridge *xbridge = xbridge_lookup(ofproto);
    struct xbundle *xbundle, *next_xbundle;
    struct xport *xport, *next_xport;

    if (!xbridge) {
        return;
    }

    HMAP_FOR_EACH_SAFE (xport, next_xport, ofp_node, &xbridge->xports) {
        xlate_ofport_remove(xport->ofport);
    }

    LIST_FOR_EACH_SAFE (xbundle, next_xbundle, list_node, &xbridge->xbundles) {
        xlate_bundle_remove(xbundle->ofbundle);
    }

    hmap_remove(&xbridges, &xbridge->hmap_node);
    free(xbridge->name);
    free(xbridge);
}

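/* Updates the xlate module's copy of 'ofbundle''s configuration (name, VLAN
 * mode, trunks, bond and LACP handles, and so on), creating the xbundle if
 * it does not already exist. */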
void
xlate_bundle_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
                 const char *name, enum port_vlan_mode vlan_mode, int vlan,
                 unsigned long *trunks, bool use_priority_tags,
                 const struct bond *bond, const struct lacp *lacp,
                 bool floodable)
{
    struct xbundle *xbundle = xbundle_lookup(ofbundle);

    if (!xbundle) {
        xbundle = xzalloc(sizeof *xbundle);
        xbundle->ofbundle = ofbundle;
        xbundle->xbridge = xbridge_lookup(ofproto);

        hmap_insert(&xbundles, &xbundle->hmap_node, hash_pointer(ofbundle, 0));
        list_insert(&xbundle->xbridge->xbundles, &xbundle->list_node);
        list_init(&xbundle->xports);
    }

    ovs_assert(xbundle->xbridge);

    free(xbundle->name);
    xbundle->name = xstrdup(name);

    xbundle->vlan_mode = vlan_mode;
    xbundle->vlan = vlan;
    xbundle->trunks = trunks;
    xbundle->use_priority_tags = use_priority_tags;
    xbundle->floodable = floodable;

    if (xbundle->bond != bond) {
        bond_unref(xbundle->bond);
        xbundle->bond = bond_ref(bond);
    }

    if (xbundle->lacp != lacp) {
        lacp_unref(xbundle->lacp);
        xbundle->lacp = lacp_ref(lacp);
    }
}

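/* Detaches all of 'ofbundle''s xports and destroys its xbundle. */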
void
xlate_bundle_remove(struct ofbundle *ofbundle)
{
    struct xbundle *xbundle = xbundle_lookup(ofbundle);
    struct xport *xport, *next;

    if (!xbundle) {
        return;
    }

    LIST_FOR_EACH_SAFE (xport, next, bundle_node, &xbundle->xports) {
        list_remove(&xport->bundle_node);
        xport->xbundle = NULL;
    }

    hmap_remove(&xbundles, &xbundle->hmap_node);
    list_remove(&xbundle->list_node);
    bond_unref(xbundle->bond);
    lacp_unref(xbundle->lacp);
    free(xbundle->name);
    free(xbundle);
}

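/* Updates the xlate module's copy of 'ofport''s state (netdev, CFM and BFD
 * handles, patch-port peer, STP state, and so on), creating the xport if it
 * does not already exist. */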
void
xlate_ofport_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
                 struct ofport_dpif *ofport, ofp_port_t ofp_port,
                 odp_port_t odp_port, const struct netdev *netdev,
                 const struct cfm *cfm, const struct bfd *bfd,
                 struct ofport_dpif *peer, enum ofputil_port_config config,
                 enum stp_state stp_state, bool is_tunnel, bool may_enable)
{
    struct xport *xport = xport_lookup(ofport);

    if (!xport) {
        xport = xzalloc(sizeof *xport);
        xport->ofport = ofport;
        xport->xbridge = xbridge_lookup(ofproto);
        xport->ofp_port = ofp_port;

        hmap_insert(&xports, &xport->hmap_node, hash_pointer(ofport, 0));
        hmap_insert(&xport->xbridge->xports, &xport->ofp_node,
                    hash_ofp_port(xport->ofp_port));
    }

    ovs_assert(xport->ofp_port == ofp_port);

    xport->config = config;
    xport->stp_state = stp_state;
    xport->is_tunnel = is_tunnel;
    xport->may_enable = may_enable;
    xport->odp_port = odp_port;

    if (xport->netdev != netdev) {
        netdev_close(xport->netdev);
        xport->netdev = netdev_ref(netdev);
    }

    if (xport->cfm != cfm) {
        cfm_unref(xport->cfm);
        xport->cfm = cfm_ref(cfm);
    }

    if (xport->bfd != bfd) {
        bfd_unref(xport->bfd);
        xport->bfd = bfd_ref(bfd);
    }

    if (xport->peer) {
        xport->peer->peer = NULL;
    }
    xport->peer = peer ? xport_lookup(peer) : NULL;
    if (xport->peer) {
        xport->peer->peer = xport;
    }

    if (xport->xbundle) {
        list_remove(&xport->bundle_node);
    }
    xport->xbundle = ofbundle ? xbundle_lookup(ofbundle) : NULL;
    if (xport->xbundle) {
        list_insert(&xport->xbundle->xports, &xport->bundle_node);
    }
}

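/* Unlinks 'ofport''s xport from its peer, bundle, and bridge, then frees
 * it. */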
void
xlate_ofport_remove(struct ofport_dpif *ofport)
{
    struct xport *xport = xport_lookup(ofport);

    if (!xport) {
        return;
    }

    if (xport->peer) {
        xport->peer->peer = NULL;
        xport->peer = NULL;
    }

    if (xport->xbundle) {
        list_remove(&xport->bundle_node);
    }

    hmap_remove(&xports, &xport->hmap_node);
    hmap_remove(&xport->xbridge->xports, &xport->ofp_node);

    netdev_close(xport->netdev);
    cfm_unref(xport->cfm);
    bfd_unref(xport->bfd);
    free(xport);
}

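/* Returns the xbridge for 'ofproto', or NULL if there is none. */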
static struct xbridge *
xbridge_lookup(const struct ofproto_dpif *ofproto)
{
    struct xbridge *xbridge;

    HMAP_FOR_EACH_IN_BUCKET (xbridge, hmap_node, hash_pointer(ofproto, 0),
                             &xbridges) {
        if (xbridge->ofproto == ofproto) {
            return xbridge;
        }
    }
    return NULL;
}

static struct xbundle *
xbundle_lookup(const struct ofbundle *ofbundle)
{
    struct xbundle *xbundle;

    HMAP_FOR_EACH_IN_BUCKET (xbundle, hmap_node, hash_pointer(ofbundle, 0),
                             &xbundles) {
        if (xbundle->ofbundle == ofbundle) {
            return xbundle;
        }
    }
    return NULL;
}

static struct xport *
xport_lookup(struct ofport_dpif *ofport)
{
    struct xport *xport;

    HMAP_FOR_EACH_IN_BUCKET (xport, hmap_node, hash_pointer(ofport, 0),
                             &xports) {
        if (xport->ofport == ofport) {
            return xport;
        }
    }
    return NULL;
}

static struct xport *
get_ofp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
{
    struct xport *xport;

    HMAP_FOR_EACH_IN_BUCKET (xport, ofp_node, hash_ofp_port(ofp_port),
                             &xbridge->xports) {
        if (xport->ofp_port == ofp_port) {
            return xport;
        }
    }
    return NULL;
}

static odp_port_t
ofp_port_to_odp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
{
    const struct xport *xport = get_ofp_port(xbridge, ofp_port);
    return xport ? xport->odp_port : ODPP_NONE;
}

static bool
xbundle_trunks_vlan(const struct xbundle *bundle, uint16_t vlan)
{
    return (bundle->vlan_mode != PORT_VLAN_ACCESS
            && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
}

static bool
xbundle_includes_vlan(const struct xbundle *xbundle, uint16_t vlan)
{
    return vlan == xbundle->vlan || xbundle_trunks_vlan(xbundle, vlan);
}

static mirror_mask_t
xbundle_mirror_out(const struct xbridge *xbridge, struct xbundle *xbundle)
{
    return xbundle != &ofpp_none_bundle
        ? mirror_bundle_out(xbridge->mbridge, xbundle->ofbundle)
        : 0;
}

static mirror_mask_t
xbundle_mirror_src(const struct xbridge *xbridge, struct xbundle *xbundle)
{
    return xbundle != &ofpp_none_bundle
        ? mirror_bundle_src(xbridge->mbridge, xbundle->ofbundle)
        : 0;
}

static mirror_mask_t
xbundle_mirror_dst(const struct xbridge *xbridge, struct xbundle *xbundle)
{
    return xbundle != &ofpp_none_bundle
        ? mirror_bundle_dst(xbridge->mbridge, xbundle->ofbundle)
        : 0;
}

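/* Looks up the xbundle on which a packet arriving on 'in_port' within
 * 'xbridge' was received.  If 'in_xportp' is nonnull, stores the xport for
 * 'in_port' there.  Returns the special 'ofpp_none_bundle' for OFPP_NONE,
 * or NULL (warning if 'warn' is true) if 'in_port' is unknown. */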
static struct xbundle *
lookup_input_bundle(const struct xbridge *xbridge, ofp_port_t in_port,
                    bool warn, struct xport **in_xportp)
{
    struct xport *xport;

    /* Find the port and bundle for the received packet. */
    xport = get_ofp_port(xbridge, in_port);
    if (in_xportp) {
        *in_xportp = xport;
    }
    if (xport && xport->xbundle) {
        return xport->xbundle;
    }

    /* Special-case OFPP_NONE, which a controller may use as the ingress
     * port for traffic that it is sourcing. */
    if (in_port == OFPP_NONE) {
        ofpp_none_bundle.name = "OFPP_NONE";
        ofpp_none_bundle.vlan_mode = PORT_VLAN_TRUNK;
        return &ofpp_none_bundle;
    }

    /* Odd.  A few possible reasons here:
     *
     * - We deleted a port but there are still a few packets queued up
     *   from it.
     *
     * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
     *   we don't know about.
     *
     * - The ofproto client didn't configure the port as part of a bundle.
     *   This is particularly likely to happen if a packet was received on
     *   the port after it was created, but before the client had a chance
     *   to configure its bundle.
     */
    if (warn) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
                     "port %"PRIu16, xbridge->name, in_port);
    }
    return NULL;
}

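/* Composes output actions for every mirror that applies to the packet in
 * 'orig_flow': mirrors that select the input bundle as a source, plus any
 * mirrors already accumulated in 'ctx->xout->mirrors'. */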
static void
add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow)
{
    const struct xbridge *xbridge = ctx->xbridge;
    mirror_mask_t mirrors;
    struct xbundle *in_xbundle;
    uint16_t vlan;
    uint16_t vid;

    mirrors = ctx->xout->mirrors;
    ctx->xout->mirrors = 0;

    in_xbundle = lookup_input_bundle(xbridge, orig_flow->in_port.ofp_port,
                                     ctx->xin->packet != NULL, NULL);
    if (!in_xbundle) {
        return;
    }
    mirrors |= xbundle_mirror_src(xbridge, in_xbundle);

    /* Drop frames on bundles reserved for mirroring. */
    if (xbundle_mirror_out(xbridge, in_xbundle)) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ctx->xbridge->name, in_xbundle->name);
        }
        ofpbuf_clear(&ctx->xout->odp_actions);
        return;
    }

    /* Check VLAN. */
    vid = vlan_tci_to_vid(orig_flow->vlan_tci);
    if (!input_vid_is_valid(vid, in_xbundle, ctx->xin->packet != NULL)) {
        return;
    }
    vlan = input_vid_to_vlan(in_xbundle, vid);

    if (!mirrors) {
        return;
    }

    /* Restore the original packet before adding the mirror actions. */
    ctx->xin->flow = *orig_flow;

    while (mirrors) {
        mirror_mask_t dup_mirrors;
        struct ofbundle *out;
        unsigned long *vlans;
        bool vlan_mirrored;
        bool has_mirror;
        int out_vlan;

        has_mirror = mirror_get(xbridge->mbridge, mirror_mask_ffs(mirrors) - 1,
                                &vlans, &dup_mirrors, &out, &out_vlan);
        ovs_assert(has_mirror);

        if (vlans) {
            ctx->xout->wc.masks.vlan_tci |= htons(VLAN_CFI | VLAN_VID_MASK);
        }
        vlan_mirrored = !vlans || bitmap_is_set(vlans, vlan);
        free(vlans);

        if (!vlan_mirrored) {
            mirrors = zero_rightmost_1bit(mirrors);
            continue;
        }

        mirrors &= ~dup_mirrors;
        ctx->xout->mirrors |= dup_mirrors;
        if (out) {
            struct xbundle *out_xbundle = xbundle_lookup(out);
            if (out_xbundle) {
                output_normal(ctx, out_xbundle, vlan);
            }
        } else if (vlan != out_vlan
                   && !eth_addr_is_reserved(orig_flow->dl_dst)) {
            struct xbundle *xbundle;

            LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
                if (xbundle_includes_vlan(xbundle, out_vlan)
                    && !xbundle_mirror_out(xbridge, xbundle)) {
                    output_normal(ctx, xbundle, out_vlan);
                }
            }
        }
    }
}

/* Given 'vid', the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), and
 * 'in_xbundle', the bundle on which the packet was received, returns the
 * VLAN to which the packet belongs.
 *
 * Both 'vid' and the return value are in the range 0...4095. */
static uint16_t
input_vid_to_vlan(const struct xbundle *in_xbundle, uint16_t vid)
{
    switch (in_xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return in_xbundle->vlan;

    case PORT_VLAN_TRUNK:
        return vid;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        return vid ? vid : in_xbundle->vlan;

    default:
        NOT_REACHED();
    }
}

/* Checks whether a packet with the given 'vid' may ingress on 'in_xbundle'.
 * If so, returns true.  Otherwise, returns false and, if 'warn' is true,
 * logs a warning.
 *
 * 'vid' should be the VID obtained from the 802.1Q header that was received
 * as part of a packet (specify 0 if there was no 802.1Q header), in the
 * range 0...4095. */
static bool
input_vid_is_valid(uint16_t vid, struct xbundle *in_xbundle, bool warn)
{
    /* Allow any VID on the OFPP_NONE port. */
    if (in_xbundle == &ofpp_none_bundle) {
        return true;
    }

    switch (in_xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        if (vid) {
            if (warn) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "dropping VLAN %"PRIu16" tagged "
                             "packet received on port %s configured as VLAN "
                             "%"PRIu16" access port", vid, in_xbundle->name,
                             in_xbundle->vlan);
            }
            return false;
        }
        return true;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        if (!vid) {
            /* Port must always carry its native VLAN. */
            return true;
        }
        /* Fall through. */
    case PORT_VLAN_TRUNK:
        if (!xbundle_includes_vlan(in_xbundle, vid)) {
            if (warn) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "dropping VLAN %"PRIu16" packet "
                             "received on port %s not configured for trunking "
                             "VLAN %"PRIu16, vid, in_xbundle->name, vid);
            }
            return false;
        }
        return true;

    default:
        NOT_REACHED();
    }
}

/* Given 'vlan', the VLAN that a packet belongs to, and 'out_xbundle', a
 * bundle on which the packet is to be output, returns the VID that should
 * be included in the 802.1Q header.  (If the return value is 0, then the
 * 802.1Q header should only be included in the packet if there is a nonzero
 * PCP.)
 *
 * Both 'vlan' and the return value are in the range 0...4095. */
static uint16_t
output_vlan_to_vid(const struct xbundle *out_xbundle, uint16_t vlan)
{
    switch (out_xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return 0;

    case PORT_VLAN_TRUNK:
    case PORT_VLAN_NATIVE_TAGGED:
        return vlan;

    case PORT_VLAN_NATIVE_UNTAGGED:
        return vlan == out_xbundle->vlan ? 0 : vlan;

    default:
        NOT_REACHED();
    }
}

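/* Outputs the packet in 'ctx' to 'out_xbundle' as part of OFPP_NORMAL
 * processing, retagging it with the VID appropriate for 'vlan' and, on a
 * bonded bundle, choosing an output slave. */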
static void
output_normal(struct xlate_ctx *ctx, const struct xbundle *out_xbundle,
              uint16_t vlan)
{
    ovs_be16 *flow_tci = &ctx->xin->flow.vlan_tci;
    uint16_t vid;
    ovs_be16 tci, old_tci;
    struct xport *xport;

    vid = output_vlan_to_vid(out_xbundle, vlan);
    if (list_is_empty(&out_xbundle->xports)) {
        /* Partially configured bundle with no slaves.  Drop the packet. */
        return;
    } else if (!out_xbundle->bond) {
        xport = CONTAINER_OF(list_front(&out_xbundle->xports), struct xport,
                             bundle_node);
    } else {
        struct ofport_dpif *ofport;

        ofport = bond_choose_output_slave(out_xbundle->bond, &ctx->xin->flow,
                                          &ctx->xout->wc, vid,
                                          &ctx->xout->tags);
        xport = ofport ? xport_lookup(ofport) : NULL;

        if (!xport) {
            /* No slaves enabled, so drop packet. */
            return;
        }
    }

    old_tci = *flow_tci;
    tci = htons(vid);
    if (tci || out_xbundle->use_priority_tags) {
        tci |= *flow_tci & htons(VLAN_PCP_MASK);
        if (tci) {
            tci |= htons(VLAN_CFI);
        }
    }
    *flow_tci = tci;

    compose_output_action(ctx, xport->ofp_port);
    *flow_tci = old_tci;
}

/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
 * migration.  Older Citrix-patched Linux DomU used gratuitous ARP replies to
 * indicate this; newer upstream kernels use gratuitous ARP requests. */
static bool
is_gratuitous_arp(const struct flow *flow, struct flow_wildcards *wc)
{
    if (flow->dl_type != htons(ETH_TYPE_ARP)) {
        return false;
    }

    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
    if (!eth_addr_is_broadcast(flow->dl_dst)) {
        return false;
    }

    memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
    if (flow->nw_proto == ARP_OP_REPLY) {
        return true;
    } else if (flow->nw_proto == ARP_OP_REQUEST) {
        memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
        memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);

        return flow->nw_src == flow->nw_dst;
    } else {
        return false;
    }
}

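/* Updates 'xbridge''s MAC learning table to record that 'flow''s source MAC
 * was seen on 'in_xbundle' in 'vlan'.  Gratuitous ARPs arriving on a bond
 * are ignored while the MAC entry is grat-ARP-locked, to avoid learning
 * from packets reflected back over bond slaves. */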
static void
update_learning_table(const struct xbridge *xbridge,
                      const struct flow *flow, struct flow_wildcards *wc,
                      int vlan, struct xbundle *in_xbundle)
{
    struct mac_entry *mac;

    /* Don't learn the OFPP_NONE port. */
    if (in_xbundle == &ofpp_none_bundle) {
        return;
    }

    ovs_rwlock_wrlock(&xbridge->ml->rwlock);
    if (!mac_learning_may_learn(xbridge->ml, flow->dl_src, vlan)) {
        goto out;
    }

    mac = mac_learning_insert(xbridge->ml, flow->dl_src, vlan);
    if (is_gratuitous_arp(flow, wc)) {
        /* We don't want to learn from gratuitous ARP packets that are
         * reflected back over bond slaves so we lock the learning table. */
        if (!in_xbundle->bond) {
            mac_entry_set_grat_arp_lock(mac);
        } else if (mac_entry_is_grat_arp_locked(mac)) {
            goto out;
        }
    }

    if (mac_entry_is_new(mac) || mac->port.p != in_xbundle->ofbundle) {
        /* The log messages here could actually be useful in debugging,
         * so keep the rate limit relatively high. */
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
        VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
                    "on port %s in VLAN %d",
                    xbridge->name, ETH_ADDR_ARGS(flow->dl_src),
                    in_xbundle->name, vlan);

        mac->port.p = in_xbundle->ofbundle;
        mac_learning_changed(xbridge->ml, mac);
    }
out:
    ovs_rwlock_unlock(&xbridge->ml->rwlock);
}

/* Determines whether packets in 'flow' within 'xbridge' should be forwarded
 * or dropped.  Returns true if they may be forwarded, false if they should
 * be dropped.
 *
 * 'in_port' must be the xport that corresponds to flow->in_port.
 * 'in_port' must be part of a bundle (i.e. in_port->xbundle must be nonnull).
 *
 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
 * returned by input_vid_to_vlan().  It must be a valid VLAN for 'in_port', as
 * checked by input_vid_is_valid().
 *
 * May also add tags to '*tags', although the current implementation only does
 * so in one special case.
 */
static bool
is_admissible(struct xlate_ctx *ctx, struct xport *in_port,
              uint16_t vlan)
{
    struct xbundle *in_xbundle = in_port->xbundle;
    const struct xbridge *xbridge = ctx->xbridge;
    struct flow *flow = &ctx->xin->flow;

    /* Drop frames for reserved multicast addresses
     * only if forward_bpdu option is absent. */
    if (!xbridge->forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
        xlate_report(ctx, "packet has reserved destination MAC, dropping");
        return false;
    }

    if (in_xbundle->bond) {
        struct mac_entry *mac;

        switch (bond_check_admissibility(in_xbundle->bond, in_port->ofport,
                                         flow->dl_dst, &ctx->xout->tags)) {
        case BV_ACCEPT:
            break;

        case BV_DROP:
            xlate_report(ctx, "bonding refused admissibility, dropping");
            return false;

        case BV_DROP_IF_MOVED:
            ovs_rwlock_rdlock(&xbridge->ml->rwlock);
            mac = mac_learning_lookup(xbridge->ml, flow->dl_src, vlan, NULL);
            if (mac && mac->port.p != in_xbundle->ofbundle &&
                (!is_gratuitous_arp(flow, &ctx->xout->wc)
                 || mac_entry_is_grat_arp_locked(mac))) {
                ovs_rwlock_unlock(&xbridge->ml->rwlock);
                xlate_report(ctx, "SLB bond thinks this packet looped back, "
                             "dropping");
                return false;
            }
            ovs_rwlock_unlock(&xbridge->ml->rwlock);
            break;
        }
    }

    return true;
}

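/* Implements OFPP_NORMAL, i.e. L2 learning-switch processing: validates the
 * input bundle and VLAN, learns the source MAC, then outputs to the port
 * learned for the destination MAC or floods the VLAN when it is unknown. */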
static void
xlate_normal(struct xlate_ctx *ctx)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;
    struct xbundle *in_xbundle;
    struct xport *in_port;
    struct mac_entry *mac;
    uint16_t vlan;
    uint16_t vid;

    ctx->xout->has_normal = true;

    memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
    wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);

    in_xbundle = lookup_input_bundle(ctx->xbridge, flow->in_port.ofp_port,
                                     ctx->xin->packet != NULL, &in_port);
    if (!in_xbundle) {
        xlate_report(ctx, "no input bundle, dropping");
        return;
    }

    /* Drop malformed frames. */
    if (flow->dl_type == htons(ETH_TYPE_VLAN) &&
        !(flow->vlan_tci & htons(VLAN_CFI))) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet with partial "
                         "VLAN tag received on port %s",
                         ctx->xbridge->name, in_xbundle->name);
        }
        xlate_report(ctx, "partial VLAN tag, dropping");
        return;
    }

    /* Drop frames on bundles reserved for mirroring. */
    if (xbundle_mirror_out(ctx->xbridge, in_xbundle)) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ctx->xbridge->name, in_xbundle->name);
        }
        xlate_report(ctx, "input port is mirror output port, dropping");
        return;
    }

    /* Check VLAN. */
    vid = vlan_tci_to_vid(flow->vlan_tci);
    if (!input_vid_is_valid(vid, in_xbundle, ctx->xin->packet != NULL)) {
        xlate_report(ctx, "disallowed VLAN VID for this input port, dropping");
        return;
    }
    vlan = input_vid_to_vlan(in_xbundle, vid);

    /* Check other admissibility requirements. */
    if (in_port && !is_admissible(ctx, in_port, vlan)) {
        return;
    }

    /* Learn source MAC. */
    if (ctx->xin->may_learn) {
        update_learning_table(ctx->xbridge, flow, wc, vlan, in_xbundle);
    }

    /* Determine output bundle. */
    ovs_rwlock_rdlock(&ctx->xbridge->ml->rwlock);
    mac = mac_learning_lookup(ctx->xbridge->ml, flow->dl_dst, vlan,
                              &ctx->xout->tags);
    if (mac) {
        struct xbundle *mac_xbundle = xbundle_lookup(mac->port.p);
        if (mac_xbundle && mac_xbundle != in_xbundle) {
            xlate_report(ctx, "forwarding to learned port");
            output_normal(ctx, mac_xbundle, vlan);
        } else if (!mac_xbundle) {
            xlate_report(ctx, "learned port is unknown, dropping");
        } else {
            xlate_report(ctx, "learned port is input port, dropping");
        }
    } else {
        struct xbundle *xbundle;

        xlate_report(ctx, "no learned MAC for destination, flooding");
        LIST_FOR_EACH (xbundle, list_node, &ctx->xbridge->xbundles) {
            if (xbundle != in_xbundle
                && xbundle_includes_vlan(xbundle, vlan)
                && xbundle->floodable
                && !xbundle_mirror_out(ctx->xbridge, xbundle)) {
                output_normal(ctx, xbundle, vlan);
            }
        }
        ctx->xout->nf_output_iface = NF_OUT_FLOOD;
    }
    ovs_rwlock_unlock(&ctx->xbridge->ml->rwlock);
}

/* Compose SAMPLE action for sFlow or IPFIX.  The given probability is
 * the number of packets out of UINT32_MAX to sample.  The given cookie is
 * passed back in the callback for each sampled packet.
 */
static size_t
compose_sample_action(const struct xbridge *xbridge,
                      struct ofpbuf *odp_actions,
                      const struct flow *flow,
                      const uint32_t probability,
                      const union user_action_cookie *cookie,
                      const size_t cookie_size)
{
    size_t sample_offset, actions_offset;
    int cookie_offset;

    sample_offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SAMPLE);

    nl_msg_put_u32(odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);

    actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);
    cookie_offset = put_userspace_action(xbridge->ofproto, odp_actions, flow,
                                         cookie, cookie_size);

    nl_msg_end_nested(odp_actions, actions_offset);
    nl_msg_end_nested(odp_actions, sample_offset);
    return cookie_offset;
}

static void
compose_sflow_cookie(const struct xbridge *xbridge, ovs_be16 vlan_tci,
                     odp_port_t odp_port, unsigned int n_outputs,
                     union user_action_cookie *cookie)
{
    int ifindex;

    cookie->type = USER_ACTION_COOKIE_SFLOW;
    cookie->sflow.vlan_tci = vlan_tci;

    /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
     * port information") for the interpretation of cookie->output. */
    switch (n_outputs) {
    case 0:
        /* 0x40000000 | 256 means "packet dropped for unknown reason". */
        cookie->sflow.output = 0x40000000 | 256;
        break;

    case 1:
        ifindex = dpif_sflow_odp_port_to_ifindex(xbridge->sflow, odp_port);
        if (ifindex) {
            cookie->sflow.output = ifindex;
            break;
        }
        /* Fall through. */
    default:
        /* 0x80000000 means "multiple output ports". */
        cookie->sflow.output = 0x80000000 | n_outputs;
        break;
    }
}

/* Compose SAMPLE action for sFlow bridge sampling. */
static size_t
compose_sflow_action(const struct xbridge *xbridge,
                     struct ofpbuf *odp_actions,
                     const struct flow *flow,
                     odp_port_t odp_port)
{
    uint32_t probability;
    union user_action_cookie cookie;

    if (!xbridge->sflow || flow->in_port.ofp_port == OFPP_NONE) {
        return 0;
    }

    probability = dpif_sflow_get_probability(xbridge->sflow);
    compose_sflow_cookie(xbridge, htons(0), odp_port,
                         odp_port == ODPP_NONE ? 0 : 1, &cookie);

    return compose_sample_action(xbridge, odp_actions, flow, probability,
                                 &cookie, sizeof cookie.sflow);
}

static void
compose_flow_sample_cookie(uint16_t probability, uint32_t collector_set_id,
                           uint32_t obs_domain_id, uint32_t obs_point_id,
                           union user_action_cookie *cookie)
{
    cookie->type = USER_ACTION_COOKIE_FLOW_SAMPLE;
    cookie->flow_sample.probability = probability;
    cookie->flow_sample.collector_set_id = collector_set_id;
    cookie->flow_sample.obs_domain_id = obs_domain_id;
    cookie->flow_sample.obs_point_id = obs_point_id;
}

static void
compose_ipfix_cookie(union user_action_cookie *cookie)
{
    cookie->type = USER_ACTION_COOKIE_IPFIX;
}

/* Compose SAMPLE action for IPFIX bridge sampling. */
static void
compose_ipfix_action(const struct xbridge *xbridge,
                     struct ofpbuf *odp_actions,
                     const struct flow *flow)
{
    uint32_t probability;
    union user_action_cookie cookie;

    if (!xbridge->ipfix || flow->in_port.ofp_port == OFPP_NONE) {
        return;
    }

    probability = dpif_ipfix_get_bridge_exporter_probability(xbridge->ipfix);
    compose_ipfix_cookie(&cookie);

    compose_sample_action(xbridge, odp_actions, flow, probability,
                          &cookie, sizeof cookie.ipfix);
}

/* SAMPLE action for sFlow must be the first action in any given list of
 * actions.  At this point we do not yet have all the information required
 * to build it, so we build the sample action as completely as we can. */
static void
add_sflow_action(struct xlate_ctx *ctx)
{
    ctx->user_cookie_offset = compose_sflow_action(ctx->xbridge,
                                                   &ctx->xout->odp_actions,
                                                   &ctx->xin->flow, ODPP_NONE);
    ctx->sflow_odp_port = 0;
    ctx->sflow_n_outputs = 0;
}

/* SAMPLE action for IPFIX must be 1st or 2nd action in any given list
 * of actions, eventually after the SAMPLE action for sFlow. */
static void
add_ipfix_action(struct xlate_ctx *ctx)
{
    compose_ipfix_action(ctx->xbridge, &ctx->xout->odp_actions,
                         &ctx->xin->flow);
}

/* Fix SAMPLE action according to data collected while composing ODP actions.
 * We need to fix the SAMPLE action's OVS_SAMPLE_ATTR_ACTIONS attribute, i.e.
 * the nested USERSPACE action's user-cookie, which is required for sFlow. */
static void
fix_sflow_action(struct xlate_ctx *ctx)
{
    const struct flow *base = &ctx->base_flow;
    union user_action_cookie *cookie;

    if (!ctx->user_cookie_offset) {
        return;
    }

    cookie = ofpbuf_at(&ctx->xout->odp_actions, ctx->user_cookie_offset,
                       sizeof cookie->sflow);
    ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);

    compose_sflow_cookie(ctx->xbridge, base->vlan_tci,
                         ctx->sflow_odp_port, ctx->sflow_n_outputs, cookie);
}

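/* Checks whether 'flow' is a control protocol packet (CFM, BFD, LACP, or
 * STP) destined for 'xport'.  If so, processes 'packet' (when nonnull) and
 * returns the corresponding slow-path reason; otherwise returns 0. */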
static enum slow_path_reason
process_special(struct xlate_ctx *ctx, const struct flow *flow,
                const struct xport *xport, const struct ofpbuf *packet)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    const struct xbridge *xbridge = ctx->xbridge;

    if (!xport) {
        return 0;
    } else if (xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc)) {
        if (packet) {
            cfm_process_heartbeat(xport->cfm, packet);
        }
        return SLOW_CFM;
    } else if (xport->bfd && bfd_should_process_flow(xport->bfd, flow, wc)) {
        if (packet) {
            bfd_process_packet(xport->bfd, flow, packet);
        }
        return SLOW_BFD;
    } else if (xport->xbundle && xport->xbundle->lacp
               && flow->dl_type == htons(ETH_TYPE_LACP)) {
        if (packet) {
            lacp_process_packet(xport->xbundle->lacp, xport->ofport, packet);
        }
        return SLOW_LACP;
    } else if (xbridge->has_stp && stp_should_process_flow(flow, wc)) {
        if (packet) {
            stp_process_packet(xport->ofport, packet);
        }
        return SLOW_STP;
    } else {
        return 0;
    }
}

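/* Composes datapath actions for outputting to 'ofp_port', handling patch
 * ports, tunnels, QoS DSCP rewriting, and VLAN splinters.  If 'check_stp'
 * is true, ports not in an STP forwarding state are skipped. */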
static void
compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
                        bool check_stp)
{
    const struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;
    ovs_be16 flow_vlan_tci;
    uint32_t flow_skb_mark;
    uint8_t flow_nw_tos;
    odp_port_t out_port, odp_port;
    uint8_t dscp;

    /* If 'struct flow' gets additional metadata, we'll need to zero it out
     * before traversing a patch port. */
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 20);

    if (!xport) {
        xlate_report(ctx, "Nonexistent output port");
        return;
    } else if (xport->config & OFPUTIL_PC_NO_FWD) {
        xlate_report(ctx, "OFPPC_NO_FWD set, skipping output");
        return;
    } else if (check_stp && !stp_forward_in_state(xport->stp_state)) {
        xlate_report(ctx, "STP not in forwarding state, skipping output");
        return;
    }

    if (mbridge_has_mirrors(ctx->xbridge->mbridge) && xport->xbundle) {
        ctx->xout->mirrors |= xbundle_mirror_dst(xport->xbundle->xbridge,
                                                 xport->xbundle);
    }

    if (xport->peer) {
        const struct xport *peer = xport->peer;
        struct flow old_flow = ctx->xin->flow;
        enum slow_path_reason special;

        ctx->xbridge = peer->xbridge;
        flow->in_port.ofp_port = peer->ofp_port;
        flow->metadata = htonll(0);
        memset(&flow->tunnel, 0, sizeof flow->tunnel);
        memset(flow->regs, 0, sizeof flow->regs);

        special = process_special(ctx, &ctx->xin->flow, peer,
                                  ctx->xin->packet);
        if (special) {
            ctx->xout->slow = special;
        } else if (may_receive(peer, ctx)) {
            if (stp_forward_in_state(peer->stp_state)) {
                xlate_table_action(ctx, flow->in_port.ofp_port, 0, true);
            } else {
                /* Forwarding is disabled by STP.  Let OFPP_NORMAL and the
                 * learning action look at the packet, then drop it. */
                struct flow old_base_flow = ctx->base_flow;
                size_t old_size = ctx->xout->odp_actions.size;
                mirror_mask_t old_mirrors = ctx->xout->mirrors;
                xlate_table_action(ctx, flow->in_port.ofp_port, 0, true);
                ctx->xout->mirrors = old_mirrors;
                ctx->base_flow = old_base_flow;
                ctx->xout->odp_actions.size = old_size;
            }
        }

        ctx->xin->flow = old_flow;
        ctx->xbridge = xport->xbundle->xbridge;

        if (ctx->xin->resubmit_stats) {
            netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
            netdev_vport_inc_rx(peer->netdev, ctx->xin->resubmit_stats);
        }

        return;
    }

    flow_vlan_tci = flow->vlan_tci;
    flow_skb_mark = flow->skb_mark;
    flow_nw_tos = flow->nw_tos;

    if (ofproto_dpif_dscp_from_priority(xport->ofport, flow->skb_priority,
                                        &dscp)) {
        wc->masks.nw_tos |= IP_ECN_MASK;
        flow->nw_tos &= ~IP_DSCP_MASK;
        flow->nw_tos |= dscp;
    }

    if (xport->is_tunnel) {
        /* Save tunnel metadata so that changes made due to
         * the Logical (tunnel) Port are not visible for any further
         * matches, while explicit set actions on tunnel metadata are.
         */
        struct flow_tnl flow_tnl = flow->tunnel;
        odp_port = tnl_port_send(xport->ofport, flow, &ctx->xout->wc);
        if (odp_port == ODPP_NONE) {
            xlate_report(ctx, "Tunneling decided against output");
            goto out; /* restore flow_nw_tos */
        }
        if (flow->tunnel.ip_dst == ctx->orig_tunnel_ip_dst) {
            xlate_report(ctx, "Not tunneling to our own address");
            goto out; /* restore flow_nw_tos */
        }
        if (ctx->xin->resubmit_stats) {
            netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
        }
        out_port = odp_port;
        commit_odp_tunnel_action(flow, &ctx->base_flow,
                                 &ctx->xout->odp_actions);
        flow->tunnel = flow_tnl; /* Restore tunnel metadata */
    } else {
        ofp_port_t vlandev_port;

        odp_port = xport->odp_port;
        if (ofproto_has_vlan_splinters(ctx->xbridge->ofproto)) {
            wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
        }
        vlandev_port = vsp_realdev_to_vlandev(ctx->xbridge->ofproto, ofp_port,
                                              flow->vlan_tci);
        if (vlandev_port == ofp_port) {
            out_port = odp_port;
        } else {
            out_port = ofp_port_to_odp_port(ctx->xbridge, vlandev_port);
            flow->vlan_tci = htons(0);
        }
        flow->skb_mark &= ~IPSEC_MARK;
    }

    if (out_port != ODPP_NONE) {
        commit_odp_actions(flow, &ctx->base_flow,
                           &ctx->xout->odp_actions, &ctx->xout->wc);
        nl_msg_put_odp_port(&ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT,
                            out_port);

        ctx->sflow_odp_port = odp_port;
        ctx->sflow_n_outputs++;
        ctx->xout->nf_output_iface = ofp_port;
    }

out:
    /* Restore flow. */
    flow->vlan_tci = flow_vlan_tci;
    flow->skb_mark = flow_skb_mark;
    flow->nw_tos = flow_nw_tos;
}

static void
compose_output_action(struct xlate_ctx *ctx, ofp_port_t ofp_port)
{
    compose_output_action__(ctx, ofp_port, true);
}

/* Common rule processing in one place to avoid duplicating code. */
static struct rule_dpif *
ctx_rule_hooks(struct xlate_ctx *ctx, struct rule_dpif *rule,
               bool may_packet_in)
{
    if (ctx->xin->resubmit_hook) {
        ctx->xin->resubmit_hook(ctx->xin, rule, ctx->recurse);
    }
    if (rule == NULL && may_packet_in) {
        /* XXX
         * check if table configuration flags
         * OFPTC_TABLE_MISS_CONTROLLER, default.
         * OFPTC_TABLE_MISS_CONTINUE,
         * OFPTC_TABLE_MISS_DROP
         * When OF1.0, OFPTC_TABLE_MISS_CONTINUE is used.  What to do?
         */
        rule = rule_dpif_miss_rule(ctx->xbridge->ofproto, &ctx->xin->flow);
    }
    if (rule && ctx->xin->resubmit_stats) {
        rule_credit_stats(rule, ctx->xin->resubmit_stats);
    }
    return rule;
}

static void
xlate_table_action(struct xlate_ctx *ctx,
                   ofp_port_t in_port, uint8_t table_id, bool may_packet_in)
{
    if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
        struct rule_dpif *rule;
        ofp_port_t old_in_port = ctx->xin->flow.in_port.ofp_port;
        uint8_t old_table_id = ctx->table_id;

        ctx->table_id = table_id;

        /* Look up a flow with 'in_port' as the input port. */
        ctx->xin->flow.in_port.ofp_port = in_port;
        rule = rule_dpif_lookup_in_table(ctx->xbridge->ofproto,
                                         &ctx->xin->flow, &ctx->xout->wc,
                                         table_id);

        ctx->xout->tags |= calculate_flow_tag(ctx->xbridge->ofproto,
                                              &ctx->xin->flow, ctx->table_id,
                                              rule);

        /* Restore the original input port.  Otherwise OFPP_NORMAL and
         * OFPP_IN_PORT will have surprising behavior. */
        ctx->xin->flow.in_port.ofp_port = old_in_port;

        rule = ctx_rule_hooks(ctx, rule, may_packet_in);

        if (rule) {
            struct rule_dpif *old_rule = ctx->rule;

            ctx->recurse++;
            ctx->rule = rule;
            do_xlate_actions(rule->up.ofpacts, rule->up.ofpacts_len, ctx);
            ctx->rule = old_rule;
            ctx->recurse--;
        }

        ctx->table_id = old_table_id;
    } else {
        static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1);

        VLOG_ERR_RL(&recurse_rl, "resubmit actions recursed over %d times",
                    MAX_RESUBMIT_RECURSION);
        ctx->max_resubmit_trigger = true;
    }
}

static void
xlate_ofpact_resubmit(struct xlate_ctx *ctx,
                      const struct ofpact_resubmit *resubmit)
{
    ofp_port_t in_port;
    uint8_t table_id;

    in_port = resubmit->in_port;
    if (in_port == OFPP_IN_PORT) {
        in_port = ctx->xin->flow.in_port.ofp_port;
    }

    table_id = resubmit->table_id;
    if (table_id == 255) {
        table_id = ctx->table_id;
    }

    xlate_table_action(ctx, in_port, table_id, false);
}

static void
flood_packets(struct xlate_ctx *ctx, bool all)
{
    const struct xport *xport;

    HMAP_FOR_EACH (xport, ofp_node, &ctx->xbridge->xports) {
        if (xport->ofp_port == ctx->xin->flow.in_port.ofp_port) {
            continue;
        }

        if (all) {
            compose_output_action__(ctx, xport->ofp_port, false);
        } else if (!(xport->config & OFPUTIL_PC_NO_FLOOD)) {
            compose_output_action(ctx, xport->ofp_port);
        }
    }

    ctx->xout->nf_output_iface = NF_OUT_FLOOD;
}

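/* Sends at most 'len' bytes of a copy of the packet to the controller as a
 * "packet in" with the given 'reason', and marks the flow for slow-path
 * processing. */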
static void
execute_controller_action(struct xlate_ctx *ctx, int len,
                          enum ofp_packet_in_reason reason,
                          uint16_t controller_id)
{
    struct ofputil_packet_in pin;
    struct ofpbuf *packet;
    struct flow key;

    ovs_assert(!ctx->xout->slow || ctx->xout->slow == SLOW_CONTROLLER);
    ctx->xout->slow = SLOW_CONTROLLER;
    if (!ctx->xin->packet) {
        return;
    }

    packet = ofpbuf_clone(ctx->xin->packet);

    key.skb_priority = 0;
    key.skb_mark = 0;
    memset(&key.tunnel, 0, sizeof key.tunnel);

    commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
                       &ctx->xout->odp_actions, &ctx->xout->wc);

    odp_execute_actions(NULL, packet, &key, ctx->xout->odp_actions.data,
                        ctx->xout->odp_actions.size, NULL, NULL);

    pin.packet = packet->data;
    pin.packet_len = packet->size;
    pin.reason = reason;
    pin.controller_id = controller_id;
    pin.table_id = ctx->table_id;
    pin.cookie = ctx->rule ? ctx->rule->up.flow_cookie : 0;

    pin.send_len = len;
    flow_get_metadata(&ctx->xin->flow, &pin.fmd);

    ofproto_dpif_send_packet_in(ctx->xbridge->ofproto, &pin);
    ofpbuf_delete(packet);
}

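/* Pushes an MPLS label stack entry of Ethertype 'eth_type' onto 'ctx''s
 * flow.  When pushing onto an unlabeled packet, the new entry's TTL and
 * traffic class are taken from the IP header and an explicit-null label is
 * used. */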
static void
compose_mpls_push_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;

    ovs_assert(eth_type_mpls(eth_type));

    memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
    memset(&wc->masks.mpls_depth, 0xff, sizeof wc->masks.mpls_depth);

    if (flow->mpls_depth) {
        flow->mpls_lse &= ~htonl(MPLS_BOS_MASK);
        flow->mpls_depth++;
    } else {
        ovs_be32 label;
        uint8_t tc, ttl;

        if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
            label = htonl(0x2); /* IPV6 Explicit Null. */
        } else {
            label = htonl(0x0); /* IPV4 Explicit Null. */
        }
        wc->masks.nw_tos |= IP_DSCP_MASK;
        wc->masks.nw_ttl = 0xff;
        tc = (flow->nw_tos & IP_DSCP_MASK) >> 2;
        ttl = flow->nw_ttl ? flow->nw_ttl : 0x40;
        flow->mpls_lse = set_mpls_lse_values(ttl, tc, 1, label);
        flow->mpls_depth = 1;
    }
    flow->dl_type = eth_type;
}

static void
compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;

    ovs_assert(eth_type_mpls(ctx->xin->flow.dl_type));
    ovs_assert(!eth_type_mpls(eth_type));

    memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
    memset(&wc->masks.mpls_depth, 0xff, sizeof wc->masks.mpls_depth);

    if (flow->mpls_depth) {
        flow->mpls_depth--;
        flow->mpls_lse = htonl(0);
        if (!flow->mpls_depth) {
            flow->dl_type = eth_type;
        }
    }
}

static bool
compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
{
    struct flow *flow = &ctx->xin->flow;

    if (!is_ip_any(flow)) {
        return false;
    }

    ctx->xout->wc.masks.nw_ttl = 0xff;
    if (flow->nw_ttl > 1) {
        flow->nw_ttl--;
        return false;
    } else {
        size_t i;

        for (i = 0; i < ids->n_controllers; i++) {
            execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
                                      ids->cnt_ids[i]);
        }

        /* Stop processing for current table. */
        return true;
    }
}

static bool
compose_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
{
    if (!eth_type_mpls(ctx->xin->flow.dl_type)) {
        return true;
    }

    set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse, ttl);
    return false;
}

static bool
compose_dec_mpls_ttl_action(struct xlate_ctx *ctx)
{
    struct flow *flow = &ctx->xin->flow;
    uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse);
    struct flow_wildcards *wc = &ctx->xout->wc;

    memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);

    if (!eth_type_mpls(flow->dl_type)) {
        return false;
    }

    if (ttl > 1) {
        ttl--;
        set_mpls_lse_ttl(&flow->mpls_lse, ttl);
        return false;
    } else {
        execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0);

        /* Stop processing for current table. */
        return true;
    }
}

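/* Implements the OpenFlow "output" action, dispatching on the reserved
 * ports (OFPP_IN_PORT, OFPP_TABLE, OFPP_NORMAL, and so on) and tracking the
 * NetFlow output interface. */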
1646static void
1647xlate_output_action(struct xlate_ctx *ctx,
4e022ec0 1648 ofp_port_t port, uint16_t max_len, bool may_packet_in)
9583bc14 1649{
4e022ec0 1650 ofp_port_t prev_nf_output_iface = ctx->xout->nf_output_iface;
9583bc14
EJ
1651
1652 ctx->xout->nf_output_iface = NF_OUT_DROP;
1653
1654 switch (port) {
1655 case OFPP_IN_PORT:
4e022ec0 1656 compose_output_action(ctx, ctx->xin->flow.in_port.ofp_port);
9583bc14
EJ
1657 break;
1658 case OFPP_TABLE:
4e022ec0
AW
1659 xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
1660 0, may_packet_in);
9583bc14
EJ
1661 break;
1662 case OFPP_NORMAL:
1663 xlate_normal(ctx);
1664 break;
1665 case OFPP_FLOOD:
1666 flood_packets(ctx, false);
1667 break;
1668 case OFPP_ALL:
1669 flood_packets(ctx, true);
1670 break;
1671 case OFPP_CONTROLLER:
1672 execute_controller_action(ctx, max_len, OFPR_ACTION, 0);
1673 break;
1674 case OFPP_NONE:
1675 break;
1676 case OFPP_LOCAL:
1677 default:
4e022ec0 1678 if (port != ctx->xin->flow.in_port.ofp_port) {
9583bc14
EJ
1679 compose_output_action(ctx, port);
1680 } else {
1681 xlate_report(ctx, "skipping output to input port");
1682 }
1683 break;
1684 }
1685
1686 if (prev_nf_output_iface == NF_OUT_FLOOD) {
1687 ctx->xout->nf_output_iface = NF_OUT_FLOOD;
1688 } else if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
1689 ctx->xout->nf_output_iface = prev_nf_output_iface;
1690 } else if (prev_nf_output_iface != NF_OUT_DROP &&
1691 ctx->xout->nf_output_iface != NF_OUT_FLOOD) {
1692 ctx->xout->nf_output_iface = NF_OUT_MULTI;
1693 }
1694}
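
/* Aside (annotation, not part of the upstream source): the nf_output_iface
 * bookkeeping above behaves as a small lattice for NetFlow reporting:
 *
 *     DROP  -> port    on the first single-port output,
 *     port  -> MULTI   when a second, different output appears,
 *     FLOOD            once set, sticks and overrides everything else,
 *
 * so a flow reports the single port it outputs to, NF_OUT_FLOOD if it
 * floods, and NF_OUT_MULTI for any other combination of outputs. */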

static void
xlate_output_reg_action(struct xlate_ctx *ctx,
                        const struct ofpact_output_reg *or)
{
    uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow);
    if (port <= UINT16_MAX) {
        union mf_subvalue value;

        memset(&value, 0xff, sizeof value);
        mf_write_subfield_flow(&or->src, &value, &ctx->xout->wc.masks);
        xlate_output_action(ctx, u16_to_ofp(port),
                            or->max_len, false);
    }
}

static void
xlate_enqueue_action(struct xlate_ctx *ctx,
                     const struct ofpact_enqueue *enqueue)
{
    ofp_port_t ofp_port = enqueue->port;
    uint32_t queue_id = enqueue->queue;
    uint32_t flow_priority, priority;
    int error;

    /* Translate queue to priority. */
    error = ofproto_dpif_queue_to_priority(ctx->xbridge->ofproto, queue_id,
                                           &priority);
    if (error) {
        /* Fall back to ordinary output action. */
        xlate_output_action(ctx, enqueue->port, 0, false);
        return;
    }

    /* Check output port. */
    if (ofp_port == OFPP_IN_PORT) {
        ofp_port = ctx->xin->flow.in_port.ofp_port;
    } else if (ofp_port == ctx->xin->flow.in_port.ofp_port) {
        return;
    }

    /* Add datapath actions. */
    flow_priority = ctx->xin->flow.skb_priority;
    ctx->xin->flow.skb_priority = priority;
    compose_output_action(ctx, ofp_port);
    ctx->xin->flow.skb_priority = flow_priority;

    /* Update NetFlow output port. */
    if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
        ctx->xout->nf_output_iface = ofp_port;
    } else if (ctx->xout->nf_output_iface != NF_OUT_FLOOD) {
        ctx->xout->nf_output_iface = NF_OUT_MULTI;
    }
}

static void
xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id)
{
    uint32_t skb_priority;

    if (!ofproto_dpif_queue_to_priority(ctx->xbridge->ofproto, queue_id,
                                        &skb_priority)) {
        ctx->xin->flow.skb_priority = skb_priority;
    } else {
        /* Couldn't translate queue to a priority.  Nothing to do.  A warning
         * has already been logged. */
    }
}

static bool
slave_enabled_cb(ofp_port_t ofp_port, void *xbridge_)
{
    const struct xbridge *xbridge = xbridge_;
    struct xport *port;

    switch (ofp_port) {
    case OFPP_IN_PORT:
    case OFPP_TABLE:
    case OFPP_NORMAL:
    case OFPP_FLOOD:
    case OFPP_ALL:
    case OFPP_NONE:
        return true;
    case OFPP_CONTROLLER: /* Not supported by the bundle action. */
        return false;
    default:
        port = get_ofp_port(xbridge, ofp_port);
        return port ? port->may_enable : false;
    }
}

static void
xlate_bundle_action(struct xlate_ctx *ctx,
                    const struct ofpact_bundle *bundle)
{
    ofp_port_t port;

    port = bundle_execute(bundle, &ctx->xin->flow, &ctx->xout->wc,
                          slave_enabled_cb,
                          CONST_CAST(struct xbridge *, ctx->xbridge));
    if (bundle->dst.field) {
        nxm_reg_load(&bundle->dst, ofp_to_u16(port), &ctx->xin->flow);
    } else {
        xlate_output_action(ctx, port, 0, false);
    }
}

static void
xlate_learn_action(struct xlate_ctx *ctx,
                   const struct ofpact_learn *learn)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
    struct ofputil_flow_mod fm;
    uint64_t ofpacts_stub[1024 / 8];
    struct ofpbuf ofpacts;
    int error;

    ctx->xout->has_learn = true;

    learn_mask(learn, &ctx->xout->wc);

    if (!ctx->xin->may_learn) {
        return;
    }

    ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
    learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);

    error = ofproto_dpif_flow_mod(ctx->xbridge->ofproto, &fm);
    if (error && !VLOG_DROP_WARN(&rl)) {
        VLOG_WARN("learning action failed to modify flow table (%s)",
                  ofperr_get_name(error));
    }

    ofpbuf_uninit(&ofpacts);
}

/* Reduces '*timeout' to no more than 'max'.  A value of zero in either case
 * means "infinite". */
static void
reduce_timeout(uint16_t max, uint16_t *timeout)
{
    if (max && (!*timeout || *timeout > max)) {
        *timeout = max;
    }
}
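
/* Illustrative sketch (annotation, not part of the upstream source): how
 * the zero-means-infinite convention interacts with clamping in
 * reduce_timeout():
 *
 *     uint16_t t;
 *     t = 0;  reduce_timeout(10, &t);   => t == 10  ("infinite" clamped)
 *     t = 30; reduce_timeout(10, &t);   => t == 10  (larger value clamped)
 *     t = 5;  reduce_timeout(10, &t);   => t == 5   (already within max)
 *     t = 7;  reduce_timeout(0,  &t);   => t == 7   (max 0 imposes no limit)
 */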

static void
xlate_fin_timeout(struct xlate_ctx *ctx,
                  const struct ofpact_fin_timeout *oft)
{
    if (ctx->xin->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
        struct rule_dpif *rule = ctx->rule;

        if (list_is_empty(&rule->up.expirable)) {
            list_insert(&rule->up.ofproto->expirable, &rule->up.expirable);
        }

        reduce_timeout(oft->fin_idle_timeout, &rule->up.idle_timeout);
        reduce_timeout(oft->fin_hard_timeout, &rule->up.hard_timeout);
    }
}

static void
xlate_sample_action(struct xlate_ctx *ctx,
                    const struct ofpact_sample *os)
{
    union user_action_cookie cookie;
    /* Scale the probability from 16-bit to 32-bit while representing
     * the same percentage. */
    uint32_t probability = (os->probability << 16) | os->probability;

    commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
                       &ctx->xout->odp_actions, &ctx->xout->wc);

    compose_flow_sample_cookie(os->probability, os->collector_set_id,
                               os->obs_domain_id, os->obs_point_id, &cookie);
    compose_sample_action(ctx->xbridge, &ctx->xout->odp_actions,
                          &ctx->xin->flow, probability, &cookie,
                          sizeof cookie.flow_sample);
}
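
/* Aside (annotation, not part of the upstream source): replicating the
 * 16-bit probability into both halves of a 32-bit word preserves the
 * sampled fraction exactly, since (p << 16) | p == p * 0x10001 and
 * UINT32_MAX == UINT16_MAX * 0x10001, hence
 * probability / UINT32_MAX == os->probability / UINT16_MAX.  For example,
 * 0x8000 out of 0xffff becomes 0x80008000 out of 0xffffffff. */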

static bool
may_receive(const struct xport *xport, struct xlate_ctx *ctx)
{
    if (xport->config & (eth_addr_equals(ctx->xin->flow.dl_dst, eth_addr_stp)
                         ? OFPUTIL_PC_NO_RECV_STP
                         : OFPUTIL_PC_NO_RECV)) {
        return false;
    }

    /* Only drop packets here if both forwarding and learning are
     * disabled.  If just learning is enabled, we need to have
     * OFPP_NORMAL and the learning action have a look at the packet
     * before we can drop it. */
    if (!stp_forward_in_state(xport->stp_state)
        && !stp_learn_in_state(xport->stp_state)) {
        return false;
    }

    return true;
}

static bool
tunnel_ecn_ok(struct xlate_ctx *ctx)
{
    if (is_ip_any(&ctx->base_flow)
        && (ctx->xin->flow.tunnel.ip_tos & IP_ECN_MASK) == IP_ECN_CE) {
        if ((ctx->base_flow.nw_tos & IP_ECN_MASK) == IP_ECN_NOT_ECT) {
            VLOG_WARN_RL(&rl, "dropping tunnel packet marked ECN CE"
                         " whose inner packet is not ECN capable");
            return false;
        } else {
            /* Set the ECN CE value in the tunneled packet. */
            ctx->xin->flow.nw_tos |= IP_ECN_CE;
        }
    }

    return true;
}
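
/* Aside (annotation, not part of the upstream source): this mirrors the
 * standard ECN decapsulation rule (cf. RFC 6040): an outer header marked
 * Congestion Experienced (CE) carries a congestion signal that can only be
 * preserved if the inner packet negotiated ECN.  If the inner packet is
 * Not-ECT, the signal cannot be propagated, so the safe choice is to drop
 * the packet rather than silently lose the congestion indication. */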

static void
do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
                 struct xlate_ctx *ctx)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;
    bool was_evictable = true;
    const struct ofpact *a;

    if (ctx->rule) {
        /* Don't let the rule we're working on get evicted underneath us. */
        was_evictable = ctx->rule->up.evictable;
        ctx->rule->up.evictable = false;
    }

    OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
        struct ofpact_controller *controller;
        const struct ofpact_metadata *metadata;

        if (ctx->exit) {
            break;
        }

        switch (a->type) {
        case OFPACT_OUTPUT:
            xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
                                ofpact_get_OUTPUT(a)->max_len, true);
            break;

        case OFPACT_CONTROLLER:
            controller = ofpact_get_CONTROLLER(a);
            execute_controller_action(ctx, controller->max_len,
                                      controller->reason,
                                      controller->controller_id);
            break;

        case OFPACT_ENQUEUE:
            xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a));
            break;

        case OFPACT_SET_VLAN_VID:
            flow->vlan_tci &= ~htons(VLAN_VID_MASK);
            flow->vlan_tci |= (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid)
                               | htons(VLAN_CFI));
            break;

        case OFPACT_SET_VLAN_PCP:
            flow->vlan_tci &= ~htons(VLAN_PCP_MASK);
            flow->vlan_tci |=
                htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp << VLAN_PCP_SHIFT)
                      | VLAN_CFI);
            break;

        case OFPACT_STRIP_VLAN:
            flow->vlan_tci = htons(0);
            break;

        case OFPACT_PUSH_VLAN:
            /* XXX 802.1AD(QinQ) */
            flow->vlan_tci = htons(VLAN_CFI);
            break;

        case OFPACT_SET_ETH_SRC:
            memcpy(flow->dl_src, ofpact_get_SET_ETH_SRC(a)->mac, ETH_ADDR_LEN);
            break;

        case OFPACT_SET_ETH_DST:
            memcpy(flow->dl_dst, ofpact_get_SET_ETH_DST(a)->mac, ETH_ADDR_LEN);
            break;

        case OFPACT_SET_IPV4_SRC:
            if (flow->dl_type == htons(ETH_TYPE_IP)) {
                flow->nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
            }
            break;

        case OFPACT_SET_IPV4_DST:
            if (flow->dl_type == htons(ETH_TYPE_IP)) {
                flow->nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
            }
            break;

        case OFPACT_SET_IPV4_DSCP:
            /* OpenFlow 1.0 only supports IPv4. */
            if (flow->dl_type == htons(ETH_TYPE_IP)) {
                flow->nw_tos &= ~IP_DSCP_MASK;
                flow->nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp;
            }
            break;

        case OFPACT_SET_L4_SRC_PORT:
            memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
            if (is_ip_any(flow)) {
                flow->tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
            }
            break;

        case OFPACT_SET_L4_DST_PORT:
            memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
            if (is_ip_any(flow)) {
                flow->tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
            }
            break;

        case OFPACT_RESUBMIT:
            xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a));
            break;

        case OFPACT_SET_TUNNEL:
            flow->tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
            break;

        case OFPACT_SET_QUEUE:
            xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);
            break;

        case OFPACT_POP_QUEUE:
            flow->skb_priority = ctx->orig_skb_priority;
            break;

        case OFPACT_REG_MOVE:
            nxm_execute_reg_move(ofpact_get_REG_MOVE(a), flow, wc);
            break;

        case OFPACT_REG_LOAD:
            nxm_execute_reg_load(ofpact_get_REG_LOAD(a), flow);
            break;

        case OFPACT_STACK_PUSH:
            nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), flow, wc,
                                   &ctx->stack);
            break;

        case OFPACT_STACK_POP:
            nxm_execute_stack_pop(ofpact_get_STACK_POP(a), flow, &ctx->stack);
            break;

        case OFPACT_PUSH_MPLS:
            compose_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a)->ethertype);
            break;

        case OFPACT_POP_MPLS:
            compose_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
            break;

        case OFPACT_SET_MPLS_TTL:
            if (compose_set_mpls_ttl_action(ctx,
                                            ofpact_get_SET_MPLS_TTL(a)->ttl)) {
                goto out;
            }
            break;

        case OFPACT_DEC_MPLS_TTL:
            if (compose_dec_mpls_ttl_action(ctx)) {
                goto out;
            }
            break;

        case OFPACT_DEC_TTL:
            if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
                goto out;
            }
            break;

        case OFPACT_NOTE:
            /* Nothing to do. */
            break;

        case OFPACT_MULTIPATH:
            multipath_execute(ofpact_get_MULTIPATH(a), flow, wc);
            break;

        case OFPACT_BUNDLE:
            xlate_bundle_action(ctx, ofpact_get_BUNDLE(a));
            break;

        case OFPACT_OUTPUT_REG:
            xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a));
            break;

        case OFPACT_LEARN:
            xlate_learn_action(ctx, ofpact_get_LEARN(a));
            break;

        case OFPACT_EXIT:
            ctx->exit = true;
            break;

        case OFPACT_FIN_TIMEOUT:
            memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
            ctx->xout->has_fin_timeout = true;
            xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
            break;

        case OFPACT_CLEAR_ACTIONS:
            /* XXX
             * Nothing to do because write-actions is not supported for now.
             * When write-actions is supported, clear-actions also must
             * be supported at the same time.
             */
            break;

        case OFPACT_WRITE_METADATA:
            metadata = ofpact_get_WRITE_METADATA(a);
            flow->metadata &= ~metadata->mask;
            flow->metadata |= metadata->metadata & metadata->mask;
            break;

        case OFPACT_METER:
            /* Not implemented yet. */
            break;

        case OFPACT_GOTO_TABLE: {
            /* It is assumed that goto-table is the last action. */
            struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);

            ovs_assert(ctx->table_id < ogt->table_id);
            xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
                               ogt->table_id, true);
            break;
        }

        case OFPACT_SAMPLE:
            xlate_sample_action(ctx, ofpact_get_SAMPLE(a));
            break;
        }
    }

out:
    if (ctx->rule) {
        ctx->rule->up.evictable = was_evictable;
    }
}

void
xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
              const struct flow *flow, struct rule_dpif *rule,
              uint8_t tcp_flags, const struct ofpbuf *packet)
{
    xin->ofproto = ofproto;
    xin->flow = *flow;
    xin->packet = packet;
    xin->may_learn = packet != NULL;
    xin->rule = rule;
    xin->ofpacts = NULL;
    xin->ofpacts_len = 0;
    xin->tcp_flags = tcp_flags;
    xin->resubmit_hook = NULL;
    xin->report_hook = NULL;
    xin->resubmit_stats = NULL;
}
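
/* Illustrative sketch (annotation, not part of the upstream source), using
 * only names defined in this file: the typical xlate_in/xlate_out lifecycle.
 *
 *     struct xlate_in xin;
 *     struct xlate_out xout;
 *
 *     xlate_in_init(&xin, ofproto, &flow, rule, tcp_flags, packet);
 *     xlate_actions(&xin, &xout);
 *     ...consume xout.odp_actions, xout.wc, xout.slow...
 *     xlate_out_uninit(&xout);
 */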

void
xlate_out_uninit(struct xlate_out *xout)
{
    if (xout) {
        ofpbuf_uninit(&xout->odp_actions);
    }
}

/* Translates the flow and actions in 'xin' into datapath actions, then
 * discards those actions, retaining only their side effects (such as
 * learning and NetFlow bookkeeping). */
void
xlate_actions_for_side_effects(struct xlate_in *xin)
{
    struct xlate_out xout;

    xlate_actions(xin, &xout);
    xlate_out_uninit(&xout);
}

static void
xlate_report(struct xlate_ctx *ctx, const char *s)
{
    if (ctx->xin->report_hook) {
        ctx->xin->report_hook(ctx->xin, s, ctx->recurse);
    }
}

void
xlate_out_copy(struct xlate_out *dst, const struct xlate_out *src)
{
    dst->wc = src->wc;
    dst->tags = src->tags;
    dst->slow = src->slow;
    dst->has_learn = src->has_learn;
    dst->has_normal = src->has_normal;
    dst->has_fin_timeout = src->has_fin_timeout;
    dst->nf_output_iface = src->nf_output_iface;
    dst->mirrors = src->mirrors;

    ofpbuf_use_stub(&dst->odp_actions, dst->odp_actions_stub,
                    sizeof dst->odp_actions_stub);
    ofpbuf_put(&dst->odp_actions, src->odp_actions.data,
               src->odp_actions.size);
}
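
/* Aside (annotation, not part of the upstream source): the actions buffer
 * is rebuilt with ofpbuf_use_stub() + ofpbuf_put() rather than copied by
 * plain struct assignment because 'src->odp_actions' may point into 'src's
 * own stub storage; a shallow copy would leave 'dst' aliasing memory it
 * does not own and that may disappear along with 'src'. */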
\f
static bool
actions_output_to_local_port(const struct xlate_ctx *ctx)
{
    odp_port_t local_odp_port = ofp_port_to_odp_port(ctx->xbridge, OFPP_LOCAL);
    const struct nlattr *a;
    unsigned int left;

    NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->xout->odp_actions.data,
                             ctx->xout->odp_actions.size) {
        if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT
            && nl_attr_get_odp_port(a) == local_odp_port) {
            return true;
        }
    }
    return false;
}

/* Translates the flow and actions in 'xin' into datapath actions in
 * 'xout->odp_actions', filling in the rest of 'xout' (wildcard masks,
 * slow-path reason, and so on) along the way. */
void
xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
{
    /* Normally false.  Set to true if we ever hit MAX_RESUBMIT_RECURSION, so
     * that in the future we always keep a copy of the original flow for
     * tracing purposes. */
    static bool hit_resubmit_limit;

    struct flow_wildcards *wc = &xout->wc;
    struct flow *flow = &xin->flow;

    enum slow_path_reason special;
    const struct ofpact *ofpacts;
    struct xport *in_port;
    struct flow orig_flow;
    struct xlate_ctx ctx;
    size_t ofpacts_len;

    COVERAGE_INC(xlate_actions);

    /* Flow initialization rules:
     * - 'base_flow' must match the kernel's view of the packet at the
     *   time that action processing starts.  'flow' represents any
     *   transformations we wish to make through actions.
     * - By default 'base_flow' and 'flow' are the same since the input
     *   packet matches the output before any actions are applied.
     * - When using VLAN splinters, 'base_flow''s VLAN is set to the value
     *   of the received packet as seen by the kernel.  If we later output
     *   to another device without any modifications this will cause us to
     *   insert a new tag since the original one was stripped off by the
     *   VLAN device.
     * - Tunnel metadata as received is retained in 'flow'.  This allows
     *   tunnel metadata matching also in later tables.
     *   Since a kernel action for setting the tunnel metadata will only be
     *   generated with actual tunnel output, changing the tunnel metadata
     *   values in 'flow' (such as tun_id) will only have effect with a later
     *   tunnel output action.
     * - Tunnel 'base_flow' is completely cleared since that is what the
     *   kernel does.  If we wish to maintain the original values an action
     *   needs to be generated. */

    ctx.xin = xin;
    ctx.xout = xout;
    ctx.xout->tags = 0;
    ctx.xout->slow = 0;
    ctx.xout->has_learn = false;
    ctx.xout->has_normal = false;
    ctx.xout->has_fin_timeout = false;
    ctx.xout->nf_output_iface = NF_OUT_DROP;
    ctx.xout->mirrors = 0;
    ofpbuf_use_stub(&ctx.xout->odp_actions, ctx.xout->odp_actions_stub,
                    sizeof ctx.xout->odp_actions_stub);
    ofpbuf_reserve(&ctx.xout->odp_actions, NL_A_U32_SIZE);

    ctx.xbridge = xbridge_lookup(xin->ofproto);
    if (!ctx.xbridge) {
        return;
    }

    ctx.rule = xin->rule;

    ctx.base_flow = *flow;
    memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
    ctx.orig_tunnel_ip_dst = flow->tunnel.ip_dst;

    flow_wildcards_init_catchall(wc);
    memset(&wc->masks.in_port, 0xff, sizeof wc->masks.in_port);
    memset(&wc->masks.skb_priority, 0xff, sizeof wc->masks.skb_priority);
    memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
    wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;

    if (tnl_port_should_receive(&ctx.xin->flow)) {
        memset(&wc->masks.tunnel, 0xff, sizeof wc->masks.tunnel);
    }
    if (ctx.xbridge->has_netflow) {
        netflow_mask_wc(flow, wc);
    }

    ctx.recurse = 0;
    ctx.max_resubmit_trigger = false;
    ctx.orig_skb_priority = flow->skb_priority;
    ctx.table_id = 0;
    ctx.exit = false;

    if (xin->ofpacts) {
        ofpacts = xin->ofpacts;
        ofpacts_len = xin->ofpacts_len;
    } else if (xin->rule) {
        ofpacts = xin->rule->up.ofpacts;
        ofpacts_len = xin->rule->up.ofpacts_len;
    } else {
        NOT_REACHED();
    }

    ofpbuf_use_stub(&ctx.stack, ctx.init_stack, sizeof ctx.init_stack);

    if (mbridge_has_mirrors(ctx.xbridge->mbridge) || hit_resubmit_limit) {
        /* Do this conditionally because the copy is expensive enough that it
         * shows up in profiles. */
        orig_flow = *flow;
    }

    if (flow->nw_frag & FLOW_NW_FRAG_ANY) {
        switch (ctx.xbridge->frag) {
        case OFPC_FRAG_NORMAL:
            /* We must pretend that transport ports are unavailable. */
            flow->tp_src = ctx.base_flow.tp_src = htons(0);
            flow->tp_dst = ctx.base_flow.tp_dst = htons(0);
            break;

        case OFPC_FRAG_DROP:
            return;

        case OFPC_FRAG_REASM:
            NOT_REACHED();

        case OFPC_FRAG_NX_MATCH:
            /* Nothing to do. */
            break;

        case OFPC_INVALID_TTL_TO_CONTROLLER:
            NOT_REACHED();
        }
    }

    in_port = get_ofp_port(ctx.xbridge, flow->in_port.ofp_port);
    special = process_special(&ctx, flow, in_port, ctx.xin->packet);
    if (special) {
        ctx.xout->slow = special;
    } else {
        static struct vlog_rate_limit trace_rl = VLOG_RATE_LIMIT_INIT(1, 1);
        size_t sample_actions_len;

        if (flow->in_port.ofp_port
            != vsp_realdev_to_vlandev(ctx.xbridge->ofproto,
                                      flow->in_port.ofp_port,
                                      flow->vlan_tci)) {
            ctx.base_flow.vlan_tci = 0;
        }

        add_sflow_action(&ctx);
        add_ipfix_action(&ctx);
        sample_actions_len = ctx.xout->odp_actions.size;

        if (tunnel_ecn_ok(&ctx) && (!in_port || may_receive(in_port, &ctx))) {
            do_xlate_actions(ofpacts, ofpacts_len, &ctx);

            /* We've let OFPP_NORMAL and the learning action look at the
             * packet, so drop it now if forwarding is disabled. */
            if (in_port && !stp_forward_in_state(in_port->stp_state)) {
                ctx.xout->odp_actions.size = sample_actions_len;
            }
        }

        if (ctx.max_resubmit_trigger && !ctx.xin->resubmit_hook) {
            if (!hit_resubmit_limit) {
                /* We didn't record the original flow.  Make sure we do from
                 * now on. */
                hit_resubmit_limit = true;
            } else if (!VLOG_DROP_ERR(&trace_rl)) {
                struct ds ds = DS_EMPTY_INITIALIZER;

                ofproto_trace(ctx.xbridge->ofproto, &orig_flow,
                              ctx.xin->packet, &ds);
                VLOG_ERR("Trace triggered by excessive resubmit "
                         "recursion:\n%s", ds_cstr(&ds));
                ds_destroy(&ds);
            }
        }

        if (ctx.xbridge->has_in_band
            && in_band_must_output_to_local_port(flow)
            && !actions_output_to_local_port(&ctx)) {
            compose_output_action(&ctx, OFPP_LOCAL);
        }

        fix_sflow_action(&ctx);

        if (mbridge_has_mirrors(ctx.xbridge->mbridge)) {
            add_mirror_actions(&ctx, &orig_flow);
        }
    }

    ofpbuf_uninit(&ctx.stack);

    /* Clear the metadata and register wildcard masks, because we won't
     * use non-header fields as part of the cache. */
    memset(&wc->masks.metadata, 0, sizeof wc->masks.metadata);
    memset(&wc->masks.regs, 0, sizeof wc->masks.regs);
}