/* Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */

#include <config.h>

#include "ofproto/ofproto-dpif-xlate.h"

#include "bitmap.h"
#include "bond.h"
#include "bundle.h"
#include "byte-order.h"
#include "connmgr.h"
#include "coverage.h"
#include "dpif.h"
#include "dynamic-string.h"
#include "learn.h"
#include "mac-learning.h"
#include "meta-flow.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "netlink.h"
#include "nx-match.h"
#include "odp-execute.h"
#include "ofp-actions.h"
#include "ofproto/ofproto-dpif-ipfix.h"
#include "ofproto/ofproto-dpif-sflow.h"
#include "ofproto/ofproto-dpif.h"
#include "tunnel.h"
#include "vlog.h"

COVERAGE_DEFINE(ofproto_dpif_xlate);

VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);

/* Maximum depth of flow table recursion (due to resubmit actions) in a
 * flow translation. */
#define MAX_RESUBMIT_RECURSION 64

struct xlate_ctx {
    struct xlate_in *xin;
    struct xlate_out *xout;

    struct ofproto_dpif *ofproto;

    /* Flow at the last commit. */
    struct flow base_flow;

    /* Tunnel IP destination address as received.  This is stored separately
     * as the base_flow.tunnel is cleared on init to reflect the datapath
     * behavior.  Used to make sure not to send tunneled output to ourselves,
     * which might lead to an infinite loop.  This could happen easily
     * if a tunnel is marked as 'ip_remote=flow', and the flow does not
     * actually set the tun_dst field. */
    ovs_be32 orig_tunnel_ip_dst;

    /* Stack for the push and pop actions.  Each stack element is of type
     * "union mf_subvalue". */
    union mf_subvalue init_stack[1024 / sizeof(union mf_subvalue)];
    struct ofpbuf stack;

    /* The rule that we are currently translating, or NULL. */
    struct rule_dpif *rule;

    int recurse;                /* Recursion level, via xlate_table_action. */
    bool max_resubmit_trigger;  /* Recursed too deeply during translation. */
    uint32_t orig_skb_priority; /* Priority when packet arrived. */
    uint8_t table_id;           /* OpenFlow table ID where flow was found. */
    uint32_t sflow_n_outputs;   /* Number of output ports. */
    uint32_t sflow_odp_port;    /* Output port for composing sFlow action. */
    uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
    bool exit;                  /* No further actions should be processed. */
};

/* A controller may use OFPP_NONE as the ingress port to indicate that
 * it did not arrive on a "real" port.  'ofpp_none_bundle' exists for
 * when an input bundle is needed for validation (e.g., mirroring or
 * OFPP_NORMAL processing).  It is not connected to an 'ofproto', nor does
 * it have any 'port' structs, so care must be taken when dealing with it. */
static struct ofbundle ofpp_none_bundle = {
    .name = "OFPP_NONE",
    .vlan_mode = PORT_VLAN_TRUNK
};

static bool may_receive(const struct ofport_dpif *, struct xlate_ctx *);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
                             struct xlate_ctx *);
static void xlate_normal(struct xlate_ctx *);
static void xlate_report(struct xlate_ctx *, const char *);
static void xlate_table_action(struct xlate_ctx *, uint16_t in_port,
                               uint8_t table_id, bool may_packet_in);
static bool input_vid_is_valid(uint16_t vid, struct ofbundle *, bool warn);
static uint16_t input_vid_to_vlan(const struct ofbundle *, uint16_t vid);
static void output_normal(struct xlate_ctx *, const struct ofbundle *,
                          uint16_t vlan);
static void compose_output_action(struct xlate_ctx *, uint16_t ofp_port);

static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

static bool
ofbundle_trunks_vlan(const struct ofbundle *bundle, uint16_t vlan)
{
    return (bundle->vlan_mode != PORT_VLAN_ACCESS
            && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
}

static bool
ofbundle_includes_vlan(const struct ofbundle *bundle, uint16_t vlan)
{
    return vlan == bundle->vlan || ofbundle_trunks_vlan(bundle, vlan);
}

static bool
vlan_is_mirrored(const struct ofmirror *m, int vlan)
{
    return !m->vlans || bitmap_is_set(m->vlans, vlan);
}

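/* Looks up 'in_port' in 'ofproto' and returns the bundle it belongs to.
 * Returns the special 'ofpp_none_bundle' for OFPP_NONE, or NULL (after a
 * rate-limited warning, if 'warn' is true) when the port is unknown or not
 * configured as part of a bundle.  If 'in_ofportp' is nonnull, also stores
 * the looked-up ofport (possibly NULL) into '*in_ofportp'. */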
static struct ofbundle *
lookup_input_bundle(const struct ofproto_dpif *ofproto, uint16_t in_port,
                    bool warn, struct ofport_dpif **in_ofportp)
{
    struct ofport_dpif *ofport;

    /* Find the port and bundle for the received packet. */
    ofport = get_ofp_port(ofproto, in_port);
    if (in_ofportp) {
        *in_ofportp = ofport;
    }
    if (ofport && ofport->bundle) {
        return ofport->bundle;
    }

    /* Special-case OFPP_NONE, which a controller may use as the ingress
     * port for traffic that it is sourcing. */
    if (in_port == OFPP_NONE) {
        return &ofpp_none_bundle;
    }

    /* Odd.  A few possible reasons here:
     *
     * - We deleted a port but there are still a few packets queued up
     *   from it.
     *
     * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
     *   we don't know about.
     *
     * - The ofproto client didn't configure the port as part of a bundle.
     *   This is particularly likely to happen if a packet was received on
     *   the port after it was created, but before the client had a chance
     *   to configure its bundle.
     */
    if (warn) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
                     "port %"PRIu16, ofproto->up.name, in_port);
    }
    return NULL;
}

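/* Appends output actions for every mirror that should see the packet
 * described by 'orig_flow': the source mirrors of the input bundle plus the
 * destination mirrors of every bundle already selected for output, subject
 * to each mirror's VLAN restrictions and with duplicates suppressed via
 * 'dup_mirrors'. */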
static void
add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow)
{
    struct ofproto_dpif *ofproto = ctx->ofproto;
    mirror_mask_t mirrors;
    struct ofbundle *in_bundle;
    uint16_t vlan;
    uint16_t vid;
    const struct nlattr *a;
    size_t left;

    in_bundle = lookup_input_bundle(ctx->ofproto, orig_flow->in_port,
                                    ctx->xin->packet != NULL, NULL);
    if (!in_bundle) {
        return;
    }
    mirrors = in_bundle->src_mirrors;

    /* Drop frames on bundles reserved for mirroring. */
    if (in_bundle->mirror_out) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ctx->ofproto->up.name, in_bundle->name);
        }
        return;
    }

    /* Check VLAN. */
    vid = vlan_tci_to_vid(orig_flow->vlan_tci);
    if (!input_vid_is_valid(vid, in_bundle, ctx->xin->packet != NULL)) {
        return;
    }
    vlan = input_vid_to_vlan(in_bundle, vid);

    /* Look at the output ports to check for destination selections. */

    NL_ATTR_FOR_EACH (a, left, ctx->xout->odp_actions.data,
                      ctx->xout->odp_actions.size) {
        enum ovs_action_attr type = nl_attr_type(a);
        struct ofport_dpif *ofport;

        if (type != OVS_ACTION_ATTR_OUTPUT) {
            continue;
        }

        ofport = get_odp_port(ofproto, nl_attr_get_u32(a));
        if (ofport && ofport->bundle) {
            mirrors |= ofport->bundle->dst_mirrors;
        }
    }

    if (!mirrors) {
        return;
    }

    /* Restore the original packet before adding the mirror actions. */
    ctx->xin->flow = *orig_flow;

    while (mirrors) {
        struct ofmirror *m;

        m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];

        if (m->vlans) {
            ctx->xout->wc.masks.vlan_tci |= htons(VLAN_CFI | VLAN_VID_MASK);
        }

        if (!vlan_is_mirrored(m, vlan)) {
            mirrors = zero_rightmost_1bit(mirrors);
            continue;
        }

        mirrors &= ~m->dup_mirrors;
        ctx->xout->mirrors |= m->dup_mirrors;
        if (m->out) {
            output_normal(ctx, m->out, vlan);
        } else if (vlan != m->out_vlan
                   && !eth_addr_is_reserved(orig_flow->dl_dst)) {
            struct ofbundle *bundle;

            HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
                if (ofbundle_includes_vlan(bundle, m->out_vlan)
                    && !bundle->mirror_out) {
                    output_normal(ctx, bundle, m->out_vlan);
                }
            }
        }
    }
}

/* Given 'vid', the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), and 'in_bundle',
 * the bundle on which the packet was received, returns the VLAN to which the
 * packet belongs.
 *
 * Both 'vid' and the return value are in the range 0...4095. */
static uint16_t
input_vid_to_vlan(const struct ofbundle *in_bundle, uint16_t vid)
{
    switch (in_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return in_bundle->vlan;

    case PORT_VLAN_TRUNK:
        return vid;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        return vid ? vid : in_bundle->vlan;

    default:
        NOT_REACHED();
    }
}

/* Checks whether a packet with the given 'vid' may ingress on 'in_bundle'.
 * If so, returns true.  Otherwise, returns false and, if 'warn' is true, logs
 * a warning.
 *
 * 'vid' should be the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), in the range
 * 0...4095. */
static bool
input_vid_is_valid(uint16_t vid, struct ofbundle *in_bundle, bool warn)
{
    /* Allow any VID on the OFPP_NONE port. */
    if (in_bundle == &ofpp_none_bundle) {
        return true;
    }

    switch (in_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        if (vid) {
            if (warn) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" tagged "
                             "packet received on port %s configured as VLAN "
                             "%"PRIu16" access port",
                             in_bundle->ofproto->up.name, vid,
                             in_bundle->name, in_bundle->vlan);
            }
            return false;
        }
        return true;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        if (!vid) {
            /* Port must always carry its native VLAN. */
            return true;
        }
        /* Fall through. */
    case PORT_VLAN_TRUNK:
        if (!ofbundle_includes_vlan(in_bundle, vid)) {
            if (warn) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" packet "
                             "received on port %s not configured for trunking "
                             "VLAN %"PRIu16,
                             in_bundle->ofproto->up.name, vid,
                             in_bundle->name, vid);
            }
            return false;
        }
        return true;

    default:
        NOT_REACHED();
    }
}

/* Given 'vlan', the VLAN that a packet belongs to, and
 * 'out_bundle', a bundle on which the packet is to be output, returns the VID
 * that should be included in the 802.1Q header.  (If the return value is 0,
 * then the 802.1Q header should only be included in the packet if there is a
 * nonzero PCP.)
 *
 * Both 'vlan' and the return value are in the range 0...4095. */
static uint16_t
output_vlan_to_vid(const struct ofbundle *out_bundle, uint16_t vlan)
{
    switch (out_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return 0;

    case PORT_VLAN_TRUNK:
    case PORT_VLAN_NATIVE_TAGGED:
        return vlan;

    case PORT_VLAN_NATIVE_UNTAGGED:
        return vlan == out_bundle->vlan ? 0 : vlan;

    default:
        NOT_REACHED();
    }
}

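/* Outputs a packet on VLAN 'vlan' to 'out_bundle': chooses the output port
 * (consulting the bond module if the bundle is bonded), temporarily rewrites
 * the 802.1Q tag to the VID chosen by output_vlan_to_vid(), composes the
 * output action, and finally restores the original VLAN TCI. */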
static void
output_normal(struct xlate_ctx *ctx, const struct ofbundle *out_bundle,
              uint16_t vlan)
{
    ovs_be16 *flow_tci = &ctx->xin->flow.vlan_tci;
    struct ofport_dpif *port;
    uint16_t vid;
    ovs_be16 tci, old_tci;

    vid = output_vlan_to_vid(out_bundle, vlan);
    if (!out_bundle->bond) {
        port = ofbundle_get_a_port(out_bundle);
    } else {
        port = bond_choose_output_slave(out_bundle->bond, &ctx->xin->flow,
                                        &ctx->xout->wc, vid, &ctx->xout->tags);
        if (!port) {
            /* No slaves enabled, so drop packet. */
            return;
        }
    }

    old_tci = *flow_tci;
    tci = htons(vid);
    if (tci || out_bundle->use_priority_tags) {
        tci |= *flow_tci & htons(VLAN_PCP_MASK);
        if (tci) {
            tci |= htons(VLAN_CFI);
        }
    }
    *flow_tci = tci;

    compose_output_action(ctx, port->up.ofp_port);
    *flow_tci = old_tci;
}

/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
 * migration.  Older Citrix-patched Linux DomU used gratuitous ARP replies to
 * indicate this; newer upstream kernels use gratuitous ARP requests. */
static bool
is_gratuitous_arp(const struct flow *flow, struct flow_wildcards *wc)
{
    if (flow->dl_type != htons(ETH_TYPE_ARP)) {
        return false;
    }

    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
    if (!eth_addr_is_broadcast(flow->dl_dst)) {
        return false;
    }

    memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
    if (flow->nw_proto == ARP_OP_REPLY) {
        return true;
    } else if (flow->nw_proto == ARP_OP_REQUEST) {
        memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
        memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);

        return flow->nw_src == flow->nw_dst;
    } else {
        return false;
    }
}

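/* Updates 'ofproto''s MAC learning table with the source MAC of 'flow', as
 * seen on 'in_bundle' in 'vlan'.  Gratuitous ARPs get special treatment (the
 * "grat ARP lock") so that copies reflected back over bond slaves do not
 * make the learned port flap. */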
static void
update_learning_table(struct ofproto_dpif *ofproto,
                      const struct flow *flow, struct flow_wildcards *wc,
                      int vlan, struct ofbundle *in_bundle)
{
    struct mac_entry *mac;

    /* Don't learn the OFPP_NONE port. */
    if (in_bundle == &ofpp_none_bundle) {
        return;
    }

    if (!mac_learning_may_learn(ofproto->ml, flow->dl_src, vlan)) {
        return;
    }

    mac = mac_learning_insert(ofproto->ml, flow->dl_src, vlan);
    if (is_gratuitous_arp(flow, wc)) {
        /* We don't want to learn from gratuitous ARP packets that are
         * reflected back over bond slaves so we lock the learning table. */
        if (!in_bundle->bond) {
            mac_entry_set_grat_arp_lock(mac);
        } else if (mac_entry_is_grat_arp_locked(mac)) {
            return;
        }
    }

    if (mac_entry_is_new(mac) || mac->port.p != in_bundle) {
        /* The log messages here could actually be useful in debugging,
         * so keep the rate limit relatively high. */
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
        VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
                    "on port %s in VLAN %d",
                    ofproto->up.name, ETH_ADDR_ARGS(flow->dl_src),
                    in_bundle->name, vlan);

        mac->port.p = in_bundle;
        mac_learning_changed(ofproto->ml, mac);
    }
}

/* Determines whether packets in 'flow' within 'ofproto' should be forwarded or
 * dropped.  Returns true if they may be forwarded, false if they should be
 * dropped.
 *
 * 'in_port' must be the ofport_dpif that corresponds to flow->in_port.
 * 'in_port' must be part of a bundle (e.g. in_port->bundle must be nonnull).
 *
 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
 * returned by input_vid_to_vlan().  It must be a valid VLAN for 'in_port', as
 * checked by input_vid_is_valid().
 *
 * May also add tags to 'ctx->xout->tags', although the current implementation
 * only does so in one special case.
 */
static bool
is_admissible(struct xlate_ctx *ctx, struct ofport_dpif *in_port,
              uint16_t vlan)
{
    struct ofproto_dpif *ofproto = ctx->ofproto;
    struct flow *flow = &ctx->xin->flow;
    struct ofbundle *in_bundle = in_port->bundle;

    /* Drop frames for reserved multicast addresses
     * only if forward_bpdu option is absent. */
    if (!ofproto->up.forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
        xlate_report(ctx, "packet has reserved destination MAC, dropping");
        return false;
    }

    if (in_bundle->bond) {
        struct mac_entry *mac;

        switch (bond_check_admissibility(in_bundle->bond, in_port,
                                         flow->dl_dst, &ctx->xout->tags)) {
        case BV_ACCEPT:
            break;

        case BV_DROP:
            xlate_report(ctx, "bonding refused admissibility, dropping");
            return false;

        case BV_DROP_IF_MOVED:
            mac = mac_learning_lookup(ofproto->ml, flow->dl_src, vlan, NULL);
            if (mac && mac->port.p != in_bundle &&
                (!is_gratuitous_arp(flow, &ctx->xout->wc)
                 || mac_entry_is_grat_arp_locked(mac))) {
                xlate_report(ctx, "SLB bond thinks this packet looped back, "
                             "dropping");
                return false;
            }
            break;
        }
    }

    return true;
}

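/* Implements the OFPP_NORMAL action: validates the input bundle and VLAN,
 * optionally learns the source MAC, then outputs to the port learned for the
 * destination MAC, or floods within the VLAN when the destination is
 * unknown. */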
static void
xlate_normal(struct xlate_ctx *ctx)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;
    struct ofport_dpif *in_port;
    struct ofbundle *in_bundle;
    struct mac_entry *mac;
    uint16_t vlan;
    uint16_t vid;

    ctx->xout->has_normal = true;

    /* Check the dl_type, since we may check for gratuitous ARP. */
    memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);

    memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
    memset(&wc->masks.vlan_tci, 0xff, sizeof wc->masks.vlan_tci);

    in_bundle = lookup_input_bundle(ctx->ofproto, flow->in_port,
                                    ctx->xin->packet != NULL, &in_port);
    if (!in_bundle) {
        xlate_report(ctx, "no input bundle, dropping");
        return;
    }

    /* Drop malformed frames. */
    if (flow->dl_type == htons(ETH_TYPE_VLAN) &&
        !(flow->vlan_tci & htons(VLAN_CFI))) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet with partial "
                         "VLAN tag received on port %s",
                         ctx->ofproto->up.name, in_bundle->name);
        }
        xlate_report(ctx, "partial VLAN tag, dropping");
        return;
    }

    /* Drop frames on bundles reserved for mirroring. */
    if (in_bundle->mirror_out) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ctx->ofproto->up.name, in_bundle->name);
        }
        xlate_report(ctx, "input port is mirror output port, dropping");
        return;
    }

    /* Check VLAN. */
    vid = vlan_tci_to_vid(flow->vlan_tci);
    if (!input_vid_is_valid(vid, in_bundle, ctx->xin->packet != NULL)) {
        xlate_report(ctx, "disallowed VLAN VID for this input port, dropping");
        return;
    }
    vlan = input_vid_to_vlan(in_bundle, vid);

    /* Check other admissibility requirements. */
    if (in_port && !is_admissible(ctx, in_port, vlan)) {
        return;
    }

    /* Learn source MAC. */
    if (ctx->xin->may_learn) {
        update_learning_table(ctx->ofproto, flow, wc, vlan, in_bundle);
    }

    /* Determine output bundle. */
    mac = mac_learning_lookup(ctx->ofproto->ml, flow->dl_dst, vlan,
                              &ctx->xout->tags);
    if (mac) {
        if (mac->port.p != in_bundle) {
            xlate_report(ctx, "forwarding to learned port");
            output_normal(ctx, mac->port.p, vlan);
        } else {
            xlate_report(ctx, "learned port is input port, dropping");
        }
    } else {
        struct ofbundle *bundle;

        xlate_report(ctx, "no learned MAC for destination, flooding");
        HMAP_FOR_EACH (bundle, hmap_node, &ctx->ofproto->bundles) {
            if (bundle != in_bundle
                && ofbundle_includes_vlan(bundle, vlan)
                && bundle->floodable
                && !bundle->mirror_out) {
                output_normal(ctx, bundle, vlan);
            }
        }
        ctx->xout->nf_output_iface = NF_OUT_FLOOD;
    }
}

/* Compose SAMPLE action for sFlow or IPFIX.  The given probability is
 * the number of packets out of UINT32_MAX to sample.  The given
 * cookie is passed back in the callback for each sampled packet.
 */
static size_t
compose_sample_action(const struct ofproto_dpif *ofproto,
                      struct ofpbuf *odp_actions,
                      const struct flow *flow,
                      const uint32_t probability,
                      const union user_action_cookie *cookie,
                      const size_t cookie_size)
{
    size_t sample_offset, actions_offset;
    int cookie_offset;

    sample_offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SAMPLE);

    nl_msg_put_u32(odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);

    actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);
    cookie_offset = put_userspace_action(ofproto, odp_actions, flow, cookie,
                                         cookie_size);

    nl_msg_end_nested(odp_actions, actions_offset);
    nl_msg_end_nested(odp_actions, sample_offset);
    return cookie_offset;
}

static void
compose_sflow_cookie(const struct ofproto_dpif *ofproto,
                     ovs_be16 vlan_tci, uint32_t odp_port,
                     unsigned int n_outputs, union user_action_cookie *cookie)
{
    int ifindex;

    cookie->type = USER_ACTION_COOKIE_SFLOW;
    cookie->sflow.vlan_tci = vlan_tci;

    /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
     * port information") for the interpretation of cookie->output. */
    switch (n_outputs) {
    case 0:
        /* 0x40000000 | 256 means "packet dropped for unknown reason". */
        cookie->sflow.output = 0x40000000 | 256;
        break;

    case 1:
        ifindex = dpif_sflow_odp_port_to_ifindex(ofproto->sflow, odp_port);
        if (ifindex) {
            cookie->sflow.output = ifindex;
            break;
        }
        /* Fall through. */
    default:
        /* 0x80000000 means "multiple output ports". */
        cookie->sflow.output = 0x80000000 | n_outputs;
        break;
    }
}

/* Compose SAMPLE action for sFlow bridge sampling. */
static size_t
compose_sflow_action(const struct ofproto_dpif *ofproto,
                     struct ofpbuf *odp_actions,
                     const struct flow *flow,
                     uint32_t odp_port)
{
    uint32_t probability;
    union user_action_cookie cookie;

    if (!ofproto->sflow || flow->in_port == OFPP_NONE) {
        return 0;
    }

    probability = dpif_sflow_get_probability(ofproto->sflow);
    compose_sflow_cookie(ofproto, htons(0), odp_port,
                         odp_port == OVSP_NONE ? 0 : 1, &cookie);

    return compose_sample_action(ofproto, odp_actions, flow, probability,
                                 &cookie, sizeof cookie.sflow);
}

static void
compose_flow_sample_cookie(uint16_t probability, uint32_t collector_set_id,
                           uint32_t obs_domain_id, uint32_t obs_point_id,
                           union user_action_cookie *cookie)
{
    cookie->type = USER_ACTION_COOKIE_FLOW_SAMPLE;
    cookie->flow_sample.probability = probability;
    cookie->flow_sample.collector_set_id = collector_set_id;
    cookie->flow_sample.obs_domain_id = obs_domain_id;
    cookie->flow_sample.obs_point_id = obs_point_id;
}

static void
compose_ipfix_cookie(union user_action_cookie *cookie)
{
    cookie->type = USER_ACTION_COOKIE_IPFIX;
}

/* Compose SAMPLE action for IPFIX bridge sampling. */
static void
compose_ipfix_action(const struct ofproto_dpif *ofproto,
                     struct ofpbuf *odp_actions,
                     const struct flow *flow)
{
    uint32_t probability;
    union user_action_cookie cookie;

    if (!ofproto->ipfix || flow->in_port == OFPP_NONE) {
        return;
    }

    probability = dpif_ipfix_get_bridge_exporter_probability(ofproto->ipfix);
    compose_ipfix_cookie(&cookie);

    compose_sample_action(ofproto, odp_actions, flow, probability,
                          &cookie, sizeof cookie.ipfix);
}

/* SAMPLE action for sFlow must be the first action in any given list of
 * actions.  At this point we do not have all the information required to
 * build it, so we build the sample action as completely as possible. */
static void
add_sflow_action(struct xlate_ctx *ctx)
{
    ctx->user_cookie_offset = compose_sflow_action(ctx->ofproto,
                                                   &ctx->xout->odp_actions,
                                                   &ctx->xin->flow, OVSP_NONE);
    ctx->sflow_odp_port = 0;
    ctx->sflow_n_outputs = 0;
}

/* SAMPLE action for IPFIX must be 1st or 2nd action in any given list
 * of actions, possibly after the SAMPLE action for sFlow. */
static void
add_ipfix_action(struct xlate_ctx *ctx)
{
    compose_ipfix_action(ctx->ofproto, &ctx->xout->odp_actions,
                         &ctx->xin->flow);
}

/* Fix SAMPLE action according to data collected while composing ODP actions.
 * We need to fix the SAMPLE action's OVS_SAMPLE_ATTR_ACTIONS attribute, i.e.
 * the nested USERSPACE action's user-cookie, which is required for sFlow. */
static void
fix_sflow_action(struct xlate_ctx *ctx)
{
    const struct flow *base = &ctx->base_flow;
    union user_action_cookie *cookie;

    if (!ctx->user_cookie_offset) {
        return;
    }

    cookie = ofpbuf_at(&ctx->xout->odp_actions, ctx->user_cookie_offset,
                       sizeof cookie->sflow);
    ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);

    compose_sflow_cookie(ctx->ofproto, base->vlan_tci,
                         ctx->sflow_odp_port, ctx->sflow_n_outputs, cookie);
}

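/* Composes the datapath actions for outputting to OpenFlow port 'ofp_port',
 * handling the special cases: STP state (if 'check_stp'), patch ports (which
 * recursively translate in the peer bridge), QoS-based DSCP rewriting,
 * tunnel ports, and VLAN splinter devices. */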
static void
compose_output_action__(struct xlate_ctx *ctx, uint16_t ofp_port,
                        bool check_stp)
{
    const struct ofport_dpif *ofport = get_ofp_port(ctx->ofproto, ofp_port);
    struct flow *flow = &ctx->xin->flow;
    ovs_be16 flow_vlan_tci;
    uint32_t flow_skb_mark;
    uint8_t flow_nw_tos;
    uint32_t out_port, odp_port;
    uint8_t dscp;

    /* If 'struct flow' gets additional metadata, we'll need to zero it out
     * before traversing a patch port. */
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 20);

    if (!ofport) {
        xlate_report(ctx, "Nonexistent output port");
        return;
    } else if (ofport->up.pp.config & OFPUTIL_PC_NO_FWD) {
        xlate_report(ctx, "OFPPC_NO_FWD set, skipping output");
        return;
    } else if (check_stp && !stp_forward_in_state(ofport->stp_state)) {
        xlate_report(ctx, "STP not in forwarding state, skipping output");
        return;
    }

    if (netdev_vport_is_patch(ofport->up.netdev)) {
        struct ofport_dpif *peer = ofport_get_peer(ofport);
        struct flow old_flow = ctx->xin->flow;
        enum slow_path_reason special;

        if (!peer) {
            xlate_report(ctx, "Nonexistent patch port peer");
            return;
        }

        ctx->ofproto = ofproto_dpif_cast(peer->up.ofproto);
        flow->in_port = peer->up.ofp_port;
        flow->metadata = htonll(0);
        memset(&flow->tunnel, 0, sizeof flow->tunnel);
        memset(flow->regs, 0, sizeof flow->regs);

        special = process_special(ctx->ofproto, &ctx->xin->flow, peer,
                                  ctx->xin->packet);
        if (special) {
            ctx->xout->slow = special;
        } else if (may_receive(peer, ctx)) {
            if (stp_forward_in_state(peer->stp_state)) {
                xlate_table_action(ctx, flow->in_port, 0, true);
            } else {
                /* Forwarding is disabled by STP.  Let OFPP_NORMAL and the
                 * learning action look at the packet, then drop it. */
                struct flow old_base_flow = ctx->base_flow;
                size_t old_size = ctx->xout->odp_actions.size;
                xlate_table_action(ctx, flow->in_port, 0, true);
                ctx->base_flow = old_base_flow;
                ctx->xout->odp_actions.size = old_size;
            }
        }

        ctx->xin->flow = old_flow;
        ctx->ofproto = ofproto_dpif_cast(ofport->up.ofproto);

        if (ctx->xin->resubmit_stats) {
            netdev_vport_inc_tx(ofport->up.netdev, ctx->xin->resubmit_stats);
            netdev_vport_inc_rx(peer->up.netdev, ctx->xin->resubmit_stats);
        }

        return;
    }

    flow_vlan_tci = flow->vlan_tci;
    flow_skb_mark = flow->skb_mark;
    flow_nw_tos = flow->nw_tos;

    if (ofproto_dpif_dscp_from_priority(ofport, flow->skb_priority, &dscp)) {
        flow->nw_tos &= ~IP_DSCP_MASK;
        flow->nw_tos |= dscp;
    }

    if (ofport->tnl_port) {
        /* Save tunnel metadata so that changes made due to
         * the Logical (tunnel) Port are not visible for any further
         * matches, while explicit set actions on tunnel metadata are.
         */
        struct flow_tnl flow_tnl = flow->tunnel;
        odp_port = tnl_port_send(ofport->tnl_port, flow, &ctx->xout->wc);
        if (odp_port == OVSP_NONE) {
            xlate_report(ctx, "Tunneling decided against output");
            goto out; /* restore flow_nw_tos */
        }
        if (flow->tunnel.ip_dst == ctx->orig_tunnel_ip_dst) {
            xlate_report(ctx, "Not tunneling to our own address");
            goto out; /* restore flow_nw_tos */
        }
        if (ctx->xin->resubmit_stats) {
            netdev_vport_inc_tx(ofport->up.netdev, ctx->xin->resubmit_stats);
        }
        out_port = odp_port;
        commit_odp_tunnel_action(flow, &ctx->base_flow,
                                 &ctx->xout->odp_actions);
        flow->tunnel = flow_tnl; /* Restore tunnel metadata */
    } else {
        uint16_t vlandev_port;
        odp_port = ofport->odp_port;
        vlandev_port = vsp_realdev_to_vlandev(ctx->ofproto, ofp_port,
                                              flow->vlan_tci);
        if (vlandev_port == ofp_port) {
            out_port = odp_port;
        } else {
            out_port = ofp_port_to_odp_port(ctx->ofproto, vlandev_port);
            flow->vlan_tci = htons(0);
        }
        flow->skb_mark &= ~IPSEC_MARK;
    }
    commit_odp_actions(flow, &ctx->base_flow, &ctx->xout->odp_actions);
    nl_msg_put_u32(&ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT, out_port);

    ctx->sflow_odp_port = odp_port;
    ctx->sflow_n_outputs++;
    ctx->xout->nf_output_iface = ofp_port;

    /* Restore flow */
    flow->vlan_tci = flow_vlan_tci;
    flow->skb_mark = flow_skb_mark;
 out:
    flow->nw_tos = flow_nw_tos;
}

static void
compose_output_action(struct xlate_ctx *ctx, uint16_t ofp_port)
{
    compose_output_action__(ctx, ofp_port, true);
}

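/* Adds the tag for 'rule' (or, for a miss, the tag computed from the flow)
 * to the translation's tag set when the current table is tagged, so that
 * cached flows can be revalidated when the table changes. */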
static void
tag_the_flow(struct xlate_ctx *ctx, struct rule_dpif *rule)
{
    struct ofproto_dpif *ofproto = ctx->ofproto;
    uint8_t table_id = ctx->table_id;

    if (table_id > 0 && table_id < N_TABLES) {
        struct table_dpif *table = &ofproto->tables[table_id];
        if (table->other_table) {
            ctx->xout->tags |= (rule && rule->tag
                                ? rule->tag
                                : rule_calculate_tag(&ctx->xin->flow,
                                                     &table->other_table->mask,
                                                     table->basis));
        }
    }
}

/* Common rule processing in one place to avoid duplicating code. */
static struct rule_dpif *
ctx_rule_hooks(struct xlate_ctx *ctx, struct rule_dpif *rule,
               bool may_packet_in)
{
    if (ctx->xin->resubmit_hook) {
        ctx->xin->resubmit_hook(ctx->xin, rule, ctx->recurse);
    }
    if (rule == NULL && may_packet_in) {
        /* XXX: We should honor the table configuration flags here:
         * OFPTC_TABLE_MISS_CONTROLLER (the default),
         * OFPTC_TABLE_MISS_CONTINUE, and OFPTC_TABLE_MISS_DROP.
         * OpenFlow 1.0 behaves as OFPTC_TABLE_MISS_CONTINUE, so what to do
         * in that case remains an open question. */
        rule = rule_dpif_miss_rule(ctx->ofproto, &ctx->xin->flow);
    }
    if (rule && ctx->xin->resubmit_stats) {
        rule_credit_stats(rule, ctx->xin->resubmit_stats);
    }
    return rule;
}

static void
xlate_table_action(struct xlate_ctx *ctx,
                   uint16_t in_port, uint8_t table_id, bool may_packet_in)
{
    if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
        struct rule_dpif *rule;
        uint16_t old_in_port = ctx->xin->flow.in_port;
        uint8_t old_table_id = ctx->table_id;

        ctx->table_id = table_id;

        /* Look up a flow with 'in_port' as the input port. */
        ctx->xin->flow.in_port = in_port;
        rule = rule_dpif_lookup_in_table(ctx->ofproto, &ctx->xin->flow,
                                         &ctx->xout->wc, table_id);

        tag_the_flow(ctx, rule);

        /* Restore the original input port.  Otherwise OFPP_NORMAL and
         * OFPP_IN_PORT will have surprising behavior. */
        ctx->xin->flow.in_port = old_in_port;

        rule = ctx_rule_hooks(ctx, rule, may_packet_in);

        if (rule) {
            struct rule_dpif *old_rule = ctx->rule;

            ctx->recurse++;
            ctx->rule = rule;
            do_xlate_actions(rule->up.ofpacts, rule->up.ofpacts_len, ctx);
            ctx->rule = old_rule;
            ctx->recurse--;
        }

        ctx->table_id = old_table_id;
    } else {
        static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1);

        VLOG_ERR_RL(&recurse_rl, "resubmit actions recursed over %d times",
                    MAX_RESUBMIT_RECURSION);
        ctx->max_resubmit_trigger = true;
    }
}

static void
xlate_ofpact_resubmit(struct xlate_ctx *ctx,
                      const struct ofpact_resubmit *resubmit)
{
    uint16_t in_port;
    uint8_t table_id;

    in_port = resubmit->in_port;
    if (in_port == OFPP_IN_PORT) {
        in_port = ctx->xin->flow.in_port;
    }

    table_id = resubmit->table_id;
    if (table_id == 255) {
        table_id = ctx->table_id;
    }

    xlate_table_action(ctx, in_port, table_id, false);
}

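/* Outputs to every port on the bridge other than the ingress port: every
 * port when 'all' is true (OFPP_ALL), otherwise skipping ports that have
 * OFPPC_NO_FLOOD set (OFPP_FLOOD). */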
static void
flood_packets(struct xlate_ctx *ctx, bool all)
{
    struct ofport_dpif *ofport;

    HMAP_FOR_EACH (ofport, up.hmap_node, &ctx->ofproto->up.ports) {
        uint16_t ofp_port = ofport->up.ofp_port;

        if (ofp_port == ctx->xin->flow.in_port) {
            continue;
        }

        if (all) {
            compose_output_action__(ctx, ofp_port, false);
        } else if (!(ofport->up.pp.config & OFPUTIL_PC_NO_FLOOD)) {
            compose_output_action(ctx, ofp_port);
        }
    }

    ctx->xout->nf_output_iface = NF_OUT_FLOOD;
}

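/* Sends at most 'len' bytes of the packet to the controller as a packet-in
 * with the given 'reason': marks the translation as slow-pathed, then clones
 * the packet and applies the datapath actions composed so far, so that the
 * controller sees the packet as it would have appeared on the wire. */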
static void
execute_controller_action(struct xlate_ctx *ctx, int len,
                          enum ofp_packet_in_reason reason,
                          uint16_t controller_id)
{
    struct ofputil_packet_in pin;
    struct ofpbuf *packet;
    struct flow key;

    ovs_assert(!ctx->xout->slow || ctx->xout->slow == SLOW_CONTROLLER);
    ctx->xout->slow = SLOW_CONTROLLER;
    if (!ctx->xin->packet) {
        return;
    }

    packet = ofpbuf_clone(ctx->xin->packet);

    key.skb_priority = 0;
    key.skb_mark = 0;
    memset(&key.tunnel, 0, sizeof key.tunnel);

    commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
                       &ctx->xout->odp_actions);

    odp_execute_actions(NULL, packet, &key, ctx->xout->odp_actions.data,
                        ctx->xout->odp_actions.size, NULL, NULL);

    pin.packet = packet->data;
    pin.packet_len = packet->size;
    pin.reason = reason;
    pin.controller_id = controller_id;
    pin.table_id = ctx->table_id;
    pin.cookie = ctx->rule ? ctx->rule->up.flow_cookie : 0;

    pin.send_len = len;
    flow_get_metadata(&ctx->xin->flow, &pin.fmd);

    connmgr_send_packet_in(ctx->ofproto->up.connmgr, &pin);
    ofpbuf_delete(packet);
}

static void
compose_mpls_push_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;

    ovs_assert(eth_type_mpls(eth_type));

    memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
    memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
    memset(&wc->masks.mpls_depth, 0xff, sizeof wc->masks.mpls_depth);

    if (flow->mpls_depth) {
        flow->mpls_lse &= ~htonl(MPLS_BOS_MASK);
        flow->mpls_depth++;
    } else {
        ovs_be32 label;
        uint8_t tc, ttl;

        if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
            label = htonl(0x2); /* IPv6 Explicit Null. */
        } else {
            label = htonl(0x0); /* IPv4 Explicit Null. */
        }
        tc = (flow->nw_tos & IP_DSCP_MASK) >> 2;
        ttl = flow->nw_ttl ? flow->nw_ttl : 0x40;
        flow->mpls_lse = set_mpls_lse_values(ttl, tc, 1, label);
        flow->mpls_depth = 1;
    }
    flow->dl_type = eth_type;
}

static void
compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;

    ovs_assert(eth_type_mpls(ctx->xin->flow.dl_type));
    ovs_assert(!eth_type_mpls(eth_type));

    memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
    memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
    memset(&wc->masks.mpls_depth, 0xff, sizeof wc->masks.mpls_depth);

    if (flow->mpls_depth) {
        flow->mpls_depth--;
        flow->mpls_lse = htonl(0);
        if (!flow->mpls_depth) {
            flow->dl_type = eth_type;
        }
    }
}

static bool
compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
{
    struct flow *flow = &ctx->xin->flow;

    if (!is_ip_any(flow)) {
        return false;
    }

    if (flow->nw_ttl > 1) {
        flow->nw_ttl--;
        return false;
    } else {
        size_t i;

        for (i = 0; i < ids->n_controllers; i++) {
            execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
                                      ids->cnt_ids[i]);
        }

        /* Stop processing for current table. */
        return true;
    }
}

static bool
compose_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
{
    if (!eth_type_mpls(ctx->xin->flow.dl_type)) {
        return true;
    }

    set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse, ttl);
    return false;
}

static bool
compose_dec_mpls_ttl_action(struct xlate_ctx *ctx)
{
    struct flow *flow = &ctx->xin->flow;
    uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse);

    if (!eth_type_mpls(flow->dl_type)) {
        return false;
    }

    if (ttl > 1) {
        ttl--;
        set_mpls_lse_ttl(&flow->mpls_lse, ttl);
        return false;
    } else {
        execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0);

        /* Stop processing for current table. */
        return true;
    }
}

static void
xlate_output_action(struct xlate_ctx *ctx,
                    uint16_t port, uint16_t max_len, bool may_packet_in)
{
    uint16_t prev_nf_output_iface = ctx->xout->nf_output_iface;

    ctx->xout->nf_output_iface = NF_OUT_DROP;

    switch (port) {
    case OFPP_IN_PORT:
        compose_output_action(ctx, ctx->xin->flow.in_port);
        break;
    case OFPP_TABLE:
        xlate_table_action(ctx, ctx->xin->flow.in_port, 0, may_packet_in);
        break;
    case OFPP_NORMAL:
        xlate_normal(ctx);
        break;
    case OFPP_FLOOD:
        flood_packets(ctx, false);
        break;
    case OFPP_ALL:
        flood_packets(ctx, true);
        break;
    case OFPP_CONTROLLER:
        execute_controller_action(ctx, max_len, OFPR_ACTION, 0);
        break;
    case OFPP_NONE:
        break;
    case OFPP_LOCAL:
    default:
        if (port != ctx->xin->flow.in_port) {
            compose_output_action(ctx, port);
        } else {
            xlate_report(ctx, "skipping output to input port");
        }
        break;
    }

    if (prev_nf_output_iface == NF_OUT_FLOOD) {
        ctx->xout->nf_output_iface = NF_OUT_FLOOD;
    } else if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
        ctx->xout->nf_output_iface = prev_nf_output_iface;
    } else if (prev_nf_output_iface != NF_OUT_DROP &&
               ctx->xout->nf_output_iface != NF_OUT_FLOOD) {
        ctx->xout->nf_output_iface = NF_OUT_MULTI;
    }
}

static void
xlate_output_reg_action(struct xlate_ctx *ctx,
                        const struct ofpact_output_reg *or)
{
    uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow);
    if (port <= UINT16_MAX) {
        union mf_subvalue value;

        memset(&value, 0xff, sizeof value);
        mf_write_subfield_flow(&or->src, &value, &ctx->xout->wc.masks);
        xlate_output_action(ctx, port, or->max_len, false);
    }
}

static void
xlate_enqueue_action(struct xlate_ctx *ctx,
                     const struct ofpact_enqueue *enqueue)
{
    uint16_t ofp_port = enqueue->port;
    uint32_t queue_id = enqueue->queue;
    uint32_t flow_priority, priority;
    int error;

    /* Translate queue to priority. */
    error = ofproto_dpif_queue_to_priority(ctx->ofproto, queue_id, &priority);
    if (error) {
        /* Fall back to ordinary output action. */
        xlate_output_action(ctx, enqueue->port, 0, false);
        return;
    }

    /* Check output port. */
    if (ofp_port == OFPP_IN_PORT) {
        ofp_port = ctx->xin->flow.in_port;
    } else if (ofp_port == ctx->xin->flow.in_port) {
        return;
    }

    /* Add datapath actions. */
    flow_priority = ctx->xin->flow.skb_priority;
    ctx->xin->flow.skb_priority = priority;
    compose_output_action(ctx, ofp_port);
    ctx->xin->flow.skb_priority = flow_priority;

    /* Update NetFlow output port. */
    if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
        ctx->xout->nf_output_iface = ofp_port;
    } else if (ctx->xout->nf_output_iface != NF_OUT_FLOOD) {
        ctx->xout->nf_output_iface = NF_OUT_MULTI;
    }
}

static void
xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id)
{
    uint32_t skb_priority;

    if (!ofproto_dpif_queue_to_priority(ctx->ofproto, queue_id,
                                        &skb_priority)) {
        ctx->xin->flow.skb_priority = skb_priority;
    } else {
        /* Couldn't translate queue to a priority.  Nothing to do.  A warning
         * has already been logged. */
    }
}

static bool
slave_enabled_cb(uint16_t ofp_port, void *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_;
    struct ofport_dpif *port;

    switch (ofp_port) {
    case OFPP_IN_PORT:
    case OFPP_TABLE:
    case OFPP_NORMAL:
    case OFPP_FLOOD:
    case OFPP_ALL:
    case OFPP_NONE:
        return true;
    case OFPP_CONTROLLER: /* Not supported by the bundle action. */
        return false;
    default:
        port = get_ofp_port(ofproto, ofp_port);
        return port ? port->may_enable : false;
    }
}

static void
xlate_bundle_action(struct xlate_ctx *ctx,
                    const struct ofpact_bundle *bundle)
{
    uint16_t port;

    port = bundle_execute(bundle, &ctx->xin->flow, &ctx->xout->wc,
                          slave_enabled_cb, ctx->ofproto);
    if (bundle->dst.field) {
        nxm_reg_load(&bundle->dst, port, &ctx->xin->flow);
    } else {
        xlate_output_action(ctx, port, 0, false);
    }
}

static void
xlate_learn_action(struct xlate_ctx *ctx,
                   const struct ofpact_learn *learn)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
    struct ofputil_flow_mod fm;
    uint64_t ofpacts_stub[1024 / 8];
    struct ofpbuf ofpacts;
    int error;

    ctx->xout->has_learn = true;

    learn_mask(learn, &ctx->xout->wc);

    if (!ctx->xin->may_learn) {
        return;
    }

    ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
    learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);

    error = ofproto_flow_mod(&ctx->ofproto->up, &fm);
    if (error && !VLOG_DROP_WARN(&rl)) {
        VLOG_WARN("learning action failed to modify flow table (%s)",
                  ofperr_get_name(error));
    }

    ofpbuf_uninit(&ofpacts);
}

/* Reduces '*timeout' to no more than 'max'.  A value of zero in either case
 * means "infinite". */
static void
reduce_timeout(uint16_t max, uint16_t *timeout)
{
    if (max && (!*timeout || *timeout > max)) {
        *timeout = max;
    }
}

static void
xlate_fin_timeout(struct xlate_ctx *ctx,
                  const struct ofpact_fin_timeout *oft)
{
    if (ctx->xin->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
        struct rule_dpif *rule = ctx->rule;

        reduce_timeout(oft->fin_idle_timeout, &rule->up.idle_timeout);
        reduce_timeout(oft->fin_hard_timeout, &rule->up.hard_timeout);
    }
}

static void
xlate_sample_action(struct xlate_ctx *ctx,
                    const struct ofpact_sample *os)
{
    union user_action_cookie cookie;
    /* Scale the probability from 16-bit to 32-bit while representing
     * the same percentage. */
    uint32_t probability = (os->probability << 16) | os->probability;
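    /* E.g. 0x8000 becomes 0x80008000.  The scaling is exact, since
     * (p << 16) | p == p * 65537 and UINT32_MAX == UINT16_MAX * 65537. */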

    commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
                       &ctx->xout->odp_actions);

    compose_flow_sample_cookie(os->probability, os->collector_set_id,
                               os->obs_domain_id, os->obs_point_id, &cookie);
    compose_sample_action(ctx->ofproto, &ctx->xout->odp_actions,
                          &ctx->xin->flow, probability, &cookie,
                          sizeof cookie.flow_sample);
}

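/* Returns false if a packet arriving on 'port' should be dropped without
 * further processing, either because the port has OFPPC_NO_RECV (or
 * OFPPC_NO_RECV_STP, for STP multicasts) set or because STP has it in a
 * state that allows neither forwarding nor learning. */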
static bool
may_receive(const struct ofport_dpif *port, struct xlate_ctx *ctx)
{
    if (port->up.pp.config & (eth_addr_equals(ctx->xin->flow.dl_dst,
                                              eth_addr_stp)
                              ? OFPUTIL_PC_NO_RECV_STP
                              : OFPUTIL_PC_NO_RECV)) {
        return false;
    }

    /* Only drop packets here if both forwarding and learning are
     * disabled.  If just learning is enabled, we need to have
     * OFPP_NORMAL and the learning action have a look at the packet
     * before we can drop it. */
    if (!stp_forward_in_state(port->stp_state)
        && !stp_learn_in_state(port->stp_state)) {
        return false;
    }

    return true;
}

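/* Applies RFC 6040-style ECN handling at tunnel decapsulation: if the outer
 * header carried a Congestion Experienced mark, a non-ECT inner packet must
 * be dropped; otherwise the CE mark is copied into the inner header. */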
static bool
tunnel_ecn_ok(struct xlate_ctx *ctx)
{
    if (is_ip_any(&ctx->base_flow)
        && (ctx->xin->flow.tunnel.ip_tos & IP_ECN_MASK) == IP_ECN_CE) {
        if ((ctx->base_flow.nw_tos & IP_ECN_MASK) == IP_ECN_NOT_ECT) {
            VLOG_WARN_RL(&rl, "dropping tunnel packet marked ECN CE"
                         " but the inner packet is not ECN capable");
            return false;
        } else {
            /* Set the ECN CE value in the tunneled packet. */
            ctx->xin->flow.nw_tos |= IP_ECN_CE;
        }
    }

    return true;
}

static void
do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
                 struct xlate_ctx *ctx)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;
    bool was_evictable = true;
    const struct ofpact *a;

    if (ctx->rule) {
        /* Don't let the rule we're working on get evicted underneath us. */
        was_evictable = ctx->rule->up.evictable;
        ctx->rule->up.evictable = false;
    }

 do_xlate_actions_again:
    OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
        struct ofpact_controller *controller;
        const struct ofpact_metadata *metadata;

        if (ctx->exit) {
            break;
        }

        switch (a->type) {
        case OFPACT_OUTPUT:
            xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
                                ofpact_get_OUTPUT(a)->max_len, true);
            break;

        case OFPACT_CONTROLLER:
            controller = ofpact_get_CONTROLLER(a);
            execute_controller_action(ctx, controller->max_len,
                                      controller->reason,
                                      controller->controller_id);
            break;

        case OFPACT_ENQUEUE:
            xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a));
            break;

        case OFPACT_SET_VLAN_VID:
            flow->vlan_tci &= ~htons(VLAN_VID_MASK);
            flow->vlan_tci |= (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid)
                               | htons(VLAN_CFI));
            break;

        case OFPACT_SET_VLAN_PCP:
            flow->vlan_tci &= ~htons(VLAN_PCP_MASK);
            flow->vlan_tci |=
                htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp << VLAN_PCP_SHIFT)
                      | VLAN_CFI);
            break;

        case OFPACT_STRIP_VLAN:
            flow->vlan_tci = htons(0);
            break;

        case OFPACT_PUSH_VLAN:
            /* XXX 802.1AD(QinQ) */
            flow->vlan_tci = htons(VLAN_CFI);
            break;

        case OFPACT_SET_ETH_SRC:
            memcpy(flow->dl_src, ofpact_get_SET_ETH_SRC(a)->mac, ETH_ADDR_LEN);
            break;

        case OFPACT_SET_ETH_DST:
            memcpy(flow->dl_dst, ofpact_get_SET_ETH_DST(a)->mac, ETH_ADDR_LEN);
            break;

        case OFPACT_SET_IPV4_SRC:
            memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
            if (flow->dl_type == htons(ETH_TYPE_IP)) {
                flow->nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
            }
            break;

        case OFPACT_SET_IPV4_DST:
            memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
            if (flow->dl_type == htons(ETH_TYPE_IP)) {
                flow->nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
            }
            break;

        case OFPACT_SET_IPV4_DSCP:
            /* OpenFlow 1.0 only supports IPv4. */
            memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
            if (flow->dl_type == htons(ETH_TYPE_IP)) {
                flow->nw_tos &= ~IP_DSCP_MASK;
                flow->nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp;
            }
            break;

        case OFPACT_SET_L4_SRC_PORT:
            memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
            memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
            if (is_ip_any(flow)) {
                flow->tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
            }
            break;

        case OFPACT_SET_L4_DST_PORT:
            memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
            memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
            if (is_ip_any(flow)) {
                flow->tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
            }
            break;

        case OFPACT_RESUBMIT:
            xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a));
            break;

        case OFPACT_SET_TUNNEL:
            flow->tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
            break;

        case OFPACT_SET_QUEUE:
            xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);
            break;

        case OFPACT_POP_QUEUE:
            memset(&wc->masks.skb_priority, 0xff,
                   sizeof wc->masks.skb_priority);

            flow->skb_priority = ctx->orig_skb_priority;
            break;

        case OFPACT_REG_MOVE:
            nxm_execute_reg_move(ofpact_get_REG_MOVE(a), flow, wc);
            break;

        case OFPACT_REG_LOAD:
            nxm_execute_reg_load(ofpact_get_REG_LOAD(a), flow);
            break;

        case OFPACT_STACK_PUSH:
            nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), flow, wc,
                                   &ctx->stack);
            break;

        case OFPACT_STACK_POP:
            nxm_execute_stack_pop(ofpact_get_STACK_POP(a), flow, &ctx->stack);
            break;

        case OFPACT_PUSH_MPLS:
            compose_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a)->ethertype);
            break;

        case OFPACT_POP_MPLS:
            compose_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
            break;

        case OFPACT_SET_MPLS_TTL:
            if (compose_set_mpls_ttl_action(ctx,
                                            ofpact_get_SET_MPLS_TTL(a)->ttl)) {
                goto out;
            }
            break;

        case OFPACT_DEC_MPLS_TTL:
            if (compose_dec_mpls_ttl_action(ctx)) {
                goto out;
            }
            break;

        case OFPACT_DEC_TTL:
            memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
            if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
                goto out;
            }
            break;

        case OFPACT_NOTE:
            /* Nothing to do. */
            break;

        case OFPACT_MULTIPATH:
            multipath_execute(ofpact_get_MULTIPATH(a), flow, wc);
            break;

        case OFPACT_BUNDLE:
            ctx->ofproto->has_bundle_action = true;
            xlate_bundle_action(ctx, ofpact_get_BUNDLE(a));
            break;

        case OFPACT_OUTPUT_REG:
            xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a));
            break;

        case OFPACT_LEARN:
            xlate_learn_action(ctx, ofpact_get_LEARN(a));
            break;

        case OFPACT_EXIT:
            ctx->exit = true;
            break;

        case OFPACT_FIN_TIMEOUT:
            memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
            memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
            ctx->xout->has_fin_timeout = true;
            xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
            break;

        case OFPACT_CLEAR_ACTIONS:
            /* XXX
             * Nothing to do because write-actions is not supported for now.
             * When write-actions is supported, clear-actions must also
             * be supported at the same time.
             */
            break;

        case OFPACT_WRITE_METADATA:
            metadata = ofpact_get_WRITE_METADATA(a);
            flow->metadata &= ~metadata->mask;
            flow->metadata |= metadata->metadata & metadata->mask;
            break;

        case OFPACT_GOTO_TABLE: {
            /* It is assumed that goto-table is the last action. */
            struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);
            struct rule_dpif *rule;

            ovs_assert(ctx->table_id < ogt->table_id);

            ctx->table_id = ogt->table_id;

            /* Look up a flow from the new table. */
            rule = rule_dpif_lookup_in_table(ctx->ofproto, flow, wc,
                                             ctx->table_id);

            tag_the_flow(ctx, rule);

            rule = ctx_rule_hooks(ctx, rule, true);

            if (rule) {
                if (ctx->rule) {
                    ctx->rule->up.evictable = was_evictable;
                }
                ctx->rule = rule;
                was_evictable = rule->up.evictable;
                rule->up.evictable = false;

                /* Tail recursion removal. */
                ofpacts = rule->up.ofpacts;
                ofpacts_len = rule->up.ofpacts_len;
                goto do_xlate_actions_again;
            }
            break;
        }

        case OFPACT_SAMPLE:
            xlate_sample_action(ctx, ofpact_get_SAMPLE(a));
            break;
        }
    }

 out:
    if (ctx->rule) {
        ctx->rule->up.evictable = was_evictable;
    }
}

void
xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
              const struct flow *flow, struct rule_dpif *rule,
              uint8_t tcp_flags, const struct ofpbuf *packet)
{
    xin->ofproto = ofproto;
    xin->flow = *flow;
    xin->packet = packet;
    xin->may_learn = packet != NULL;
    xin->rule = rule;
    xin->ofpacts = NULL;
    xin->ofpacts_len = 0;
    xin->tcp_flags = tcp_flags;
    xin->resubmit_hook = NULL;
    xin->report_hook = NULL;
    xin->resubmit_stats = NULL;
}

void
xlate_out_uninit(struct xlate_out *xout)
{
    if (xout) {
        ofpbuf_uninit(&xout->odp_actions);
    }
}

/* Translates the actions in 'xin' into datapath actions and then discards
 * them, retaining only the translation's side effects. */
void
xlate_actions_for_side_effects(struct xlate_in *xin)
{
    struct xlate_out xout;

    xlate_actions(xin, &xout);
    xlate_out_uninit(&xout);
}

static void
xlate_report(struct xlate_ctx *ctx, const char *s)
{
    if (ctx->xin->report_hook) {
        ctx->xin->report_hook(ctx->xin, s, ctx->recurse);
    }
}

void
xlate_out_copy(struct xlate_out *dst, const struct xlate_out *src)
{
    dst->wc = src->wc;
    dst->tags = src->tags;
    dst->slow = src->slow;
    dst->has_learn = src->has_learn;
    dst->has_normal = src->has_normal;
    dst->has_fin_timeout = src->has_fin_timeout;
    dst->nf_output_iface = src->nf_output_iface;
    dst->mirrors = src->mirrors;

    ofpbuf_use_stub(&dst->odp_actions, dst->odp_actions_stub,
                    sizeof dst->odp_actions_stub);
    ofpbuf_put(&dst->odp_actions, src->odp_actions.data,
               src->odp_actions.size);
}

/* Translates the flow and actions in 'xin' into datapath actions in
 * 'xout->odp_actions'. */
void
xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
{
    /* Normally false.  Set to true if we ever hit MAX_RESUBMIT_RECURSION, so
     * that in the future we always keep a copy of the original flow for
     * tracing purposes. */
    static bool hit_resubmit_limit;

    struct flow_wildcards *wc = &xout->wc;
    struct flow *flow = &xin->flow;

    enum slow_path_reason special;
    const struct ofpact *ofpacts;
    struct ofport_dpif *in_port;
    struct flow orig_flow;
    struct xlate_ctx ctx;
    size_t ofpacts_len;

    COVERAGE_INC(ofproto_dpif_xlate);

    /* Flow initialization rules:
     * - 'base_flow' must match the kernel's view of the packet at the
     *   time that action processing starts.  'flow' represents any
     *   transformations we wish to make through actions.
     * - By default 'base_flow' and 'flow' are the same since the input
     *   packet matches the output before any actions are applied.
     * - When using VLAN splinters, 'base_flow''s VLAN is set to the value
     *   of the received packet as seen by the kernel.  If we later output
     *   to another device without any modifications this will cause us to
     *   insert a new tag since the original one was stripped off by the
     *   VLAN device.
     * - Tunnel metadata as received is retained in 'flow'.  This allows
     *   tunnel metadata matching also in later tables.
     *   Since a kernel action for setting the tunnel metadata will only be
     *   generated with actual tunnel output, changing the tunnel metadata
     *   values in 'flow' (such as tun_id) will only have effect with a later
     *   tunnel output action.
     * - Tunnel 'base_flow' is completely cleared since that is what the
     *   kernel does.  If we wish to maintain the original values an action
     *   needs to be generated. */

    ctx.xin = xin;
    ctx.xout = xout;

    ctx.ofproto = xin->ofproto;
    ctx.rule = xin->rule;

    ctx.base_flow = *flow;
    memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
    ctx.orig_tunnel_ip_dst = flow->tunnel.ip_dst;

    flow_wildcards_init_catchall(wc);
    memset(&wc->masks.in_port, 0xff, sizeof wc->masks.in_port);

    if (tnl_port_should_receive(&ctx.xin->flow)) {
        memset(&wc->masks.tunnel, 0xff, sizeof wc->masks.tunnel);
    }

    /* Disable most wildcarding for NetFlow. */
    if (xin->ofproto->netflow) {
        memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
        memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
        memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
        memset(&wc->masks.vlan_tci, 0xff, sizeof wc->masks.vlan_tci);
        memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
        memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
        memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
        memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
        memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
    }

    ctx.xout->tags = 0;
    ctx.xout->slow = 0;
    ctx.xout->has_learn = false;
    ctx.xout->has_normal = false;
    ctx.xout->has_fin_timeout = false;
    ctx.xout->nf_output_iface = NF_OUT_DROP;
    ctx.xout->mirrors = 0;

    ofpbuf_use_stub(&ctx.xout->odp_actions, ctx.xout->odp_actions_stub,
                    sizeof ctx.xout->odp_actions_stub);
    ofpbuf_reserve(&ctx.xout->odp_actions, NL_A_U32_SIZE);

    ctx.recurse = 0;
    ctx.max_resubmit_trigger = false;
    ctx.orig_skb_priority = flow->skb_priority;
    ctx.table_id = 0;
    ctx.exit = false;

    if (xin->ofpacts) {
        ofpacts = xin->ofpacts;
        ofpacts_len = xin->ofpacts_len;
    } else if (xin->rule) {
        ofpacts = xin->rule->up.ofpacts;
        ofpacts_len = xin->rule->up.ofpacts_len;
    } else {
        NOT_REACHED();
    }

    ofpbuf_use_stub(&ctx.stack, ctx.init_stack, sizeof ctx.init_stack);

    if (ctx.ofproto->has_mirrors || hit_resubmit_limit) {
        /* Do this conditionally because the copy is expensive enough that it
         * shows up in profiles. */
        orig_flow = *flow;
    }

    if (flow->nw_frag & FLOW_NW_FRAG_ANY) {
        switch (ctx.ofproto->up.frag_handling) {
        case OFPC_FRAG_NORMAL:
            /* We must pretend that transport ports are unavailable. */
            flow->tp_src = ctx.base_flow.tp_src = htons(0);
            flow->tp_dst = ctx.base_flow.tp_dst = htons(0);
            break;

        case OFPC_FRAG_DROP:
            return;

        case OFPC_FRAG_REASM:
            NOT_REACHED();

        case OFPC_FRAG_NX_MATCH:
            /* Nothing to do. */
            break;

        case OFPC_INVALID_TTL_TO_CONTROLLER:
            NOT_REACHED();
        }
    }

    in_port = get_ofp_port(ctx.ofproto, flow->in_port);
    special = process_special(ctx.ofproto, flow, in_port, ctx.xin->packet);
    if (special) {
        ctx.xout->slow = special;
    } else {
        static struct vlog_rate_limit trace_rl = VLOG_RATE_LIMIT_INIT(1, 1);
        size_t sample_actions_len;
        uint32_t local_odp_port;

        if (flow->in_port
            != vsp_realdev_to_vlandev(ctx.ofproto, flow->in_port,
                                      flow->vlan_tci)) {
            ctx.base_flow.vlan_tci = 0;
        }

        add_sflow_action(&ctx);
        add_ipfix_action(&ctx);
        sample_actions_len = ctx.xout->odp_actions.size;

        if (tunnel_ecn_ok(&ctx) && (!in_port || may_receive(in_port, &ctx))) {
            do_xlate_actions(ofpacts, ofpacts_len, &ctx);

            /* We've let OFPP_NORMAL and the learning action look at the
             * packet, so drop it now if forwarding is disabled. */
            if (in_port && !stp_forward_in_state(in_port->stp_state)) {
                ctx.xout->odp_actions.size = sample_actions_len;
            }
        }

        if (ctx.max_resubmit_trigger && !ctx.xin->resubmit_hook) {
            if (!hit_resubmit_limit) {
                /* We didn't record the original flow.  Make sure we do from
                 * now on. */
                hit_resubmit_limit = true;
            } else if (!VLOG_DROP_ERR(&trace_rl)) {
                struct ds ds = DS_EMPTY_INITIALIZER;

                ofproto_trace(ctx.ofproto, &orig_flow, ctx.xin->packet, &ds);
                VLOG_ERR("Trace triggered by excessive resubmit "
                         "recursion:\n%s", ds_cstr(&ds));
                ds_destroy(&ds);
            }
        }

        local_odp_port = ofp_port_to_odp_port(ctx.ofproto, OFPP_LOCAL);
        if (!connmgr_must_output_local(ctx.ofproto->up.connmgr, flow,
                                       local_odp_port,
                                       ctx.xout->odp_actions.data,
                                       ctx.xout->odp_actions.size)) {
            compose_output_action(&ctx, OFPP_LOCAL);
        }
        if (ctx.ofproto->has_mirrors) {
            add_mirror_actions(&ctx, &orig_flow);
        }
        fix_sflow_action(&ctx);
    }

    ofpbuf_uninit(&ctx.stack);

    /* Clear the metadata and register wildcard masks, because we won't
     * use non-header fields as part of the cache. */
    memset(&wc->masks.metadata, 0, sizeof wc->masks.metadata);
    memset(&wc->masks.regs, 0, sizeof wc->masks.regs);
}