/* Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */

#include <config.h>

#include "ofproto/ofproto-dpif-xlate.h"

#include <errno.h>

#include "bfd.h"
#include "bitmap.h"
#include "bond.h"
#include "bundle.h"
#include "byte-order.h"
#include "cfm.h"
#include "connmgr.h"
#include "coverage.h"
#include "dpif.h"
#include "dynamic-string.h"
#include "in-band.h"
#include "lacp.h"
#include "learn.h"
#include "list.h"
#include "mac-learning.h"
#include "meta-flow.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "netlink.h"
#include "nx-match.h"
#include "odp-execute.h"
#include "ofp-actions.h"
#include "ofproto/ofproto-dpif-ipfix.h"
#include "ofproto/ofproto-dpif-mirror.h"
#include "ofproto/ofproto-dpif-sflow.h"
#include "ofproto/ofproto-dpif.h"
#include "tunnel.h"
#include "vlog.h"

COVERAGE_DEFINE(xlate_actions);

VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);

/* Maximum depth of flow table recursion (due to resubmit actions) in a
 * flow translation. */
#define MAX_RESUBMIT_RECURSION 64

struct ovs_rwlock xlate_rwlock = OVS_RWLOCK_INITIALIZER;

struct xbridge {
    struct hmap_node hmap_node;   /* Node in global 'xbridges' map. */
    struct ofproto_dpif *ofproto; /* Key in global 'xbridges' map. */

    struct list xbundles;         /* Owned xbundles. */
    struct hmap xports;           /* Indexed by ofp_port. */

    char *name;                   /* Name used in log messages. */
    struct dpif *dpif;            /* Datapath interface. */
    struct mac_learning *ml;      /* Mac learning handle. */
    struct mbridge *mbridge;      /* Mirroring. */
    struct dpif_sflow *sflow;     /* SFlow handle, or null. */
    struct dpif_ipfix *ipfix;     /* Ipfix handle, or null. */
    struct stp *stp;              /* STP or null if disabled. */

    /* Special rules installed by ofproto-dpif. */
    struct rule_dpif *miss_rule;
    struct rule_dpif *no_packet_in_rule;

    enum ofp_config_flags frag;   /* Fragmentation handling. */
    bool has_netflow;             /* Bridge runs netflow? */
    bool has_in_band;             /* Bridge has in band control? */
    bool forward_bpdu;            /* Bridge forwards STP BPDUs? */
};

struct xbundle {
    struct hmap_node hmap_node;    /* In global 'xbundles' map. */
    struct ofbundle *ofbundle;     /* Key in global 'xbundles' map. */

    struct list list_node;         /* In parent 'xbridges' list. */
    struct xbridge *xbridge;       /* Parent xbridge. */

    struct list xports;            /* Contains "struct xport"s. */

    char *name;                    /* Name used in log messages. */
    struct bond *bond;             /* Nonnull iff more than one port. */
    struct lacp *lacp;             /* LACP handle or null. */

    enum port_vlan_mode vlan_mode; /* VLAN mode. */
    int vlan;                      /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;         /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                    * NULL if all VLANs are trunked. */
    bool use_priority_tags;        /* Use 802.1p tag for frames in VLAN 0? */
    bool floodable;                /* No port has OFPUTIL_PC_NO_FLOOD set? */
};

struct xport {
    struct hmap_node hmap_node;      /* Node in global 'xports' map. */
    struct ofport_dpif *ofport;      /* Key in global 'xports' map. */

    struct hmap_node ofp_node;       /* Node in parent xbridge 'xports' map. */
    ofp_port_t ofp_port;             /* Key in parent xbridge 'xports' map. */

    odp_port_t odp_port;             /* Datapath port number or ODPP_NONE. */

    struct list bundle_node;         /* In parent xbundle (if it exists). */
    struct xbundle *xbundle;         /* Parent xbundle or null. */

    struct netdev *netdev;           /* 'ofport''s netdev. */

    struct xbridge *xbridge;         /* Parent bridge. */
    struct xport *peer;              /* Patch port peer or null. */

    enum ofputil_port_config config; /* OpenFlow port configuration. */
    int stp_port_no;                 /* STP port number or -1 if not in use. */

    struct hmap skb_priorities;      /* Map of 'skb_priority_to_dscp's. */

    bool may_enable;                 /* May be enabled in bonds. */
    bool is_tunnel;                  /* Is a tunnel port. */

    struct cfm *cfm;                 /* CFM handle or null. */
    struct bfd *bfd;                 /* BFD handle or null. */
};

struct xlate_ctx {
    struct xlate_in *xin;
    struct xlate_out *xout;

    const struct xbridge *xbridge;

    /* Flow at the last commit. */
    struct flow base_flow;

    /* Tunnel IP destination address as received.  This is stored separately
     * as the base_flow.tunnel is cleared on init to reflect the datapath
     * behavior.  Used to make sure not to send tunneled output to ourselves,
     * which might lead to an infinite loop.  This could happen easily
     * if a tunnel is marked as 'ip_remote=flow', and the flow does not
     * actually set the tun_dst field. */
    ovs_be32 orig_tunnel_ip_dst;

    /* Stack for the push and pop actions.  Each stack element is of type
     * "union mf_subvalue". */
    union mf_subvalue init_stack[1024 / sizeof(union mf_subvalue)];
    struct ofpbuf stack;

    /* The rule that we are currently translating, or NULL. */
    struct rule_dpif *rule;

    int recurse;                /* Recursion level, via xlate_table_action. */
    uint32_t orig_skb_priority; /* Priority when packet arrived. */
    uint8_t table_id;           /* OpenFlow table ID where flow was found. */
    uint32_t sflow_n_outputs;   /* Number of output ports. */
    odp_port_t sflow_odp_port;  /* Output port for composing sFlow action. */
    uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
    bool exit;                  /* No further actions should be processed. */
};

/* A controller may use OFPP_NONE as the ingress port to indicate that
 * it did not arrive on a "real" port.  'ofpp_none_bundle' exists for
 * when an input bundle is needed for validation (e.g., mirroring or
 * OFPP_NORMAL processing).  It is not connected to an 'ofproto' and does not
 * have any 'port' structs, so care must be taken when dealing with it.
 * The bundle's name and vlan mode are initialized in lookup_input_bundle(). */
static struct xbundle ofpp_none_bundle;

/* Node in 'xport''s 'skb_priorities' map.  Used to maintain a map from
 * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
 * traffic egressing the 'ofport' with that priority should be marked with. */
struct skb_priority_to_dscp {
    struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'skb_priorities'. */
    uint32_t skb_priority;      /* Priority of this queue (see struct flow). */

    uint8_t dscp;               /* DSCP bits to mark outgoing traffic with. */
};

static struct hmap xbridges = HMAP_INITIALIZER(&xbridges);
static struct hmap xbundles = HMAP_INITIALIZER(&xbundles);
static struct hmap xports = HMAP_INITIALIZER(&xports);

static bool may_receive(const struct xport *, struct xlate_ctx *);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
                             struct xlate_ctx *);
static void xlate_normal(struct xlate_ctx *);
static void xlate_report(struct xlate_ctx *, const char *);
static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
                               uint8_t table_id, bool may_packet_in);
static bool input_vid_is_valid(uint16_t vid, struct xbundle *, bool warn);
static uint16_t input_vid_to_vlan(const struct xbundle *, uint16_t vid);
static void output_normal(struct xlate_ctx *, const struct xbundle *,
                          uint16_t vlan);
static void compose_output_action(struct xlate_ctx *, ofp_port_t ofp_port);

static struct xbridge *xbridge_lookup(const struct ofproto_dpif *);
static struct xbundle *xbundle_lookup(const struct ofbundle *);
static struct xport *xport_lookup(const struct ofport_dpif *);
static struct xport *get_ofp_port(const struct xbridge *, ofp_port_t ofp_port);
static struct skb_priority_to_dscp *get_skb_priority(const struct xport *,
                                                     uint32_t skb_priority);
static void clear_skb_priorities(struct xport *);
static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
                                   uint8_t *dscp);

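/* Updates the translation module's copy of the state of bridge 'ofproto',
 * creating a new 'struct xbridge' for it if one does not already exist.  The
 * caller supplies the current name, datapath, special rules, and feature
 * handles (MAC learning, mirroring, sFlow, IPFIX, STP); references to those
 * handles are taken here and dropped again when they change or when the
 * bridge is removed. */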
void
xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
                  struct dpif *dpif, struct rule_dpif *miss_rule,
                  struct rule_dpif *no_packet_in_rule,
                  const struct mac_learning *ml, struct stp *stp,
                  const struct mbridge *mbridge,
                  const struct dpif_sflow *sflow,
                  const struct dpif_ipfix *ipfix, enum ofp_config_flags frag,
                  bool forward_bpdu, bool has_in_band, bool has_netflow)
{
    struct xbridge *xbridge = xbridge_lookup(ofproto);

    if (!xbridge) {
        xbridge = xzalloc(sizeof *xbridge);
        xbridge->ofproto = ofproto;

        hmap_insert(&xbridges, &xbridge->hmap_node, hash_pointer(ofproto, 0));
        hmap_init(&xbridge->xports);
        list_init(&xbridge->xbundles);
    }

    if (xbridge->ml != ml) {
        mac_learning_unref(xbridge->ml);
        xbridge->ml = mac_learning_ref(ml);
    }

    if (xbridge->mbridge != mbridge) {
        mbridge_unref(xbridge->mbridge);
        xbridge->mbridge = mbridge_ref(mbridge);
    }

    if (xbridge->sflow != sflow) {
        dpif_sflow_unref(xbridge->sflow);
        xbridge->sflow = dpif_sflow_ref(sflow);
    }

    if (xbridge->ipfix != ipfix) {
        dpif_ipfix_unref(xbridge->ipfix);
        xbridge->ipfix = dpif_ipfix_ref(ipfix);
    }

    if (xbridge->stp != stp) {
        stp_unref(xbridge->stp);
        xbridge->stp = stp_ref(stp);
    }

    free(xbridge->name);
    xbridge->name = xstrdup(name);

    xbridge->dpif = dpif;
    xbridge->forward_bpdu = forward_bpdu;
    xbridge->has_in_band = has_in_band;
    xbridge->has_netflow = has_netflow;
    xbridge->frag = frag;
    xbridge->miss_rule = miss_rule;
    xbridge->no_packet_in_rule = no_packet_in_rule;
}

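/* Removes 'ofproto' from the translation module: deletes all of its xports
 * and xbundles, drops the references taken in xlate_ofproto_set(), and frees
 * the xbridge itself.  No-op if 'ofproto' was never registered. */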
void
xlate_remove_ofproto(struct ofproto_dpif *ofproto)
{
    struct xbridge *xbridge = xbridge_lookup(ofproto);
    struct xbundle *xbundle, *next_xbundle;
    struct xport *xport, *next_xport;

    if (!xbridge) {
        return;
    }

    HMAP_FOR_EACH_SAFE (xport, next_xport, ofp_node, &xbridge->xports) {
        xlate_ofport_remove(xport->ofport);
    }

    LIST_FOR_EACH_SAFE (xbundle, next_xbundle, list_node, &xbridge->xbundles) {
        xlate_bundle_remove(xbundle->ofbundle);
    }

    hmap_remove(&xbridges, &xbridge->hmap_node);
    mac_learning_unref(xbridge->ml);
    mbridge_unref(xbridge->mbridge);
    dpif_sflow_unref(xbridge->sflow);
    dpif_ipfix_unref(xbridge->ipfix);
    stp_unref(xbridge->stp);
    hmap_destroy(&xbridge->xports);
    free(xbridge->name);
    free(xbridge);
}

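/* Updates the translation module's copy of bundle 'ofbundle' on bridge
 * 'ofproto', creating a new 'struct xbundle' if necessary, and records the
 * bundle's VLAN configuration, bonding, and LACP state. */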
void
xlate_bundle_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
                 const char *name, enum port_vlan_mode vlan_mode, int vlan,
                 unsigned long *trunks, bool use_priority_tags,
                 const struct bond *bond, const struct lacp *lacp,
                 bool floodable)
{
    struct xbundle *xbundle = xbundle_lookup(ofbundle);

    if (!xbundle) {
        xbundle = xzalloc(sizeof *xbundle);
        xbundle->ofbundle = ofbundle;
        xbundle->xbridge = xbridge_lookup(ofproto);

        hmap_insert(&xbundles, &xbundle->hmap_node, hash_pointer(ofbundle, 0));
        list_insert(&xbundle->xbridge->xbundles, &xbundle->list_node);
        list_init(&xbundle->xports);
    }

    ovs_assert(xbundle->xbridge);

    free(xbundle->name);
    xbundle->name = xstrdup(name);

    xbundle->vlan_mode = vlan_mode;
    xbundle->vlan = vlan;
    xbundle->trunks = trunks;
    xbundle->use_priority_tags = use_priority_tags;
    xbundle->floodable = floodable;

    if (xbundle->bond != bond) {
        bond_unref(xbundle->bond);
        xbundle->bond = bond_ref(bond);
    }

    if (xbundle->lacp != lacp) {
        lacp_unref(xbundle->lacp);
        xbundle->lacp = lacp_ref(lacp);
    }
}

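/* Removes 'ofbundle' from the translation module, detaching any xports that
 * still point to it before the xbundle is freed. */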
void
xlate_bundle_remove(struct ofbundle *ofbundle)
{
    struct xbundle *xbundle = xbundle_lookup(ofbundle);
    struct xport *xport, *next;

    if (!xbundle) {
        return;
    }

    LIST_FOR_EACH_SAFE (xport, next, bundle_node, &xbundle->xports) {
        list_remove(&xport->bundle_node);
        xport->xbundle = NULL;
    }

    hmap_remove(&xbundles, &xbundle->hmap_node);
    list_remove(&xbundle->list_node);
    bond_unref(xbundle->bond);
    lacp_unref(xbundle->lacp);
    free(xbundle->name);
    free(xbundle);
}

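/* Updates the translation module's copy of port 'ofport' in bundle 'ofbundle'
 * on bridge 'ofproto', creating a new 'struct xport' if necessary.  Records
 * the port's datapath port number, netdev, CFM, BFD, patch-port peer, STP
 * port, QoS queue-to-DSCP mappings, and OpenFlow configuration. */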
void
xlate_ofport_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
                 struct ofport_dpif *ofport, ofp_port_t ofp_port,
                 odp_port_t odp_port, const struct netdev *netdev,
                 const struct cfm *cfm, const struct bfd *bfd,
                 struct ofport_dpif *peer, int stp_port_no,
                 const struct ofproto_port_queue *qdscp_list, size_t n_qdscp,
                 enum ofputil_port_config config, bool is_tunnel,
                 bool may_enable)
{
    struct xport *xport = xport_lookup(ofport);
    size_t i;

    if (!xport) {
        xport = xzalloc(sizeof *xport);
        xport->ofport = ofport;
        xport->xbridge = xbridge_lookup(ofproto);
        xport->ofp_port = ofp_port;

        hmap_init(&xport->skb_priorities);
        hmap_insert(&xports, &xport->hmap_node, hash_pointer(ofport, 0));
        hmap_insert(&xport->xbridge->xports, &xport->ofp_node,
                    hash_ofp_port(xport->ofp_port));
    }

    ovs_assert(xport->ofp_port == ofp_port);

    xport->config = config;
    xport->stp_port_no = stp_port_no;
    xport->is_tunnel = is_tunnel;
    xport->may_enable = may_enable;
    xport->odp_port = odp_port;

    if (xport->netdev != netdev) {
        netdev_close(xport->netdev);
        xport->netdev = netdev_ref(netdev);
    }

    if (xport->cfm != cfm) {
        cfm_unref(xport->cfm);
        xport->cfm = cfm_ref(cfm);
    }

    if (xport->bfd != bfd) {
        bfd_unref(xport->bfd);
        xport->bfd = bfd_ref(bfd);
    }

    if (xport->peer) {
        xport->peer->peer = NULL;
    }
    xport->peer = xport_lookup(peer);
    if (xport->peer) {
        xport->peer->peer = xport;
    }

    if (xport->xbundle) {
        list_remove(&xport->bundle_node);
    }
    xport->xbundle = xbundle_lookup(ofbundle);
    if (xport->xbundle) {
        list_insert(&xport->xbundle->xports, &xport->bundle_node);
    }

    clear_skb_priorities(xport);
    for (i = 0; i < n_qdscp; i++) {
        struct skb_priority_to_dscp *pdscp;
        uint32_t skb_priority;

        if (dpif_queue_to_priority(xport->xbridge->dpif, qdscp_list[i].queue,
                                   &skb_priority)) {
            continue;
        }

        pdscp = xmalloc(sizeof *pdscp);
        pdscp->skb_priority = skb_priority;
        pdscp->dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
        hmap_insert(&xport->skb_priorities, &pdscp->hmap_node,
                    hash_int(pdscp->skb_priority, 0));
    }
}

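/* Removes 'ofport' from the translation module, unlinking it from its peer
 * and bundle and releasing the resources held by its xport. */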
void
xlate_ofport_remove(struct ofport_dpif *ofport)
{
    struct xport *xport = xport_lookup(ofport);

    if (!xport) {
        return;
    }

    if (xport->peer) {
        xport->peer->peer = NULL;
        xport->peer = NULL;
    }

    if (xport->xbundle) {
        list_remove(&xport->bundle_node);
    }

    clear_skb_priorities(xport);
    hmap_destroy(&xport->skb_priorities);

    hmap_remove(&xports, &xport->hmap_node);
    hmap_remove(&xport->xbridge->xports, &xport->ofp_node);

    netdev_close(xport->netdev);
    cfm_unref(xport->cfm);
    bfd_unref(xport->bfd);
    free(xport);
}

/* Given a datapath, packet, and flow metadata ('backer', 'packet', and 'key'
 * respectively), populates 'flow' with the result of odp_flow_key_to_flow().
 * Optionally, if nonnull, populates 'fitnessp' with the fitness of 'flow' as
 * returned by odp_flow_key_to_flow().  Also, optionally populates 'ofproto'
 * with the ofproto_dpif, and 'odp_in_port' with the datapath in_port, that
 * 'packet' ingressed.
 *
 * If 'ofproto' is nonnull, requires 'flow''s in_port to exist.  Otherwise sets
 * 'flow''s in_port to OFPP_NONE.
 *
 * This function does post-processing on data returned from
 * odp_flow_key_to_flow() to help make VLAN splinters transparent to the rest
 * of the upcall processing logic.  In particular, if the extracted in_port is
 * a VLAN splinter port, it replaces flow->in_port by the "real" port, sets
 * flow->vlan_tci correctly for the VLAN of the VLAN splinter port, and pushes
 * a VLAN header onto 'packet' (if it is nonnull).
 *
 * Similarly, this function also includes some logic to help with tunnels.  It
 * may modify 'flow' as necessary to make the tunneling implementation
 * transparent to the upcall processing logic.
 *
 * Returns 0 if successful, ENODEV if the parsed flow has no associated ofport,
 * or some other positive errno if there are other problems. */
int
xlate_receive(const struct dpif_backer *backer, struct ofpbuf *packet,
              const struct nlattr *key, size_t key_len,
              struct flow *flow, enum odp_key_fitness *fitnessp,
              struct ofproto_dpif **ofproto, odp_port_t *odp_in_port)
{
    enum odp_key_fitness fitness;
    const struct xport *xport;
    int error = ENODEV;

    ovs_rwlock_rdlock(&xlate_rwlock);
    fitness = odp_flow_key_to_flow(key, key_len, flow);
    if (fitness == ODP_FIT_ERROR) {
        error = EINVAL;
        goto exit;
    }

    if (odp_in_port) {
        *odp_in_port = flow->in_port.odp_port;
    }

    xport = xport_lookup(tnl_port_should_receive(flow)
                         ? tnl_port_receive(flow)
                         : odp_port_to_ofport(backer, flow->in_port.odp_port));

    flow->in_port.ofp_port = xport ? xport->ofp_port : OFPP_NONE;
    if (!xport) {
        goto exit;
    }

    if (vsp_adjust_flow(xport->xbridge->ofproto, flow)) {
        if (packet) {
            /* Make the packet resemble the flow, so that it gets sent to
             * an OpenFlow controller properly, so that it looks correct
             * for sFlow, and so that flow_extract() will get the correct
             * vlan_tci if it is called on 'packet'.
             *
             * The allocated space inside 'packet' probably also contains
             * 'key', that is, both 'packet' and 'key' are probably part of
             * a struct dpif_upcall (see the large comment on that
             * structure definition), so pushing data on 'packet' is in
             * general not a good idea since it could overwrite 'key' or
             * free it as a side effect.  However, it's OK in this special
             * case because we know that 'packet' is inside a Netlink
             * attribute: pushing 4 bytes will just overwrite the 4-byte
             * "struct nlattr", which is fine since we don't need that
             * header anymore. */
            eth_push_vlan(packet, flow->vlan_tci);
        }
        /* We can't reproduce 'key' from 'flow'. */
        fitness = fitness == ODP_FIT_PERFECT ? ODP_FIT_TOO_MUCH : fitness;
    }
    error = 0;

    if (ofproto) {
        *ofproto = xport->xbridge->ofproto;
    }

exit:
    if (fitnessp) {
        *fitnessp = fitness;
    }
    ovs_rwlock_unlock(&xlate_rwlock);
    return error;
}

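/* The three lookup functions below map ofproto-dpif objects to their
 * translation-side counterparts.  Each returns NULL if its argument is NULL
 * or has not been registered with this module. */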
static struct xbridge *
xbridge_lookup(const struct ofproto_dpif *ofproto)
{
    struct xbridge *xbridge;

    if (!ofproto) {
        return NULL;
    }

    HMAP_FOR_EACH_IN_BUCKET (xbridge, hmap_node, hash_pointer(ofproto, 0),
                             &xbridges) {
        if (xbridge->ofproto == ofproto) {
            return xbridge;
        }
    }
    return NULL;
}

static struct xbundle *
xbundle_lookup(const struct ofbundle *ofbundle)
{
    struct xbundle *xbundle;

    if (!ofbundle) {
        return NULL;
    }

    HMAP_FOR_EACH_IN_BUCKET (xbundle, hmap_node, hash_pointer(ofbundle, 0),
                             &xbundles) {
        if (xbundle->ofbundle == ofbundle) {
            return xbundle;
        }
    }
    return NULL;
}

static struct xport *
xport_lookup(const struct ofport_dpif *ofport)
{
    struct xport *xport;

    if (!ofport) {
        return NULL;
    }

    HMAP_FOR_EACH_IN_BUCKET (xport, hmap_node, hash_pointer(ofport, 0),
                             &xports) {
        if (xport->ofport == ofport) {
            return xport;
        }
    }
    return NULL;
}

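/* Returns the STP port corresponding to 'xport', or NULL if STP is disabled
 * on the bridge or the port is not part of the spanning tree. */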
static struct stp_port *
xport_get_stp_port(const struct xport *xport)
{
    return xport->xbridge->stp && xport->stp_port_no != -1
        ? stp_get_port(xport->xbridge->stp, xport->stp_port_no)
        : NULL;
}

static enum stp_state
xport_stp_learn_state(const struct xport *xport)
{
    struct stp_port *sp = xport_get_stp_port(xport);
    return stp_learn_in_state(sp ? stp_port_get_state(sp) : STP_DISABLED);
}

static bool
xport_stp_forward_state(const struct xport *xport)
{
    struct stp_port *sp = xport_get_stp_port(xport);
    return stp_forward_in_state(sp ? stp_port_get_state(sp) : STP_DISABLED);
}

/* Returns true if STP should process 'flow'.  Sets fields in 'wc' that
 * were used to make the determination. */
static bool
stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
{
    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
    return eth_addr_equals(flow->dl_dst, eth_addr_stp);
}

static void
stp_process_packet(const struct xport *xport, const struct ofpbuf *packet)
{
    struct stp_port *sp = xport_get_stp_port(xport);
    struct ofpbuf payload = *packet;
    struct eth_header *eth = payload.data;

    /* Sink packets on ports that have STP disabled when the bridge has
     * STP enabled. */
    if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
        return;
    }

    /* Trim off padding on payload. */
    if (payload.size > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
        payload.size = ntohs(eth->eth_type) + ETH_HEADER_LEN;
    }

    if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
        stp_received_bpdu(sp, payload.data, payload.size);
    }
}

static struct xport *
get_ofp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
{
    struct xport *xport;

    HMAP_FOR_EACH_IN_BUCKET (xport, ofp_node, hash_ofp_port(ofp_port),
                             &xbridge->xports) {
        if (xport->ofp_port == ofp_port) {
            return xport;
        }
    }
    return NULL;
}

static odp_port_t
ofp_port_to_odp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
{
    const struct xport *xport = get_ofp_port(xbridge, ofp_port);
    return xport ? xport->odp_port : ODPP_NONE;
}

static bool
xbundle_trunks_vlan(const struct xbundle *bundle, uint16_t vlan)
{
    return (bundle->vlan_mode != PORT_VLAN_ACCESS
            && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
}

static bool
xbundle_includes_vlan(const struct xbundle *xbundle, uint16_t vlan)
{
    return vlan == xbundle->vlan || xbundle_trunks_vlan(xbundle, vlan);
}

static mirror_mask_t
xbundle_mirror_out(const struct xbridge *xbridge, struct xbundle *xbundle)
{
    return xbundle != &ofpp_none_bundle
        ? mirror_bundle_out(xbridge->mbridge, xbundle->ofbundle)
        : 0;
}

static mirror_mask_t
xbundle_mirror_src(const struct xbridge *xbridge, struct xbundle *xbundle)
{
    return xbundle != &ofpp_none_bundle
        ? mirror_bundle_src(xbridge->mbridge, xbundle->ofbundle)
        : 0;
}

static mirror_mask_t
xbundle_mirror_dst(const struct xbridge *xbridge, struct xbundle *xbundle)
{
    return xbundle != &ofpp_none_bundle
        ? mirror_bundle_dst(xbridge->mbridge, xbundle->ofbundle)
        : 0;
}

static struct xbundle *
lookup_input_bundle(const struct xbridge *xbridge, ofp_port_t in_port,
                    bool warn, struct xport **in_xportp)
{
    struct xport *xport;

    /* Find the port and bundle for the received packet. */
    xport = get_ofp_port(xbridge, in_port);
    if (in_xportp) {
        *in_xportp = xport;
    }
    if (xport && xport->xbundle) {
        return xport->xbundle;
    }

    /* Special-case OFPP_NONE, which a controller may use as the ingress
     * port for traffic that it is sourcing. */
    if (in_port == OFPP_NONE) {
        ofpp_none_bundle.name = "OFPP_NONE";
        ofpp_none_bundle.vlan_mode = PORT_VLAN_TRUNK;
        return &ofpp_none_bundle;
    }

    /* Odd.  A few possible reasons here:
     *
     * - We deleted a port but there are still a few packets queued up
     *   from it.
     *
     * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
     *   we don't know about.
     *
     * - The ofproto client didn't configure the port as part of a bundle.
     *   This is particularly likely to happen if a packet was received on the
     *   port after it was created, but before the client had a chance to
     *   configure its bundle.
     */
    if (warn) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
                     "port %"PRIu16, xbridge->name, in_port);
    }
    return NULL;
}

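/* Adds output actions for the mirrors that apply to the packet described by
 * 'orig_flow', based on the mirror configuration of the input bundle and the
 * mirror mask accumulated so far in 'ctx'. */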
static void
add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow)
{
    const struct xbridge *xbridge = ctx->xbridge;
    mirror_mask_t mirrors;
    struct xbundle *in_xbundle;
    uint16_t vlan;
    uint16_t vid;

    mirrors = ctx->xout->mirrors;
    ctx->xout->mirrors = 0;

    in_xbundle = lookup_input_bundle(xbridge, orig_flow->in_port.ofp_port,
                                     ctx->xin->packet != NULL, NULL);
    if (!in_xbundle) {
        return;
    }
    mirrors |= xbundle_mirror_src(xbridge, in_xbundle);

    /* Drop frames on bundles reserved for mirroring. */
    if (xbundle_mirror_out(xbridge, in_xbundle)) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ctx->xbridge->name, in_xbundle->name);
        }
        ofpbuf_clear(&ctx->xout->odp_actions);
        return;
    }

    /* Check VLAN. */
    vid = vlan_tci_to_vid(orig_flow->vlan_tci);
    if (!input_vid_is_valid(vid, in_xbundle, ctx->xin->packet != NULL)) {
        return;
    }
    vlan = input_vid_to_vlan(in_xbundle, vid);

    if (!mirrors) {
        return;
    }

    /* Restore the original packet before adding the mirror actions. */
    ctx->xin->flow = *orig_flow;

    while (mirrors) {
        mirror_mask_t dup_mirrors;
        struct ofbundle *out;
        unsigned long *vlans;
        bool vlan_mirrored;
        bool has_mirror;
        int out_vlan;

        has_mirror = mirror_get(xbridge->mbridge, mirror_mask_ffs(mirrors) - 1,
                                &vlans, &dup_mirrors, &out, &out_vlan);
        ovs_assert(has_mirror);

        if (vlans) {
            ctx->xout->wc.masks.vlan_tci |= htons(VLAN_CFI | VLAN_VID_MASK);
        }
        vlan_mirrored = !vlans || bitmap_is_set(vlans, vlan);
        free(vlans);

        if (!vlan_mirrored) {
            mirrors = zero_rightmost_1bit(mirrors);
            continue;
        }

        mirrors &= ~dup_mirrors;
        ctx->xout->mirrors |= dup_mirrors;
        if (out) {
            struct xbundle *out_xbundle = xbundle_lookup(out);
            if (out_xbundle) {
                output_normal(ctx, out_xbundle, vlan);
            }
        } else if (vlan != out_vlan
                   && !eth_addr_is_reserved(orig_flow->dl_dst)) {
            struct xbundle *xbundle;

            LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
                if (xbundle_includes_vlan(xbundle, out_vlan)
                    && !xbundle_mirror_out(xbridge, xbundle)) {
                    output_normal(ctx, xbundle, out_vlan);
                }
            }
        }
    }
}

/* Given 'vid', the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), and
 * 'in_xbundle', the bundle on which the packet was received, returns the VLAN
 * to which the packet belongs.
 *
 * Both 'vid' and the return value are in the range 0...4095. */
static uint16_t
input_vid_to_vlan(const struct xbundle *in_xbundle, uint16_t vid)
{
    switch (in_xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return in_xbundle->vlan;
        break;

    case PORT_VLAN_TRUNK:
        return vid;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        return vid ? vid : in_xbundle->vlan;

    default:
        NOT_REACHED();
    }
}

/* Checks whether a packet with the given 'vid' may ingress on 'in_xbundle'.
 * If so, returns true.  Otherwise, returns false and, if 'warn' is true, logs
 * a warning.
 *
 * 'vid' should be the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), in the range
 * 0...4095. */
static bool
input_vid_is_valid(uint16_t vid, struct xbundle *in_xbundle, bool warn)
{
    /* Allow any VID on the OFPP_NONE port. */
    if (in_xbundle == &ofpp_none_bundle) {
        return true;
    }

    switch (in_xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        if (vid) {
            if (warn) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "dropping VLAN %"PRIu16" tagged "
                             "packet received on port %s configured as VLAN "
                             "%"PRIu16" access port", vid, in_xbundle->name,
                             in_xbundle->vlan);
            }
            return false;
        }
        return true;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        if (!vid) {
            /* Port must always carry its native VLAN. */
            return true;
        }
        /* Fall through. */
    case PORT_VLAN_TRUNK:
        if (!xbundle_includes_vlan(in_xbundle, vid)) {
            if (warn) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "dropping VLAN %"PRIu16" packet "
                             "received on port %s not configured for trunking "
                             "VLAN %"PRIu16, vid, in_xbundle->name, vid);
            }
            return false;
        }
        return true;

    default:
        NOT_REACHED();
    }
}

/* Given 'vlan', the VLAN that a packet belongs to, and 'out_xbundle', a bundle
 * on which the packet is to be output, returns the VID that should be included
 * in the 802.1Q header.  (If the return value is 0, then the 802.1Q header
 * should only be included in the packet if there is a nonzero PCP.)
 *
 * Both 'vlan' and the return value are in the range 0...4095. */
static uint16_t
output_vlan_to_vid(const struct xbundle *out_xbundle, uint16_t vlan)
{
    switch (out_xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return 0;

    case PORT_VLAN_TRUNK:
    case PORT_VLAN_NATIVE_TAGGED:
        return vlan;

    case PORT_VLAN_NATIVE_UNTAGGED:
        return vlan == out_xbundle->vlan ? 0 : vlan;

    default:
        NOT_REACHED();
    }
}

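/* Outputs the packet being translated to 'out_xbundle' as a member of VLAN
 * 'vlan': picks a slave if the bundle is a bond, rewrites the 802.1Q tag as
 * required by the bundle's VLAN mode, composes the output action, and then
 * restores the original VLAN TCI. */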
static void
output_normal(struct xlate_ctx *ctx, const struct xbundle *out_xbundle,
              uint16_t vlan)
{
    ovs_be16 *flow_tci = &ctx->xin->flow.vlan_tci;
    uint16_t vid;
    ovs_be16 tci, old_tci;
    struct xport *xport;

    vid = output_vlan_to_vid(out_xbundle, vlan);
    if (list_is_empty(&out_xbundle->xports)) {
        /* Partially configured bundle with no slaves.  Drop the packet. */
        return;
    } else if (!out_xbundle->bond) {
        xport = CONTAINER_OF(list_front(&out_xbundle->xports), struct xport,
                             bundle_node);
    } else {
        struct ofport_dpif *ofport;

        ofport = bond_choose_output_slave(out_xbundle->bond, &ctx->xin->flow,
                                          &ctx->xout->wc, vid);
        xport = xport_lookup(ofport);

        if (!xport) {
            /* No slaves enabled, so drop packet. */
            return;
        }
    }

    old_tci = *flow_tci;
    tci = htons(vid);
    if (tci || out_xbundle->use_priority_tags) {
        tci |= *flow_tci & htons(VLAN_PCP_MASK);
        if (tci) {
            tci |= htons(VLAN_CFI);
        }
    }
    *flow_tci = tci;

    compose_output_action(ctx, xport->ofp_port);
    *flow_tci = old_tci;
}

/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
 * migration.  Older Citrix-patched Linux DomU used gratuitous ARP replies to
 * indicate this; newer upstream kernels use gratuitous ARP requests. */
static bool
is_gratuitous_arp(const struct flow *flow, struct flow_wildcards *wc)
{
    if (flow->dl_type != htons(ETH_TYPE_ARP)) {
        return false;
    }

    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
    if (!eth_addr_is_broadcast(flow->dl_dst)) {
        return false;
    }

    memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
    if (flow->nw_proto == ARP_OP_REPLY) {
        return true;
    } else if (flow->nw_proto == ARP_OP_REQUEST) {
        memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
        memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);

        return flow->nw_src == flow->nw_dst;
    } else {
        return false;
    }
}

/* Checks whether a MAC learning update is necessary for MAC learning table
 * 'ml' given that a packet matching 'flow' was received on 'in_xbundle' in
 * 'vlan'.
 *
 * Most packets processed through the MAC learning table do not actually
 * change it in any way.  This function requires only a read lock on the MAC
 * learning table, so it is much cheaper in this common case.
 *
 * Keep the code here synchronized with that in update_learning_table__()
 * below. */
static bool
is_mac_learning_update_needed(const struct mac_learning *ml,
                              const struct flow *flow,
                              struct flow_wildcards *wc,
                              int vlan, struct xbundle *in_xbundle)
    OVS_REQ_RDLOCK(ml->rwlock)
{
    struct mac_entry *mac;

    if (!mac_learning_may_learn(ml, flow->dl_src, vlan)) {
        return false;
    }

    mac = mac_learning_lookup(ml, flow->dl_src, vlan);
    if (!mac || mac_entry_age(ml, mac)) {
        return true;
    }

    if (is_gratuitous_arp(flow, wc)) {
        /* We don't want to learn from gratuitous ARP packets that are
         * reflected back over bond slaves so we lock the learning table. */
        if (!in_xbundle->bond) {
            return true;
        } else if (mac_entry_is_grat_arp_locked(mac)) {
            return false;
        }
    }

    return mac->port.p != in_xbundle->ofbundle;
}

/* Updates MAC learning table 'ml' given that a packet matching 'flow' was
 * received on 'in_xbundle' in 'vlan'.
 *
 * This code repeats all the checks in is_mac_learning_update_needed() because
 * the lock was released between there and here and thus the MAC learning state
 * could have changed.
 *
 * Keep the code here synchronized with that in is_mac_learning_update_needed()
 * above. */
static void
update_learning_table__(const struct xbridge *xbridge,
                        const struct flow *flow, struct flow_wildcards *wc,
                        int vlan, struct xbundle *in_xbundle)
    OVS_REQ_WRLOCK(xbridge->ml->rwlock)
{
    struct mac_entry *mac;

    if (!mac_learning_may_learn(xbridge->ml, flow->dl_src, vlan)) {
        return;
    }

    mac = mac_learning_insert(xbridge->ml, flow->dl_src, vlan);
    if (is_gratuitous_arp(flow, wc)) {
        /* We don't want to learn from gratuitous ARP packets that are
         * reflected back over bond slaves so we lock the learning table. */
        if (!in_xbundle->bond) {
            mac_entry_set_grat_arp_lock(mac);
        } else if (mac_entry_is_grat_arp_locked(mac)) {
            return;
        }
    }

    if (mac->port.p != in_xbundle->ofbundle) {
        /* The log messages here could actually be useful in debugging,
         * so keep the rate limit relatively high. */
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);

        VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
                    "on port %s in VLAN %d",
                    xbridge->name, ETH_ADDR_ARGS(flow->dl_src),
                    in_xbundle->name, vlan);

        mac->port.p = in_xbundle->ofbundle;
        mac_learning_changed(xbridge->ml);
    }
}

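/* Updates the MAC learning table of 'xbridge' given that a packet matching
 * 'flow' was received on 'in_xbundle' in 'vlan'.  Takes only a read lock on
 * the table for the common case in which nothing needs to change, and retries
 * under the write lock when an update is actually required. */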
static void
update_learning_table(const struct xbridge *xbridge,
                      const struct flow *flow, struct flow_wildcards *wc,
                      int vlan, struct xbundle *in_xbundle)
{
    bool need_update;

    /* Don't learn the OFPP_NONE port. */
    if (in_xbundle == &ofpp_none_bundle) {
        return;
    }

    /* First try the common case: no change to MAC learning table. */
    ovs_rwlock_rdlock(&xbridge->ml->rwlock);
    need_update = is_mac_learning_update_needed(xbridge->ml, flow, wc, vlan,
                                                in_xbundle);
    ovs_rwlock_unlock(&xbridge->ml->rwlock);

    if (need_update) {
        /* Slow path: MAC learning table might need an update. */
        ovs_rwlock_wrlock(&xbridge->ml->rwlock);
        update_learning_table__(xbridge, flow, wc, vlan, in_xbundle);
        ovs_rwlock_unlock(&xbridge->ml->rwlock);
    }
}

/* Determines whether packets in 'flow' within 'xbridge' should be forwarded or
 * dropped.  Returns true if they may be forwarded, false if they should be
 * dropped.
 *
 * 'in_port' must be the xport that corresponds to flow->in_port.
 * 'in_port' must be part of a bundle (e.g. in_port->bundle must be nonnull).
 *
 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
 * returned by input_vid_to_vlan().  It must be a valid VLAN for 'in_port', as
 * checked by input_vid_is_valid(). */
static bool
is_admissible(struct xlate_ctx *ctx, struct xport *in_port,
              uint16_t vlan)
{
    struct xbundle *in_xbundle = in_port->xbundle;
    const struct xbridge *xbridge = ctx->xbridge;
    struct flow *flow = &ctx->xin->flow;

    /* Drop frames for reserved multicast addresses
     * only if forward_bpdu option is absent. */
    if (!xbridge->forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
        xlate_report(ctx, "packet has reserved destination MAC, dropping");
        return false;
    }

    if (in_xbundle->bond) {
        struct mac_entry *mac;

        switch (bond_check_admissibility(in_xbundle->bond, in_port->ofport,
                                         flow->dl_dst)) {
        case BV_ACCEPT:
            break;

        case BV_DROP:
            xlate_report(ctx, "bonding refused admissibility, dropping");
            return false;

        case BV_DROP_IF_MOVED:
            ovs_rwlock_rdlock(&xbridge->ml->rwlock);
            mac = mac_learning_lookup(xbridge->ml, flow->dl_src, vlan);
            if (mac && mac->port.p != in_xbundle->ofbundle &&
                (!is_gratuitous_arp(flow, &ctx->xout->wc)
                 || mac_entry_is_grat_arp_locked(mac))) {
                ovs_rwlock_unlock(&xbridge->ml->rwlock);
                xlate_report(ctx, "SLB bond thinks this packet looped back, "
                             "dropping");
                return false;
            }
            ovs_rwlock_unlock(&xbridge->ml->rwlock);
            break;
        }
    }

    return true;
}

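/* Implements OFPP_NORMAL processing: validates the input bundle and VLAN,
 * optionally learns the source MAC, and then outputs the packet either to the
 * bundle on which the destination MAC was learned or, failing that, floods it
 * to every other floodable bundle in the VLAN. */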
static void
xlate_normal(struct xlate_ctx *ctx)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;
    struct xbundle *in_xbundle;
    struct xport *in_port;
    struct mac_entry *mac;
    void *mac_port;
    uint16_t vlan;
    uint16_t vid;

    ctx->xout->has_normal = true;

    memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
    wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);

    in_xbundle = lookup_input_bundle(ctx->xbridge, flow->in_port.ofp_port,
                                     ctx->xin->packet != NULL, &in_port);
    if (!in_xbundle) {
        xlate_report(ctx, "no input bundle, dropping");
        return;
    }

    /* Drop malformed frames. */
    if (flow->dl_type == htons(ETH_TYPE_VLAN) &&
        !(flow->vlan_tci & htons(VLAN_CFI))) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet with partial "
                         "VLAN tag received on port %s",
                         ctx->xbridge->name, in_xbundle->name);
        }
        xlate_report(ctx, "partial VLAN tag, dropping");
        return;
    }

    /* Drop frames on bundles reserved for mirroring. */
    if (xbundle_mirror_out(ctx->xbridge, in_xbundle)) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ctx->xbridge->name, in_xbundle->name);
        }
        xlate_report(ctx, "input port is mirror output port, dropping");
        return;
    }

    /* Check VLAN. */
    vid = vlan_tci_to_vid(flow->vlan_tci);
    if (!input_vid_is_valid(vid, in_xbundle, ctx->xin->packet != NULL)) {
        xlate_report(ctx, "disallowed VLAN VID for this input port, dropping");
        return;
    }
    vlan = input_vid_to_vlan(in_xbundle, vid);

    /* Check other admissibility requirements. */
    if (in_port && !is_admissible(ctx, in_port, vlan)) {
        return;
    }

    /* Learn source MAC. */
    if (ctx->xin->may_learn) {
        update_learning_table(ctx->xbridge, flow, wc, vlan, in_xbundle);
    }

    /* Determine output bundle. */
    ovs_rwlock_rdlock(&ctx->xbridge->ml->rwlock);
    mac = mac_learning_lookup(ctx->xbridge->ml, flow->dl_dst, vlan);
    mac_port = mac ? mac->port.p : NULL;
    ovs_rwlock_unlock(&ctx->xbridge->ml->rwlock);

    if (mac_port) {
        struct xbundle *mac_xbundle = xbundle_lookup(mac_port);
        if (mac_xbundle && mac_xbundle != in_xbundle) {
            xlate_report(ctx, "forwarding to learned port");
            output_normal(ctx, mac_xbundle, vlan);
        } else if (!mac_xbundle) {
            xlate_report(ctx, "learned port is unknown, dropping");
        } else {
            xlate_report(ctx, "learned port is input port, dropping");
        }
    } else {
        struct xbundle *xbundle;

        xlate_report(ctx, "no learned MAC for destination, flooding");
        LIST_FOR_EACH (xbundle, list_node, &ctx->xbridge->xbundles) {
            if (xbundle != in_xbundle
                && xbundle_includes_vlan(xbundle, vlan)
                && xbundle->floodable
                && !xbundle_mirror_out(ctx->xbridge, xbundle)) {
                output_normal(ctx, xbundle, vlan);
            }
        }
        ctx->xout->nf_output_iface = NF_OUT_FLOOD;
    }
}

/* Compose SAMPLE action for sFlow or IPFIX.  The given probability is
 * the number of packets out of UINT32_MAX to sample.  The given
 * cookie is passed back in the callback for each sampled packet. */
static size_t
compose_sample_action(const struct xbridge *xbridge,
                      struct ofpbuf *odp_actions,
                      const struct flow *flow,
                      const uint32_t probability,
                      const union user_action_cookie *cookie,
                      const size_t cookie_size)
{
    size_t sample_offset, actions_offset;
    odp_port_t odp_port;
    int cookie_offset;
    uint32_t pid;

    sample_offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SAMPLE);

    nl_msg_put_u32(odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);

    actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);

    odp_port = ofp_port_to_odp_port(xbridge, flow->in_port.ofp_port);
    pid = dpif_port_get_pid(xbridge->dpif, odp_port);
    cookie_offset = odp_put_userspace_action(pid, cookie, cookie_size,
                                             odp_actions);

    nl_msg_end_nested(odp_actions, actions_offset);
    nl_msg_end_nested(odp_actions, sample_offset);
    return cookie_offset;
}

static void
compose_sflow_cookie(const struct xbridge *xbridge, ovs_be16 vlan_tci,
                     odp_port_t odp_port, unsigned int n_outputs,
                     union user_action_cookie *cookie)
{
    int ifindex;

    cookie->type = USER_ACTION_COOKIE_SFLOW;
    cookie->sflow.vlan_tci = vlan_tci;

    /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
     * port information") for the interpretation of cookie->output. */
    switch (n_outputs) {
    case 0:
        /* 0x40000000 | 256 means "packet dropped for unknown reason". */
        cookie->sflow.output = 0x40000000 | 256;
        break;

    case 1:
        ifindex = dpif_sflow_odp_port_to_ifindex(xbridge->sflow, odp_port);
        if (ifindex) {
            cookie->sflow.output = ifindex;
            break;
        }
        /* Fall through. */
    default:
        /* 0x80000000 means "multiple output ports". */
        cookie->sflow.output = 0x80000000 | n_outputs;
        break;
    }
}

/* Compose SAMPLE action for sFlow bridge sampling. */
static size_t
compose_sflow_action(const struct xbridge *xbridge,
                     struct ofpbuf *odp_actions,
                     const struct flow *flow,
                     odp_port_t odp_port)
{
    uint32_t probability;
    union user_action_cookie cookie;

    if (!xbridge->sflow || flow->in_port.ofp_port == OFPP_NONE) {
        return 0;
    }

    probability = dpif_sflow_get_probability(xbridge->sflow);
    compose_sflow_cookie(xbridge, htons(0), odp_port,
                         odp_port == ODPP_NONE ? 0 : 1, &cookie);

    return compose_sample_action(xbridge, odp_actions, flow, probability,
                                 &cookie, sizeof cookie.sflow);
}

static void
compose_flow_sample_cookie(uint16_t probability, uint32_t collector_set_id,
                           uint32_t obs_domain_id, uint32_t obs_point_id,
                           union user_action_cookie *cookie)
{
    cookie->type = USER_ACTION_COOKIE_FLOW_SAMPLE;
    cookie->flow_sample.probability = probability;
    cookie->flow_sample.collector_set_id = collector_set_id;
    cookie->flow_sample.obs_domain_id = obs_domain_id;
    cookie->flow_sample.obs_point_id = obs_point_id;
}

static void
compose_ipfix_cookie(union user_action_cookie *cookie)
{
    cookie->type = USER_ACTION_COOKIE_IPFIX;
}

/* Compose SAMPLE action for IPFIX bridge sampling. */
static void
compose_ipfix_action(const struct xbridge *xbridge,
                     struct ofpbuf *odp_actions,
                     const struct flow *flow)
{
    uint32_t probability;
    union user_action_cookie cookie;

    if (!xbridge->ipfix || flow->in_port.ofp_port == OFPP_NONE) {
        return;
    }

    probability = dpif_ipfix_get_bridge_exporter_probability(xbridge->ipfix);
    compose_ipfix_cookie(&cookie);

    compose_sample_action(xbridge, odp_actions, flow, probability,
                          &cookie, sizeof cookie.ipfix);
}

/* The SAMPLE action for sFlow must be the first action in any given list of
 * actions.  At this point we do not have all the information required to
 * build it, so build the sample action as completely as possible. */
static void
add_sflow_action(struct xlate_ctx *ctx)
{
    ctx->user_cookie_offset = compose_sflow_action(ctx->xbridge,
                                                   &ctx->xout->odp_actions,
                                                   &ctx->xin->flow, ODPP_NONE);
    ctx->sflow_odp_port = 0;
    ctx->sflow_n_outputs = 0;
}

/* The SAMPLE action for IPFIX must be the first or second action in any given
 * list of actions, coming after the SAMPLE action for sFlow if one is
 * present. */
static void
add_ipfix_action(struct xlate_ctx *ctx)
{
    compose_ipfix_action(ctx->xbridge, &ctx->xout->odp_actions,
                         &ctx->xin->flow);
}

/* Fixes the SAMPLE action according to data collected while composing ODP
 * actions, i.e. the nested USERSPACE action's user-cookie inside the SAMPLE
 * action's OVS_SAMPLE_ATTR_ACTIONS attribute, which sFlow requires. */
static void
fix_sflow_action(struct xlate_ctx *ctx)
{
    const struct flow *base = &ctx->base_flow;
    union user_action_cookie *cookie;

    if (!ctx->user_cookie_offset) {
        return;
    }

    cookie = ofpbuf_at(&ctx->xout->odp_actions, ctx->user_cookie_offset,
                       sizeof cookie->sflow);
    ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);

    compose_sflow_cookie(ctx->xbridge, base->vlan_tci,
                         ctx->sflow_odp_port, ctx->sflow_n_outputs, cookie);
}

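/* Checks whether 'flow' is a control packet (CFM, BFD, LACP, or STP) that
 * 'xport' should handle itself rather than forward.  If so, hands 'packet'
 * (when nonnull) to the appropriate protocol implementation and returns the
 * corresponding slow-path reason; otherwise returns 0. */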
static enum slow_path_reason
process_special(struct xlate_ctx *ctx, const struct flow *flow,
                const struct xport *xport, const struct ofpbuf *packet)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    const struct xbridge *xbridge = ctx->xbridge;

    if (!xport) {
        return 0;
    } else if (xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc)) {
        if (packet) {
            cfm_process_heartbeat(xport->cfm, packet);
        }
        return SLOW_CFM;
    } else if (xport->bfd && bfd_should_process_flow(xport->bfd, flow, wc)) {
        if (packet) {
            bfd_process_packet(xport->bfd, flow, packet);
        }
        return SLOW_BFD;
    } else if (xport->xbundle && xport->xbundle->lacp
               && flow->dl_type == htons(ETH_TYPE_LACP)) {
        if (packet) {
            lacp_process_packet(xport->xbundle->lacp, xport->ofport, packet);
        }
        return SLOW_LACP;
    } else if (xbridge->stp && stp_should_process_flow(flow, wc)) {
        if (packet) {
            stp_process_packet(xport, packet);
        }
        return SLOW_STP;
    } else {
        return 0;
    }
}

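/* Composes datapath actions for sending the packet being translated out of
 * OpenFlow port 'ofp_port', honoring OFPPC_NO_FWD and (if 'check_stp') the
 * port's STP state.  Patch ports are handled by re-entering translation on
 * the peer bridge; tunnel ports and VLAN splinters get their required header
 * and metadata adjustments before the output action is emitted. */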
9583bc14 1521static void
4e022ec0 1522compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
9583bc14
EJ
1523 bool check_stp)
1524{
46c88433 1525 const struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
1dd35f8a 1526 struct flow_wildcards *wc = &ctx->xout->wc;
33bf9176 1527 struct flow *flow = &ctx->xin->flow;
9583bc14 1528 ovs_be16 flow_vlan_tci;
1362e248 1529 uint32_t flow_pkt_mark;
9583bc14 1530 uint8_t flow_nw_tos;
4e022ec0 1531 odp_port_t out_port, odp_port;
ca077186 1532 uint8_t dscp;
9583bc14
EJ
1533
1534 /* If 'struct flow' gets additional metadata, we'll need to zero it out
1535 * before traversing a patch port. */
1536 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 20);
1537
46c88433 1538 if (!xport) {
9583bc14
EJ
1539 xlate_report(ctx, "Nonexistent output port");
1540 return;
46c88433 1541 } else if (xport->config & OFPUTIL_PC_NO_FWD) {
9583bc14
EJ
1542 xlate_report(ctx, "OFPPC_NO_FWD set, skipping output");
1543 return;
9d189a50 1544 } else if (check_stp && !xport_stp_forward_state(xport)) {
9583bc14
EJ
1545 xlate_report(ctx, "STP not in forwarding state, skipping output");
1546 return;
1547 }
1548
46c88433
EJ
1549 if (mbridge_has_mirrors(ctx->xbridge->mbridge) && xport->xbundle) {
1550 ctx->xout->mirrors |= xbundle_mirror_dst(xport->xbundle->xbridge,
1551 xport->xbundle);
cdf5d3a5
EJ
1552 }
1553
46c88433
EJ
1554 if (xport->peer) {
1555 const struct xport *peer = xport->peer;
9583bc14 1556 struct flow old_flow = ctx->xin->flow;
9583bc14 1557 enum slow_path_reason special;
9583bc14 1558
46c88433
EJ
1559 ctx->xbridge = peer->xbridge;
1560 flow->in_port.ofp_port = peer->ofp_port;
33bf9176
BP
1561 flow->metadata = htonll(0);
1562 memset(&flow->tunnel, 0, sizeof flow->tunnel);
1563 memset(flow->regs, 0, sizeof flow->regs);
9583bc14 1564
642dc74d 1565 special = process_special(ctx, &ctx->xin->flow, peer,
9583bc14
EJ
1566 ctx->xin->packet);
1567 if (special) {
1568 ctx->xout->slow = special;
ddd3c975 1569 } else if (may_receive(peer, ctx)) {
9d189a50 1570 if (xport_stp_forward_state(peer)) {
4e022ec0 1571 xlate_table_action(ctx, flow->in_port.ofp_port, 0, true);
9583bc14
EJ
1572 } else {
1573 /* Forwarding is disabled by STP. Let OFPP_NORMAL and the
1574 * learning action look at the packet, then drop it. */
1575 struct flow old_base_flow = ctx->base_flow;
1576 size_t old_size = ctx->xout->odp_actions.size;
cdf5d3a5 1577 mirror_mask_t old_mirrors = ctx->xout->mirrors;
4e022ec0 1578 xlate_table_action(ctx, flow->in_port.ofp_port, 0, true);
cdf5d3a5 1579 ctx->xout->mirrors = old_mirrors;
9583bc14
EJ
1580 ctx->base_flow = old_base_flow;
1581 ctx->xout->odp_actions.size = old_size;
1582 }
1583 }
1584
1585 ctx->xin->flow = old_flow;
832554e3 1586 ctx->xbridge = xport->xbridge;
9583bc14
EJ
1587
1588 if (ctx->xin->resubmit_stats) {
46c88433
EJ
1589 netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
1590 netdev_vport_inc_rx(peer->netdev, ctx->xin->resubmit_stats);
9583bc14
EJ
1591 }
1592
1593 return;
1594 }
1595
33bf9176 1596 flow_vlan_tci = flow->vlan_tci;
1362e248 1597 flow_pkt_mark = flow->pkt_mark;
33bf9176 1598 flow_nw_tos = flow->nw_tos;
9583bc14 1599
55954f6e 1600 if (dscp_from_skb_priority(xport, flow->skb_priority, &dscp)) {
1dd35f8a 1601 wc->masks.nw_tos |= IP_ECN_MASK;
33bf9176 1602 flow->nw_tos &= ~IP_DSCP_MASK;
ca077186 1603 flow->nw_tos |= dscp;
9583bc14
EJ
1604 }
1605
46c88433 1606 if (xport->is_tunnel) {
9583bc14
EJ
1607 /* Save tunnel metadata so that changes made due to
1608 * the Logical (tunnel) Port are not visible for any further
1609 * matches, while explicit set actions on tunnel metadata are.
1610 */
33bf9176 1611 struct flow_tnl flow_tnl = flow->tunnel;
46c88433 1612 odp_port = tnl_port_send(xport->ofport, flow, &ctx->xout->wc);
4e022ec0 1613 if (odp_port == ODPP_NONE) {
9583bc14
EJ
1614 xlate_report(ctx, "Tunneling decided against output");
1615 goto out; /* restore flow_nw_tos */
1616 }
33bf9176 1617 if (flow->tunnel.ip_dst == ctx->orig_tunnel_ip_dst) {
9583bc14
EJ
1618 xlate_report(ctx, "Not tunneling to our own address");
1619 goto out; /* restore flow_nw_tos */
1620 }
1621 if (ctx->xin->resubmit_stats) {
46c88433 1622 netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
9583bc14
EJ
1623 }
1624 out_port = odp_port;
33bf9176 1625 commit_odp_tunnel_action(flow, &ctx->base_flow,
9583bc14 1626 &ctx->xout->odp_actions);
33bf9176 1627 flow->tunnel = flow_tnl; /* Restore tunnel metadata */
9583bc14 1628 } else {
4e022ec0 1629 ofp_port_t vlandev_port;
1dd35f8a 1630
46c88433
EJ
1631 odp_port = xport->odp_port;
1632 if (ofproto_has_vlan_splinters(ctx->xbridge->ofproto)) {
1dd35f8a
JP
1633 wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
1634 }
46c88433 1635 vlandev_port = vsp_realdev_to_vlandev(ctx->xbridge->ofproto, ofp_port,
33bf9176 1636 flow->vlan_tci);
9583bc14
EJ
1637 if (vlandev_port == ofp_port) {
1638 out_port = odp_port;
1639 } else {
46c88433 1640 out_port = ofp_port_to_odp_port(ctx->xbridge, vlandev_port);
33bf9176 1641 flow->vlan_tci = htons(0);
9583bc14 1642 }
9583bc14 1643 }
9583bc14 1644
4e022ec0 1645 if (out_port != ODPP_NONE) {
1dd35f8a
JP
1646 commit_odp_actions(flow, &ctx->base_flow,
1647 &ctx->xout->odp_actions, &ctx->xout->wc);
4e022ec0
AW
1648 nl_msg_put_odp_port(&ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT,
1649 out_port);
9583bc14 1650
6cbbf4fa
EJ
1651 ctx->sflow_odp_port = odp_port;
1652 ctx->sflow_n_outputs++;
1653 ctx->xout->nf_output_iface = ofp_port;
1654 }
1655
1656 out:
9583bc14 1657 /* Restore flow */
33bf9176 1658 flow->vlan_tci = flow_vlan_tci;
1362e248 1659 flow->pkt_mark = flow_pkt_mark;
33bf9176 1660 flow->nw_tos = flow_nw_tos;
9583bc14
EJ
1661}
1662
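/* compose_output_action() is the common entry point and always honors the STP
 * forwarding state; compose_output_action__() takes 'check_stp' explicitly so
 * that OFPP_ALL flooding can bypass the check. */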
1663static void
4e022ec0 1664compose_output_action(struct xlate_ctx *ctx, ofp_port_t ofp_port)
9583bc14
EJ
1665{
1666 compose_output_action__(ctx, ofp_port, true);
1667}
1668
bb61b33d
BP
1669static void
1670xlate_recursively(struct xlate_ctx *ctx, struct rule_dpif *rule)
70742c7f 1671 OVS_RELEASES(rule)
bb61b33d
BP
1672{
1673 struct rule_dpif *old_rule = ctx->rule;
70742c7f
EJ
1674 const struct ofpact *ofpacts;
1675 size_t ofpacts_len;
bb61b33d
BP
1676
1677 if (ctx->xin->resubmit_stats) {
70742c7f 1678 rule_dpif_credit_stats(rule, ctx->xin->resubmit_stats);
bb61b33d
BP
1679 }
1680
1681 ctx->recurse++;
1682 ctx->rule = rule;
70742c7f
EJ
1683 rule_dpif_get_actions(rule, &ofpacts, &ofpacts_len);
1684 do_xlate_actions(ofpacts, ofpacts_len, ctx);
bb61b33d
BP
1685 ctx->rule = old_rule;
1686 ctx->recurse--;
1687
70742c7f 1688 rule_dpif_release(rule);
bb61b33d
BP
1689}
1690
9583bc14
EJ
1691static void
1692xlate_table_action(struct xlate_ctx *ctx,
4e022ec0 1693 ofp_port_t in_port, uint8_t table_id, bool may_packet_in)
9583bc14
EJ
1694{
1695 if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
1696 struct rule_dpif *rule;
4e022ec0 1697 ofp_port_t old_in_port = ctx->xin->flow.in_port.ofp_port;
9583bc14 1698 uint8_t old_table_id = ctx->table_id;
bb61b33d 1699 bool got_rule;
9583bc14
EJ
1700
1701 ctx->table_id = table_id;
1702
bb61b33d
BP
1703 /* Look up a flow with 'in_port' as the input port. Then restore the
1704 * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will
1705 * have surprising behavior). */
4e022ec0 1706 ctx->xin->flow.in_port.ofp_port = in_port;
bb61b33d
BP
1707 got_rule = rule_dpif_lookup_in_table(ctx->xbridge->ofproto,
1708 &ctx->xin->flow, &ctx->xout->wc,
1709 table_id, &rule);
4e022ec0 1710 ctx->xin->flow.in_port.ofp_port = old_in_port;
9583bc14 1711
ad3efdcb
EJ
1712 if (ctx->xin->resubmit_hook) {
1713 ctx->xin->resubmit_hook(ctx->xin, rule, ctx->recurse);
1714 }
1715
bb61b33d
BP
1716 if (got_rule) {
1717 xlate_recursively(ctx, rule);
1718 } else if (may_packet_in) {
ad3efdcb
EJ
1719 struct xport *xport;
1720
ad3efdcb
EJ
1721 /* XXX
 1722 * check the table configuration flags:
 1723 * OFPTC_TABLE_MISS_CONTROLLER (the default),
 1724 * OFPTC_TABLE_MISS_CONTINUE, and
 1725 * OFPTC_TABLE_MISS_DROP.
 1726 * With OpenFlow 1.0, OFPTC_TABLE_MISS_CONTINUE is used; what should be done here? */
1727 xport = get_ofp_port(ctx->xbridge, ctx->xin->flow.in_port.ofp_port);
70742c7f
EJ
1728 choose_miss_rule(xport ? xport->config : 0,
1729 ctx->xbridge->miss_rule,
1730 ctx->xbridge->no_packet_in_rule, &rule);
bb61b33d 1731 xlate_recursively(ctx, rule);
ad3efdcb
EJ
1732 }
1733
9583bc14
EJ
1734 ctx->table_id = old_table_id;
1735 } else {
1736 static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1);
1737
1738 VLOG_ERR_RL(&recurse_rl, "resubmit actions recursed over %d times",
1739 MAX_RESUBMIT_RECURSION);
9583bc14
EJ
1740 }
1741}
1742
1743static void
1744xlate_ofpact_resubmit(struct xlate_ctx *ctx,
1745 const struct ofpact_resubmit *resubmit)
1746{
4e022ec0 1747 ofp_port_t in_port;
9583bc14
EJ
1748 uint8_t table_id;
1749
1750 in_port = resubmit->in_port;
1751 if (in_port == OFPP_IN_PORT) {
4e022ec0 1752 in_port = ctx->xin->flow.in_port.ofp_port;
9583bc14
EJ
1753 }
1754
1755 table_id = resubmit->table_id;
1756 if (table_id == 255) {
1757 table_id = ctx->table_id;
1758 }
1759
1760 xlate_table_action(ctx, in_port, table_id, false);
1761}
1762
1763static void
1764flood_packets(struct xlate_ctx *ctx, bool all)
1765{
46c88433 1766 const struct xport *xport;
9583bc14 1767
46c88433
EJ
1768 HMAP_FOR_EACH (xport, ofp_node, &ctx->xbridge->xports) {
1769 if (xport->ofp_port == ctx->xin->flow.in_port.ofp_port) {
9583bc14
EJ
1770 continue;
1771 }
1772
1773 if (all) {
46c88433
EJ
1774 compose_output_action__(ctx, xport->ofp_port, false);
1775 } else if (!(xport->config & OFPUTIL_PC_NO_FLOOD)) {
1776 compose_output_action(ctx, xport->ofp_port);
9583bc14
EJ
1777 }
1778 }
1779
1780 ctx->xout->nf_output_iface = NF_OUT_FLOOD;
1781}
1782
1783static void
1784execute_controller_action(struct xlate_ctx *ctx, int len,
1785 enum ofp_packet_in_reason reason,
1786 uint16_t controller_id)
1787{
ada3a58d 1788 struct ofputil_packet_in *pin;
9583bc14
EJ
1789 struct ofpbuf *packet;
1790 struct flow key;
1791
1792 ovs_assert(!ctx->xout->slow || ctx->xout->slow == SLOW_CONTROLLER);
1793 ctx->xout->slow = SLOW_CONTROLLER;
1794 if (!ctx->xin->packet) {
1795 return;
1796 }
1797
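    /* Clone the packet and run the datapath actions composed so far on the
     * clone, so the controller sees the packet as it stands at this point in
     * the pipeline rather than as it arrived. */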
1798 packet = ofpbuf_clone(ctx->xin->packet);
1799
1800 key.skb_priority = 0;
1362e248 1801 key.pkt_mark = 0;
9583bc14
EJ
1802 memset(&key.tunnel, 0, sizeof key.tunnel);
1803
1804 commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
1dd35f8a 1805 &ctx->xout->odp_actions, &ctx->xout->wc);
9583bc14
EJ
1806
1807 odp_execute_actions(NULL, packet, &key, ctx->xout->odp_actions.data,
1808 ctx->xout->odp_actions.size, NULL, NULL);
1809
ada3a58d
EJ
1810 pin = xmalloc(sizeof *pin);
1811 pin->packet_len = packet->size;
1812 pin->packet = ofpbuf_steal_data(packet);
1813 pin->reason = reason;
1814 pin->controller_id = controller_id;
1815 pin->table_id = ctx->table_id;
70742c7f 1816 pin->cookie = ctx->rule ? rule_dpif_get_flow_cookie(ctx->rule) : 0;
9583bc14 1817
ada3a58d
EJ
1818 pin->send_len = len;
1819 flow_get_metadata(&ctx->xin->flow, &pin->fmd);
9583bc14 1820
ada3a58d 1821 ofproto_dpif_send_packet_in(ctx->xbridge->ofproto, pin);
9583bc14
EJ
1822 ofpbuf_delete(packet);
1823}
1824
1825static void
9cfef3d0 1826compose_mpls_push_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
9583bc14 1827{
33bf9176
BP
1828 struct flow_wildcards *wc = &ctx->xout->wc;
1829 struct flow *flow = &ctx->xin->flow;
1830
9583bc14
EJ
1831 ovs_assert(eth_type_mpls(eth_type));
1832
33bf9176
BP
1833 memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
1834 memset(&wc->masks.mpls_depth, 0xff, sizeof wc->masks.mpls_depth);
9583bc14 1835
33bf9176
BP
1836 if (flow->mpls_depth) {
1837 flow->mpls_lse &= ~htonl(MPLS_BOS_MASK);
1838 flow->mpls_depth++;
9583bc14
EJ
1839 } else {
1840 ovs_be32 label;
1841 uint8_t tc, ttl;
1842
33bf9176 1843 if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
9583bc14
EJ
1844 label = htonl(0x2); /* IPV6 Explicit Null. */
1845 } else {
1846 label = htonl(0x0); /* IPV4 Explicit Null. */
1847 }
1dd35f8a
JP
1848 wc->masks.nw_tos |= IP_DSCP_MASK;
1849 wc->masks.nw_ttl = 0xff;
33bf9176
BP
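    /* Derive the MPLS traffic class from the IP DSCP bits and fall back to a
     * TTL of 64 (0x40) when the flow carries no IP TTL. */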
1850 tc = (flow->nw_tos & IP_DSCP_MASK) >> 2;
1851 ttl = flow->nw_ttl ? flow->nw_ttl : 0x40;
1852 flow->mpls_lse = set_mpls_lse_values(ttl, tc, 1, label);
1853 flow->mpls_depth = 1;
9583bc14 1854 }
33bf9176 1855 flow->dl_type = eth_type;
9583bc14
EJ
1856}
1857
1858static void
9cfef3d0 1859compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
9583bc14 1860{
33bf9176
BP
1861 struct flow_wildcards *wc = &ctx->xout->wc;
1862 struct flow *flow = &ctx->xin->flow;
1863
9583bc14
EJ
1864 ovs_assert(eth_type_mpls(ctx->xin->flow.dl_type));
1865 ovs_assert(!eth_type_mpls(eth_type));
1866
33bf9176
BP
1867 memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
1868 memset(&wc->masks.mpls_depth, 0xff, sizeof wc->masks.mpls_depth);
1869
1870 if (flow->mpls_depth) {
1871 flow->mpls_depth--;
1872 flow->mpls_lse = htonl(0);
1873 if (!flow->mpls_depth) {
1874 flow->dl_type = eth_type;
9583bc14
EJ
1875 }
1876 }
1877}
1878
1879static bool
1880compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
1881{
33bf9176
BP
1882 struct flow *flow = &ctx->xin->flow;
1883
1884 if (!is_ip_any(flow)) {
9583bc14
EJ
1885 return false;
1886 }
1887
1dd35f8a 1888 ctx->xout->wc.masks.nw_ttl = 0xff;
33bf9176
BP
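    /* If decrementing would expire the TTL, send a copy to each controller
     * listed in the action and stop processing this table; otherwise just
     * decrement it. */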
1889 if (flow->nw_ttl > 1) {
1890 flow->nw_ttl--;
9583bc14
EJ
1891 return false;
1892 } else {
1893 size_t i;
1894
1895 for (i = 0; i < ids->n_controllers; i++) {
1896 execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
1897 ids->cnt_ids[i]);
1898 }
1899
1900 /* Stop processing for current table. */
1901 return true;
1902 }
1903}
1904
1905static bool
9cfef3d0 1906compose_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
9583bc14
EJ
1907{
1908 if (!eth_type_mpls(ctx->xin->flow.dl_type)) {
1909 return true;
1910 }
1911
f74e7df7 1912 ctx->xout->wc.masks.mpls_lse |= htonl(MPLS_TTL_MASK);
9583bc14
EJ
1913 set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse, ttl);
1914 return false;
1915}
1916
1917static bool
9cfef3d0 1918compose_dec_mpls_ttl_action(struct xlate_ctx *ctx)
9583bc14 1919{
33bf9176
BP
1920 struct flow *flow = &ctx->xin->flow;
1921 uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse);
1dd35f8a
JP
1922 struct flow_wildcards *wc = &ctx->xout->wc;
1923
1dd35f8a 1924 memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
9583bc14 1925
33bf9176 1926 if (!eth_type_mpls(flow->dl_type)) {
9583bc14
EJ
1927 return false;
1928 }
1929
1930 if (ttl > 1) {
1931 ttl--;
33bf9176 1932 set_mpls_lse_ttl(&flow->mpls_lse, ttl);
9583bc14
EJ
1933 return false;
1934 } else {
1935 execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0);
1936
1937 /* Stop processing for current table. */
1938 return true;
1939 }
1940}
1941
1942static void
1943xlate_output_action(struct xlate_ctx *ctx,
4e022ec0 1944 ofp_port_t port, uint16_t max_len, bool may_packet_in)
9583bc14 1945{
4e022ec0 1946 ofp_port_t prev_nf_output_iface = ctx->xout->nf_output_iface;
9583bc14
EJ
1947
1948 ctx->xout->nf_output_iface = NF_OUT_DROP;
1949
1950 switch (port) {
1951 case OFPP_IN_PORT:
4e022ec0 1952 compose_output_action(ctx, ctx->xin->flow.in_port.ofp_port);
9583bc14
EJ
1953 break;
1954 case OFPP_TABLE:
4e022ec0
AW
1955 xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
1956 0, may_packet_in);
9583bc14
EJ
1957 break;
1958 case OFPP_NORMAL:
1959 xlate_normal(ctx);
1960 break;
1961 case OFPP_FLOOD:
1962 flood_packets(ctx, false);
1963 break;
1964 case OFPP_ALL:
1965 flood_packets(ctx, true);
1966 break;
1967 case OFPP_CONTROLLER:
1968 execute_controller_action(ctx, max_len, OFPR_ACTION, 0);
1969 break;
1970 case OFPP_NONE:
1971 break;
1972 case OFPP_LOCAL:
1973 default:
4e022ec0 1974 if (port != ctx->xin->flow.in_port.ofp_port) {
9583bc14
EJ
1975 compose_output_action(ctx, port);
1976 } else {
1977 xlate_report(ctx, "skipping output to input port");
1978 }
1979 break;
1980 }
1981
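    /* NetFlow can record only one output interface, so collapse multiple
     * distinct outputs into NF_OUT_MULTI and keep NF_OUT_FLOOD sticky. */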
1982 if (prev_nf_output_iface == NF_OUT_FLOOD) {
1983 ctx->xout->nf_output_iface = NF_OUT_FLOOD;
1984 } else if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
1985 ctx->xout->nf_output_iface = prev_nf_output_iface;
1986 } else if (prev_nf_output_iface != NF_OUT_DROP &&
1987 ctx->xout->nf_output_iface != NF_OUT_FLOOD) {
1988 ctx->xout->nf_output_iface = NF_OUT_MULTI;
1989 }
1990}
1991
1992static void
1993xlate_output_reg_action(struct xlate_ctx *ctx,
1994 const struct ofpact_output_reg *or)
1995{
1996 uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow);
1997 if (port <= UINT16_MAX) {
1998 union mf_subvalue value;
1999
2000 memset(&value, 0xff, sizeof value);
2001 mf_write_subfield_flow(&or->src, &value, &ctx->xout->wc.masks);
4e022ec0
AW
2002 xlate_output_action(ctx, u16_to_ofp(port),
2003 or->max_len, false);
9583bc14
EJ
2004 }
2005}
2006
2007static void
2008xlate_enqueue_action(struct xlate_ctx *ctx,
2009 const struct ofpact_enqueue *enqueue)
2010{
4e022ec0 2011 ofp_port_t ofp_port = enqueue->port;
9583bc14
EJ
2012 uint32_t queue_id = enqueue->queue;
2013 uint32_t flow_priority, priority;
2014 int error;
2015
2016 /* Translate queue to priority. */
89a8a7f0 2017 error = dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &priority);
9583bc14
EJ
2018 if (error) {
2019 /* Fall back to ordinary output action. */
2020 xlate_output_action(ctx, enqueue->port, 0, false);
2021 return;
2022 }
2023
2024 /* Check output port. */
2025 if (ofp_port == OFPP_IN_PORT) {
4e022ec0
AW
2026 ofp_port = ctx->xin->flow.in_port.ofp_port;
2027 } else if (ofp_port == ctx->xin->flow.in_port.ofp_port) {
9583bc14
EJ
2028 return;
2029 }
2030
2031 /* Add datapath actions. */
2032 flow_priority = ctx->xin->flow.skb_priority;
2033 ctx->xin->flow.skb_priority = priority;
2034 compose_output_action(ctx, ofp_port);
2035 ctx->xin->flow.skb_priority = flow_priority;
2036
2037 /* Update NetFlow output port. */
2038 if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
2039 ctx->xout->nf_output_iface = ofp_port;
2040 } else if (ctx->xout->nf_output_iface != NF_OUT_FLOOD) {
2041 ctx->xout->nf_output_iface = NF_OUT_MULTI;
2042 }
2043}
2044
2045static void
2046xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id)
2047{
2048 uint32_t skb_priority;
2049
89a8a7f0 2050 if (!dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &skb_priority)) {
9583bc14
EJ
2051 ctx->xin->flow.skb_priority = skb_priority;
2052 } else {
2053 /* Couldn't translate queue to a priority. Nothing to do. A warning
2054 * has already been logged. */
2055 }
2056}
2057
2058static bool
46c88433 2059slave_enabled_cb(ofp_port_t ofp_port, void *xbridge_)
9583bc14 2060{
46c88433
EJ
2061 const struct xbridge *xbridge = xbridge_;
2062 struct xport *port;
9583bc14
EJ
2063
2064 switch (ofp_port) {
2065 case OFPP_IN_PORT:
2066 case OFPP_TABLE:
2067 case OFPP_NORMAL:
2068 case OFPP_FLOOD:
2069 case OFPP_ALL:
2070 case OFPP_NONE:
2071 return true;
2072 case OFPP_CONTROLLER: /* Not supported by the bundle action. */
2073 return false;
2074 default:
46c88433 2075 port = get_ofp_port(xbridge, ofp_port);
9583bc14
EJ
2076 return port ? port->may_enable : false;
2077 }
2078}
2079
2080static void
2081xlate_bundle_action(struct xlate_ctx *ctx,
2082 const struct ofpact_bundle *bundle)
2083{
4e022ec0 2084 ofp_port_t port;
9583bc14
EJ
2085
2086 port = bundle_execute(bundle, &ctx->xin->flow, &ctx->xout->wc,
46c88433
EJ
2087 slave_enabled_cb,
2088 CONST_CAST(struct xbridge *, ctx->xbridge));
9583bc14 2089 if (bundle->dst.field) {
f74e7df7
JP
2090 nxm_reg_load(&bundle->dst, ofp_to_u16(port), &ctx->xin->flow,
2091 &ctx->xout->wc);
9583bc14
EJ
2092 } else {
2093 xlate_output_action(ctx, port, 0, false);
2094 }
2095}
2096
2097static void
2098xlate_learn_action(struct xlate_ctx *ctx,
2099 const struct ofpact_learn *learn)
2100{
3d9c5e58 2101 struct ofputil_flow_mod *fm;
9583bc14 2102 struct ofpbuf ofpacts;
9583bc14
EJ
2103
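    /* Record the learn and unwildcard the fields it reads even when learning
     * is disabled, so the installed datapath flow stays precise enough; the
     * actual flow_mod is only sent when 'may_learn' is set. */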
2104 ctx->xout->has_learn = true;
2105
2106 learn_mask(learn, &ctx->xout->wc);
2107
2108 if (!ctx->xin->may_learn) {
2109 return;
2110 }
2111
3d9c5e58
EJ
2112 fm = xmalloc(sizeof *fm);
2113 ofpbuf_init(&ofpacts, 0);
2114 learn_execute(learn, &ctx->xin->flow, fm, &ofpacts);
9583bc14 2115
3d9c5e58 2116 ofproto_dpif_flow_mod(ctx->xbridge->ofproto, fm);
9583bc14
EJ
2117}
2118
9583bc14
EJ
2119static void
2120xlate_fin_timeout(struct xlate_ctx *ctx,
2121 const struct ofpact_fin_timeout *oft)
2122{
2123 if (ctx->xin->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
70742c7f
EJ
2124 rule_dpif_reduce_timeouts(ctx->rule, oft->fin_idle_timeout,
2125 oft->fin_hard_timeout);
9583bc14
EJ
2126 }
2127}
2128
2129static void
2130xlate_sample_action(struct xlate_ctx *ctx,
2131 const struct ofpact_sample *os)
2132{
2133 union user_action_cookie cookie;
2134 /* Scale the probability from 16-bit to 32-bit while representing
2135 * the same percentage. */
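    /* For example, 0x8000 (about 50%) scales to 0x80008000, which is the same
     * fraction of UINT32_MAX. */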
2136 uint32_t probability = (os->probability << 16) | os->probability;
2137
2138 commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
1dd35f8a 2139 &ctx->xout->odp_actions, &ctx->xout->wc);
9583bc14
EJ
2140
2141 compose_flow_sample_cookie(os->probability, os->collector_set_id,
2142 os->obs_domain_id, os->obs_point_id, &cookie);
46c88433 2143 compose_sample_action(ctx->xbridge, &ctx->xout->odp_actions, &ctx->xin->flow,
9583bc14
EJ
2144 probability, &cookie, sizeof cookie.flow_sample);
2145}
2146
2147static bool
46c88433 2148may_receive(const struct xport *xport, struct xlate_ctx *ctx)
9583bc14 2149{
46c88433
EJ
2150 if (xport->config & (eth_addr_equals(ctx->xin->flow.dl_dst, eth_addr_stp)
2151 ? OFPUTIL_PC_NO_RECV_STP
2152 : OFPUTIL_PC_NO_RECV)) {
9583bc14
EJ
2153 return false;
2154 }
2155
2156 /* Only drop packets here if both forwarding and learning are
2157 * disabled. If just learning is enabled, we need to have
2158 * OFPP_NORMAL and the learning action have a look at the packet
2159 * before we can drop it. */
9d189a50 2160 if (!xport_stp_forward_state(xport) && !xport_stp_learn_state(xport)) {
9583bc14
EJ
2161 return false;
2162 }
2163
2164 return true;
2165}
2166
9583bc14
EJ
2167static void
2168do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
2169 struct xlate_ctx *ctx)
2170{
33bf9176
BP
2171 struct flow_wildcards *wc = &ctx->xout->wc;
2172 struct flow *flow = &ctx->xin->flow;
9583bc14
EJ
2173 const struct ofpact *a;
2174
9583bc14
EJ
2175 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
2176 struct ofpact_controller *controller;
2177 const struct ofpact_metadata *metadata;
2178
2179 if (ctx->exit) {
2180 break;
2181 }
2182
2183 switch (a->type) {
2184 case OFPACT_OUTPUT:
2185 xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
2186 ofpact_get_OUTPUT(a)->max_len, true);
2187 break;
2188
7395c052
NZ
2189 case OFPACT_GROUP:
2190 /* XXX not yet implemented */
2191 break;
2192
9583bc14
EJ
2193 case OFPACT_CONTROLLER:
2194 controller = ofpact_get_CONTROLLER(a);
2195 execute_controller_action(ctx, controller->max_len,
2196 controller->reason,
2197 controller->controller_id);
2198 break;
2199
2200 case OFPACT_ENQUEUE:
2201 xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a));
2202 break;
2203
2204 case OFPACT_SET_VLAN_VID:
f74e7df7 2205 wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
33bf9176
BP
2206 flow->vlan_tci &= ~htons(VLAN_VID_MASK);
2207 flow->vlan_tci |= (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid)
2208 | htons(VLAN_CFI));
9583bc14
EJ
2209 break;
2210
2211 case OFPACT_SET_VLAN_PCP:
f74e7df7 2212 wc->masks.vlan_tci |= htons(VLAN_PCP_MASK | VLAN_CFI);
33bf9176
BP
2213 flow->vlan_tci &= ~htons(VLAN_PCP_MASK);
2214 flow->vlan_tci |=
9583bc14
EJ
2215 htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp << VLAN_PCP_SHIFT)
2216 | VLAN_CFI);
2217 break;
2218
2219 case OFPACT_STRIP_VLAN:
f74e7df7 2220 memset(&wc->masks.vlan_tci, 0xff, sizeof wc->masks.vlan_tci);
33bf9176 2221 flow->vlan_tci = htons(0);
9583bc14
EJ
2222 break;
2223
2224 case OFPACT_PUSH_VLAN:
2225 /* XXX 802.1AD(QinQ) */
f74e7df7 2226 memset(&wc->masks.vlan_tci, 0xff, sizeof wc->masks.vlan_tci);
33bf9176 2227 flow->vlan_tci = htons(VLAN_CFI);
9583bc14
EJ
2228 break;
2229
2230 case OFPACT_SET_ETH_SRC:
f74e7df7 2231 memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
33bf9176 2232 memcpy(flow->dl_src, ofpact_get_SET_ETH_SRC(a)->mac, ETH_ADDR_LEN);
9583bc14
EJ
2233 break;
2234
2235 case OFPACT_SET_ETH_DST:
f74e7df7 2236 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
33bf9176 2237 memcpy(flow->dl_dst, ofpact_get_SET_ETH_DST(a)->mac, ETH_ADDR_LEN);
9583bc14
EJ
2238 break;
2239
2240 case OFPACT_SET_IPV4_SRC:
f74e7df7 2241 memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
33bf9176
BP
2242 if (flow->dl_type == htons(ETH_TYPE_IP)) {
2243 flow->nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
9583bc14
EJ
2244 }
2245 break;
2246
2247 case OFPACT_SET_IPV4_DST:
f74e7df7 2248 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
33bf9176
BP
2249 if (flow->dl_type == htons(ETH_TYPE_IP)) {
2250 flow->nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
9583bc14
EJ
2251 }
2252 break;
2253
2254 case OFPACT_SET_IPV4_DSCP:
f74e7df7 2255 wc->masks.nw_tos |= IP_DSCP_MASK;
9583bc14 2256 /* OpenFlow 1.0 only supports IPv4. */
33bf9176
BP
2257 if (flow->dl_type == htons(ETH_TYPE_IP)) {
2258 flow->nw_tos &= ~IP_DSCP_MASK;
2259 flow->nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp;
9583bc14
EJ
2260 }
2261 break;
2262
2263 case OFPACT_SET_L4_SRC_PORT:
33bf9176 2264 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
f74e7df7 2265 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
33bf9176
BP
2266 if (is_ip_any(flow)) {
2267 flow->tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
9583bc14
EJ
2268 }
2269 break;
2270
2271 case OFPACT_SET_L4_DST_PORT:
33bf9176 2272 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
f74e7df7 2273 memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
33bf9176
BP
2274 if (is_ip_any(flow)) {
2275 flow->tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
9583bc14
EJ
2276 }
2277 break;
2278
2279 case OFPACT_RESUBMIT:
2280 xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a));
2281 break;
2282
2283 case OFPACT_SET_TUNNEL:
33bf9176 2284 flow->tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
9583bc14
EJ
2285 break;
2286
2287 case OFPACT_SET_QUEUE:
2288 xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);
2289 break;
2290
2291 case OFPACT_POP_QUEUE:
33bf9176 2292 flow->skb_priority = ctx->orig_skb_priority;
9583bc14
EJ
2293 break;
2294
2295 case OFPACT_REG_MOVE:
33bf9176 2296 nxm_execute_reg_move(ofpact_get_REG_MOVE(a), flow, wc);
9583bc14
EJ
2297 break;
2298
2299 case OFPACT_REG_LOAD:
33bf9176 2300 nxm_execute_reg_load(ofpact_get_REG_LOAD(a), flow);
9583bc14
EJ
2301 break;
2302
2303 case OFPACT_STACK_PUSH:
33bf9176
BP
2304 nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), flow, wc,
2305 &ctx->stack);
9583bc14
EJ
2306 break;
2307
2308 case OFPACT_STACK_POP:
f74e7df7
JP
2309 nxm_execute_stack_pop(ofpact_get_STACK_POP(a), flow, wc,
2310 &ctx->stack);
9583bc14
EJ
2311 break;
2312
2313 case OFPACT_PUSH_MPLS:
9cfef3d0 2314 compose_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a)->ethertype);
9583bc14
EJ
2315 break;
2316
2317 case OFPACT_POP_MPLS:
9cfef3d0 2318 compose_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
9583bc14
EJ
2319 break;
2320
2321 case OFPACT_SET_MPLS_TTL:
9cfef3d0 2322 if (compose_set_mpls_ttl_action(ctx,
9583bc14 2323 ofpact_get_SET_MPLS_TTL(a)->ttl)) {
ad3efdcb 2324 return;
9583bc14
EJ
2325 }
2326 break;
2327
2328 case OFPACT_DEC_MPLS_TTL:
9cfef3d0 2329 if (compose_dec_mpls_ttl_action(ctx)) {
ad3efdcb 2330 return;
9583bc14
EJ
2331 }
2332 break;
2333
2334 case OFPACT_DEC_TTL:
f74e7df7 2335 wc->masks.nw_ttl = 0xff;
9583bc14 2336 if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
ad3efdcb 2337 return;
9583bc14
EJ
2338 }
2339 break;
2340
2341 case OFPACT_NOTE:
2342 /* Nothing to do. */
2343 break;
2344
2345 case OFPACT_MULTIPATH:
33bf9176 2346 multipath_execute(ofpact_get_MULTIPATH(a), flow, wc);
9583bc14
EJ
2347 break;
2348
2349 case OFPACT_BUNDLE:
9583bc14
EJ
2350 xlate_bundle_action(ctx, ofpact_get_BUNDLE(a));
2351 break;
2352
2353 case OFPACT_OUTPUT_REG:
2354 xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a));
2355 break;
2356
2357 case OFPACT_LEARN:
2358 xlate_learn_action(ctx, ofpact_get_LEARN(a));
2359 break;
2360
2361 case OFPACT_EXIT:
2362 ctx->exit = true;
2363 break;
2364
2365 case OFPACT_FIN_TIMEOUT:
33bf9176 2366 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
9583bc14
EJ
2367 ctx->xout->has_fin_timeout = true;
2368 xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
2369 break;
2370
2371 case OFPACT_CLEAR_ACTIONS:
2372 /* XXX
 2373 * Nothing to do because write-actions is not supported for now.
 2374 * When write-actions is supported, clear-actions must also be
 2375 * supported at the same time.
2376 */
2377 break;
2378
2379 case OFPACT_WRITE_METADATA:
2380 metadata = ofpact_get_WRITE_METADATA(a);
33bf9176
BP
2381 flow->metadata &= ~metadata->mask;
2382 flow->metadata |= metadata->metadata & metadata->mask;
9583bc14
EJ
2383 break;
2384
638a19b0
JR
2385 case OFPACT_METER:
2386 /* Not implemented yet. */
2387 break;
2388
9583bc14
EJ
2389 case OFPACT_GOTO_TABLE: {
2390 /* It is assumed that goto-table is the last action. */
2391 struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);
9583bc14
EJ
2392
2393 ovs_assert(ctx->table_id < ogt->table_id);
4468099e
EJ
2394 xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
2395 ogt->table_id, true);
9583bc14
EJ
2396 break;
2397 }
2398
2399 case OFPACT_SAMPLE:
2400 xlate_sample_action(ctx, ofpact_get_SAMPLE(a));
2401 break;
2402 }
2403 }
9583bc14
EJ
2404}
2405
2406void
2407xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
2408 const struct flow *flow, struct rule_dpif *rule,
2409 uint8_t tcp_flags, const struct ofpbuf *packet)
2410{
2411 xin->ofproto = ofproto;
2412 xin->flow = *flow;
2413 xin->packet = packet;
2414 xin->may_learn = packet != NULL;
2415 xin->rule = rule;
2416 xin->ofpacts = NULL;
2417 xin->ofpacts_len = 0;
2418 xin->tcp_flags = tcp_flags;
2419 xin->resubmit_hook = NULL;
2420 xin->report_hook = NULL;
2421 xin->resubmit_stats = NULL;
2422}
2423
2424void
2425xlate_out_uninit(struct xlate_out *xout)
2426{
2427 if (xout) {
2428 ofpbuf_uninit(&xout->odp_actions);
2429 }
2430}
2431
2432/* Translates the actions in 'xin' into datapath actions, then discards those
2433 * datapath actions, keeping only their side effects. */
2434void
2435xlate_actions_for_side_effects(struct xlate_in *xin)
2436{
2437 struct xlate_out xout;
2438
2439 xlate_actions(xin, &xout);
2440 xlate_out_uninit(&xout);
2441}
2442
2443static void
2444xlate_report(struct xlate_ctx *ctx, const char *s)
2445{
2446 if (ctx->xin->report_hook) {
4d0acc70 2447 ctx->xin->report_hook(ctx->xin, s, ctx->recurse);
9583bc14
EJ
2448 }
2449}
2450
2451void
2452xlate_out_copy(struct xlate_out *dst, const struct xlate_out *src)
2453{
2454 dst->wc = src->wc;
9583bc14
EJ
2455 dst->slow = src->slow;
2456 dst->has_learn = src->has_learn;
2457 dst->has_normal = src->has_normal;
2458 dst->has_fin_timeout = src->has_fin_timeout;
2459 dst->nf_output_iface = src->nf_output_iface;
2460 dst->mirrors = src->mirrors;
2461
2462 ofpbuf_use_stub(&dst->odp_actions, dst->odp_actions_stub,
2463 sizeof dst->odp_actions_stub);
2464 ofpbuf_put(&dst->odp_actions, src->odp_actions.data,
2465 src->odp_actions.size);
2466}
2467\f
55954f6e
EJ
2468static struct skb_priority_to_dscp *
2469get_skb_priority(const struct xport *xport, uint32_t skb_priority)
2470{
2471 struct skb_priority_to_dscp *pdscp;
2472 uint32_t hash;
2473
2474 hash = hash_int(skb_priority, 0);
2475 HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &xport->skb_priorities) {
2476 if (pdscp->skb_priority == skb_priority) {
2477 return pdscp;
2478 }
2479 }
2480 return NULL;
2481}
2482
2483static bool
2484dscp_from_skb_priority(const struct xport *xport, uint32_t skb_priority,
2485 uint8_t *dscp)
2486{
2487 struct skb_priority_to_dscp *pdscp = get_skb_priority(xport, skb_priority);
2488 *dscp = pdscp ? pdscp->dscp : 0;
2489 return pdscp != NULL;
2490}
2491
2492static void
2493clear_skb_priorities(struct xport *xport)
2494{
2495 struct skb_priority_to_dscp *pdscp, *next;
2496
2497 HMAP_FOR_EACH_SAFE (pdscp, next, hmap_node, &xport->skb_priorities) {
2498 hmap_remove(&xport->skb_priorities, &pdscp->hmap_node);
2499 free(pdscp);
2500 }
2501}
2502
ce4a6b76
BP
2503static bool
2504actions_output_to_local_port(const struct xlate_ctx *ctx)
2505{
46c88433 2506 odp_port_t local_odp_port = ofp_port_to_odp_port(ctx->xbridge, OFPP_LOCAL);
ce4a6b76
BP
2507 const struct nlattr *a;
2508 unsigned int left;
2509
2510 NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->xout->odp_actions.data,
2511 ctx->xout->odp_actions.size) {
2512 if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT
2513 && nl_attr_get_odp_port(a) == local_odp_port) {
2514 return true;
2515 }
2516 }
2517 return false;
2518}
9583bc14
EJ
2519
2520/* Translates the flow and the actions in 'xin' into datapath actions in
56450a41
BP
 2521 * 'xout'.
2522 *
2523 * The caller must take responsibility for eventually freeing 'xout', with
2524 * xlate_out_uninit(). */
9583bc14
EJ
2525void
2526xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
2527{
33bf9176
BP
2528 struct flow_wildcards *wc = &xout->wc;
2529 struct flow *flow = &xin->flow;
2530
9583bc14
EJ
2531 enum slow_path_reason special;
2532 const struct ofpact *ofpacts;
46c88433 2533 struct xport *in_port;
9583bc14
EJ
2534 struct flow orig_flow;
2535 struct xlate_ctx ctx;
2536 size_t ofpacts_len;
62a7cc71 2537 bool tnl_may_send;
9583bc14 2538
46c88433 2539 COVERAGE_INC(xlate_actions);
9583bc14 2540
dc24a00f
EJ
2541 ovs_rwlock_rdlock(&xlate_rwlock);
2542
9583bc14
EJ
2543 /* Flow initialization rules:
2544 * - 'base_flow' must match the kernel's view of the packet at the
2545 * time that action processing starts. 'flow' represents any
2546 * transformations we wish to make through actions.
2547 * - By default 'base_flow' and 'flow' are the same since the input
2548 * packet matches the output before any actions are applied.
2549 * - When using VLAN splinters, 'base_flow''s VLAN is set to the value
2550 * of the received packet as seen by the kernel. If we later output
2551 * to another device without any modifications this will cause us to
2552 * insert a new tag since the original one was stripped off by the
2553 * VLAN device.
2554 * - Tunnel metadata as received is retained in 'flow'. This allows
2555 * tunnel metadata matching also in later tables.
2556 * Since a kernel action for setting the tunnel metadata will only be
2557 * generated with actual tunnel output, changing the tunnel metadata
2558 * values in 'flow' (such as tun_id) will only have effect with a later
2559 * tunnel output action.
2560 * - Tunnel 'base_flow' is completely cleared since that is what the
2561 * kernel does. If we wish to maintain the original values an action
2562 * needs to be generated. */
2563
2564 ctx.xin = xin;
2565 ctx.xout = xout;
46c88433
EJ
2566 ctx.xout->slow = 0;
2567 ctx.xout->has_learn = false;
2568 ctx.xout->has_normal = false;
2569 ctx.xout->has_fin_timeout = false;
2570 ctx.xout->nf_output_iface = NF_OUT_DROP;
2571 ctx.xout->mirrors = 0;
2572 ofpbuf_use_stub(&ctx.xout->odp_actions, ctx.xout->odp_actions_stub,
2573 sizeof ctx.xout->odp_actions_stub);
2574 ofpbuf_reserve(&ctx.xout->odp_actions, NL_A_U32_SIZE);
2575
2576 ctx.xbridge = xbridge_lookup(xin->ofproto);
2577 if (!ctx.xbridge) {
dc24a00f 2578 goto out;
46c88433 2579 }
9583bc14 2580
9583bc14
EJ
2581 ctx.rule = xin->rule;
2582
33bf9176 2583 ctx.base_flow = *flow;
9583bc14 2584 memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
33bf9176 2585 ctx.orig_tunnel_ip_dst = flow->tunnel.ip_dst;
9583bc14 2586
33bf9176
BP
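    /* Start from a catch-all mask, then unwildcard the fields that every
     * translation examines: in_port, skb_priority, dl_type, and the IP
     * fragment bits. */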
2587 flow_wildcards_init_catchall(wc);
2588 memset(&wc->masks.in_port, 0xff, sizeof wc->masks.in_port);
1dd35f8a 2589 memset(&wc->masks.skb_priority, 0xff, sizeof wc->masks.skb_priority);
7431e171 2590 memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
1dd35f8a 2591 wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
9583bc14 2592
62a7cc71 2593 tnl_may_send = tnl_xlate_init(&ctx.base_flow, flow, wc);
46c88433 2594 if (ctx.xbridge->has_netflow) {
9b658910 2595 netflow_mask_wc(flow, wc);
9583bc14
EJ
2596 }
2597
9583bc14 2598 ctx.recurse = 0;
33bf9176 2599 ctx.orig_skb_priority = flow->skb_priority;
9583bc14
EJ
2600 ctx.table_id = 0;
2601 ctx.exit = false;
2602
2603 if (xin->ofpacts) {
2604 ofpacts = xin->ofpacts;
2605 ofpacts_len = xin->ofpacts_len;
2606 } else if (xin->rule) {
70742c7f 2607 rule_dpif_get_actions(xin->rule, &ofpacts, &ofpacts_len);
9583bc14
EJ
2608 } else {
2609 NOT_REACHED();
2610 }
2611
2612 ofpbuf_use_stub(&ctx.stack, ctx.init_stack, sizeof ctx.init_stack);
2613
ade6ad9c 2614 if (mbridge_has_mirrors(ctx.xbridge->mbridge)) {
9583bc14
EJ
2615 /* Do this conditionally because the copy is expensive enough that it
2616 * shows up in profiles. */
33bf9176 2617 orig_flow = *flow;
9583bc14
EJ
2618 }
2619
33bf9176 2620 if (flow->nw_frag & FLOW_NW_FRAG_ANY) {
46c88433 2621 switch (ctx.xbridge->frag) {
9583bc14
EJ
2622 case OFPC_FRAG_NORMAL:
2623 /* We must pretend that transport ports are unavailable. */
33bf9176
BP
2624 flow->tp_src = ctx.base_flow.tp_src = htons(0);
2625 flow->tp_dst = ctx.base_flow.tp_dst = htons(0);
9583bc14
EJ
2626 break;
2627
2628 case OFPC_FRAG_DROP:
dc24a00f 2629 goto out;
9583bc14
EJ
2630
2631 case OFPC_FRAG_REASM:
2632 NOT_REACHED();
2633
2634 case OFPC_FRAG_NX_MATCH:
2635 /* Nothing to do. */
2636 break;
2637
2638 case OFPC_INVALID_TTL_TO_CONTROLLER:
2639 NOT_REACHED();
2640 }
2641 }
2642
46c88433 2643 in_port = get_ofp_port(ctx.xbridge, flow->in_port.ofp_port);
642dc74d 2644 special = process_special(&ctx, flow, in_port, ctx.xin->packet);
9583bc14
EJ
2645 if (special) {
2646 ctx.xout->slow = special;
2647 } else {
9583bc14 2648 size_t sample_actions_len;
9583bc14 2649
4e022ec0 2650 if (flow->in_port.ofp_port
46c88433
EJ
2651 != vsp_realdev_to_vlandev(ctx.xbridge->ofproto,
2652 flow->in_port.ofp_port,
33bf9176 2653 flow->vlan_tci)) {
9583bc14
EJ
2654 ctx.base_flow.vlan_tci = 0;
2655 }
2656
2657 add_sflow_action(&ctx);
2658 add_ipfix_action(&ctx);
2659 sample_actions_len = ctx.xout->odp_actions.size;
2660
62a7cc71 2661 if (tnl_may_send && (!in_port || may_receive(in_port, &ctx))) {
9583bc14
EJ
2662 do_xlate_actions(ofpacts, ofpacts_len, &ctx);
2663
2664 /* We've let OFPP_NORMAL and the learning action look at the
2665 * packet, so drop it now if forwarding is disabled. */
9d189a50 2666 if (in_port && !xport_stp_forward_state(in_port)) {
9583bc14
EJ
2667 ctx.xout->odp_actions.size = sample_actions_len;
2668 }
2669 }
2670
46c88433 2671 if (ctx.xbridge->has_in_band
ce4a6b76
BP
2672 && in_band_must_output_to_local_port(flow)
2673 && !actions_output_to_local_port(&ctx)) {
9583bc14
EJ
2674 compose_output_action(&ctx, OFPP_LOCAL);
2675 }
aaa0fbae
EJ
2676
2677 fix_sflow_action(&ctx);
2678
46c88433 2679 if (mbridge_has_mirrors(ctx.xbridge->mbridge)) {
9583bc14
EJ
2680 add_mirror_actions(&ctx, &orig_flow);
2681 }
9583bc14
EJ
2682 }
2683
2684 ofpbuf_uninit(&ctx.stack);
2685
2686 /* Clear the metadata and register wildcard masks, because we won't
2687 * use non-header fields as part of the cache. */
33bf9176
BP
2688 memset(&wc->masks.metadata, 0, sizeof wc->masks.metadata);
2689 memset(&wc->masks.regs, 0, sizeof wc->masks.regs);
dc24a00f
EJ
2690
2691out:
2692 ovs_rwlock_unlock(&xlate_rwlock);
9583bc14 2693}