/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */

#include <config.h>

#include "ofproto/ofproto-dpif-xlate.h"

#include <errno.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <sys/socket.h>
#include <netinet/in.h>

#include "tnl-arp-cache.h"
#include "bfd.h"
#include "bitmap.h"
#include "bond.h"
#include "bundle.h"
#include "byte-order.h"
#include "cfm.h"
#include "connmgr.h"
#include "coverage.h"
#include "dpif.h"
#include "dynamic-string.h"
#include "in-band.h"
#include "lacp.h"
#include "learn.h"
#include "list.h"
#include "mac-learning.h"
#include "mcast-snooping.h"
#include "meta-flow.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "netlink.h"
#include "nx-match.h"
#include "odp-execute.h"
#include "ofp-actions.h"
#include "ofproto/ofproto-dpif-ipfix.h"
#include "ofproto/ofproto-dpif-mirror.h"
#include "ofproto/ofproto-dpif-monitor.h"
#include "ofproto/ofproto-dpif-sflow.h"
#include "ofproto/ofproto-dpif.h"
#include "ofproto/ofproto-provider.h"
#include "packet-dpif.h"
#include "ovs-router.h"
#include "tnl-ports.h"
#include "tunnel.h"
#include "openvswitch/vlog.h"

COVERAGE_DEFINE(xlate_actions);
COVERAGE_DEFINE(xlate_actions_oversize);
COVERAGE_DEFINE(xlate_actions_too_many_output);

VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);

/* Maximum depth of flow table recursion (due to resubmit actions) in a
 * flow translation. */
#define MAX_RESUBMIT_RECURSION 64
#define MAX_INTERNAL_RESUBMITS 1   /* Max resubmits allowed using rules in
                                      internal table. */

/* Timeout for internal rules created to handle recirculation */
#define RECIRC_TIMEOUT 60

/* Maximum number of resubmit actions in a flow translation, whether they are
 * recursive or not. */
#define MAX_RESUBMITS (MAX_RESUBMIT_RECURSION * MAX_RESUBMIT_RECURSION)

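/* Taken together, these limits bound the work a single translation can do:
 * resubmits may nest at most MAX_RESUBMIT_RECURSION (64) levels deep, and a
 * translation may perform at most MAX_RESUBMITS (64 * 64 = 4096) resubmits
 * in total, nested or not, before further resubmits are refused. */
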
struct xbridge {
    struct hmap_node hmap_node;   /* Node in global 'xbridges' map. */
    struct ofproto_dpif *ofproto; /* Key in global 'xbridges' map. */

    struct ovs_list xbundles;     /* Owned xbundles. */
    struct hmap xports;           /* Indexed by ofp_port. */

    char *name;                   /* Name used in log messages. */
    struct dpif *dpif;            /* Datapath interface. */
    struct mac_learning *ml;      /* Mac learning handle. */
    struct mcast_snooping *ms;    /* Multicast Snooping handle. */
    struct mbridge *mbridge;      /* Mirroring. */
    struct dpif_sflow *sflow;     /* SFlow handle, or null. */
    struct dpif_ipfix *ipfix;     /* Ipfix handle, or null. */
    struct netflow *netflow;      /* Netflow handle, or null. */
    struct stp *stp;              /* STP or null if disabled. */
    struct rstp *rstp;            /* RSTP or null if disabled. */

    bool has_in_band;             /* Bridge has in band control? */
    bool forward_bpdu;            /* Bridge forwards STP BPDUs? */

    /* True if the datapath supports recirculation. */
    bool enable_recirc;

    /* True if the datapath supports variable-length
     * OVS_USERSPACE_ATTR_USERDATA in OVS_ACTION_ATTR_USERSPACE actions.
     * False if the datapath supports only 8-byte (or shorter) userdata. */
    bool variable_length_userdata;

    /* Number of MPLS label stack entries that the datapath supports
     * in matches. */
    size_t max_mpls_depth;

    /* True if the datapath supports masked data in OVS_ACTION_ATTR_SET
     * actions. */
    bool masked_set_action;
};

struct xbundle {
    struct hmap_node hmap_node;    /* In global 'xbundles' map. */
    struct ofbundle *ofbundle;     /* Key in global 'xbundles' map. */

    struct ovs_list list_node;     /* In parent 'xbridges' list. */
    struct xbridge *xbridge;       /* Parent xbridge. */

    struct ovs_list xports;        /* Contains "struct xport"s. */

    char *name;                    /* Name used in log messages. */
    struct bond *bond;             /* Nonnull iff more than one port. */
    struct lacp *lacp;             /* LACP handle or null. */

    enum port_vlan_mode vlan_mode; /* VLAN mode. */
    int vlan;                      /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;         /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                    * NULL if all VLANs are trunked. */
    bool use_priority_tags;        /* Use 802.1p tag for frames in VLAN 0? */
    bool floodable;                /* No port has OFPUTIL_PC_NO_FLOOD set? */
};

struct xport {
    struct hmap_node hmap_node;      /* Node in global 'xports' map. */
    struct ofport_dpif *ofport;      /* Key in global 'xports' map. */

    struct hmap_node ofp_node;       /* Node in parent xbridge 'xports' map. */
    ofp_port_t ofp_port;             /* Key in parent xbridge 'xports' map. */

    odp_port_t odp_port;             /* Datapath port number or ODPP_NONE. */

    struct ovs_list bundle_node;     /* In parent xbundle (if it exists). */
    struct xbundle *xbundle;         /* Parent xbundle or null. */

    struct netdev *netdev;           /* 'ofport''s netdev. */

    struct xbridge *xbridge;         /* Parent bridge. */
    struct xport *peer;              /* Patch port peer or null. */

    enum ofputil_port_config config; /* OpenFlow port configuration. */
    enum ofputil_port_state state;   /* OpenFlow port state. */
    int stp_port_no;                 /* STP port number or -1 if not in use. */
    struct rstp_port *rstp_port;     /* RSTP port or null. */

    struct hmap skb_priorities;      /* Map of 'skb_priority_to_dscp's. */

    bool may_enable;                 /* May be enabled in bonds. */
    bool is_tunnel;                  /* Is a tunnel port. */

    struct cfm *cfm;                 /* CFM handle or null. */
    struct bfd *bfd;                 /* BFD handle or null. */
};

struct xlate_ctx {
    struct xlate_in *xin;
    struct xlate_out *xout;

    const struct xbridge *xbridge;

    /* Flow at the last commit. */
    struct flow base_flow;

    /* Tunnel IP destination address as received.  This is stored separately
     * as the base_flow.tunnel is cleared on init to reflect the datapath
     * behavior.  Used to make sure not to send tunneled output to ourselves,
     * which might lead to an infinite loop.  This could happen easily
     * if a tunnel is marked as 'remote_ip=flow', and the flow does not
     * actually set the tun_dst field. */
    ovs_be32 orig_tunnel_ip_dst;

    /* Stack for the push and pop actions.  Each stack element is of type
     * "union mf_subvalue". */
    union mf_subvalue init_stack[1024 / sizeof(union mf_subvalue)];
    struct ofpbuf stack;

    /* The rule that we are currently translating, or NULL. */
    struct rule_dpif *rule;

    /* Resubmit statistics, via xlate_table_action(). */
    int recurse;                /* Current resubmit nesting depth. */
    int resubmits;              /* Total number of resubmits. */
    bool in_group;              /* Currently translating ofgroup, if true. */
    bool in_action_set;         /* Currently translating action_set, if true. */

    uint32_t orig_skb_priority; /* Priority when packet arrived. */
    uint8_t table_id;           /* OpenFlow table ID where flow was found. */
    uint32_t sflow_n_outputs;   /* Number of output ports. */
    odp_port_t sflow_odp_port;  /* Output port for composing sFlow action. */
    uint16_t user_cookie_offset; /* Used for user_action_cookie fixup. */
    bool exit;                  /* No further actions should be processed. */

    bool use_recirc;            /* Should generate recirc? */
    struct xlate_recirc recirc; /* Information used for generating
                                 * recirculation actions. */

    /* True if a packet was but is no longer MPLS (due to an MPLS pop action).
     * This is a trigger for recirculation in cases where translating an action
     * or looking up a flow requires access to the fields of the packet after
     * the MPLS label stack that was originally present. */
    bool was_mpls;

    /* OpenFlow 1.1+ action set.
     *
     * 'action_set' accumulates "struct ofpact"s added by OFPACT_WRITE_ACTIONS.
     * When translation is otherwise complete, ofpacts_execute_action_set()
     * converts it to a set of "struct ofpact"s that can be translated into
     * datapath actions. */
    bool action_set_has_group;  /* Action set contains OFPACT_GROUP? */
    struct ofpbuf action_set;   /* Action set. */
    uint64_t action_set_stub[1024 / 8];
};

/* A controller may use OFPP_NONE as the ingress port to indicate that
 * it did not arrive on a "real" port.  'ofpp_none_bundle' exists for
 * when an input bundle is needed for validation (e.g., mirroring or
 * OFPP_NORMAL processing).  It is not connected to an 'ofproto' and does not
 * have any 'port' structs, so care must be taken when dealing with it. */
static struct xbundle ofpp_none_bundle = {
    .name = "OFPP_NONE",
    .vlan_mode = PORT_VLAN_TRUNK
};

/* Node in 'xport''s 'skb_priorities' map.  Used to maintain a map from
 * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
 * traffic egressing the 'ofport' with that priority should be marked with. */
struct skb_priority_to_dscp {
    struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'skb_priorities'. */
    uint32_t skb_priority;      /* Priority of this queue (see struct flow). */

    uint8_t dscp;               /* DSCP bits to mark outgoing traffic with. */
};

enum xc_type {
    XC_RULE,
    XC_BOND,
    XC_NETDEV,
    XC_NETFLOW,
    XC_MIRROR,
    XC_LEARN,
    XC_NORMAL,
    XC_FIN_TIMEOUT,
    XC_GROUP,
    XC_TNL_ARP,
};

/* xlate_cache entries hold enough information to perform the side effects of
 * xlate_actions() for a rule, without needing to perform rule translation
 * from scratch.  The primary usage of these is to submit statistics to objects
 * that a flow relates to, although they may be used for other effects as well
 * (for instance, refreshing hard timeouts for learned flows). */
struct xc_entry {
    enum xc_type type;
    union {
        struct rule_dpif *rule;
        struct {
            struct netdev *tx;
            struct netdev *rx;
            struct bfd *bfd;
        } dev;
        struct {
            struct netflow *netflow;
            struct flow *flow;
            ofp_port_t iface;
        } nf;
        struct {
            struct mbridge *mbridge;
            mirror_mask_t mirrors;
        } mirror;
        struct {
            struct bond *bond;
            struct flow *flow;
            uint16_t vid;
        } bond;
        struct {
            struct ofproto_dpif *ofproto;
            struct ofputil_flow_mod *fm;
            struct ofpbuf *ofpacts;
        } learn;
        struct {
            struct ofproto_dpif *ofproto;
            struct flow *flow;
            int vlan;
        } normal;
        struct {
            struct rule_dpif *rule;
            uint16_t idle;
            uint16_t hard;
        } fin;
        struct {
            struct group_dpif *group;
            struct ofputil_bucket *bucket;
        } group;
        struct {
            char br_name[IFNAMSIZ];
            ovs_be32 d_ip;
        } tnl_arp_cache;
    } u;
};

#define XC_ENTRY_FOR_EACH(entry, entries, xcache)               \
    entries = xcache->entries;                                  \
    for (entry = ofpbuf_try_pull(&entries, sizeof *entry);      \
         entry;                                                 \
         entry = ofpbuf_try_pull(&entries, sizeof *entry))

struct xlate_cache {
    struct ofpbuf entries;
};

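/* An illustrative sketch of walking a cache with XC_ENTRY_FOR_EACH, in the
 * style of the stats-push code later in this file ('push_dev_stats' is a
 * hypothetical helper, not a real function):
 *
 *     struct xc_entry *entry;
 *     struct ofpbuf entries;
 *
 *     XC_ENTRY_FOR_EACH (entry, entries, xcache) {
 *         switch (entry->type) {
 *         case XC_NETDEV:
 *             push_dev_stats(entry->u.dev.tx, entry->u.dev.rx);
 *             break;
 *         default:
 *             break;
 *         }
 *     }
 *
 * Note that the macro itself assigns 'entries' from 'xcache->entries' and
 * pulls one fixed-size entry per iteration. */
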
/* Xlate config contains hash maps of all bridges, bundles and ports.
 * Xcfgp contains the pointer to the current xlate configuration.
 * When the main thread needs to change the configuration, it copies xcfgp to
 * new_xcfg and edits new_xcfg.  This enables the use of RCU locking which
 * does not block handler and revalidator threads. */
struct xlate_cfg {
    struct hmap xbridges;
    struct hmap xbundles;
    struct hmap xports;
};
static OVSRCU_TYPE(struct xlate_cfg *) xcfgp = OVSRCU_INITIALIZER(NULL);
static struct xlate_cfg *new_xcfg = NULL;
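
/* The read side takes a snapshot of the current configuration without
 * locking; a minimal sketch, matching how the lookup helpers below use it:
 *
 *     struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
 *     struct xbridge *xbridge = xbridge_lookup(xcfg, ofproto);
 *
 * The snapshot remains safe to dereference until the reading thread
 * quiesces; xlate_txn_commit() below relies on ovsrcu_synchronize() before
 * freeing the old configuration. */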

static bool may_receive(const struct xport *, struct xlate_ctx *);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
                             struct xlate_ctx *);
static void xlate_normal(struct xlate_ctx *);
static inline void xlate_report(struct xlate_ctx *, const char *);
static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
                               uint8_t table_id, bool may_packet_in,
                               bool honor_table_miss);
static bool input_vid_is_valid(uint16_t vid, struct xbundle *, bool warn);
static uint16_t input_vid_to_vlan(const struct xbundle *, uint16_t vid);
static void output_normal(struct xlate_ctx *, const struct xbundle *,
                          uint16_t vlan);
static void compose_output_action(struct xlate_ctx *, ofp_port_t ofp_port);

static struct xbridge *xbridge_lookup(struct xlate_cfg *,
                                      const struct ofproto_dpif *);
static struct xbundle *xbundle_lookup(struct xlate_cfg *,
                                      const struct ofbundle *);
static struct xport *xport_lookup(struct xlate_cfg *,
                                  const struct ofport_dpif *);
static struct xport *get_ofp_port(const struct xbridge *, ofp_port_t ofp_port);
static struct skb_priority_to_dscp *get_skb_priority(const struct xport *,
                                                     uint32_t skb_priority);
static void clear_skb_priorities(struct xport *);
static size_t count_skb_priorities(const struct xport *);
static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
                                   uint8_t *dscp);

static struct xc_entry *xlate_cache_add_entry(struct xlate_cache *xc,
                                              enum xc_type type);
static void xlate_xbridge_init(struct xlate_cfg *, struct xbridge *);
static void xlate_xbundle_init(struct xlate_cfg *, struct xbundle *);
static void xlate_xport_init(struct xlate_cfg *, struct xport *);
static void xlate_xbridge_set(struct xbridge *, struct dpif *,
                              const struct mac_learning *, struct stp *,
                              struct rstp *, const struct mcast_snooping *,
                              const struct mbridge *,
                              const struct dpif_sflow *,
                              const struct dpif_ipfix *,
                              const struct netflow *,
                              bool forward_bpdu, bool has_in_band,
                              bool enable_recirc,
                              bool variable_length_userdata,
                              size_t max_mpls_depth,
                              bool masked_set_action);
static void xlate_xbundle_set(struct xbundle *xbundle,
                              enum port_vlan_mode vlan_mode, int vlan,
                              unsigned long *trunks, bool use_priority_tags,
                              const struct bond *bond, const struct lacp *lacp,
                              bool floodable);
static void xlate_xport_set(struct xport *xport, odp_port_t odp_port,
                            const struct netdev *netdev, const struct cfm *cfm,
                            const struct bfd *bfd, int stp_port_no,
                            const struct rstp_port *rstp_port,
                            enum ofputil_port_config config,
                            enum ofputil_port_state state, bool is_tunnel,
                            bool may_enable);
static void xlate_xbridge_remove(struct xlate_cfg *, struct xbridge *);
static void xlate_xbundle_remove(struct xlate_cfg *, struct xbundle *);
static void xlate_xport_remove(struct xlate_cfg *, struct xport *);
static void xlate_xbridge_copy(struct xbridge *);
static void xlate_xbundle_copy(struct xbridge *, struct xbundle *);
static void xlate_xport_copy(struct xbridge *, struct xbundle *,
                             struct xport *);
static void xlate_xcfg_free(struct xlate_cfg *);

static inline void
xlate_report(struct xlate_ctx *ctx, const char *s)
{
    if (OVS_UNLIKELY(ctx->xin->report_hook)) {
        ctx->xin->report_hook(ctx->xin, s, ctx->recurse);
    }
}

static void
xlate_xbridge_init(struct xlate_cfg *xcfg, struct xbridge *xbridge)
{
    list_init(&xbridge->xbundles);
    hmap_init(&xbridge->xports);
    hmap_insert(&xcfg->xbridges, &xbridge->hmap_node,
                hash_pointer(xbridge->ofproto, 0));
}

static void
xlate_xbundle_init(struct xlate_cfg *xcfg, struct xbundle *xbundle)
{
    list_init(&xbundle->xports);
    list_insert(&xbundle->xbridge->xbundles, &xbundle->list_node);
    hmap_insert(&xcfg->xbundles, &xbundle->hmap_node,
                hash_pointer(xbundle->ofbundle, 0));
}

static void
xlate_xport_init(struct xlate_cfg *xcfg, struct xport *xport)
{
    hmap_init(&xport->skb_priorities);
    hmap_insert(&xcfg->xports, &xport->hmap_node,
                hash_pointer(xport->ofport, 0));
    hmap_insert(&xport->xbridge->xports, &xport->ofp_node,
                hash_ofp_port(xport->ofp_port));
}

static void
xlate_xbridge_set(struct xbridge *xbridge,
                  struct dpif *dpif,
                  const struct mac_learning *ml, struct stp *stp,
                  struct rstp *rstp, const struct mcast_snooping *ms,
                  const struct mbridge *mbridge,
                  const struct dpif_sflow *sflow,
                  const struct dpif_ipfix *ipfix,
                  const struct netflow *netflow,
                  bool forward_bpdu, bool has_in_band,
                  bool enable_recirc,
                  bool variable_length_userdata,
                  size_t max_mpls_depth,
                  bool masked_set_action)
{
    if (xbridge->ml != ml) {
        mac_learning_unref(xbridge->ml);
        xbridge->ml = mac_learning_ref(ml);
    }

    if (xbridge->ms != ms) {
        mcast_snooping_unref(xbridge->ms);
        xbridge->ms = mcast_snooping_ref(ms);
    }

    if (xbridge->mbridge != mbridge) {
        mbridge_unref(xbridge->mbridge);
        xbridge->mbridge = mbridge_ref(mbridge);
    }

    if (xbridge->sflow != sflow) {
        dpif_sflow_unref(xbridge->sflow);
        xbridge->sflow = dpif_sflow_ref(sflow);
    }

    if (xbridge->ipfix != ipfix) {
        dpif_ipfix_unref(xbridge->ipfix);
        xbridge->ipfix = dpif_ipfix_ref(ipfix);
    }

    if (xbridge->stp != stp) {
        stp_unref(xbridge->stp);
        xbridge->stp = stp_ref(stp);
    }

    if (xbridge->rstp != rstp) {
        rstp_unref(xbridge->rstp);
        xbridge->rstp = rstp_ref(rstp);
    }

    if (xbridge->netflow != netflow) {
        netflow_unref(xbridge->netflow);
        xbridge->netflow = netflow_ref(netflow);
    }

    xbridge->dpif = dpif;
    xbridge->forward_bpdu = forward_bpdu;
    xbridge->has_in_band = has_in_band;
    xbridge->enable_recirc = enable_recirc;
    xbridge->variable_length_userdata = variable_length_userdata;
    xbridge->max_mpls_depth = max_mpls_depth;
    xbridge->masked_set_action = masked_set_action;
}
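
/* Each conditional block above follows the same reference-swap idiom: when
 * the configured object changes, the xbridge drops its reference to the old
 * object and takes a new reference on the replacement.  The equality check
 * makes the common no-change case free and avoids unref'ing an object that
 * is about to be re-ref'd. */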

static void
xlate_xbundle_set(struct xbundle *xbundle,
                  enum port_vlan_mode vlan_mode, int vlan,
                  unsigned long *trunks, bool use_priority_tags,
                  const struct bond *bond, const struct lacp *lacp,
                  bool floodable)
{
    ovs_assert(xbundle->xbridge);

    xbundle->vlan_mode = vlan_mode;
    xbundle->vlan = vlan;
    xbundle->trunks = trunks;
    xbundle->use_priority_tags = use_priority_tags;
    xbundle->floodable = floodable;

    if (xbundle->bond != bond) {
        bond_unref(xbundle->bond);
        xbundle->bond = bond_ref(bond);
    }

    if (xbundle->lacp != lacp) {
        lacp_unref(xbundle->lacp);
        xbundle->lacp = lacp_ref(lacp);
    }
}

static void
xlate_xport_set(struct xport *xport, odp_port_t odp_port,
                const struct netdev *netdev, const struct cfm *cfm,
                const struct bfd *bfd, int stp_port_no,
                const struct rstp_port *rstp_port,
                enum ofputil_port_config config, enum ofputil_port_state state,
                bool is_tunnel, bool may_enable)
{
    xport->config = config;
    xport->state = state;
    xport->stp_port_no = stp_port_no;
    xport->is_tunnel = is_tunnel;
    xport->may_enable = may_enable;
    xport->odp_port = odp_port;

    if (xport->rstp_port != rstp_port) {
        rstp_port_unref(xport->rstp_port);
        xport->rstp_port = rstp_port_ref(rstp_port);
    }

    if (xport->cfm != cfm) {
        cfm_unref(xport->cfm);
        xport->cfm = cfm_ref(cfm);
    }

    if (xport->bfd != bfd) {
        bfd_unref(xport->bfd);
        xport->bfd = bfd_ref(bfd);
    }

    if (xport->netdev != netdev) {
        netdev_close(xport->netdev);
        xport->netdev = netdev_ref(netdev);
    }
}

static void
xlate_xbridge_copy(struct xbridge *xbridge)
{
    struct xbundle *xbundle;
    struct xport *xport;
    struct xbridge *new_xbridge = xzalloc(sizeof *xbridge);
    new_xbridge->ofproto = xbridge->ofproto;
    new_xbridge->name = xstrdup(xbridge->name);
    xlate_xbridge_init(new_xcfg, new_xbridge);

    xlate_xbridge_set(new_xbridge,
                      xbridge->dpif, xbridge->ml, xbridge->stp,
                      xbridge->rstp, xbridge->ms, xbridge->mbridge,
                      xbridge->sflow, xbridge->ipfix, xbridge->netflow,
                      xbridge->forward_bpdu,
                      xbridge->has_in_band, xbridge->enable_recirc,
                      xbridge->variable_length_userdata,
                      xbridge->max_mpls_depth, xbridge->masked_set_action);
    LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
        xlate_xbundle_copy(new_xbridge, xbundle);
    }

    /* Copy xports which are not part of an xbundle. */
    HMAP_FOR_EACH (xport, ofp_node, &xbridge->xports) {
        if (!xport->xbundle) {
            xlate_xport_copy(new_xbridge, NULL, xport);
        }
    }
}

static void
xlate_xbundle_copy(struct xbridge *xbridge, struct xbundle *xbundle)
{
    struct xport *xport;
    struct xbundle *new_xbundle = xzalloc(sizeof *xbundle);
    new_xbundle->ofbundle = xbundle->ofbundle;
    new_xbundle->xbridge = xbridge;
    new_xbundle->name = xstrdup(xbundle->name);
    xlate_xbundle_init(new_xcfg, new_xbundle);

    xlate_xbundle_set(new_xbundle, xbundle->vlan_mode,
                      xbundle->vlan, xbundle->trunks,
                      xbundle->use_priority_tags, xbundle->bond, xbundle->lacp,
                      xbundle->floodable);
    LIST_FOR_EACH (xport, bundle_node, &xbundle->xports) {
        xlate_xport_copy(xbridge, new_xbundle, xport);
    }
}

static void
xlate_xport_copy(struct xbridge *xbridge, struct xbundle *xbundle,
                 struct xport *xport)
{
    struct skb_priority_to_dscp *pdscp, *new_pdscp;
    struct xport *new_xport = xzalloc(sizeof *xport);
    new_xport->ofport = xport->ofport;
    new_xport->ofp_port = xport->ofp_port;
    new_xport->xbridge = xbridge;
    xlate_xport_init(new_xcfg, new_xport);

    xlate_xport_set(new_xport, xport->odp_port, xport->netdev, xport->cfm,
                    xport->bfd, xport->stp_port_no, xport->rstp_port,
                    xport->config, xport->state, xport->is_tunnel,
                    xport->may_enable);

    if (xport->peer) {
        struct xport *peer = xport_lookup(new_xcfg, xport->peer->ofport);
        if (peer) {
            new_xport->peer = peer;
            new_xport->peer->peer = new_xport;
        }
    }

    if (xbundle) {
        new_xport->xbundle = xbundle;
        list_insert(&new_xport->xbundle->xports, &new_xport->bundle_node);
    }

    HMAP_FOR_EACH (pdscp, hmap_node, &xport->skb_priorities) {
        new_pdscp = xmalloc(sizeof *pdscp);
        new_pdscp->skb_priority = pdscp->skb_priority;
        new_pdscp->dscp = pdscp->dscp;
        hmap_insert(&new_xport->skb_priorities, &new_pdscp->hmap_node,
                    hash_int(new_pdscp->skb_priority, 0));
    }
}

/* Sets the current xlate configuration to new_xcfg and frees the old xlate
 * configuration in xcfgp.
 *
 * This needs to be called after editing the xlate configuration.
 *
 * Functions that edit the new xlate configuration are
 * xlate_<ofproto/bundle/ofport>_set and xlate_<ofproto/bundle/ofport>_remove.
 *
 * A sample workflow:
 *
 *     xlate_txn_start();
 *     ...
 *     edit_xlate_configuration();
 *     ...
 *     xlate_txn_commit(); */
void
xlate_txn_commit(void)
{
    struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);

    ovsrcu_set(&xcfgp, new_xcfg);
    ovsrcu_synchronize();
    xlate_xcfg_free(xcfg);
    new_xcfg = NULL;
}

/* Copies the current xlate configuration in xcfgp to new_xcfg.
 *
 * This needs to be called prior to editing the xlate configuration. */
void
xlate_txn_start(void)
{
    struct xbridge *xbridge;
    struct xlate_cfg *xcfg;

    ovs_assert(!new_xcfg);

    new_xcfg = xmalloc(sizeof *new_xcfg);
    hmap_init(&new_xcfg->xbridges);
    hmap_init(&new_xcfg->xbundles);
    hmap_init(&new_xcfg->xports);

    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    if (!xcfg) {
        return;
    }

    HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
        xlate_xbridge_copy(xbridge);
    }
}
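
/* A concrete shape of the transaction, as the bridge reconfiguration code in
 * ofproto-dpif drives it; the argument names here are illustrative
 * placeholders for the caller's state:
 *
 *     xlate_txn_start();
 *     xlate_ofproto_set(ofproto, name, dpif, ml, stp, rstp, ms, mbridge,
 *                       sflow, ipfix, netflow, forward_bpdu, has_in_band,
 *                       enable_recirc, variable_length_userdata,
 *                       max_mpls_depth, masked_set_action);
 *     xlate_txn_commit();
 */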

static void
xlate_xcfg_free(struct xlate_cfg *xcfg)
{
    struct xbridge *xbridge, *next_xbridge;

    if (!xcfg) {
        return;
    }

    HMAP_FOR_EACH_SAFE (xbridge, next_xbridge, hmap_node, &xcfg->xbridges) {
        xlate_xbridge_remove(xcfg, xbridge);
    }

    hmap_destroy(&xcfg->xbridges);
    hmap_destroy(&xcfg->xbundles);
    hmap_destroy(&xcfg->xports);
    free(xcfg);
}

void
xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
                  struct dpif *dpif,
                  const struct mac_learning *ml, struct stp *stp,
                  struct rstp *rstp, const struct mcast_snooping *ms,
                  const struct mbridge *mbridge,
                  const struct dpif_sflow *sflow,
                  const struct dpif_ipfix *ipfix,
                  const struct netflow *netflow,
                  bool forward_bpdu, bool has_in_band, bool enable_recirc,
                  bool variable_length_userdata, size_t max_mpls_depth,
                  bool masked_set_action)
{
    struct xbridge *xbridge;

    ovs_assert(new_xcfg);

    xbridge = xbridge_lookup(new_xcfg, ofproto);
    if (!xbridge) {
        xbridge = xzalloc(sizeof *xbridge);
        xbridge->ofproto = ofproto;

        xlate_xbridge_init(new_xcfg, xbridge);
    }

    free(xbridge->name);
    xbridge->name = xstrdup(name);

    xlate_xbridge_set(xbridge, dpif, ml, stp, rstp, ms, mbridge, sflow, ipfix,
                      netflow, forward_bpdu, has_in_band, enable_recirc,
                      variable_length_userdata, max_mpls_depth,
                      masked_set_action);
}

static void
xlate_xbridge_remove(struct xlate_cfg *xcfg, struct xbridge *xbridge)
{
    struct xbundle *xbundle, *next_xbundle;
    struct xport *xport, *next_xport;

    if (!xbridge) {
        return;
    }

    HMAP_FOR_EACH_SAFE (xport, next_xport, ofp_node, &xbridge->xports) {
        xlate_xport_remove(xcfg, xport);
    }

    LIST_FOR_EACH_SAFE (xbundle, next_xbundle, list_node, &xbridge->xbundles) {
        xlate_xbundle_remove(xcfg, xbundle);
    }

    hmap_remove(&xcfg->xbridges, &xbridge->hmap_node);
    mac_learning_unref(xbridge->ml);
    mcast_snooping_unref(xbridge->ms);
    mbridge_unref(xbridge->mbridge);
    dpif_sflow_unref(xbridge->sflow);
    dpif_ipfix_unref(xbridge->ipfix);
    stp_unref(xbridge->stp);
    rstp_unref(xbridge->rstp);
    hmap_destroy(&xbridge->xports);
    free(xbridge->name);
    free(xbridge);
}

void
xlate_remove_ofproto(struct ofproto_dpif *ofproto)
{
    struct xbridge *xbridge;

    ovs_assert(new_xcfg);

    xbridge = xbridge_lookup(new_xcfg, ofproto);
    xlate_xbridge_remove(new_xcfg, xbridge);
}

void
xlate_bundle_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
                 const char *name, enum port_vlan_mode vlan_mode, int vlan,
                 unsigned long *trunks, bool use_priority_tags,
                 const struct bond *bond, const struct lacp *lacp,
                 bool floodable)
{
    struct xbundle *xbundle;

    ovs_assert(new_xcfg);

    xbundle = xbundle_lookup(new_xcfg, ofbundle);
    if (!xbundle) {
        xbundle = xzalloc(sizeof *xbundle);
        xbundle->ofbundle = ofbundle;
        xbundle->xbridge = xbridge_lookup(new_xcfg, ofproto);

        xlate_xbundle_init(new_xcfg, xbundle);
    }

    free(xbundle->name);
    xbundle->name = xstrdup(name);

    xlate_xbundle_set(xbundle, vlan_mode, vlan, trunks,
                      use_priority_tags, bond, lacp, floodable);
}

static void
xlate_xbundle_remove(struct xlate_cfg *xcfg, struct xbundle *xbundle)
{
    struct xport *xport, *next;

    if (!xbundle) {
        return;
    }

    LIST_FOR_EACH_SAFE (xport, next, bundle_node, &xbundle->xports) {
        list_remove(&xport->bundle_node);
        xport->xbundle = NULL;
    }

    hmap_remove(&xcfg->xbundles, &xbundle->hmap_node);
    list_remove(&xbundle->list_node);
    bond_unref(xbundle->bond);
    lacp_unref(xbundle->lacp);
    free(xbundle->name);
    free(xbundle);
}

void
xlate_bundle_remove(struct ofbundle *ofbundle)
{
    struct xbundle *xbundle;

    ovs_assert(new_xcfg);

    xbundle = xbundle_lookup(new_xcfg, ofbundle);
    xlate_xbundle_remove(new_xcfg, xbundle);
}

void
xlate_ofport_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
                 struct ofport_dpif *ofport, ofp_port_t ofp_port,
                 odp_port_t odp_port, const struct netdev *netdev,
                 const struct cfm *cfm, const struct bfd *bfd,
                 struct ofport_dpif *peer, int stp_port_no,
                 const struct rstp_port *rstp_port,
                 const struct ofproto_port_queue *qdscp_list, size_t n_qdscp,
                 enum ofputil_port_config config,
                 enum ofputil_port_state state, bool is_tunnel,
                 bool may_enable)
{
    size_t i;
    struct xport *xport;

    ovs_assert(new_xcfg);

    xport = xport_lookup(new_xcfg, ofport);
    if (!xport) {
        xport = xzalloc(sizeof *xport);
        xport->ofport = ofport;
        xport->xbridge = xbridge_lookup(new_xcfg, ofproto);
        xport->ofp_port = ofp_port;

        xlate_xport_init(new_xcfg, xport);
    }

    ovs_assert(xport->ofp_port == ofp_port);

    xlate_xport_set(xport, odp_port, netdev, cfm, bfd, stp_port_no,
                    rstp_port, config, state, is_tunnel, may_enable);

    if (xport->peer) {
        xport->peer->peer = NULL;
    }
    xport->peer = xport_lookup(new_xcfg, peer);
    if (xport->peer) {
        xport->peer->peer = xport;
    }

    if (xport->xbundle) {
        list_remove(&xport->bundle_node);
    }
    xport->xbundle = xbundle_lookup(new_xcfg, ofbundle);
    if (xport->xbundle) {
        list_insert(&xport->xbundle->xports, &xport->bundle_node);
    }

    clear_skb_priorities(xport);
    for (i = 0; i < n_qdscp; i++) {
        struct skb_priority_to_dscp *pdscp;
        uint32_t skb_priority;

        if (dpif_queue_to_priority(xport->xbridge->dpif, qdscp_list[i].queue,
                                   &skb_priority)) {
            continue;
        }

        pdscp = xmalloc(sizeof *pdscp);
        pdscp->skb_priority = skb_priority;
        pdscp->dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
        hmap_insert(&xport->skb_priorities, &pdscp->hmap_node,
                    hash_int(pdscp->skb_priority, 0));
    }
}

static void
xlate_xport_remove(struct xlate_cfg *xcfg, struct xport *xport)
{
    if (!xport) {
        return;
    }

    if (xport->peer) {
        xport->peer->peer = NULL;
        xport->peer = NULL;
    }

    if (xport->xbundle) {
        list_remove(&xport->bundle_node);
    }

    clear_skb_priorities(xport);
    hmap_destroy(&xport->skb_priorities);

    hmap_remove(&xcfg->xports, &xport->hmap_node);
    hmap_remove(&xport->xbridge->xports, &xport->ofp_node);

    netdev_close(xport->netdev);
    rstp_port_unref(xport->rstp_port);
    cfm_unref(xport->cfm);
    bfd_unref(xport->bfd);
    free(xport);
}

void
xlate_ofport_remove(struct ofport_dpif *ofport)
{
    struct xport *xport;

    ovs_assert(new_xcfg);

    xport = xport_lookup(new_xcfg, ofport);
    xlate_xport_remove(new_xcfg, xport);
}

/* Given a datapath and flow metadata ('backer' and 'flow', respectively),
 * returns the corresponding struct xport, or NULL if none is found. */
static struct xport *
xlate_lookup_xport(const struct dpif_backer *backer, const struct flow *flow)
{
    struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);

    return xport_lookup(xcfg, tnl_port_should_receive(flow)
                        ? tnl_port_receive(flow)
                        : odp_port_to_ofport(backer, flow->in_port.odp_port));
}

static struct ofproto_dpif *
xlate_lookup_ofproto_(const struct dpif_backer *backer, const struct flow *flow,
                      ofp_port_t *ofp_in_port, const struct xport **xportp)
{
    struct ofproto_dpif *recv_ofproto = NULL;
    struct ofproto_dpif *recirc_ofproto = NULL;
    const struct xport *xport;
    ofp_port_t in_port = OFPP_NONE;

    *xportp = xport = xlate_lookup_xport(backer, flow);

    if (xport) {
        recv_ofproto = xport->xbridge->ofproto;
        in_port = xport->ofp_port;
    }

    /* When recirc_id is set in 'flow', checks whether the ofproto_dpif that
     * corresponds to the recirc_id is the same as the receiving bridge.  If
     * they are the same, uses the 'recv_ofproto' and keeps the 'ofp_in_port'
     * as assigned.  Otherwise, uses the 'recirc_ofproto' that owns recirc_id
     * and assigns OFPP_NONE to 'ofp_in_port'.  The reason is that a
     * recirculated flow must be processed by the ofproto which originated
     * the recirculation, and as bridges can only see their own ports, the
     * in_port of the 'recv_ofproto' should not be passed to the
     * 'recirc_ofproto'.
     *
     * Admittedly, setting the 'ofp_in_port' to OFPP_NONE limits the
     * 'recirc_ofproto' from meaningfully matching on in_port of recirculated
     * flow, and should be fixed in the near future.
     *
     * TODO: Restore the original patch port.
     */
    if (recv_ofproto && flow->recirc_id) {
        recirc_ofproto = ofproto_dpif_recirc_get_ofproto(backer,
                                                         flow->recirc_id);
        if (recv_ofproto != recirc_ofproto) {
            *xportp = xport = NULL;
            in_port = OFPP_NONE;
        }
    }

    if (ofp_in_port) {
        *ofp_in_port = in_port;
    }

    return xport ? recv_ofproto : recirc_ofproto;
}

/* Given a datapath and flow metadata ('backer' and 'flow', respectively),
 * returns the corresponding struct ofproto_dpif and OpenFlow port number. */
struct ofproto_dpif *
xlate_lookup_ofproto(const struct dpif_backer *backer, const struct flow *flow,
                     ofp_port_t *ofp_in_port)
{
    const struct xport *xport;

    return xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport);
}

/* Given a datapath and flow metadata ('backer' and 'flow', respectively),
 * optionally populates 'ofprotop' with the ofproto_dpif, 'ofp_in_port' with
 * the OpenFlow in_port, and 'ipfix', 'sflow', and 'netflow' with the
 * appropriate handles for those protocols if they're enabled.  The caller may
 * use the returned pointers until quiescing; for longer-term use, additional
 * references must be taken.
 *
 * Returns 0 if successful, ENODEV if the parsed flow has no associated
 * ofproto. */
int
xlate_lookup(const struct dpif_backer *backer, const struct flow *flow,
             struct ofproto_dpif **ofprotop, struct dpif_ipfix **ipfix,
             struct dpif_sflow **sflow, struct netflow **netflow,
             ofp_port_t *ofp_in_port)
{
    struct ofproto_dpif *ofproto;
    const struct xport *xport;

    ofproto = xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport);

    if (!ofproto) {
        return ENODEV;
    }

    if (ofprotop) {
        *ofprotop = ofproto;
    }

    if (ipfix) {
        *ipfix = xport ? xport->xbridge->ipfix : NULL;
    }

    if (sflow) {
        *sflow = xport ? xport->xbridge->sflow : NULL;
    }

    if (netflow) {
        *netflow = xport ? xport->xbridge->netflow : NULL;
    }

    return 0;
}
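
/* An illustrative call from an upcall handler; 'backer' and 'flow' stand in
 * for the parsed upcall's datapath and flow key, and error handling is
 * trimmed to the essentials:
 *
 *     struct ofproto_dpif *ofproto;
 *     ofp_port_t ofp_in_port;
 *
 *     if (xlate_lookup(backer, flow, &ofproto, NULL, NULL, NULL,
 *                      &ofp_in_port)) {
 *         return;    (ENODEV: no bridge owns this flow)
 *     }
 */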

static struct xbridge *
xbridge_lookup(struct xlate_cfg *xcfg, const struct ofproto_dpif *ofproto)
{
    struct hmap *xbridges;
    struct xbridge *xbridge;

    if (!ofproto || !xcfg) {
        return NULL;
    }

    xbridges = &xcfg->xbridges;

    HMAP_FOR_EACH_IN_BUCKET (xbridge, hmap_node, hash_pointer(ofproto, 0),
                             xbridges) {
        if (xbridge->ofproto == ofproto) {
            return xbridge;
        }
    }
    return NULL;
}

static struct xbundle *
xbundle_lookup(struct xlate_cfg *xcfg, const struct ofbundle *ofbundle)
{
    struct hmap *xbundles;
    struct xbundle *xbundle;

    if (!ofbundle || !xcfg) {
        return NULL;
    }

    xbundles = &xcfg->xbundles;

    HMAP_FOR_EACH_IN_BUCKET (xbundle, hmap_node, hash_pointer(ofbundle, 0),
                             xbundles) {
        if (xbundle->ofbundle == ofbundle) {
            return xbundle;
        }
    }
    return NULL;
}

static struct xport *
xport_lookup(struct xlate_cfg *xcfg, const struct ofport_dpif *ofport)
{
    struct hmap *xports;
    struct xport *xport;

    if (!ofport || !xcfg) {
        return NULL;
    }

    xports = &xcfg->xports;

    HMAP_FOR_EACH_IN_BUCKET (xport, hmap_node, hash_pointer(ofport, 0),
                             xports) {
        if (xport->ofport == ofport) {
            return xport;
        }
    }
    return NULL;
}

static struct stp_port *
xport_get_stp_port(const struct xport *xport)
{
    return xport->xbridge->stp && xport->stp_port_no != -1
        ? stp_get_port(xport->xbridge->stp, xport->stp_port_no)
        : NULL;
}

static bool
xport_stp_learn_state(const struct xport *xport)
{
    struct stp_port *sp = xport_get_stp_port(xport);
    return sp
        ? stp_learn_in_state(stp_port_get_state(sp))
        : true;
}

static bool
xport_stp_forward_state(const struct xport *xport)
{
    struct stp_port *sp = xport_get_stp_port(xport);
    return sp
        ? stp_forward_in_state(stp_port_get_state(sp))
        : true;
}

static bool
xport_stp_should_forward_bpdu(const struct xport *xport)
{
    struct stp_port *sp = xport_get_stp_port(xport);
    return stp_should_forward_bpdu(sp ? stp_port_get_state(sp) : STP_DISABLED);
}

/* Returns true if STP should process 'flow'.  Sets fields in 'wc' that
 * were used to make the determination. */
static bool
stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
{
    /* is_stp() also checks dl_type, but dl_type is always set in 'wc'. */
    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
    return is_stp(flow);
}

static void
stp_process_packet(const struct xport *xport, const struct ofpbuf *packet)
{
    struct stp_port *sp = xport_get_stp_port(xport);
    struct ofpbuf payload = *packet;
    struct eth_header *eth = ofpbuf_data(&payload);

    /* Sink packets on ports that have STP disabled when the bridge has
     * STP enabled. */
    if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
        return;
    }

    /* Trim off padding on payload. */
    if (ofpbuf_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
        ofpbuf_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
    }

    if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
        stp_received_bpdu(sp, ofpbuf_data(&payload), ofpbuf_size(&payload));
    }
}

static enum rstp_state
xport_get_rstp_port_state(const struct xport *xport)
{
    return xport->rstp_port
        ? rstp_port_get_state(xport->rstp_port)
        : RSTP_DISABLED;
}

static bool
xport_rstp_learn_state(const struct xport *xport)
{
    return xport->xbridge->rstp && xport->rstp_port
        ? rstp_learn_in_state(xport_get_rstp_port_state(xport))
        : true;
}

static bool
xport_rstp_forward_state(const struct xport *xport)
{
    return xport->xbridge->rstp && xport->rstp_port
        ? rstp_forward_in_state(xport_get_rstp_port_state(xport))
        : true;
}

static bool
xport_rstp_should_manage_bpdu(const struct xport *xport)
{
    return rstp_should_manage_bpdu(xport_get_rstp_port_state(xport));
}

static void
rstp_process_packet(const struct xport *xport, const struct ofpbuf *packet)
{
    struct ofpbuf payload = *packet;
    struct eth_header *eth = ofpbuf_data(&payload);

    /* Sink packets on ports that have no RSTP. */
    if (!xport->rstp_port) {
        return;
    }

    /* Trim off padding on payload. */
    if (ofpbuf_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
        ofpbuf_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
    }

    if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
        rstp_port_received_bpdu(xport->rstp_port, ofpbuf_data(&payload),
                                ofpbuf_size(&payload));
    }
}

static struct xport *
get_ofp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
{
    struct xport *xport;

    HMAP_FOR_EACH_IN_BUCKET (xport, ofp_node, hash_ofp_port(ofp_port),
                             &xbridge->xports) {
        if (xport->ofp_port == ofp_port) {
            return xport;
        }
    }
    return NULL;
}

static odp_port_t
ofp_port_to_odp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
{
    const struct xport *xport = get_ofp_port(xbridge, ofp_port);
    return xport ? xport->odp_port : ODPP_NONE;
}

static bool
odp_port_is_alive(const struct xlate_ctx *ctx, ofp_port_t ofp_port)
{
    struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
    return xport && xport->may_enable;
}

static struct ofputil_bucket *
group_first_live_bucket(const struct xlate_ctx *, const struct group_dpif *,
                        int depth);

static bool
group_is_alive(const struct xlate_ctx *ctx, uint32_t group_id, int depth)
{
    struct group_dpif *group;

    if (group_dpif_lookup(ctx->xbridge->ofproto, group_id, &group)) {
        struct ofputil_bucket *bucket;

        bucket = group_first_live_bucket(ctx, group, depth);
        group_dpif_unref(group);

        /* The group is alive if it has at least one live bucket. */
        return bucket != NULL;
    }

    return false;
}

#define MAX_LIVENESS_RECURSION 128 /* Arbitrary limit */

static bool
bucket_is_alive(const struct xlate_ctx *ctx,
                struct ofputil_bucket *bucket, int depth)
{
    if (depth >= MAX_LIVENESS_RECURSION) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);

        VLOG_WARN_RL(&rl, "bucket chaining exceeded %d links",
                     MAX_LIVENESS_RECURSION);
        return false;
    }

    return (!ofputil_bucket_has_liveness(bucket)
            || (bucket->watch_port != OFPP_ANY
                && odp_port_is_alive(ctx, bucket->watch_port))
            || (bucket->watch_group != OFPG_ANY
                && group_is_alive(ctx, bucket->watch_group, depth + 1)));
}

static struct ofputil_bucket *
group_first_live_bucket(const struct xlate_ctx *ctx,
                        const struct group_dpif *group, int depth)
{
    struct ofputil_bucket *bucket;
    const struct ovs_list *buckets;

    group_dpif_get_buckets(group, &buckets);
    LIST_FOR_EACH (bucket, list_node, buckets) {
        if (bucket_is_alive(ctx, bucket, depth)) {
            return bucket;
        }
    }

    return NULL;
}

static struct ofputil_bucket *
group_best_live_bucket(const struct xlate_ctx *ctx,
                       const struct group_dpif *group,
                       uint32_t basis)
{
    struct ofputil_bucket *best_bucket = NULL;
    uint32_t best_score = 0;
    int i = 0;

    struct ofputil_bucket *bucket;
    const struct ovs_list *buckets;

    group_dpif_get_buckets(group, &buckets);
    LIST_FOR_EACH (bucket, list_node, buckets) {
        if (bucket_is_alive(ctx, bucket, 0)) {
            uint32_t score = (hash_int(i, basis) & 0xffff) * bucket->weight;
            if (score >= best_score) {
                best_bucket = bucket;
                best_score = score;
            }
        }
        i++;
    }

    return best_bucket;
}
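
/* A note on the selection above: this is highest-random-weight ("rendezvous")
 * style hashing.  Each live bucket is scored as
 *
 *     score = (hash_int(i, basis) & 0xffff) * bucket->weight
 *
 * where 'i' is the bucket's position and 'basis' is derived from the flow,
 * so a given flow keeps mapping to the same bucket until bucket liveness
 * changes, and higher-weight buckets attract a larger share of flows (the
 * weighting is approximate, not an exact proportion). */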

static bool
xbundle_trunks_vlan(const struct xbundle *bundle, uint16_t vlan)
{
    return (bundle->vlan_mode != PORT_VLAN_ACCESS
            && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
}

static bool
xbundle_includes_vlan(const struct xbundle *xbundle, uint16_t vlan)
{
    return vlan == xbundle->vlan || xbundle_trunks_vlan(xbundle, vlan);
}

static mirror_mask_t
xbundle_mirror_out(const struct xbridge *xbridge, struct xbundle *xbundle)
{
    return xbundle != &ofpp_none_bundle
        ? mirror_bundle_out(xbridge->mbridge, xbundle->ofbundle)
        : 0;
}

static mirror_mask_t
xbundle_mirror_src(const struct xbridge *xbridge, struct xbundle *xbundle)
{
    return xbundle != &ofpp_none_bundle
        ? mirror_bundle_src(xbridge->mbridge, xbundle->ofbundle)
        : 0;
}

static mirror_mask_t
xbundle_mirror_dst(const struct xbridge *xbridge, struct xbundle *xbundle)
{
    return xbundle != &ofpp_none_bundle
        ? mirror_bundle_dst(xbridge->mbridge, xbundle->ofbundle)
        : 0;
}

static struct xbundle *
lookup_input_bundle(const struct xbridge *xbridge, ofp_port_t in_port,
                    bool warn, struct xport **in_xportp)
{
    struct xport *xport;

    /* Find the port and bundle for the received packet. */
    xport = get_ofp_port(xbridge, in_port);
    if (in_xportp) {
        *in_xportp = xport;
    }
    if (xport && xport->xbundle) {
        return xport->xbundle;
    }

    /* Special-case OFPP_NONE (OF1.0) and OFPP_CONTROLLER (OF1.1+),
     * which a controller may use as the ingress port for traffic that
     * it is sourcing. */
    if (in_port == OFPP_CONTROLLER || in_port == OFPP_NONE) {
        return &ofpp_none_bundle;
    }

    /* Odd.  A few possible reasons here:
     *
     * - We deleted a port but there are still a few packets queued up
     *   from it.
     *
     * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
     *   we don't know about.
     *
     * - The ofproto client didn't configure the port as part of a bundle.
     *   This is particularly likely to happen if a packet was received on the
     *   port after it was created, but before the client had a chance to
     *   configure its bundle.
     */
    if (warn) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
                     "port %"PRIu16, xbridge->name, in_port);
    }
    return NULL;
}

static void
add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow)
{
    const struct xbridge *xbridge = ctx->xbridge;
    mirror_mask_t mirrors;
    struct xbundle *in_xbundle;
    uint16_t vlan;
    uint16_t vid;

    mirrors = ctx->xout->mirrors;
    ctx->xout->mirrors = 0;

    in_xbundle = lookup_input_bundle(xbridge, orig_flow->in_port.ofp_port,
                                     ctx->xin->packet != NULL, NULL);
    if (!in_xbundle) {
        return;
    }
    mirrors |= xbundle_mirror_src(xbridge, in_xbundle);

    /* Drop frames on bundles reserved for mirroring. */
    if (xbundle_mirror_out(xbridge, in_xbundle)) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ctx->xbridge->name, in_xbundle->name);
        }
        ofpbuf_clear(ctx->xout->odp_actions);
        return;
    }

    /* Check VLAN. */
    vid = vlan_tci_to_vid(orig_flow->vlan_tci);
    if (!input_vid_is_valid(vid, in_xbundle, ctx->xin->packet != NULL)) {
        return;
    }
    vlan = input_vid_to_vlan(in_xbundle, vid);

    if (!mirrors) {
        return;
    }

    /* Restore the original packet before adding the mirror actions. */
    ctx->xin->flow = *orig_flow;

    while (mirrors) {
        mirror_mask_t dup_mirrors;
        struct ofbundle *out;
        unsigned long *vlans;
        bool vlan_mirrored;
        bool has_mirror;
        int out_vlan;

        has_mirror = mirror_get(xbridge->mbridge, raw_ctz(mirrors),
                                &vlans, &dup_mirrors, &out, &out_vlan);
        ovs_assert(has_mirror);

        if (vlans) {
            ctx->xout->wc.masks.vlan_tci |= htons(VLAN_CFI | VLAN_VID_MASK);
        }
        vlan_mirrored = !vlans || bitmap_is_set(vlans, vlan);
        free(vlans);

        if (!vlan_mirrored) {
            mirrors = zero_rightmost_1bit(mirrors);
            continue;
        }

        mirrors &= ~dup_mirrors;
        ctx->xout->mirrors |= dup_mirrors;
        if (out) {
            struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
            struct xbundle *out_xbundle = xbundle_lookup(xcfg, out);
            if (out_xbundle) {
                output_normal(ctx, out_xbundle, vlan);
            }
        } else if (vlan != out_vlan
                   && !eth_addr_is_reserved(orig_flow->dl_dst)) {
            struct xbundle *xbundle;

            LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
                if (xbundle_includes_vlan(xbundle, out_vlan)
                    && !xbundle_mirror_out(xbridge, xbundle)) {
                    output_normal(ctx, xbundle, out_vlan);
                }
            }
        }
    }
}

/* Given 'vid', the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), and 'in_xbundle',
 * the bundle on which the packet was received, returns the VLAN to which the
 * packet belongs.
 *
 * Both 'vid' and the return value are in the range 0...4095. */
static uint16_t
input_vid_to_vlan(const struct xbundle *in_xbundle, uint16_t vid)
{
    switch (in_xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return in_xbundle->vlan;

    case PORT_VLAN_TRUNK:
        return vid;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        return vid ? vid : in_xbundle->vlan;

    default:
        OVS_NOT_REACHED();
    }
}
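
/* Worked examples of the mapping above, assuming an access port on VLAN 10
 * and a native-VLAN-20 port:
 *
 *     mode                      vid   result
 *     PORT_VLAN_ACCESS            0       10  (always the access VLAN)
 *     PORT_VLAN_TRUNK            30       30  (VID taken at face value)
 *     PORT_VLAN_NATIVE_TAGGED     0       20  (untagged maps to native VLAN)
 *     PORT_VLAN_NATIVE_TAGGED    30       30
 */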

/* Checks whether a packet with the given 'vid' may ingress on 'in_xbundle'.
 * If so, returns true.  Otherwise, returns false and, if 'warn' is true, logs
 * a warning.
 *
 * 'vid' should be the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), in the range
 * 0...4095. */
static bool
input_vid_is_valid(uint16_t vid, struct xbundle *in_xbundle, bool warn)
{
    /* Allow any VID on the OFPP_NONE port. */
    if (in_xbundle == &ofpp_none_bundle) {
        return true;
    }

    switch (in_xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        if (vid) {
            if (warn) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "dropping VLAN %"PRIu16" tagged "
                             "packet received on port %s configured as VLAN "
                             "%"PRIu16" access port", vid, in_xbundle->name,
                             in_xbundle->vlan);
            }
            return false;
        }
        return true;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        if (!vid) {
            /* Port must always carry its native VLAN. */
            return true;
        }
        /* Fall through. */
    case PORT_VLAN_TRUNK:
        if (!xbundle_includes_vlan(in_xbundle, vid)) {
            if (warn) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "dropping VLAN %"PRIu16" packet "
                             "received on port %s not configured for trunking "
                             "VLAN %"PRIu16, vid, in_xbundle->name, vid);
            }
            return false;
        }
        return true;

    default:
        OVS_NOT_REACHED();
    }
}

/* Given 'vlan', the VLAN that a packet belongs to, and
 * 'out_xbundle', a bundle on which the packet is to be output, returns the VID
 * that should be included in the 802.1Q header.  (If the return value is 0,
 * then the 802.1Q header should only be included in the packet if there is a
 * nonzero PCP.)
 *
 * Both 'vlan' and the return value are in the range 0...4095. */
static uint16_t
output_vlan_to_vid(const struct xbundle *out_xbundle, uint16_t vlan)
{
    switch (out_xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return 0;

    case PORT_VLAN_TRUNK:
    case PORT_VLAN_NATIVE_TAGGED:
        return vlan;

    case PORT_VLAN_NATIVE_UNTAGGED:
        return vlan == out_xbundle->vlan ? 0 : vlan;

    default:
        OVS_NOT_REACHED();
    }
}
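
/* output_vlan_to_vid() is the inverse direction of input_vid_to_vlan():
 * e.g. a packet on VLAN 20 sent to a PORT_VLAN_NATIVE_UNTAGGED bundle whose
 * native VLAN is 20 leaves with VID 0 (untagged unless a PCP must be
 * carried), while the same packet sent to a trunk leaves with VID 20. */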
1652
1653static void
46c88433 1654output_normal(struct xlate_ctx *ctx, const struct xbundle *out_xbundle,
9583bc14
EJ
1655 uint16_t vlan)
1656{
33bf9176 1657 ovs_be16 *flow_tci = &ctx->xin->flow.vlan_tci;
9583bc14
EJ
1658 uint16_t vid;
1659 ovs_be16 tci, old_tci;
46c88433 1660 struct xport *xport;
9583bc14 1661
46c88433
EJ
1662 vid = output_vlan_to_vid(out_xbundle, vlan);
1663 if (list_is_empty(&out_xbundle->xports)) {
1664 /* Partially configured bundle with no slaves. Drop the packet. */
1665 return;
1666 } else if (!out_xbundle->bond) {
92c08f09 1667 ctx->use_recirc = false;
46c88433
EJ
1668 xport = CONTAINER_OF(list_front(&out_xbundle->xports), struct xport,
1669 bundle_node);
9583bc14 1670 } else {
84f0f298 1671 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
54ecb5a2 1672 struct flow_wildcards *wc = &ctx->xout->wc;
84f0f298
RW
1673 struct xlate_recirc *xr = &ctx->recirc;
1674 struct ofport_dpif *ofport;
adcf00ba
AZ
1675
1676 if (ctx->xbridge->enable_recirc) {
92c08f09 1677 ctx->use_recirc = bond_may_recirc(
62ac1f20 1678 out_xbundle->bond, &xr->recirc_id, &xr->hash_basis);
adcf00ba 1679
92c08f09 1680 if (ctx->use_recirc) {
adcf00ba 1681 /* Only TCP mode uses recirculation. */
347bf289 1682 xr->hash_alg = OVS_HASH_ALG_L4;
adcf00ba 1683 bond_update_post_recirc_rules(out_xbundle->bond, false);
54ecb5a2
AZ
1684
1685 /* Recirculation does not require unmasking hash fields. */
1686 wc = NULL;
adcf00ba
AZ
1687 }
1688 }
46c88433 1689
54ecb5a2
AZ
1690 ofport = bond_choose_output_slave(out_xbundle->bond,
1691 &ctx->xin->flow, wc, vid);
84f0f298 1692 xport = xport_lookup(xcfg, ofport);
46c88433
EJ
1693
1694 if (!xport) {
9583bc14
EJ
1695 /* No slaves enabled, so drop packet. */
1696 return;
1697 }
d6fc5f57 1698
b256dc52
JS
 1699 /* If ctx->use_recirc is set, the main thread will handle stats
 1700 * accounting for this bond. */
92c08f09 1701 if (!ctx->use_recirc) {
b256dc52
JS
1702 if (ctx->xin->resubmit_stats) {
1703 bond_account(out_xbundle->bond, &ctx->xin->flow, vid,
1704 ctx->xin->resubmit_stats->n_bytes);
1705 }
1706 if (ctx->xin->xcache) {
1707 struct xc_entry *entry;
1708 struct flow *flow;
1709
1710 flow = &ctx->xin->flow;
1711 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_BOND);
1712 entry->u.bond.bond = bond_ref(out_xbundle->bond);
1713 entry->u.bond.flow = xmemdup(flow, sizeof *flow);
1714 entry->u.bond.vid = vid;
1715 }
d6fc5f57 1716 }
9583bc14
EJ
1717 }
1718
33bf9176 1719 old_tci = *flow_tci;
9583bc14 1720 tci = htons(vid);
46c88433 1721 if (tci || out_xbundle->use_priority_tags) {
33bf9176 1722 tci |= *flow_tci & htons(VLAN_PCP_MASK);
9583bc14
EJ
1723 if (tci) {
1724 tci |= htons(VLAN_CFI);
1725 }
1726 }
33bf9176 1727 *flow_tci = tci;
9583bc14 1728
46c88433 1729 compose_output_action(ctx, xport->ofp_port);
33bf9176 1730 *flow_tci = old_tci;
9583bc14
EJ
1731}
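/* Sketch of the TCI composition performed above (restated from the code; the
 * example values are hypothetical): for vid 30 on a flow whose incoming tag
 * carried PCP 5,
 *
 *     tci  = htons(30);                          VID bits
 *     tci |= *flow_tci & htons(VLAN_PCP_MASK);   preserve PCP 5
 *     tci |= htons(VLAN_CFI);                    mark the tag as present
 *
 * yields a tagged output, while vid 0 with PCP 0 yields tci == 0, i.e. the
 * packet is emitted untagged. */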
1732
1733/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
1734 * migration. Older Citrix-patched Linux DomU used gratuitous ARP replies to
1735 * indicate this; newer upstream kernels use gratuitous ARP requests. */
1736static bool
1737is_gratuitous_arp(const struct flow *flow, struct flow_wildcards *wc)
1738{
1739 if (flow->dl_type != htons(ETH_TYPE_ARP)) {
1740 return false;
1741 }
1742
1743 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
1744 if (!eth_addr_is_broadcast(flow->dl_dst)) {
1745 return false;
1746 }
1747
1748 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
1749 if (flow->nw_proto == ARP_OP_REPLY) {
1750 return true;
1751 } else if (flow->nw_proto == ARP_OP_REQUEST) {
1752 memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
1753 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
1754
1755 return flow->nw_src == flow->nw_dst;
1756 } else {
1757 return false;
1758 }
1759}
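/* Example (not from the original source; addresses are hypothetical): a
 * gratuitous ARP request from a VM announcing 10.0.0.5 matches the request
 * branch above because its sender and target protocol addresses are equal:
 *
 *     dl_dst   = ff:ff:ff:ff:ff:ff   (broadcast)
 *     nw_proto = ARP_OP_REQUEST
 *     nw_src   = nw_dst = 10.0.0.5
 */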
1760
ff69c24a
FL
1761/* Determines whether packets in 'flow' within 'xbridge' should be forwarded or
1762 * dropped. Returns true if they may be forwarded, false if they should be
1763 * dropped.
1764 *
1765 * 'in_port' must be the xport that corresponds to flow->in_port.
 1766 * 'in_port' must be part of a bundle (i.e. in_port->xbundle must be nonnull).
1767 *
1768 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
1769 * returned by input_vid_to_vlan(). It must be a valid VLAN for 'in_port', as
1770 * checked by input_vid_is_valid().
 1771 */
1775static bool
1776is_admissible(struct xlate_ctx *ctx, struct xport *in_port,
1777 uint16_t vlan)
1778{
1779 struct xbundle *in_xbundle = in_port->xbundle;
1780 const struct xbridge *xbridge = ctx->xbridge;
1781 struct flow *flow = &ctx->xin->flow;
1782
1783 /* Drop frames for reserved multicast addresses
1784 * only if forward_bpdu option is absent. */
1785 if (!xbridge->forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
1786 xlate_report(ctx, "packet has reserved destination MAC, dropping");
1787 return false;
1788 }
1789
1790 if (in_xbundle->bond) {
1791 struct mac_entry *mac;
1792
1793 switch (bond_check_admissibility(in_xbundle->bond, in_port->ofport,
1794 flow->dl_dst)) {
1795 case BV_ACCEPT:
1796 break;
1797
1798 case BV_DROP:
1799 xlate_report(ctx, "bonding refused admissibility, dropping");
1800 return false;
1801
1802 case BV_DROP_IF_MOVED:
1803 ovs_rwlock_rdlock(&xbridge->ml->rwlock);
1804 mac = mac_learning_lookup(xbridge->ml, flow->dl_src, vlan);
9d078ec2
BP
1805 if (mac
1806 && mac_entry_get_port(xbridge->ml, mac) != in_xbundle->ofbundle
1807 && (!is_gratuitous_arp(flow, &ctx->xout->wc)
1808 || mac_entry_is_grat_arp_locked(mac))) {
ff69c24a
FL
1809 ovs_rwlock_unlock(&xbridge->ml->rwlock);
1810 xlate_report(ctx, "SLB bond thinks this packet looped back, "
1811 "dropping");
1812 return false;
1813 }
1814 ovs_rwlock_unlock(&xbridge->ml->rwlock);
1815 break;
1816 }
1817 }
1818
1819 return true;
1820}
1821
ee047520
BP
1822/* Checks whether a MAC learning update is necessary for MAC learning table
1823 * 'ml' given that a packet matching 'flow' was received on 'in_xbundle' in
1824 * 'vlan'.
1825 *
1826 * Most packets processed through the MAC learning table do not actually
1827 * change it in any way. This function requires only a read lock on the MAC
1828 * learning table, so it is much cheaper in this common case.
1829 *
1830 * Keep the code here synchronized with that in update_learning_table__()
1831 * below. */
1832static bool
1833is_mac_learning_update_needed(const struct mac_learning *ml,
1834 const struct flow *flow,
1835 struct flow_wildcards *wc,
1836 int vlan, struct xbundle *in_xbundle)
d6fc5f57 1837OVS_REQ_RDLOCK(ml->rwlock)
9583bc14
EJ
1838{
1839 struct mac_entry *mac;
1840
ee047520
BP
1841 if (!mac_learning_may_learn(ml, flow->dl_src, vlan)) {
1842 return false;
1843 }
1844
1845 mac = mac_learning_lookup(ml, flow->dl_src, vlan);
1846 if (!mac || mac_entry_age(ml, mac)) {
1847 return true;
9583bc14
EJ
1848 }
1849
ee047520
BP
1850 if (is_gratuitous_arp(flow, wc)) {
1851 /* We don't want to learn from gratuitous ARP packets that are
1852 * reflected back over bond slaves so we lock the learning table. */
1853 if (!in_xbundle->bond) {
1854 return true;
1855 } else if (mac_entry_is_grat_arp_locked(mac)) {
1856 return false;
1857 }
1858 }
1859
9d078ec2 1860 return mac_entry_get_port(ml, mac) != in_xbundle->ofbundle;
ee047520
BP
1861}
1862
1863
 1864/* Updates the MAC learning table of 'xbridge' given that a packet matching
 1865 * 'flow' was received on 'in_xbundle' in 'vlan'.
1866 *
1867 * This code repeats all the checks in is_mac_learning_update_needed() because
1868 * the lock was released between there and here and thus the MAC learning state
1869 * could have changed.
1870 *
1871 * Keep the code here synchronized with that in is_mac_learning_update_needed()
1872 * above. */
1873static void
1874update_learning_table__(const struct xbridge *xbridge,
1875 const struct flow *flow, struct flow_wildcards *wc,
1876 int vlan, struct xbundle *in_xbundle)
d6fc5f57 1877OVS_REQ_WRLOCK(xbridge->ml->rwlock)
ee047520
BP
1878{
1879 struct mac_entry *mac;
1880
46c88433 1881 if (!mac_learning_may_learn(xbridge->ml, flow->dl_src, vlan)) {
ee047520 1882 return;
9583bc14
EJ
1883 }
1884
46c88433 1885 mac = mac_learning_insert(xbridge->ml, flow->dl_src, vlan);
9583bc14
EJ
1886 if (is_gratuitous_arp(flow, wc)) {
1887 /* We don't want to learn from gratuitous ARP packets that are
1888 * reflected back over bond slaves so we lock the learning table. */
46c88433 1889 if (!in_xbundle->bond) {
9583bc14
EJ
1890 mac_entry_set_grat_arp_lock(mac);
1891 } else if (mac_entry_is_grat_arp_locked(mac)) {
ee047520 1892 return;
9583bc14
EJ
1893 }
1894 }
1895
9d078ec2 1896 if (mac_entry_get_port(xbridge->ml, mac) != in_xbundle->ofbundle) {
9583bc14
EJ
1897 /* The log messages here could actually be useful in debugging,
1898 * so keep the rate limit relatively high. */
1899 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
ee047520 1900
9583bc14
EJ
1901 VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
1902 "on port %s in VLAN %d",
46c88433
EJ
1903 xbridge->name, ETH_ADDR_ARGS(flow->dl_src),
1904 in_xbundle->name, vlan);
9583bc14 1905
9d078ec2 1906 mac_entry_set_port(xbridge->ml, mac, in_xbundle->ofbundle);
9583bc14 1907 }
ee047520
BP
1908}
1909
1910static void
1911update_learning_table(const struct xbridge *xbridge,
1912 const struct flow *flow, struct flow_wildcards *wc,
1913 int vlan, struct xbundle *in_xbundle)
1914{
1915 bool need_update;
1916
1917 /* Don't learn the OFPP_NONE port. */
1918 if (in_xbundle == &ofpp_none_bundle) {
1919 return;
1920 }
1921
1922 /* First try the common case: no change to MAC learning table. */
1923 ovs_rwlock_rdlock(&xbridge->ml->rwlock);
1924 need_update = is_mac_learning_update_needed(xbridge->ml, flow, wc, vlan,
1925 in_xbundle);
509c0149 1926 ovs_rwlock_unlock(&xbridge->ml->rwlock);
ee047520
BP
1927
1928 if (need_update) {
1929 /* Slow path: MAC learning table might need an update. */
1930 ovs_rwlock_wrlock(&xbridge->ml->rwlock);
1931 update_learning_table__(xbridge, flow, wc, vlan, in_xbundle);
1932 ovs_rwlock_unlock(&xbridge->ml->rwlock);
1933 }
9583bc14
EJ
1934}
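/* The function above is an instance of optimistic double-checked locking: a
 * cheap read-locked check filters out the common no-op case, and only a
 * probable update takes the write lock.  A condensed sketch (not from the
 * original source):
 *
 *     rdlock();  need = check();  unlock();
 *     if (need) { wrlock();  recheck_and_update();  unlock(); }
 *
 * The write-locked path must recheck everything because the table can change
 * between the two lock acquisitions. */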
1935
86e2dcdd
FL
1936/* Updates multicast snooping table 'ms' given that a packet matching 'flow'
 1937 * was received on 'in_xbundle' in 'vlan' and is either a Report or a Query. */
1938static void
1939update_mcast_snooping_table__(const struct xbridge *xbridge,
1940 const struct flow *flow,
1941 struct mcast_snooping *ms,
1942 ovs_be32 ip4, int vlan,
1943 struct xbundle *in_xbundle)
1944 OVS_REQ_WRLOCK(ms->rwlock)
1945{
1946 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(60, 30);
1947
1948 switch (ntohs(flow->tp_src)) {
1949 case IGMP_HOST_MEMBERSHIP_REPORT:
1950 case IGMPV2_HOST_MEMBERSHIP_REPORT:
1951 if (mcast_snooping_add_group(ms, ip4, vlan, in_xbundle->ofbundle)) {
1952 VLOG_DBG_RL(&rl, "bridge %s: multicast snooping learned that "
1953 IP_FMT" is on port %s in VLAN %d",
1954 xbridge->name, IP_ARGS(ip4), in_xbundle->name, vlan);
1955 }
1956 break;
1957 case IGMP_HOST_LEAVE_MESSAGE:
1958 if (mcast_snooping_leave_group(ms, ip4, vlan, in_xbundle->ofbundle)) {
1959 VLOG_DBG_RL(&rl, "bridge %s: multicast snooping leaving "
1960 IP_FMT" is on port %s in VLAN %d",
1961 xbridge->name, IP_ARGS(ip4), in_xbundle->name, vlan);
1962 }
1963 break;
1964 case IGMP_HOST_MEMBERSHIP_QUERY:
1965 if (flow->nw_src && mcast_snooping_add_mrouter(ms, vlan,
1966 in_xbundle->ofbundle)) {
1967 VLOG_DBG_RL(&rl, "bridge %s: multicast snooping query from "
1968 IP_FMT" is on port %s in VLAN %d",
1969 xbridge->name, IP_ARGS(flow->nw_src),
1970 in_xbundle->name, vlan);
1971 }
1972 break;
1973 }
1974}
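/* For IGMP packets, flow extraction stores the IGMP message type in
 * flow->tp_src (and the code in flow->tp_dst), which is why the switch above
 * dispatches ntohs(flow->tp_src) against IGMP message-type constants rather
 * than a transport port number. */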
1975
 1976/* Updates the multicast snooping table of 'xbridge' given that a packet
 1977 * matching 'flow' was received on 'in_xbundle' in 'vlan'. */
1978static void
1979update_mcast_snooping_table(const struct xbridge *xbridge,
1980 const struct flow *flow, int vlan,
1981 struct xbundle *in_xbundle)
1982{
1983 struct mcast_snooping *ms = xbridge->ms;
1984 struct xlate_cfg *xcfg;
1985 struct xbundle *mcast_xbundle;
f4ae6e23 1986 struct mcast_port_bundle *fport;
86e2dcdd
FL
1987
1988 /* Don't learn the OFPP_NONE port. */
1989 if (in_xbundle == &ofpp_none_bundle) {
1990 return;
1991 }
1992
 1993 /* Don't learn from flood ports. */
1994 mcast_xbundle = NULL;
1995 ovs_rwlock_wrlock(&ms->rwlock);
1996 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
f4ae6e23 1997 LIST_FOR_EACH(fport, node, &ms->fport_list) {
86e2dcdd
FL
1998 mcast_xbundle = xbundle_lookup(xcfg, fport->port);
1999 if (mcast_xbundle == in_xbundle) {
2000 break;
2001 }
2002 }
2003
2004 if (!mcast_xbundle || mcast_xbundle != in_xbundle) {
2005 update_mcast_snooping_table__(xbridge, flow, ms, flow->igmp_group_ip4,
2006 vlan, in_xbundle);
2007 }
2008 ovs_rwlock_unlock(&ms->rwlock);
2009}
2010
2011/* Send the packet to ports on which the multicast group was learned. */
2012static void
2013xlate_normal_mcast_send_group(struct xlate_ctx *ctx,
2014 struct mcast_snooping *ms OVS_UNUSED,
2015 struct mcast_group *grp,
2016 struct xbundle *in_xbundle, uint16_t vlan)
2017 OVS_REQ_RDLOCK(ms->rwlock)
2018{
2019 struct xlate_cfg *xcfg;
2020 struct mcast_group_bundle *b;
2021 struct xbundle *mcast_xbundle;
2022
2023 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
2024 LIST_FOR_EACH(b, bundle_node, &grp->bundle_lru) {
2025 mcast_xbundle = xbundle_lookup(xcfg, b->port);
2026 if (mcast_xbundle && mcast_xbundle != in_xbundle) {
2027 xlate_report(ctx, "forwarding to mcast group port");
2028 output_normal(ctx, mcast_xbundle, vlan);
2029 } else if (!mcast_xbundle) {
2030 xlate_report(ctx, "mcast group port is unknown, dropping");
2031 } else {
2032 xlate_report(ctx, "mcast group port is input port, dropping");
2033 }
2034 }
2035}
2036
2037/* Send the packet to ports connected to multicast routers. */
2038static void
2039xlate_normal_mcast_send_mrouters(struct xlate_ctx *ctx,
2040 struct mcast_snooping *ms,
2041 struct xbundle *in_xbundle, uint16_t vlan)
2042 OVS_REQ_RDLOCK(ms->rwlock)
2043{
2044 struct xlate_cfg *xcfg;
2045 struct mcast_mrouter_bundle *mrouter;
2046 struct xbundle *mcast_xbundle;
2047
2048 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
2049 LIST_FOR_EACH(mrouter, mrouter_node, &ms->mrouter_lru) {
2050 mcast_xbundle = xbundle_lookup(xcfg, mrouter->port);
2051 if (mcast_xbundle && mcast_xbundle != in_xbundle) {
2052 xlate_report(ctx, "forwarding to mcast router port");
2053 output_normal(ctx, mcast_xbundle, vlan);
2054 } else if (!mcast_xbundle) {
2055 xlate_report(ctx, "mcast router port is unknown, dropping");
2056 } else {
2057 xlate_report(ctx, "mcast router port is input port, dropping");
2058 }
2059 }
2060}
2061
2062/* Send the packet to ports flagged for flooding. */
2063static void
2064xlate_normal_mcast_send_fports(struct xlate_ctx *ctx,
2065 struct mcast_snooping *ms,
2066 struct xbundle *in_xbundle, uint16_t vlan)
2067 OVS_REQ_RDLOCK(ms->rwlock)
2068{
2069 struct xlate_cfg *xcfg;
f4ae6e23 2070 struct mcast_port_bundle *fport;
86e2dcdd
FL
2071 struct xbundle *mcast_xbundle;
2072
2073 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
f4ae6e23 2074 LIST_FOR_EACH(fport, node, &ms->fport_list) {
86e2dcdd
FL
2075 mcast_xbundle = xbundle_lookup(xcfg, fport->port);
2076 if (mcast_xbundle && mcast_xbundle != in_xbundle) {
2077 xlate_report(ctx, "forwarding to mcast flood port");
2078 output_normal(ctx, mcast_xbundle, vlan);
2079 } else if (!mcast_xbundle) {
2080 xlate_report(ctx, "mcast flood port is unknown, dropping");
2081 } else {
2082 xlate_report(ctx, "mcast flood port is input port, dropping");
2083 }
2084 }
2085}
2086
8e04a33f
FL
2087/* Forward the Reports to configured ports. */
2088static void
2089xlate_normal_mcast_send_rports(struct xlate_ctx *ctx,
2090 struct mcast_snooping *ms,
2091 struct xbundle *in_xbundle, uint16_t vlan)
2092 OVS_REQ_RDLOCK(ms->rwlock)
2093{
2094 struct xlate_cfg *xcfg;
2095 struct mcast_port_bundle *rport;
2096 struct xbundle *mcast_xbundle;
2097
2098 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
2099 LIST_FOR_EACH(rport, node, &ms->rport_list) {
2100 mcast_xbundle = xbundle_lookup(xcfg, rport->port);
2101 if (mcast_xbundle && mcast_xbundle != in_xbundle) {
2102 xlate_report(ctx, "forwarding Report to mcast flagged port");
2103 output_normal(ctx, mcast_xbundle, vlan);
2104 } else if (!mcast_xbundle) {
2105 xlate_report(ctx, "mcast port is unknown, dropping the Report");
2106 } else {
2107 xlate_report(ctx, "mcast port is input port, dropping the Report");
2108 }
2109 }
2110}
2111
682800a4
FL
2112static void
2113xlate_normal_flood(struct xlate_ctx *ctx, struct xbundle *in_xbundle,
2114 uint16_t vlan)
2115{
2116 struct xbundle *xbundle;
2117
2118 LIST_FOR_EACH (xbundle, list_node, &ctx->xbridge->xbundles) {
2119 if (xbundle != in_xbundle
2120 && xbundle_includes_vlan(xbundle, vlan)
2121 && xbundle->floodable
2122 && !xbundle_mirror_out(ctx->xbridge, xbundle)) {
2123 output_normal(ctx, xbundle, vlan);
2124 }
2125 }
2126 ctx->xout->nf_output_iface = NF_OUT_FLOOD;
2127}
2128
9583bc14
EJ
2129static void
2130xlate_normal(struct xlate_ctx *ctx)
2131{
33bf9176
BP
2132 struct flow_wildcards *wc = &ctx->xout->wc;
2133 struct flow *flow = &ctx->xin->flow;
46c88433
EJ
2134 struct xbundle *in_xbundle;
2135 struct xport *in_port;
9583bc14 2136 struct mac_entry *mac;
d6d5bbc9 2137 void *mac_port;
9583bc14
EJ
2138 uint16_t vlan;
2139 uint16_t vid;
2140
2141 ctx->xout->has_normal = true;
2142
33bf9176
BP
2143 memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
2144 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
1dd35f8a 2145 wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
9583bc14 2146
46c88433
EJ
2147 in_xbundle = lookup_input_bundle(ctx->xbridge, flow->in_port.ofp_port,
2148 ctx->xin->packet != NULL, &in_port);
2149 if (!in_xbundle) {
9583bc14
EJ
2150 xlate_report(ctx, "no input bundle, dropping");
2151 return;
2152 }
2153
2154 /* Drop malformed frames. */
33bf9176
BP
2155 if (flow->dl_type == htons(ETH_TYPE_VLAN) &&
2156 !(flow->vlan_tci & htons(VLAN_CFI))) {
9583bc14
EJ
2157 if (ctx->xin->packet != NULL) {
2158 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2159 VLOG_WARN_RL(&rl, "bridge %s: dropping packet with partial "
2160 "VLAN tag received on port %s",
46c88433 2161 ctx->xbridge->name, in_xbundle->name);
9583bc14
EJ
2162 }
2163 xlate_report(ctx, "partial VLAN tag, dropping");
2164 return;
2165 }
2166
2167 /* Drop frames on bundles reserved for mirroring. */
46c88433 2168 if (xbundle_mirror_out(ctx->xbridge, in_xbundle)) {
9583bc14
EJ
2169 if (ctx->xin->packet != NULL) {
2170 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2171 VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
2172 "%s, which is reserved exclusively for mirroring",
46c88433 2173 ctx->xbridge->name, in_xbundle->name);
9583bc14
EJ
2174 }
2175 xlate_report(ctx, "input port is mirror output port, dropping");
2176 return;
2177 }
2178
2179 /* Check VLAN. */
33bf9176 2180 vid = vlan_tci_to_vid(flow->vlan_tci);
46c88433 2181 if (!input_vid_is_valid(vid, in_xbundle, ctx->xin->packet != NULL)) {
9583bc14
EJ
2182 xlate_report(ctx, "disallowed VLAN VID for this input port, dropping");
2183 return;
2184 }
46c88433 2185 vlan = input_vid_to_vlan(in_xbundle, vid);
9583bc14
EJ
2186
2187 /* Check other admissibility requirements. */
2188 if (in_port && !is_admissible(ctx, in_port, vlan)) {
2189 return;
2190 }
2191
2192 /* Learn source MAC. */
2193 if (ctx->xin->may_learn) {
46c88433 2194 update_learning_table(ctx->xbridge, flow, wc, vlan, in_xbundle);
9583bc14 2195 }
b256dc52
JS
2196 if (ctx->xin->xcache) {
2197 struct xc_entry *entry;
2198
2199 /* Save enough info to update mac learning table later. */
2200 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NORMAL);
9edf6b48 2201 entry->u.normal.ofproto = ctx->xbridge->ofproto;
b256dc52
JS
2202 entry->u.normal.flow = xmemdup(flow, sizeof *flow);
2203 entry->u.normal.vlan = vlan;
2204 }
9583bc14
EJ
2205
2206 /* Determine output bundle. */
86e2dcdd
FL
2207 if (mcast_snooping_enabled(ctx->xbridge->ms)
2208 && !eth_addr_is_broadcast(flow->dl_dst)
2209 && eth_addr_is_multicast(flow->dl_dst)
2210 && flow->dl_type == htons(ETH_TYPE_IP)) {
2211 struct mcast_snooping *ms = ctx->xbridge->ms;
2212 struct mcast_group *grp;
2213
2214 if (flow->nw_proto == IPPROTO_IGMP) {
2215 if (ctx->xin->may_learn) {
2216 if (mcast_snooping_is_membership(flow->tp_src) ||
2217 mcast_snooping_is_query(flow->tp_src)) {
2218 update_mcast_snooping_table(ctx->xbridge, flow, vlan,
2219 in_xbundle);
2220 }
2221 }
d6d5bbc9 2222
86e2dcdd
FL
2223 if (mcast_snooping_is_membership(flow->tp_src)) {
2224 ovs_rwlock_rdlock(&ms->rwlock);
2225 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, vlan);
8e04a33f
FL
2226 /* RFC4541: section 2.1.1, item 1: A snooping switch should
2227 * forward IGMP Membership Reports only to those ports where
2228 * multicast routers are attached. Alternatively stated: a
2229 * snooping switch should not forward IGMP Membership Reports
2230 * to ports on which only hosts are attached.
2231 * An administrative control may be provided to override this
2232 * restriction, allowing the report messages to be flooded to
2233 * other ports. */
2234 xlate_normal_mcast_send_rports(ctx, ms, in_xbundle, vlan);
86e2dcdd
FL
2235 ovs_rwlock_unlock(&ms->rwlock);
2236 } else {
2237 xlate_report(ctx, "multicast traffic, flooding");
2238 xlate_normal_flood(ctx, in_xbundle, vlan);
2239 }
2240 return;
2241 } else {
2242 if (ip_is_local_multicast(flow->nw_dst)) {
2243 /* RFC4541: section 2.1.2, item 2: Packets with a dst IP
2244 * address in the 224.0.0.x range which are not IGMP must
2245 * be forwarded on all ports */
2246 xlate_report(ctx, "RFC4541: section 2.1.2, item 2, flooding");
2247 xlate_normal_flood(ctx, in_xbundle, vlan);
2248 return;
2249 }
2250 }
2251
 2252 /* Forward to group member ports. */
2253 ovs_rwlock_rdlock(&ms->rwlock);
2254 grp = mcast_snooping_lookup(ms, flow->nw_dst, vlan);
2255 if (grp) {
2256 xlate_normal_mcast_send_group(ctx, ms, grp, in_xbundle, vlan);
2257 xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, vlan);
2258 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, vlan);
9583bc14 2259 } else {
86e2dcdd
FL
2260 if (mcast_snooping_flood_unreg(ms)) {
2261 xlate_report(ctx, "unregistered multicast, flooding");
2262 xlate_normal_flood(ctx, in_xbundle, vlan);
2263 } else {
2264 xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, vlan);
2265 xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, vlan);
2266 }
9583bc14 2267 }
86e2dcdd 2268 ovs_rwlock_unlock(&ms->rwlock);
9583bc14 2269 } else {
86e2dcdd
FL
2270 ovs_rwlock_rdlock(&ctx->xbridge->ml->rwlock);
2271 mac = mac_learning_lookup(ctx->xbridge->ml, flow->dl_dst, vlan);
9d078ec2 2272 mac_port = mac ? mac_entry_get_port(ctx->xbridge->ml, mac) : NULL;
86e2dcdd
FL
2273 ovs_rwlock_unlock(&ctx->xbridge->ml->rwlock);
2274
2275 if (mac_port) {
2276 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
2277 struct xbundle *mac_xbundle = xbundle_lookup(xcfg, mac_port);
2278 if (mac_xbundle && mac_xbundle != in_xbundle) {
2279 xlate_report(ctx, "forwarding to learned port");
2280 output_normal(ctx, mac_xbundle, vlan);
2281 } else if (!mac_xbundle) {
2282 xlate_report(ctx, "learned port is unknown, dropping");
2283 } else {
2284 xlate_report(ctx, "learned port is input port, dropping");
2285 }
2286 } else {
2287 xlate_report(ctx, "no learned MAC for destination, flooding");
2288 xlate_normal_flood(ctx, in_xbundle, vlan);
2289 }
9583bc14
EJ
2290 }
2291}
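/* Condensed view of the OFPP_NORMAL pipeline implemented above (a summary,
 * not from the original source):
 *
 *     look up the input bundle -> drop malformed and mirror-reserved frames
 *     -> validate and map the VID -> check admissibility -> learn the source
 *     MAC -> choose output via multicast snooping, a learned unicast port,
 *     or flooding. */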
2292
2293/* Compose SAMPLE action for sFlow or IPFIX. The given probability is
2294 * the number of packets out of UINT32_MAX to sample. The given
2295 * cookie is passed back in the callback for each sampled packet.
2296 */
2297static size_t
46c88433 2298compose_sample_action(const struct xbridge *xbridge,
9583bc14
EJ
2299 struct ofpbuf *odp_actions,
2300 const struct flow *flow,
2301 const uint32_t probability,
2302 const union user_action_cookie *cookie,
8b7ea2d4
WZ
2303 const size_t cookie_size,
2304 const odp_port_t tunnel_out_port)
9583bc14
EJ
2305{
2306 size_t sample_offset, actions_offset;
89a8a7f0 2307 odp_port_t odp_port;
9583bc14 2308 int cookie_offset;
89a8a7f0 2309 uint32_t pid;
9583bc14
EJ
2310
2311 sample_offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SAMPLE);
2312
2313 nl_msg_put_u32(odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);
2314
2315 actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);
89a8a7f0
EJ
2316
2317 odp_port = ofp_port_to_odp_port(xbridge, flow->in_port.ofp_port);
9a159f74
AW
2318 pid = dpif_port_get_pid(xbridge->dpif, odp_port,
2319 flow_hash_5tuple(flow, 0));
2320 cookie_offset = odp_put_userspace_action(pid, cookie, cookie_size,
8b7ea2d4 2321 tunnel_out_port, odp_actions);
9583bc14
EJ
2322
2323 nl_msg_end_nested(odp_actions, actions_offset);
2324 nl_msg_end_nested(odp_actions, sample_offset);
2325 return cookie_offset;
2326}
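/* The nesting built above produces a datapath action of the form (sketch,
 * not from the original source):
 *
 *     sample(sample=P, actions(userspace(pid=PID, userdata=COOKIE)))
 *
 * The returned offset of COOKIE lets fix_sflow_action() below patch the
 * cookie once the final number of outputs is known. */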
2327
2328static void
46c88433
EJ
2329compose_sflow_cookie(const struct xbridge *xbridge, ovs_be16 vlan_tci,
2330 odp_port_t odp_port, unsigned int n_outputs,
2331 union user_action_cookie *cookie)
9583bc14
EJ
2332{
2333 int ifindex;
2334
2335 cookie->type = USER_ACTION_COOKIE_SFLOW;
2336 cookie->sflow.vlan_tci = vlan_tci;
2337
2338 /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
2339 * port information") for the interpretation of cookie->output. */
2340 switch (n_outputs) {
2341 case 0:
2342 /* 0x40000000 | 256 means "packet dropped for unknown reason". */
2343 cookie->sflow.output = 0x40000000 | 256;
2344 break;
2345
2346 case 1:
46c88433 2347 ifindex = dpif_sflow_odp_port_to_ifindex(xbridge->sflow, odp_port);
9583bc14
EJ
2348 if (ifindex) {
2349 cookie->sflow.output = ifindex;
2350 break;
2351 }
2352 /* Fall through. */
2353 default:
 2354 /* 0x80000000 means "multiple output ports". */
2355 cookie->sflow.output = 0x80000000 | n_outputs;
2356 break;
2357 }
2358}
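/* Summary of the sFlow 'output' encodings used above, per the sFlow v5 spec
 * referenced in the comment:
 *
 *     0x40000000 | 256    packet dropped for an unknown reason
 *     ifindex             exactly one output port
 *     0x80000000 | n      multiple (n) output ports
 *
 * A single output port whose ifindex cannot be determined falls through to
 * the multiple-ports encoding with n == 1. */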
2359
2360/* Compose SAMPLE action for sFlow bridge sampling. */
2361static size_t
46c88433 2362compose_sflow_action(const struct xbridge *xbridge,
9583bc14
EJ
2363 struct ofpbuf *odp_actions,
2364 const struct flow *flow,
4e022ec0 2365 odp_port_t odp_port)
9583bc14
EJ
2366{
2367 uint32_t probability;
2368 union user_action_cookie cookie;
2369
46c88433 2370 if (!xbridge->sflow || flow->in_port.ofp_port == OFPP_NONE) {
9583bc14
EJ
2371 return 0;
2372 }
2373
46c88433
EJ
2374 probability = dpif_sflow_get_probability(xbridge->sflow);
2375 compose_sflow_cookie(xbridge, htons(0), odp_port,
4e022ec0 2376 odp_port == ODPP_NONE ? 0 : 1, &cookie);
9583bc14 2377
46c88433 2378 return compose_sample_action(xbridge, odp_actions, flow, probability,
8b7ea2d4 2379 &cookie, sizeof cookie.sflow, ODPP_NONE);
9583bc14
EJ
2380}
2381
2382static void
2383compose_flow_sample_cookie(uint16_t probability, uint32_t collector_set_id,
2384 uint32_t obs_domain_id, uint32_t obs_point_id,
2385 union user_action_cookie *cookie)
2386{
2387 cookie->type = USER_ACTION_COOKIE_FLOW_SAMPLE;
2388 cookie->flow_sample.probability = probability;
2389 cookie->flow_sample.collector_set_id = collector_set_id;
2390 cookie->flow_sample.obs_domain_id = obs_domain_id;
2391 cookie->flow_sample.obs_point_id = obs_point_id;
2392}
2393
2394static void
8b7ea2d4
WZ
2395compose_ipfix_cookie(union user_action_cookie *cookie,
2396 odp_port_t output_odp_port)
9583bc14
EJ
2397{
2398 cookie->type = USER_ACTION_COOKIE_IPFIX;
8b7ea2d4 2399 cookie->ipfix.output_odp_port = output_odp_port;
9583bc14
EJ
2400}
2401
2402/* Compose SAMPLE action for IPFIX bridge sampling. */
2403static void
46c88433 2404compose_ipfix_action(const struct xbridge *xbridge,
9583bc14 2405 struct ofpbuf *odp_actions,
8b7ea2d4
WZ
2406 const struct flow *flow,
2407 odp_port_t output_odp_port)
9583bc14
EJ
2408{
2409 uint32_t probability;
2410 union user_action_cookie cookie;
8b7ea2d4 2411 odp_port_t tunnel_out_port = ODPP_NONE;
9583bc14 2412
46c88433 2413 if (!xbridge->ipfix || flow->in_port.ofp_port == OFPP_NONE) {
9583bc14
EJ
2414 return;
2415 }
2416
8b7ea2d4
WZ
2417 /* For input case, output_odp_port is ODPP_NONE, which is an invalid port
2418 * number. */
2419 if (output_odp_port == ODPP_NONE &&
2420 !dpif_ipfix_get_bridge_exporter_input_sampling(xbridge->ipfix)) {
2421 return;
2422 }
2423
 2424 /* For output case, output_odp_port is valid. */
2425 if (output_odp_port != ODPP_NONE) {
2426 if (!dpif_ipfix_get_bridge_exporter_output_sampling(xbridge->ipfix)) {
2427 return;
2428 }
2429 /* If tunnel sampling is enabled, put an additional option attribute:
2430 * OVS_USERSPACE_ATTR_TUNNEL_OUT_PORT
2431 */
2432 if (dpif_ipfix_get_bridge_exporter_tunnel_sampling(xbridge->ipfix) &&
 2433 dpif_ipfix_get_tunnel_port(xbridge->ipfix, output_odp_port)) {
2434 tunnel_out_port = output_odp_port;
2435 }
2436 }
2437
46c88433 2438 probability = dpif_ipfix_get_bridge_exporter_probability(xbridge->ipfix);
8b7ea2d4 2439 compose_ipfix_cookie(&cookie, output_odp_port);
9583bc14 2440
46c88433 2441 compose_sample_action(xbridge, odp_actions, flow, probability,
8b7ea2d4 2442 &cookie, sizeof cookie.ipfix, tunnel_out_port);
9583bc14
EJ
2443}
2444
2445/* SAMPLE action for sFlow must be the first action in any given list of
2446 * actions. At this point we do not have all the information required to
2447 * build it, so we build the sample action as completely as possible. */
2448static void
2449add_sflow_action(struct xlate_ctx *ctx)
2450{
46c88433 2451 ctx->user_cookie_offset = compose_sflow_action(ctx->xbridge,
cc377352 2452 ctx->xout->odp_actions,
4e022ec0 2453 &ctx->xin->flow, ODPP_NONE);
9583bc14
EJ
2454 ctx->sflow_odp_port = 0;
2455 ctx->sflow_n_outputs = 0;
2456}
2457
2458/* SAMPLE action for IPFIX must be the 1st or 2nd action in any given list
2459 * of actions, i.e. after the SAMPLE action for sFlow if one is present. */
2460static void
2461add_ipfix_action(struct xlate_ctx *ctx)
2462{
cc377352 2463 compose_ipfix_action(ctx->xbridge, ctx->xout->odp_actions,
8b7ea2d4
WZ
2464 &ctx->xin->flow, ODPP_NONE);
2465}
2466
2467static void
2468add_ipfix_output_action(struct xlate_ctx *ctx, odp_port_t port)
2469{
2470 compose_ipfix_action(ctx->xbridge, ctx->xout->odp_actions,
2471 &ctx->xin->flow, port);
9583bc14
EJ
2472}
2473
2474/* Fix SAMPLE action according to data collected while composing ODP actions.
 2475 * We need to fix the SAMPLE action's OVS_SAMPLE_ATTR_ACTIONS attribute, i.e.
 2476 * the nested USERSPACE action's user-cookie, which is required for sFlow. */
2477static void
2478fix_sflow_action(struct xlate_ctx *ctx)
2479{
2480 const struct flow *base = &ctx->base_flow;
2481 union user_action_cookie *cookie;
2482
2483 if (!ctx->user_cookie_offset) {
2484 return;
2485 }
2486
cc377352 2487 cookie = ofpbuf_at(ctx->xout->odp_actions, ctx->user_cookie_offset,
9583bc14
EJ
2488 sizeof cookie->sflow);
2489 ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
2490
46c88433 2491 compose_sflow_cookie(ctx->xbridge, base->vlan_tci,
9583bc14
EJ
2492 ctx->sflow_odp_port, ctx->sflow_n_outputs, cookie);
2493}
2494
db7d4e46 2495static enum slow_path_reason
642dc74d 2496process_special(struct xlate_ctx *ctx, const struct flow *flow,
46c88433 2497 const struct xport *xport, const struct ofpbuf *packet)
db7d4e46 2498{
642dc74d 2499 struct flow_wildcards *wc = &ctx->xout->wc;
46c88433 2500 const struct xbridge *xbridge = ctx->xbridge;
642dc74d 2501
46c88433 2502 if (!xport) {
db7d4e46 2503 return 0;
46c88433 2504 } else if (xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc)) {
db7d4e46 2505 if (packet) {
46c88433 2506 cfm_process_heartbeat(xport->cfm, packet);
db7d4e46
JP
2507 }
2508 return SLOW_CFM;
fab52e16 2509 } else if (xport->bfd && bfd_should_process_flow(xport->bfd, flow, wc)) {
db7d4e46 2510 if (packet) {
46c88433 2511 bfd_process_packet(xport->bfd, flow, packet);
60d02c72
AW
2512 /* If POLL received, immediately sends FINAL back. */
2513 if (bfd_should_send_packet(xport->bfd)) {
6d308b28 2514 ofproto_dpif_monitor_port_send_soon(xport->ofport);
60d02c72 2515 }
db7d4e46
JP
2516 }
2517 return SLOW_BFD;
46c88433 2518 } else if (xport->xbundle && xport->xbundle->lacp
db7d4e46
JP
2519 && flow->dl_type == htons(ETH_TYPE_LACP)) {
2520 if (packet) {
46c88433 2521 lacp_process_packet(xport->xbundle->lacp, xport->ofport, packet);
db7d4e46
JP
2522 }
2523 return SLOW_LACP;
9efd308e
DV
2524 } else if ((xbridge->stp || xbridge->rstp) &&
2525 stp_should_process_flow(flow, wc)) {
db7d4e46 2526 if (packet) {
f025bcb7
JR
2527 xbridge->stp
2528 ? stp_process_packet(xport, packet)
2529 : rstp_process_packet(xport, packet);
db7d4e46
JP
2530 }
2531 return SLOW_STP;
2532 } else {
2533 return 0;
2534 }
2535}
2536
a36de779
PS
2537static int
2538tnl_route_lookup_flow(const struct flow *oflow,
2539 ovs_be32 *ip, struct xport **out_port)
2540{
2541 char out_dev[IFNAMSIZ];
2542 struct xbridge *xbridge;
2543 struct xlate_cfg *xcfg;
2544 ovs_be32 gw;
2545
2546 if (!ovs_router_lookup(oflow->tunnel.ip_dst, out_dev, &gw)) {
2547 return -ENOENT;
2548 }
2549
2550 if (gw) {
2551 *ip = gw;
2552 } else {
2553 *ip = oflow->tunnel.ip_dst;
2554 }
2555
2556 xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
2557 ovs_assert(xcfg);
2558
2559 HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
2560 if (!strncmp(xbridge->name, out_dev, IFNAMSIZ)) {
2561 struct xport *port;
2562
2563 HMAP_FOR_EACH (port, ofp_node, &xbridge->xports) {
2564 if (!strncmp(netdev_get_name(port->netdev), out_dev, IFNAMSIZ)) {
2565 *out_port = port;
2566 return 0;
2567 }
2568 }
2569 }
2570 }
2571 return -ENOENT;
2572}
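/* Example (not from the original source; names and addresses are
 * hypothetical): for a tunnel remote 198.51.100.7 reachable via gateway
 * 10.0.0.1 on device "br-ex", the lookup above sets *ip to 10.0.0.1 (the
 * next hop to ARP for) and *out_port to the local port of the bridge named
 * "br-ex".  For an on-link route, *ip is the tunnel destination itself. */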
2573
2574static int
2575xlate_flood_packet(struct xbridge *xbridge, struct ofpbuf *packet)
2576{
2577 struct ofpact_output output;
2578 struct flow flow;
2579
2580 ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
2581 /* Use OFPP_NONE as the in_port to avoid special packet processing. */
2582 flow_extract(packet, NULL, &flow);
2583 flow.in_port.ofp_port = OFPP_NONE;
2584 output.port = OFPP_FLOOD;
2585 output.max_len = 0;
2586
2587 return ofproto_dpif_execute_actions(xbridge->ofproto, &flow, NULL,
2588 &output.ofpact, sizeof output,
2589 packet);
2590}
2591
2592static void
2593tnl_send_arp_request(const struct xport *out_dev, const uint8_t eth_src[ETH_ADDR_LEN],
2594 ovs_be32 ip_src, ovs_be32 ip_dst)
2595{
2596 struct xbridge *xbridge = out_dev->xbridge;
2597 struct ofpbuf packet;
2598
2599 ofpbuf_init(&packet, 0);
2600 compose_arp(&packet, eth_src, ip_src, ip_dst);
2601
2602 xlate_flood_packet(xbridge, &packet);
2603 ofpbuf_uninit(&packet);
2604}
2605
2606static int
2607build_tunnel_send(const struct xlate_ctx *ctx, const struct xport *xport,
2608 const struct flow *flow, odp_port_t tunnel_odp_port)
2609{
2610 struct ovs_action_push_tnl tnl_push_data;
2611 struct xport *out_dev = NULL;
2612 ovs_be32 s_ip, d_ip = 0;
2613 uint8_t smac[ETH_ADDR_LEN];
2614 uint8_t dmac[ETH_ADDR_LEN];
2615 int err;
2616
2617 err = tnl_route_lookup_flow(flow, &d_ip, &out_dev);
2618 if (err) {
2619 return err;
2620 }
2621
 2622 /* Use the MAC address of the bridge port of the peer. */
2623 err = netdev_get_etheraddr(out_dev->netdev, smac);
2624 if (err) {
2625 return err;
2626 }
2627
2628 err = netdev_get_in4(out_dev->netdev, (struct in_addr *) &s_ip, NULL);
2629 if (err) {
2630 return err;
2631 }
2632
2633 err = tnl_arp_lookup(out_dev->xbridge->name, d_ip, dmac);
2634 if (err) {
2635 tnl_send_arp_request(out_dev, smac, s_ip, d_ip);
2636 return err;
2637 }
2638 if (ctx->xin->xcache) {
2639 struct xc_entry *entry;
2640
2641 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TNL_ARP);
8742957c
BP
2642 ovs_strlcpy(entry->u.tnl_arp_cache.br_name, out_dev->xbridge->name,
2643 sizeof entry->u.tnl_arp_cache.br_name);
a36de779
PS
2644 entry->u.tnl_arp_cache.d_ip = d_ip;
2645 }
2646 err = tnl_port_build_header(xport->ofport, flow,
2647 dmac, smac, s_ip, &tnl_push_data);
2648 if (err) {
2649 return err;
2650 }
2651 tnl_push_data.tnl_port = odp_to_u32(tunnel_odp_port);
2652 tnl_push_data.out_port = odp_to_u32(out_dev->odp_port);
2653 odp_put_tnl_push_action(ctx->xout->odp_actions, &tnl_push_data);
2654 return 0;
2655}
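/* On an ARP cache miss above, build_tunnel_send() floods an ARP request for
 * the next hop and returns the error, so this packet is not tunneled; once
 * the reply populates the cache, a later translation emits a complete
 * tnl_push action. */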
2656
9583bc14 2657static void
4e022ec0 2658compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
9583bc14
EJ
2659 bool check_stp)
2660{
46c88433 2661 const struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
1dd35f8a 2662 struct flow_wildcards *wc = &ctx->xout->wc;
33bf9176 2663 struct flow *flow = &ctx->xin->flow;
a36de779 2664 struct flow_tnl flow_tnl;
9583bc14 2665 ovs_be16 flow_vlan_tci;
1362e248 2666 uint32_t flow_pkt_mark;
9583bc14 2667 uint8_t flow_nw_tos;
4e022ec0 2668 odp_port_t out_port, odp_port;
a36de779 2669 bool tnl_push_pop_send = false;
ca077186 2670 uint8_t dscp;
9583bc14
EJ
2671
2672 /* If 'struct flow' gets additional metadata, we'll need to zero it out
2673 * before traversing a patch port. */
ac6073e3 2674 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 31);
a36de779 2675 memset(&flow_tnl, 0, sizeof flow_tnl);
9583bc14 2676
46c88433 2677 if (!xport) {
9583bc14
EJ
2678 xlate_report(ctx, "Nonexistent output port");
2679 return;
46c88433 2680 } else if (xport->config & OFPUTIL_PC_NO_FWD) {
9583bc14
EJ
2681 xlate_report(ctx, "OFPPC_NO_FWD set, skipping output");
2682 return;
0d1cee12 2683 } else if (check_stp) {
bbbca389 2684 if (is_stp(&ctx->base_flow)) {
9efd308e
DV
2685 if (!xport_stp_should_forward_bpdu(xport) &&
2686 !xport_rstp_should_manage_bpdu(xport)) {
2687 if (ctx->xbridge->stp != NULL) {
2688 xlate_report(ctx, "STP not in listening state, "
2689 "skipping bpdu output");
2690 } else if (ctx->xbridge->rstp != NULL) {
2691 xlate_report(ctx, "RSTP not managing BPDU in this state, "
2692 "skipping bpdu output");
2693 }
0d1cee12
K
2694 return;
2695 }
9efd308e
DV
2696 } else if (!xport_stp_forward_state(xport) ||
2697 !xport_rstp_forward_state(xport)) {
2698 if (ctx->xbridge->stp != NULL) {
2699 xlate_report(ctx, "STP not in forwarding state, "
2700 "skipping output");
2701 } else if (ctx->xbridge->rstp != NULL) {
2702 xlate_report(ctx, "RSTP not in forwarding state, "
2703 "skipping output");
2704 }
0d1cee12
K
2705 return;
2706 }
9583bc14
EJ
2707 }
2708
46c88433
EJ
2709 if (mbridge_has_mirrors(ctx->xbridge->mbridge) && xport->xbundle) {
2710 ctx->xout->mirrors |= xbundle_mirror_dst(xport->xbundle->xbridge,
2711 xport->xbundle);
cdf5d3a5
EJ
2712 }
2713
46c88433
EJ
2714 if (xport->peer) {
2715 const struct xport *peer = xport->peer;
9583bc14 2716 struct flow old_flow = ctx->xin->flow;
9583bc14 2717 enum slow_path_reason special;
0c7812e5 2718 uint8_t table_id = rule_dpif_lookup_get_init_table_id(&ctx->xin->flow);
9583bc14 2719
46c88433
EJ
2720 ctx->xbridge = peer->xbridge;
2721 flow->in_port.ofp_port = peer->ofp_port;
33bf9176
BP
2722 flow->metadata = htonll(0);
2723 memset(&flow->tunnel, 0, sizeof flow->tunnel);
2724 memset(flow->regs, 0, sizeof flow->regs);
c61f3870 2725 flow->actset_output = OFPP_UNSET;
9583bc14 2726
642dc74d 2727 special = process_special(ctx, &ctx->xin->flow, peer,
9583bc14
EJ
2728 ctx->xin->packet);
2729 if (special) {
04594cd5 2730 ctx->xout->slow |= special;
ddd3c975 2731 } else if (may_receive(peer, ctx)) {
9efd308e 2732 if (xport_stp_forward_state(peer) && xport_rstp_forward_state(peer)) {
0c7812e5
AW
2733 xlate_table_action(ctx, flow->in_port.ofp_port, table_id,
2734 true, true);
9583bc14 2735 } else {
9efd308e
DV
2736 /* Forwarding is disabled by STP and RSTP. Let OFPP_NORMAL and
2737 * the learning action look at the packet, then drop it. */
9583bc14 2738 struct flow old_base_flow = ctx->base_flow;
cc377352 2739 size_t old_size = ofpbuf_size(ctx->xout->odp_actions);
cdf5d3a5 2740 mirror_mask_t old_mirrors = ctx->xout->mirrors;
0c7812e5
AW
2741 xlate_table_action(ctx, flow->in_port.ofp_port, table_id,
2742 true, true);
cdf5d3a5 2743 ctx->xout->mirrors = old_mirrors;
9583bc14 2744 ctx->base_flow = old_base_flow;
cc377352 2745 ofpbuf_set_size(ctx->xout->odp_actions, old_size);
9583bc14
EJ
2746 }
2747 }
2748
2749 ctx->xin->flow = old_flow;
832554e3 2750 ctx->xbridge = xport->xbridge;
9583bc14
EJ
2751
2752 if (ctx->xin->resubmit_stats) {
46c88433
EJ
2753 netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
2754 netdev_vport_inc_rx(peer->netdev, ctx->xin->resubmit_stats);
a1aeea86
AW
2755 if (peer->bfd) {
2756 bfd_account_rx(peer->bfd, ctx->xin->resubmit_stats);
2757 }
9583bc14 2758 }
b256dc52
JS
2759 if (ctx->xin->xcache) {
2760 struct xc_entry *entry;
2761
2762 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
2763 entry->u.dev.tx = netdev_ref(xport->netdev);
2764 entry->u.dev.rx = netdev_ref(peer->netdev);
2765 entry->u.dev.bfd = bfd_ref(peer->bfd);
2766 }
9583bc14
EJ
2767 return;
2768 }
2769
33bf9176 2770 flow_vlan_tci = flow->vlan_tci;
1362e248 2771 flow_pkt_mark = flow->pkt_mark;
33bf9176 2772 flow_nw_tos = flow->nw_tos;
9583bc14 2773
16194afd
DDP
2774 if (count_skb_priorities(xport)) {
2775 memset(&wc->masks.skb_priority, 0xff, sizeof wc->masks.skb_priority);
2776 if (dscp_from_skb_priority(xport, flow->skb_priority, &dscp)) {
2777 wc->masks.nw_tos |= IP_DSCP_MASK;
2778 flow->nw_tos &= ~IP_DSCP_MASK;
2779 flow->nw_tos |= dscp;
2780 }
9583bc14
EJ
2781 }
2782
46c88433 2783 if (xport->is_tunnel) {
9583bc14
EJ
2784 /* Save tunnel metadata so that changes made due to
2785 * the Logical (tunnel) Port are not visible for any further
2786 * matches, while explicit set actions on tunnel metadata are.
2787 */
a36de779 2788 flow_tnl = flow->tunnel;
46c88433 2789 odp_port = tnl_port_send(xport->ofport, flow, &ctx->xout->wc);
4e022ec0 2790 if (odp_port == ODPP_NONE) {
9583bc14
EJ
2791 xlate_report(ctx, "Tunneling decided against output");
2792 goto out; /* restore flow_nw_tos */
2793 }
33bf9176 2794 if (flow->tunnel.ip_dst == ctx->orig_tunnel_ip_dst) {
9583bc14
EJ
2795 xlate_report(ctx, "Not tunneling to our own address");
2796 goto out; /* restore flow_nw_tos */
2797 }
2798 if (ctx->xin->resubmit_stats) {
46c88433 2799 netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
9583bc14 2800 }
b256dc52
JS
2801 if (ctx->xin->xcache) {
2802 struct xc_entry *entry;
2803
2804 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
2805 entry->u.dev.tx = netdev_ref(xport->netdev);
2806 }
9583bc14 2807 out_port = odp_port;
a36de779
PS
2808 if (ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
2809 tnl_push_pop_send = true;
2810 } else {
2811 commit_odp_tunnel_action(flow, &ctx->base_flow,
2812 ctx->xout->odp_actions);
2813 flow->tunnel = flow_tnl; /* Restore tunnel metadata */
2814 }
9583bc14 2815 } else {
46c88433 2816 odp_port = xport->odp_port;
7614e5d0 2817 out_port = odp_port;
46c88433 2818 if (ofproto_has_vlan_splinters(ctx->xbridge->ofproto)) {
7614e5d0
JR
2819 ofp_port_t vlandev_port;
2820
1dd35f8a 2821 wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
7614e5d0
JR
2822 vlandev_port = vsp_realdev_to_vlandev(ctx->xbridge->ofproto,
2823 ofp_port, flow->vlan_tci);
2824 if (vlandev_port != ofp_port) {
2825 out_port = ofp_port_to_odp_port(ctx->xbridge, vlandev_port);
2826 flow->vlan_tci = htons(0);
2827 }
9583bc14 2828 }
9583bc14 2829 }
9583bc14 2830
4e022ec0 2831 if (out_port != ODPP_NONE) {
7fd91025 2832 ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
cc377352 2833 ctx->xout->odp_actions,
a36de779 2834 wc,
d23df9a8 2835 ctx->xbridge->masked_set_action);
adcf00ba 2836
92c08f09 2837 if (ctx->use_recirc) {
347bf289 2838 struct ovs_action_hash *act_hash;
92c08f09 2839 struct xlate_recirc *xr = &ctx->recirc;
adcf00ba 2840
347bf289 2841 /* Hash action. */
cc377352 2842 act_hash = nl_msg_put_unspec_uninit(ctx->xout->odp_actions,
347bf289
AZ
2843 OVS_ACTION_ATTR_HASH,
2844 sizeof *act_hash);
2845 act_hash->hash_alg = xr->hash_alg;
62ac1f20 2846 act_hash->hash_basis = xr->hash_basis;
347bf289
AZ
2847
2848 /* Recirc action. */
cc377352 2849 nl_msg_put_u32(ctx->xout->odp_actions, OVS_ACTION_ATTR_RECIRC,
347bf289 2850 xr->recirc_id);
adcf00ba 2851 } else {
a36de779
PS
2852
2853 if (tnl_push_pop_send) {
2854 build_tunnel_send(ctx, xport, flow, odp_port);
2855 flow->tunnel = flow_tnl; /* Restore tunnel metadata */
2856 } else {
2857 odp_port_t odp_tnl_port = ODPP_NONE;
2858
 2859 /* XXX: Write a better filter for the tunnel port. We can use in_port
 2860 * in the tunnel-port flow to avoid these checks completely. */
2861 if (ofp_port == OFPP_LOCAL &&
2862 ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
2863
2864 odp_tnl_port = tnl_port_map_lookup(flow, wc);
2865 }
2866
2867 if (odp_tnl_port != ODPP_NONE) {
2868 nl_msg_put_odp_port(ctx->xout->odp_actions,
2869 OVS_ACTION_ATTR_TUNNEL_POP,
2870 odp_tnl_port);
2871 } else {
2872 /* Tunnel push-pop action is not compatible with
2873 * IPFIX action. */
2874 add_ipfix_output_action(ctx, out_port);
2875 nl_msg_put_odp_port(ctx->xout->odp_actions,
2876 OVS_ACTION_ATTR_OUTPUT,
2877 out_port);
2878 }
2879 }
adcf00ba 2880 }
9583bc14 2881
6cbbf4fa
EJ
2882 ctx->sflow_odp_port = odp_port;
2883 ctx->sflow_n_outputs++;
2884 ctx->xout->nf_output_iface = ofp_port;
2885 }
2886
2887 out:
9583bc14 2888 /* Restore flow */
33bf9176 2889 flow->vlan_tci = flow_vlan_tci;
1362e248 2890 flow->pkt_mark = flow_pkt_mark;
33bf9176 2891 flow->nw_tos = flow_nw_tos;
9583bc14
EJ
2892}
2893
2894static void
4e022ec0 2895compose_output_action(struct xlate_ctx *ctx, ofp_port_t ofp_port)
9583bc14
EJ
2896{
2897 compose_output_action__(ctx, ofp_port, true);
2898}
2899
bb61b33d
BP
2900static void
2901xlate_recursively(struct xlate_ctx *ctx, struct rule_dpif *rule)
bb61b33d
BP
2902{
2903 struct rule_dpif *old_rule = ctx->rule;
dc723c44 2904 const struct rule_actions *actions;
bb61b33d
BP
2905
2906 if (ctx->xin->resubmit_stats) {
70742c7f 2907 rule_dpif_credit_stats(rule, ctx->xin->resubmit_stats);
bb61b33d
BP
2908 }
2909
98b07853 2910 ctx->resubmits++;
bb61b33d
BP
2911 ctx->recurse++;
2912 ctx->rule = rule;
6f00e29b
BP
2913 actions = rule_dpif_get_actions(rule);
2914 do_xlate_actions(actions->ofpacts, actions->ofpacts_len, ctx);
bb61b33d
BP
2915 ctx->rule = old_rule;
2916 ctx->recurse--;
bb61b33d
BP
2917}
2918
bd3240ba
SH
2919static bool
2920xlate_resubmit_resource_check(struct xlate_ctx *ctx)
9583bc14 2921{
98b07853
BP
2922 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
2923
adcf00ba 2924 if (ctx->recurse >= MAX_RESUBMIT_RECURSION + MAX_INTERNAL_RESUBMITS) {
98b07853
BP
2925 VLOG_ERR_RL(&rl, "resubmit actions recursed over %d times",
2926 MAX_RESUBMIT_RECURSION);
adcf00ba 2927 } else if (ctx->resubmits >= MAX_RESUBMITS + MAX_INTERNAL_RESUBMITS) {
98b07853 2928 VLOG_ERR_RL(&rl, "over %d resubmit actions", MAX_RESUBMITS);
cc377352 2929 } else if (ofpbuf_size(ctx->xout->odp_actions) > UINT16_MAX) {
98b07853 2930 VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of actions");
1f317cb5 2931 } else if (ofpbuf_size(&ctx->stack) >= 65536) {
98b07853
BP
2932 VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of stack");
2933 } else {
bd3240ba
SH
2934 return true;
2935 }
2936
2937 return false;
2938}
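/* The limits enforced above, condensed:
 *
 *     recursion depth  <  MAX_RESUBMIT_RECURSION + MAX_INTERNAL_RESUBMITS
 *     total resubmits  <  MAX_RESUBMITS + MAX_INTERNAL_RESUBMITS
 *     odp action bytes <= UINT16_MAX (64 kB)
 *     stack bytes      <  64 kB
 */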
2939
2940static void
6d328fa2
SH
2941xlate_table_action(struct xlate_ctx *ctx, ofp_port_t in_port, uint8_t table_id,
2942 bool may_packet_in, bool honor_table_miss)
bd3240ba
SH
2943{
2944 if (xlate_resubmit_resource_check(ctx)) {
34dd0d78 2945 struct flow_wildcards *wc;
9583bc14 2946 uint8_t old_table_id = ctx->table_id;
3f207910 2947 struct rule_dpif *rule;
9583bc14
EJ
2948
2949 ctx->table_id = table_id;
34dd0d78 2950 wc = (ctx->xin->skip_wildcards) ? NULL : &ctx->xout->wc;
9583bc14 2951
34dd0d78
JR
2952 rule = rule_dpif_lookup_from_table(ctx->xbridge->ofproto,
2953 &ctx->xin->flow, wc,
2954 ctx->xin->xcache != NULL,
2955 ctx->xin->resubmit_stats,
2956 &ctx->table_id, in_port,
2957 may_packet_in, honor_table_miss);
9583bc14 2958
a8c31348
BP
2959 if (OVS_UNLIKELY(ctx->xin->resubmit_hook)) {
2960 ctx->xin->resubmit_hook(ctx->xin, rule, ctx->recurse + 1);
ad3efdcb
EJ
2961 }
2962
a2143702 2963 if (rule) {
83709dfa
JR
2964 /* Fill in the cache entry here instead of xlate_recursively
2965 * to make the reference counting more explicit. We take a
2966 * reference in the lookups above if we are going to cache the
2967 * rule. */
2968 if (ctx->xin->xcache) {
2969 struct xc_entry *entry;
2970
2971 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_RULE);
2972 entry->u.rule = rule;
2973 }
bb61b33d 2974 xlate_recursively(ctx, rule);
ad3efdcb
EJ
2975 }
2976
9583bc14 2977 ctx->table_id = old_table_id;
98b07853 2978 return;
9583bc14 2979 }
98b07853
BP
2980
2981 ctx->exit = true;
9583bc14
EJ
2982}
2983
f4fb341b 2984static void
1e684d7d
RW
2985xlate_group_stats(struct xlate_ctx *ctx, struct group_dpif *group,
2986 struct ofputil_bucket *bucket)
2987{
2988 if (ctx->xin->resubmit_stats) {
2989 group_dpif_credit_stats(group, bucket, ctx->xin->resubmit_stats);
2990 }
2991 if (ctx->xin->xcache) {
2992 struct xc_entry *entry;
2993
2994 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_GROUP);
2995 entry->u.group.group = group_dpif_ref(group);
2996 entry->u.group.bucket = bucket;
2997 }
2998}
2999
3000static void
3001xlate_group_bucket(struct xlate_ctx *ctx, struct ofputil_bucket *bucket)
f4fb341b
SH
3002{
3003 uint64_t action_list_stub[1024 / 8];
3004 struct ofpbuf action_list, action_set;
3005
3006 ofpbuf_use_const(&action_set, bucket->ofpacts, bucket->ofpacts_len);
3007 ofpbuf_use_stub(&action_list, action_list_stub, sizeof action_list_stub);
3008
3009 ofpacts_execute_action_set(&action_list, &action_set);
3010 ctx->recurse++;
1f317cb5 3011 do_xlate_actions(ofpbuf_data(&action_list), ofpbuf_size(&action_list), ctx);
f4fb341b
SH
3012 ctx->recurse--;
3013
3014 ofpbuf_uninit(&action_set);
3015 ofpbuf_uninit(&action_list);
3016}
3017
3018static void
3019xlate_all_group(struct xlate_ctx *ctx, struct group_dpif *group)
3020{
1e684d7d 3021 struct ofputil_bucket *bucket;
ca6ba700 3022 const struct ovs_list *buckets;
f4fb341b
SH
3023 struct flow old_flow = ctx->xin->flow;
3024
3025 group_dpif_get_buckets(group, &buckets);
3026
3027 LIST_FOR_EACH (bucket, list_node, buckets) {
3028 xlate_group_bucket(ctx, bucket);
3029 /* Roll back flow to previous state.
3030 * This is equivalent to cloning the packet for each bucket.
3031 *
3032 * As a side effect any subsequently applied actions will
3033 * also effectively be applied to a clone of the packet taken
3034 * just before applying the all or indirect group. */
3035 ctx->xin->flow = old_flow;
3036 }
1e684d7d 3037 xlate_group_stats(ctx, group, NULL);
f4fb341b
SH
3038}
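/* Example (not from the original source): restoring old_flow after each
 * bucket makes an "all" group behave as if every bucket received its own
 * clone.  With buckets {set_vlan_vid:10, output:1} and {output:2}, port 1
 * sees the retagged packet while port 2 sees the original one. */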
3039
dd8cd4b4
SH
3040static void
3041xlate_ff_group(struct xlate_ctx *ctx, struct group_dpif *group)
3042{
1e684d7d 3043 struct ofputil_bucket *bucket;
dd8cd4b4
SH
3044
3045 bucket = group_first_live_bucket(ctx, group, 0);
3046 if (bucket) {
3047 xlate_group_bucket(ctx, bucket);
1e684d7d 3048 xlate_group_stats(ctx, group, bucket);
dd8cd4b4
SH
3049 }
3050}
3051
fe7e5749
SH
3052static void
3053xlate_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
3054{
3055 struct flow_wildcards *wc = &ctx->xout->wc;
1e684d7d 3056 struct ofputil_bucket *bucket;
fe7e5749
SH
3057 uint32_t basis;
3058
1d1aae0b 3059 basis = flow_hash_symmetric_l4(&ctx->xin->flow, 0);
fe7e5749
SH
3060 bucket = group_best_live_bucket(ctx, group, basis);
3061 if (bucket) {
3062 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
1d1aae0b
SS
3063 memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
3064 memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
3065 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
3066 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
3067 memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
3068 memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
3069 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
3070 memset(&wc->masks.vlan_tci, 0xff, sizeof wc->masks.vlan_tci);
3071
fe7e5749 3072 xlate_group_bucket(ctx, bucket);
1e684d7d 3073 xlate_group_stats(ctx, group, bucket);
fe7e5749
SH
3074 }
3075}
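/* Bucket selection above hashes symmetric L4 fields, so both directions of a
 * connection choose the same live bucket; every hashed field is then
 * unwildcarded so that the installed megaflow cannot match packets that
 * would have hashed to a different bucket. */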
3076
f4fb341b
SH
3077static void
3078xlate_group_action__(struct xlate_ctx *ctx, struct group_dpif *group)
3079{
5a070238
BP
3080 ctx->in_group = true;
3081
f4fb341b
SH
3082 switch (group_dpif_get_type(group)) {
3083 case OFPGT11_ALL:
3084 case OFPGT11_INDIRECT:
3085 xlate_all_group(ctx, group);
3086 break;
3087 case OFPGT11_SELECT:
fe7e5749 3088 xlate_select_group(ctx, group);
f4fb341b 3089 break;
dd8cd4b4
SH
3090 case OFPGT11_FF:
3091 xlate_ff_group(ctx, group);
3092 break;
f4fb341b 3093 default:
428b2edd 3094 OVS_NOT_REACHED();
f4fb341b 3095 }
809c7548 3096 group_dpif_unref(group);
5a070238
BP
3097
3098 ctx->in_group = false;
3099}
3100
3101static bool
3102xlate_group_resource_check(struct xlate_ctx *ctx)
3103{
3104 if (!xlate_resubmit_resource_check(ctx)) {
3105 return false;
3106 } else if (ctx->in_group) {
3107 /* Prevent nested translation of OpenFlow groups.
3108 *
3109 * OpenFlow allows this restriction. We enforce this restriction only
3110 * because, with the current architecture, we would otherwise have to
3111 * take a possibly recursive read lock on the ofgroup rwlock, which is
3112 * unsafe given that POSIX allows taking a read lock to block if there
3113 * is a thread blocked on taking the write lock. Other solutions
3114 * without this restriction are also possible, but seem unwarranted
3115 * given the current limited use of groups. */
3116 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
3117
3118 VLOG_ERR_RL(&rl, "cannot recursively translate OpenFlow group");
3119 return false;
3120 } else {
3121 return true;
3122 }
f4fb341b
SH
3123}
3124
3125static bool
3126xlate_group_action(struct xlate_ctx *ctx, uint32_t group_id)
3127{
5a070238 3128 if (xlate_group_resource_check(ctx)) {
f4fb341b
SH
3129 struct group_dpif *group;
3130 bool got_group;
3131
3132 got_group = group_dpif_lookup(ctx->xbridge->ofproto, group_id, &group);
3133 if (got_group) {
3134 xlate_group_action__(ctx, group);
3135 } else {
3136 return true;
3137 }
3138 }
3139
3140 return false;
3141}
3142
9583bc14
EJ
3143static void
3144xlate_ofpact_resubmit(struct xlate_ctx *ctx,
3145 const struct ofpact_resubmit *resubmit)
3146{
4e022ec0 3147 ofp_port_t in_port;
9583bc14 3148 uint8_t table_id;
adcf00ba
AZ
3149 bool may_packet_in = false;
3150 bool honor_table_miss = false;
3151
3152 if (ctx->rule && rule_dpif_is_internal(ctx->rule)) {
3153 /* Still allow missed packets to be sent to the controller
3154 * if resubmitting from an internal table. */
3155 may_packet_in = true;
3156 honor_table_miss = true;
3157 }
9583bc14
EJ
3158
3159 in_port = resubmit->in_port;
3160 if (in_port == OFPP_IN_PORT) {
4e022ec0 3161 in_port = ctx->xin->flow.in_port.ofp_port;
9583bc14
EJ
3162 }
3163
3164 table_id = resubmit->table_id;
3165 if (table_id == 255) {
3166 table_id = ctx->table_id;
3167 }
3168
adcf00ba
AZ
3169 xlate_table_action(ctx, in_port, table_id, may_packet_in,
3170 honor_table_miss);
9583bc14
EJ
3171}
3172
3173static void
3174flood_packets(struct xlate_ctx *ctx, bool all)
3175{
46c88433 3176 const struct xport *xport;
9583bc14 3177
46c88433
EJ
3178 HMAP_FOR_EACH (xport, ofp_node, &ctx->xbridge->xports) {
3179 if (xport->ofp_port == ctx->xin->flow.in_port.ofp_port) {
9583bc14
EJ
3180 continue;
3181 }
3182
3183 if (all) {
46c88433
EJ
3184 compose_output_action__(ctx, xport->ofp_port, false);
3185 } else if (!(xport->config & OFPUTIL_PC_NO_FLOOD)) {
3186 compose_output_action(ctx, xport->ofp_port);
9583bc14
EJ
3187 }
3188 }
3189
3190 ctx->xout->nf_output_iface = NF_OUT_FLOOD;
3191}
3192
3193static void
3194execute_controller_action(struct xlate_ctx *ctx, int len,
3195 enum ofp_packet_in_reason reason,
3196 uint16_t controller_id)
3197{
0fb7792a 3198 struct ofproto_packet_in *pin;
91088554 3199 struct dpif_packet *packet;
9583bc14 3200
04594cd5 3201 ctx->xout->slow |= SLOW_CONTROLLER;
9583bc14
EJ
3202 if (!ctx->xin->packet) {
3203 return;
3204 }
3205
91088554 3206 packet = dpif_packet_clone_from_ofpbuf(ctx->xin->packet);
9583bc14 3207
7fd91025 3208 ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
cc377352 3209 ctx->xout->odp_actions,
d23df9a8
JR
3210 &ctx->xout->wc,
3211 ctx->xbridge->masked_set_action);
9583bc14 3212
41ccaa24 3213 odp_execute_actions(NULL, &packet, 1, false,
cc377352
EJ
3214 ofpbuf_data(ctx->xout->odp_actions),
3215 ofpbuf_size(ctx->xout->odp_actions), NULL);
9583bc14 3216
ada3a58d 3217 pin = xmalloc(sizeof *pin);
91088554
DDP
3218 pin->up.packet_len = ofpbuf_size(&packet->ofpbuf);
3219 pin->up.packet = ofpbuf_steal_data(&packet->ofpbuf);
0fb7792a 3220 pin->up.reason = reason;
0fb7792a 3221 pin->up.table_id = ctx->table_id;
d4fa4e79
BP
3222 pin->up.cookie = (ctx->rule
3223 ? rule_dpif_get_flow_cookie(ctx->rule)
3224 : OVS_BE64_MAX);
0fb7792a 3225
0fb7792a 3226 flow_get_metadata(&ctx->xin->flow, &pin->up.fmd);
9583bc14 3227
f11c7538 3228 pin->controller_id = controller_id;
d38a3c7b 3229 pin->send_len = len;
32260212
SH
3230 /* If a rule is a table-miss rule then this is
3231 * a table-miss handled by a table-miss rule.
3232 *
3233 * Else, if rule is internal and has a controller action,
 3234 * the latter being implied by the rule being processed here,
3235 * then this is a table-miss handled without a table-miss rule.
3236 *
3237 * Otherwise this is not a table-miss. */
3238 pin->miss_type = OFPROTO_PACKET_IN_NO_MISS;
3239 if (ctx->rule) {
3240 if (rule_dpif_is_table_miss(ctx->rule)) {
3241 pin->miss_type = OFPROTO_PACKET_IN_MISS_FLOW;
3242 } else if (rule_dpif_is_internal(ctx->rule)) {
3243 pin->miss_type = OFPROTO_PACKET_IN_MISS_WITHOUT_FLOW;
3244 }
3245 }
ada3a58d 3246 ofproto_dpif_send_packet_in(ctx->xbridge->ofproto, pin);
91088554 3247 dpif_packet_delete(packet);
9583bc14
EJ
3248}

static void
compose_recirculate_action(struct xlate_ctx *ctx,
                           const struct ofpact *ofpacts_base,
                           const struct ofpact *ofpact_current,
                           size_t ofpacts_base_len)
{
    uint32_t id;
    int error;
    unsigned ofpacts_len;
    struct match match;
    struct rule *rule;
    struct ofpbuf ofpacts;

    ctx->exit = true;

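    /* 'ofpacts_len' measures the actions not yet translated, from
     * 'ofpact_current' through the end of the original list; they will be
     * executed after recirculation via the internal rule added below. */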
    ofpacts_len = ofpacts_base_len -
        ((uint8_t *)ofpact_current - (uint8_t *)ofpacts_base);

    if (ctx->rule) {
        id = rule_dpif_get_recirc_id(ctx->rule);
    } else {
        /* In the case where ctx has no rule then allocate a recirc id.
         * The life-cycle of this recirc id is managed by associating it
         * with the internal rule that is created to handle
         * recirculation below.
         *
         * The known use-case of this is packet_out which
         * translates actions without a rule. */
        id = ofproto_dpif_alloc_recirc_id(ctx->xbridge->ofproto);
    }
    if (!id) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        VLOG_ERR_RL(&rl, "Failed to allocate recirculation id");
        return;
    }

    match_init_catchall(&match);
    match_set_recirc_id(&match, id);
    ofpbuf_use_const(&ofpacts, ofpact_current, ofpacts_len);
    error = ofproto_dpif_add_internal_flow(ctx->xbridge->ofproto, &match,
                                           RECIRC_RULE_PRIORITY,
                                           RECIRC_TIMEOUT, &ofpacts, &rule);
    if (error) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        VLOG_ERR_RL(&rl, "Failed to add post recirculation flow %s",
                    match_to_string(&match, 0));
        return;
    }
    /* If ctx has no rule then associate the recirc id, which
     * was allocated above, with the internal rule.  This allows
     * the recirc id to be released when the internal rule times out. */
    if (!ctx->rule) {
        rule_set_recirc_id(rule, id);
    }

    ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
                                          ctx->xout->odp_actions,
                                          &ctx->xout->wc,
                                          ctx->xbridge->masked_set_action);
    nl_msg_put_u32(ctx->xout->odp_actions, OVS_ACTION_ATTR_RECIRC, id);
}

static void
compose_mpls_push_action(struct xlate_ctx *ctx, struct ofpact_push_mpls *mpls)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;
    int n;

    ovs_assert(eth_type_mpls(mpls->ethertype));

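    /* If the packet is not yet MPLS (n == 0 below), pending flow changes are
     * committed first, since datapath set actions composed later could no
     * longer reach the L3 fields hidden beneath the new MPLS header. */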
    n = flow_count_mpls_labels(flow, wc);
    if (!n) {
        ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
                                              ctx->xout->odp_actions,
                                              &ctx->xout->wc,
                                              ctx->xbridge->masked_set_action);
    } else if (n >= FLOW_MAX_MPLS_LABELS) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet on which an "
                         "MPLS push action can't be performed as it would "
                         "have more MPLS LSEs than the %d supported.",
                         ctx->xbridge->name, FLOW_MAX_MPLS_LABELS);
        }
        ctx->exit = true;
        return;
    }

    flow_push_mpls(flow, n, mpls->ethertype, wc);
}

static void
compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;
    int n = flow_count_mpls_labels(flow, wc);

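    /* A successful pop exposes the payload under the bottom label, which
     * later actions may need to see reparsed; when the bridge supports
     * recirculation, record this in ctx->was_mpls so such actions trigger it
     * (see ofpact_needs_recirculation_after_mpls()). */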
    if (flow_pop_mpls(flow, n, eth_type, wc)) {
        if (ctx->xbridge->enable_recirc) {
            ctx->was_mpls = true;
        }
    } else if (n >= FLOW_MAX_MPLS_LABELS) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet on which an "
                         "MPLS pop action can't be performed as it has "
                         "more MPLS LSEs than the %d supported.",
                         ctx->xbridge->name, FLOW_MAX_MPLS_LABELS);
        }
        ctx->exit = true;
        ofpbuf_clear(ctx->xout->odp_actions);
    }
}

static bool
compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
{
    struct flow *flow = &ctx->xin->flow;

    if (!is_ip_any(flow)) {
        return false;
    }

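    /* When the TTL is 1 or less the packet must not be forwarded; instead a
     * packet-in with reason OFPR_INVALID_TTL is sent to each controller ID in
     * 'ids' and processing of the current table stops. */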
    ctx->xout->wc.masks.nw_ttl = 0xff;
    if (flow->nw_ttl > 1) {
        flow->nw_ttl--;
        return false;
    } else {
        size_t i;

        for (i = 0; i < ids->n_controllers; i++) {
            execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
                                      ids->cnt_ids[i]);
        }

        /* Stop processing for current table. */
        return true;
    }
}

static void
compose_set_mpls_label_action(struct xlate_ctx *ctx, ovs_be32 label)
{
    if (eth_type_mpls(ctx->xin->flow.dl_type)) {
        ctx->xout->wc.masks.mpls_lse[0] |= htonl(MPLS_LABEL_MASK);
        set_mpls_lse_label(&ctx->xin->flow.mpls_lse[0], label);
    }
}

static void
compose_set_mpls_tc_action(struct xlate_ctx *ctx, uint8_t tc)
{
    if (eth_type_mpls(ctx->xin->flow.dl_type)) {
        ctx->xout->wc.masks.mpls_lse[0] |= htonl(MPLS_TC_MASK);
        set_mpls_lse_tc(&ctx->xin->flow.mpls_lse[0], tc);
    }
}

static void
compose_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
{
    if (eth_type_mpls(ctx->xin->flow.dl_type)) {
        ctx->xout->wc.masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
        set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse[0], ttl);
    }
}

static bool
compose_dec_mpls_ttl_action(struct xlate_ctx *ctx)
{
    struct flow *flow = &ctx->xin->flow;
    struct flow_wildcards *wc = &ctx->xout->wc;

    if (eth_type_mpls(flow->dl_type)) {
        uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse[0]);

        wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
        if (ttl > 1) {
            ttl--;
            set_mpls_lse_ttl(&flow->mpls_lse[0], ttl);
            return false;
        } else {
            execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0);
        }
    }

    /* Stop processing for current table. */
    return true;
}

static void
xlate_output_action(struct xlate_ctx *ctx,
                    ofp_port_t port, uint16_t max_len, bool may_packet_in)
{
    ofp_port_t prev_nf_output_iface = ctx->xout->nf_output_iface;

    ctx->xout->nf_output_iface = NF_OUT_DROP;

    switch (port) {
    case OFPP_IN_PORT:
        compose_output_action(ctx, ctx->xin->flow.in_port.ofp_port);
        break;
    case OFPP_TABLE:
        xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
                           0, may_packet_in, true);
        break;
    case OFPP_NORMAL:
        xlate_normal(ctx);
        break;
    case OFPP_FLOOD:
        flood_packets(ctx, false);
        break;
    case OFPP_ALL:
        flood_packets(ctx, true);
        break;
    case OFPP_CONTROLLER:
        execute_controller_action(ctx, max_len,
                                  (ctx->in_group ? OFPR_GROUP
                                   : ctx->in_action_set ? OFPR_ACTION_SET
                                   : OFPR_ACTION),
                                  0);
        break;
    case OFPP_NONE:
        break;
    case OFPP_LOCAL:
    default:
        if (port != ctx->xin->flow.in_port.ofp_port) {
            compose_output_action(ctx, port);
        } else {
            xlate_report(ctx, "skipping output to input port");
        }
        break;
    }

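    /* Fold this action's result into the NetFlow output interface: a flood
     * sticks, a single output fills an empty (NF_OUT_DROP) slot, and two
     * distinct outputs collapse to NF_OUT_MULTI. */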
    if (prev_nf_output_iface == NF_OUT_FLOOD) {
        ctx->xout->nf_output_iface = NF_OUT_FLOOD;
    } else if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
        ctx->xout->nf_output_iface = prev_nf_output_iface;
    } else if (prev_nf_output_iface != NF_OUT_DROP &&
               ctx->xout->nf_output_iface != NF_OUT_FLOOD) {
        ctx->xout->nf_output_iface = NF_OUT_MULTI;
    }
}

static void
xlate_output_reg_action(struct xlate_ctx *ctx,
                        const struct ofpact_output_reg *or)
{
    uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow);
    if (port <= UINT16_MAX) {
        union mf_subvalue value;

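        /* Unwildcard the source subfield: the chosen output port depends on
         * its value, so the datapath flow must match all of its bits. */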
        memset(&value, 0xff, sizeof value);
        mf_write_subfield_flow(&or->src, &value, &ctx->xout->wc.masks);
        xlate_output_action(ctx, u16_to_ofp(port),
                            or->max_len, false);
    }
}

static void
xlate_enqueue_action(struct xlate_ctx *ctx,
                     const struct ofpact_enqueue *enqueue)
{
    ofp_port_t ofp_port = enqueue->port;
    uint32_t queue_id = enqueue->queue;
    uint32_t flow_priority, priority;
    int error;

    /* Translate queue to priority. */
    error = dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &priority);
    if (error) {
        /* Fall back to ordinary output action. */
        xlate_output_action(ctx, enqueue->port, 0, false);
        return;
    }

    /* Check output port. */
    if (ofp_port == OFPP_IN_PORT) {
        ofp_port = ctx->xin->flow.in_port.ofp_port;
    } else if (ofp_port == ctx->xin->flow.in_port.ofp_port) {
        return;
    }

    /* Add datapath actions. */
    flow_priority = ctx->xin->flow.skb_priority;
    ctx->xin->flow.skb_priority = priority;
    compose_output_action(ctx, ofp_port);
    ctx->xin->flow.skb_priority = flow_priority;

    /* Update NetFlow output port. */
    if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
        ctx->xout->nf_output_iface = ofp_port;
    } else if (ctx->xout->nf_output_iface != NF_OUT_FLOOD) {
        ctx->xout->nf_output_iface = NF_OUT_MULTI;
    }
}

static void
xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id)
{
    uint32_t skb_priority;

    if (!dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &skb_priority)) {
        ctx->xin->flow.skb_priority = skb_priority;
    } else {
        /* Couldn't translate queue to a priority.  Nothing to do.  A warning
         * has already been logged. */
    }
}

static bool
slave_enabled_cb(ofp_port_t ofp_port, void *xbridge_)
{
    const struct xbridge *xbridge = xbridge_;
    struct xport *port;

    switch (ofp_port) {
    case OFPP_IN_PORT:
    case OFPP_TABLE:
    case OFPP_NORMAL:
    case OFPP_FLOOD:
    case OFPP_ALL:
    case OFPP_NONE:
        return true;
    case OFPP_CONTROLLER: /* Not supported by the bundle action. */
        return false;
    default:
        port = get_ofp_port(xbridge, ofp_port);
        return port ? port->may_enable : false;
    }
}

static void
xlate_bundle_action(struct xlate_ctx *ctx,
                    const struct ofpact_bundle *bundle)
{
    ofp_port_t port;

    port = bundle_execute(bundle, &ctx->xin->flow, &ctx->xout->wc,
                          slave_enabled_cb,
                          CONST_CAST(struct xbridge *, ctx->xbridge));
    if (bundle->dst.field) {
        nxm_reg_load(&bundle->dst, ofp_to_u16(port), &ctx->xin->flow,
                     &ctx->xout->wc);
    } else {
        xlate_output_action(ctx, port, 0, false);
    }
}

static void
xlate_learn_action__(struct xlate_ctx *ctx, const struct ofpact_learn *learn,
                     struct ofputil_flow_mod *fm, struct ofpbuf *ofpacts)
{
    learn_execute(learn, &ctx->xin->flow, fm, ofpacts);
    if (ctx->xin->may_learn) {
        ofproto_dpif_flow_mod(ctx->xbridge->ofproto, fm);
    }
}

static void
xlate_learn_action(struct xlate_ctx *ctx, const struct ofpact_learn *learn)
{
    ctx->xout->has_learn = true;
    learn_mask(learn, &ctx->xout->wc);

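    /* With an xlate cache in use, the flow_mod and its actions are heap
     * allocated and stashed in an XC_LEARN entry so that the learn can be
     * re-applied when the cache is replayed; otherwise they live on the stack
     * for the duration of this call only. */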
    if (ctx->xin->xcache) {
        struct xc_entry *entry;

        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_LEARN);
        entry->u.learn.ofproto = ctx->xbridge->ofproto;
        entry->u.learn.fm = xmalloc(sizeof *entry->u.learn.fm);
        entry->u.learn.ofpacts = ofpbuf_new(64);
        xlate_learn_action__(ctx, learn, entry->u.learn.fm,
                             entry->u.learn.ofpacts);
    } else if (ctx->xin->may_learn) {
        uint64_t ofpacts_stub[1024 / 8];
        struct ofputil_flow_mod fm;
        struct ofpbuf ofpacts;

        ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
        xlate_learn_action__(ctx, learn, &fm, &ofpacts);
        ofpbuf_uninit(&ofpacts);
    }
}

static void
xlate_fin_timeout__(struct rule_dpif *rule, uint16_t tcp_flags,
                    uint16_t idle_timeout, uint16_t hard_timeout)
{
    if (tcp_flags & (TCP_FIN | TCP_RST)) {
        rule_dpif_reduce_timeouts(rule, idle_timeout, hard_timeout);
    }
}

static void
xlate_fin_timeout(struct xlate_ctx *ctx,
                  const struct ofpact_fin_timeout *oft)
{
    if (ctx->rule) {
        xlate_fin_timeout__(ctx->rule, ctx->xin->tcp_flags,
                            oft->fin_idle_timeout, oft->fin_hard_timeout);
        if (ctx->xin->xcache) {
            struct xc_entry *entry;

            entry = xlate_cache_add_entry(ctx->xin->xcache, XC_FIN_TIMEOUT);
            /* XC_RULE already holds a reference on the rule, none is taken
             * here. */
            entry->u.fin.rule = ctx->rule;
            entry->u.fin.idle = oft->fin_idle_timeout;
            entry->u.fin.hard = oft->fin_hard_timeout;
        }
    }
}

static void
xlate_sample_action(struct xlate_ctx *ctx,
                    const struct ofpact_sample *os)
{
    union user_action_cookie cookie;
    /* Scale the probability from 16-bit to 32-bit while representing
     * the same percentage. */
    uint32_t probability = (os->probability << 16) | os->probability;
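    /* Replicating the 16-bit value into both halves multiplies it by 0x10001,
     * and since UINT16_MAX * 0x10001 == UINT32_MAX the fraction is preserved
     * exactly: e.g. 0x8000 (50% of UINT16_MAX) becomes 0x80008000. */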

    if (!ctx->xbridge->variable_length_userdata) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);

        VLOG_ERR_RL(&rl, "ignoring NXAST_SAMPLE action because datapath "
                    "lacks support (needs Linux 3.10+ or kernel module from "
                    "OVS 1.11+)");
        return;
    }

    ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
                                          ctx->xout->odp_actions,
                                          &ctx->xout->wc,
                                          ctx->xbridge->masked_set_action);

    compose_flow_sample_cookie(os->probability, os->collector_set_id,
                               os->obs_domain_id, os->obs_point_id, &cookie);
    compose_sample_action(ctx->xbridge, ctx->xout->odp_actions, &ctx->xin->flow,
                          probability, &cookie, sizeof cookie.flow_sample,
                          ODPP_NONE);
}

static bool
may_receive(const struct xport *xport, struct xlate_ctx *ctx)
{
    if (xport->config & (is_stp(&ctx->xin->flow)
                         ? OFPUTIL_PC_NO_RECV_STP
                         : OFPUTIL_PC_NO_RECV)) {
        return false;
    }

    /* Only drop packets here if both forwarding and learning are
     * disabled.  If just learning is enabled, we need to have
     * OFPP_NORMAL and the learning action have a look at the packet
     * before we can drop it. */
    if ((!xport_stp_forward_state(xport) && !xport_stp_learn_state(xport)) ||
        (!xport_rstp_forward_state(xport) && !xport_rstp_learn_state(xport))) {
        return false;
    }

    return true;
}

static void
xlate_write_actions(struct xlate_ctx *ctx, const struct ofpact *a)
{
    const struct ofpact_nest *on = ofpact_get_WRITE_ACTIONS(a);
    size_t on_len = ofpact_nest_get_action_len(on);
    const struct ofpact *inner;

    /* Maintain actset_output depending on the contents of the action set:
     *
     *   - OFPP_UNSET, if there is no "output" action.
     *
     *   - The output port, if there is an "output" action and no "group"
     *     action.
     *
     *   - OFPP_UNSET, if there is a "group" action.
     */
    if (!ctx->action_set_has_group) {
        OFPACT_FOR_EACH (inner, on->actions, on_len) {
            if (inner->type == OFPACT_OUTPUT) {
                ctx->xin->flow.actset_output = ofpact_get_OUTPUT(inner)->port;
            } else if (inner->type == OFPACT_GROUP) {
                ctx->xin->flow.actset_output = OFPP_UNSET;
                ctx->action_set_has_group = true;
            }
        }
    }

    ofpbuf_put(&ctx->action_set, on->actions, on_len);
    ofpact_pad(&ctx->action_set);
}

static void
xlate_action_set(struct xlate_ctx *ctx)
{
    uint64_t action_list_stub[1024 / 64];
    struct ofpbuf action_list;

    ctx->in_action_set = true;
    ofpbuf_use_stub(&action_list, action_list_stub, sizeof action_list_stub);
    ofpacts_execute_action_set(&action_list, &ctx->action_set);
    do_xlate_actions(ofpbuf_data(&action_list), ofpbuf_size(&action_list), ctx);
    ctx->in_action_set = false;
    ofpbuf_uninit(&action_list);
}

static bool
ofpact_needs_recirculation_after_mpls(const struct ofpact *a, struct xlate_ctx *ctx)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;

    if (!ctx->was_mpls) {
        return false;
    }

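    /* The cases below group roughly by what the action touches: L2-only and
     * control actions can safely run on the packet as the datapath currently
     * sees it, while actions that read or write L3 and higher fields must
     * wait until recirculation has reparsed the payload that the earlier
     * MPLS pop exposed. */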
    switch (a->type) {
    case OFPACT_OUTPUT:
    case OFPACT_GROUP:
    case OFPACT_CONTROLLER:
    case OFPACT_STRIP_VLAN:
    case OFPACT_SET_VLAN_PCP:
    case OFPACT_SET_VLAN_VID:
    case OFPACT_ENQUEUE:
    case OFPACT_PUSH_VLAN:
    case OFPACT_SET_ETH_SRC:
    case OFPACT_SET_ETH_DST:
    case OFPACT_SET_TUNNEL:
    case OFPACT_SET_QUEUE:
    case OFPACT_POP_QUEUE:
    case OFPACT_CONJUNCTION:
    case OFPACT_NOTE:
    case OFPACT_OUTPUT_REG:
    case OFPACT_EXIT:
    case OFPACT_METER:
    case OFPACT_WRITE_METADATA:
    case OFPACT_WRITE_ACTIONS:
    case OFPACT_CLEAR_ACTIONS:
    case OFPACT_SAMPLE:
        return false;

    case OFPACT_POP_MPLS:
    case OFPACT_DEC_MPLS_TTL:
    case OFPACT_SET_MPLS_TTL:
    case OFPACT_SET_MPLS_TC:
    case OFPACT_SET_MPLS_LABEL:
    case OFPACT_SET_IPV4_SRC:
    case OFPACT_SET_IPV4_DST:
    case OFPACT_SET_IP_DSCP:
    case OFPACT_SET_IP_ECN:
    case OFPACT_SET_IP_TTL:
    case OFPACT_SET_L4_SRC_PORT:
    case OFPACT_SET_L4_DST_PORT:
    case OFPACT_RESUBMIT:
    case OFPACT_STACK_PUSH:
    case OFPACT_STACK_POP:
    case OFPACT_DEC_TTL:
    case OFPACT_MULTIPATH:
    case OFPACT_BUNDLE:
    case OFPACT_LEARN:
    case OFPACT_FIN_TIMEOUT:
    case OFPACT_GOTO_TABLE:
        return true;

    case OFPACT_REG_MOVE:
        return (mf_is_l3_or_higher(ofpact_get_REG_MOVE(a)->dst.field) ||
                mf_is_l3_or_higher(ofpact_get_REG_MOVE(a)->src.field));

    case OFPACT_SET_FIELD:
        return mf_is_l3_or_higher(ofpact_get_SET_FIELD(a)->field);

    case OFPACT_PUSH_MPLS:
        /* Recirculate if it is an IP packet with a zero ttl.  This may
         * indicate that the packet was previously MPLS and an MPLS pop action
         * converted it to IP.  In this case recirculating should reveal the IP
         * TTL which is used as the basis for a new MPLS LSE. */
        return (!flow_count_mpls_labels(flow, wc)
                && flow->nw_ttl == 0
                && is_ip_any(flow));
    }

    OVS_NOT_REACHED();
}
3839
9583bc14
EJ
3840static void
3841do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
3842 struct xlate_ctx *ctx)
3843{
33bf9176
BP
3844 struct flow_wildcards *wc = &ctx->xout->wc;
3845 struct flow *flow = &ctx->xin->flow;
9583bc14
EJ
3846 const struct ofpact *a;
3847
a36de779
PS
3848 if (ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
3849 tnl_arp_snoop(flow, wc, ctx->xbridge->name);
3850 }
f47ea021
JR
3851 /* dl_type already in the mask, not set below. */
3852
9583bc14
EJ
3853 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
3854 struct ofpact_controller *controller;
3855 const struct ofpact_metadata *metadata;
b2dd70be
JR
3856 const struct ofpact_set_field *set_field;
3857 const struct mf_field *mf;
9583bc14
EJ
3858
3859 if (ctx->exit) {
3860 break;
3861 }
3862
5af43325 3863 if (ofpact_needs_recirculation_after_mpls(a, ctx)) {
7bbdd84f
SH
3864 compose_recirculate_action(ctx, ofpacts, a, ofpacts_len);
3865 return;
3866 }
3867
9583bc14
EJ
3868 switch (a->type) {
3869 case OFPACT_OUTPUT:
3870 xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
3871 ofpact_get_OUTPUT(a)->max_len, true);
3872 break;
3873
7395c052 3874 case OFPACT_GROUP:
f4fb341b
SH
3875 if (xlate_group_action(ctx, ofpact_get_GROUP(a)->group_id)) {
3876 return;
3877 }
7395c052
NZ
3878 break;
3879
9583bc14
EJ
3880 case OFPACT_CONTROLLER:
3881 controller = ofpact_get_CONTROLLER(a);
3882 execute_controller_action(ctx, controller->max_len,
3883 controller->reason,
3884 controller->controller_id);
3885 break;
3886
3887 case OFPACT_ENQUEUE:
16194afd
DDP
3888 memset(&wc->masks.skb_priority, 0xff,
3889 sizeof wc->masks.skb_priority);
9583bc14
EJ
3890 xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a));
3891 break;
3892
3893 case OFPACT_SET_VLAN_VID:
f74e7df7 3894 wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
ca287d20
JR
3895 if (flow->vlan_tci & htons(VLAN_CFI) ||
3896 ofpact_get_SET_VLAN_VID(a)->push_vlan_if_needed) {
3897 flow->vlan_tci &= ~htons(VLAN_VID_MASK);
3898 flow->vlan_tci |= (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid)
3899 | htons(VLAN_CFI));
3900 }
9583bc14
EJ
3901 break;
3902
3903 case OFPACT_SET_VLAN_PCP:
f74e7df7 3904 wc->masks.vlan_tci |= htons(VLAN_PCP_MASK | VLAN_CFI);
ca287d20
JR
3905 if (flow->vlan_tci & htons(VLAN_CFI) ||
3906 ofpact_get_SET_VLAN_PCP(a)->push_vlan_if_needed) {
3907 flow->vlan_tci &= ~htons(VLAN_PCP_MASK);
3908 flow->vlan_tci |= htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp
3909 << VLAN_PCP_SHIFT) | VLAN_CFI);
3910 }
9583bc14
EJ
3911 break;
3912
3913 case OFPACT_STRIP_VLAN:
f74e7df7 3914 memset(&wc->masks.vlan_tci, 0xff, sizeof wc->masks.vlan_tci);
33bf9176 3915 flow->vlan_tci = htons(0);
9583bc14
EJ
3916 break;
3917
3918 case OFPACT_PUSH_VLAN:
3919 /* XXX 802.1AD(QinQ) */
f74e7df7 3920 memset(&wc->masks.vlan_tci, 0xff, sizeof wc->masks.vlan_tci);
33bf9176 3921 flow->vlan_tci = htons(VLAN_CFI);
9583bc14
EJ
3922 break;
3923
3924 case OFPACT_SET_ETH_SRC:
f74e7df7 3925 memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
33bf9176 3926 memcpy(flow->dl_src, ofpact_get_SET_ETH_SRC(a)->mac, ETH_ADDR_LEN);
9583bc14
EJ
3927 break;
3928
3929 case OFPACT_SET_ETH_DST:
f74e7df7 3930 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
33bf9176 3931 memcpy(flow->dl_dst, ofpact_get_SET_ETH_DST(a)->mac, ETH_ADDR_LEN);
9583bc14
EJ
3932 break;
3933
3934 case OFPACT_SET_IPV4_SRC:
33bf9176 3935 if (flow->dl_type == htons(ETH_TYPE_IP)) {
f47ea021 3936 memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
33bf9176 3937 flow->nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
9583bc14
EJ
3938 }
3939 break;
3940
3941 case OFPACT_SET_IPV4_DST:
33bf9176 3942 if (flow->dl_type == htons(ETH_TYPE_IP)) {
f47ea021 3943 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
33bf9176 3944 flow->nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
9583bc14
EJ
3945 }
3946 break;
3947
04f01c24
BP
3948 case OFPACT_SET_IP_DSCP:
3949 if (is_ip_any(flow)) {
f47ea021 3950 wc->masks.nw_tos |= IP_DSCP_MASK;
33bf9176 3951 flow->nw_tos &= ~IP_DSCP_MASK;
04f01c24 3952 flow->nw_tos |= ofpact_get_SET_IP_DSCP(a)->dscp;
9583bc14
EJ
3953 }
3954 break;
3955
ff14eb7a
JR
3956 case OFPACT_SET_IP_ECN:
3957 if (is_ip_any(flow)) {
3958 wc->masks.nw_tos |= IP_ECN_MASK;
3959 flow->nw_tos &= ~IP_ECN_MASK;
3960 flow->nw_tos |= ofpact_get_SET_IP_ECN(a)->ecn;
3961 }
3962 break;
3963
0c20dbe4
JR
3964 case OFPACT_SET_IP_TTL:
3965 if (is_ip_any(flow)) {
3966 wc->masks.nw_ttl = 0xff;
3967 flow->nw_ttl = ofpact_get_SET_IP_TTL(a)->ttl;
3968 }
3969 break;
3970
9583bc14 3971 case OFPACT_SET_L4_SRC_PORT:
b8778a0d 3972 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
f47ea021
JR
3973 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
3974 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
33bf9176 3975 flow->tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
9583bc14
EJ
3976 }
3977 break;
3978
3979 case OFPACT_SET_L4_DST_PORT:
b8778a0d 3980 if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
f47ea021
JR
3981 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
3982 memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
33bf9176 3983 flow->tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
9583bc14
EJ
3984 }
3985 break;
3986
3987 case OFPACT_RESUBMIT:
3988 xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a));
3989 break;
3990
3991 case OFPACT_SET_TUNNEL:
33bf9176 3992 flow->tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
9583bc14
EJ
3993 break;
3994
3995 case OFPACT_SET_QUEUE:
16194afd
DDP
3996 memset(&wc->masks.skb_priority, 0xff,
3997 sizeof wc->masks.skb_priority);
9583bc14
EJ
3998 xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);
3999 break;
4000
4001 case OFPACT_POP_QUEUE:
16194afd
DDP
4002 memset(&wc->masks.skb_priority, 0xff,
4003 sizeof wc->masks.skb_priority);
33bf9176 4004 flow->skb_priority = ctx->orig_skb_priority;
9583bc14
EJ
4005 break;
4006
4007 case OFPACT_REG_MOVE:
33bf9176 4008 nxm_execute_reg_move(ofpact_get_REG_MOVE(a), flow, wc);
9583bc14
EJ
4009 break;
4010
b2dd70be
JR
4011 case OFPACT_SET_FIELD:
4012 set_field = ofpact_get_SET_FIELD(a);
4013 mf = set_field->field;
b2dd70be
JR
4014
4015 /* Set field action only ever overwrites packet's outermost
4016 * applicable header fields. Do nothing if no header exists. */
4479846e
JR
4017 if (mf->id == MFF_VLAN_VID) {
4018 wc->masks.vlan_tci |= htons(VLAN_CFI);
4019 if (!(flow->vlan_tci & htons(VLAN_CFI))) {
4020 break;
4021 }
4022 } else if ((mf->id == MFF_MPLS_LABEL || mf->id == MFF_MPLS_TC)
4023 /* 'dl_type' is already unwildcarded. */
4024 && !eth_type_mpls(flow->dl_type)) {
4025 break;
b2dd70be 4026 }
            /* A flow may wildcard nw_frag.  Do nothing if setting a transport
             * header field on a packet that does not have them. */
            mf_mask_field_and_prereqs(mf, &wc->masks);
            if (mf_are_prereqs_ok(mf, flow)) {
                mf_set_flow_value_masked(mf, &set_field->value,
                                         &set_field->mask, flow);
            }
            break;

        case OFPACT_STACK_PUSH:
            nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), flow, wc,
                                   &ctx->stack);
            break;

        case OFPACT_STACK_POP:
            nxm_execute_stack_pop(ofpact_get_STACK_POP(a), flow, wc,
                                  &ctx->stack);
            break;

        case OFPACT_PUSH_MPLS:
            compose_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a));
            break;

        case OFPACT_POP_MPLS:
            compose_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
            break;

        case OFPACT_SET_MPLS_LABEL:
            compose_set_mpls_label_action(
                ctx, ofpact_get_SET_MPLS_LABEL(a)->label);
            break;

        case OFPACT_SET_MPLS_TC:
            compose_set_mpls_tc_action(ctx, ofpact_get_SET_MPLS_TC(a)->tc);
            break;

        case OFPACT_SET_MPLS_TTL:
            compose_set_mpls_ttl_action(ctx, ofpact_get_SET_MPLS_TTL(a)->ttl);
            break;

        case OFPACT_DEC_MPLS_TTL:
            if (compose_dec_mpls_ttl_action(ctx)) {
                return;
            }
            break;

        case OFPACT_DEC_TTL:
            wc->masks.nw_ttl = 0xff;
            if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
                return;
            }
            break;

        case OFPACT_NOTE:
            /* Nothing to do. */
            break;

        case OFPACT_MULTIPATH:
            multipath_execute(ofpact_get_MULTIPATH(a), flow, wc);
            break;

        case OFPACT_BUNDLE:
            xlate_bundle_action(ctx, ofpact_get_BUNDLE(a));
            break;

        case OFPACT_OUTPUT_REG:
            xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a));
            break;

        case OFPACT_LEARN:
            xlate_learn_action(ctx, ofpact_get_LEARN(a));
            break;

        case OFPACT_CONJUNCTION: {
            /* A flow with a "conjunction" action represents part of a special
             * kind of "set membership match".  Such a flow should not actually
             * get executed, but it could via, say, a "packet-out", even though
             * that wouldn't be useful.  Log it to help debugging. */
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
            VLOG_INFO_RL(&rl, "executing no-op conjunction action");
            break;
        }

        case OFPACT_EXIT:
            ctx->exit = true;
            break;

        case OFPACT_FIN_TIMEOUT:
            memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
            ctx->xout->has_fin_timeout = true;
            xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
            break;

        case OFPACT_CLEAR_ACTIONS:
            ofpbuf_clear(&ctx->action_set);
            ctx->xin->flow.actset_output = OFPP_UNSET;
            ctx->action_set_has_group = false;
            break;

        case OFPACT_WRITE_ACTIONS:
            xlate_write_actions(ctx, a);
            break;

        case OFPACT_WRITE_METADATA:
            metadata = ofpact_get_WRITE_METADATA(a);
            flow->metadata &= ~metadata->mask;
            flow->metadata |= metadata->metadata & metadata->mask;
            break;

        case OFPACT_METER:
            /* Not implemented yet. */
            break;

        case OFPACT_GOTO_TABLE: {
            struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);

            /* Allow ctx->table_id == TBL_INTERNAL, which will be greater
             * than ogt->table_id.  This is to allow goto_table actions that
             * triggered recirculation: ctx->table_id will be TBL_INTERNAL
             * after recirculation. */
            ovs_assert(ctx->table_id == TBL_INTERNAL
                       || ctx->table_id < ogt->table_id);
            xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
                               ogt->table_id, true, true);
            break;
        }

        case OFPACT_SAMPLE:
            xlate_sample_action(ctx, ofpact_get_SAMPLE(a));
            break;
        }
    }
}

void
xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
              const struct flow *flow, ofp_port_t in_port,
              struct rule_dpif *rule, uint16_t tcp_flags,
              const struct ofpbuf *packet)
{
    xin->ofproto = ofproto;
    xin->flow = *flow;
    xin->flow.in_port.ofp_port = in_port;
    xin->flow.actset_output = OFPP_UNSET;
    xin->packet = packet;
    xin->may_learn = packet != NULL;
    xin->rule = rule;
    xin->xcache = NULL;
    xin->ofpacts = NULL;
    xin->ofpacts_len = 0;
    xin->tcp_flags = tcp_flags;
    xin->resubmit_hook = NULL;
    xin->report_hook = NULL;
    xin->resubmit_stats = NULL;
    xin->skip_wildcards = false;
    xin->odp_actions = NULL;
}

void
xlate_out_uninit(struct xlate_out *xout)
{
    if (xout && xout->odp_actions == &xout->odp_actions_buf) {
        ofpbuf_uninit(xout->odp_actions);
    }
}

/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
 * into datapath actions, using 'ctx', and discards the datapath actions. */
void
xlate_actions_for_side_effects(struct xlate_in *xin)
{
    struct xlate_out xout;

    xlate_actions(xin, &xout);
    xlate_out_uninit(&xout);
}

void
xlate_out_copy(struct xlate_out *dst, const struct xlate_out *src)
{
    dst->wc = src->wc;
    dst->slow = src->slow;
    dst->has_learn = src->has_learn;
    dst->has_normal = src->has_normal;
    dst->has_fin_timeout = src->has_fin_timeout;
    dst->nf_output_iface = src->nf_output_iface;
    dst->mirrors = src->mirrors;

    dst->odp_actions = &dst->odp_actions_buf;
    ofpbuf_use_stub(dst->odp_actions, dst->odp_actions_stub,
                    sizeof dst->odp_actions_stub);
    ofpbuf_put(dst->odp_actions, ofpbuf_data(src->odp_actions),
               ofpbuf_size(src->odp_actions));
}

static struct skb_priority_to_dscp *
get_skb_priority(const struct xport *xport, uint32_t skb_priority)
{
    struct skb_priority_to_dscp *pdscp;
    uint32_t hash;

    hash = hash_int(skb_priority, 0);
    HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &xport->skb_priorities) {
        if (pdscp->skb_priority == skb_priority) {
            return pdscp;
        }
    }
    return NULL;
}

static bool
dscp_from_skb_priority(const struct xport *xport, uint32_t skb_priority,
                       uint8_t *dscp)
{
    struct skb_priority_to_dscp *pdscp = get_skb_priority(xport, skb_priority);
    *dscp = pdscp ? pdscp->dscp : 0;
    return pdscp != NULL;
}

static size_t
count_skb_priorities(const struct xport *xport)
{
    return hmap_count(&xport->skb_priorities);
}

static void
clear_skb_priorities(struct xport *xport)
{
    struct skb_priority_to_dscp *pdscp, *next;

    HMAP_FOR_EACH_SAFE (pdscp, next, hmap_node, &xport->skb_priorities) {
        hmap_remove(&xport->skb_priorities, &pdscp->hmap_node);
        free(pdscp);
    }
}

static bool
actions_output_to_local_port(const struct xlate_ctx *ctx)
{
    odp_port_t local_odp_port = ofp_port_to_odp_port(ctx->xbridge, OFPP_LOCAL);
    const struct nlattr *a;
    unsigned int left;

    NL_ATTR_FOR_EACH_UNSAFE (a, left, ofpbuf_data(ctx->xout->odp_actions),
                             ofpbuf_size(ctx->xout->odp_actions)) {
        if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT
            && nl_attr_get_odp_port(a) == local_odp_port) {
            return true;
        }
    }
    return false;
}

#if defined(__linux__)
/* Returns the maximum number of packets that the Linux kernel is willing to
 * queue up internally to certain kinds of software-implemented ports, or the
 * default (and rarely modified) value if it cannot be determined. */
static int
netdev_max_backlog(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
    static int max_backlog = 1000; /* The normal default value. */

    if (ovsthread_once_start(&once)) {
        static const char filename[] = "/proc/sys/net/core/netdev_max_backlog";
        FILE *stream;
        int n;

        stream = fopen(filename, "r");
        if (!stream) {
            VLOG_WARN("%s: open failed (%s)", filename, ovs_strerror(errno));
        } else {
            if (fscanf(stream, "%d", &n) != 1) {
                VLOG_WARN("%s: read error", filename);
            } else if (n <= 100) {
                VLOG_WARN("%s: unexpectedly small value %d", filename, n);
            } else {
                max_backlog = n;
            }
            fclose(stream);
        }
        ovsthread_once_done(&once);

        VLOG_DBG("%s: using %d max_backlog", filename, max_backlog);
    }

    return max_backlog;
}

/* Counts and returns the number of OVS_ACTION_ATTR_OUTPUT actions in
 * 'odp_actions'. */
static int
count_output_actions(const struct ofpbuf *odp_actions)
{
    const struct nlattr *a;
    size_t left;
    int n = 0;

    NL_ATTR_FOR_EACH_UNSAFE (a, left, ofpbuf_data(odp_actions),
                             ofpbuf_size(odp_actions)) {
        if (a->nla_type == OVS_ACTION_ATTR_OUTPUT) {
            n++;
        }
    }
    return n;
}
#endif /* defined(__linux__) */

/* Returns true if 'odp_actions' contains more output actions than the datapath
 * can reliably handle in one go.  On Linux, this is the value of the
 * net.core.netdev_max_backlog sysctl, which limits the maximum number of
 * packets that the kernel is willing to queue up for processing while the
 * datapath is processing a set of actions. */
static bool
too_many_output_actions(const struct ofpbuf *odp_actions OVS_UNUSED)
{
#ifdef __linux__
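    /* An OVS_ACTION_ATTR_OUTPUT action occupies at least NL_A_U32_SIZE bytes,
     * so the buffer-size test is a cheap upper bound on the number of output
     * actions; the exact count_output_actions() walk runs only when that
     * bound is exceeded. */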
    return (ofpbuf_size(odp_actions) / NL_A_U32_SIZE > netdev_max_backlog()
            && count_output_actions(odp_actions) > netdev_max_backlog());
#else
    /* OSes other than Linux might have similar limits, but we don't know how
     * to determine them. */
    return false;
#endif
}

/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
 * into datapath actions in 'odp_actions', using 'ctx'.
 *
 * The caller must take responsibility for eventually freeing 'xout', with
 * xlate_out_uninit(). */
void
xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
{
    struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    struct flow_wildcards *wc = &xout->wc;
    struct flow *flow = &xin->flow;
    struct rule_dpif *rule = NULL;

    enum slow_path_reason special;
    const struct ofpact *ofpacts;
    struct xport *in_port;
    struct flow orig_flow;
    struct xlate_ctx ctx;
    size_t ofpacts_len;
    bool tnl_may_send;
    bool is_icmp;

    COVERAGE_INC(xlate_actions);

    /* Flow initialization rules:
     * - 'base_flow' must match the kernel's view of the packet at the
     *   time that action processing starts.  'flow' represents any
     *   transformations we wish to make through actions.
     * - By default 'base_flow' and 'flow' are the same since the input
     *   packet matches the output before any actions are applied.
     * - When using VLAN splinters, 'base_flow''s VLAN is set to the value
     *   of the received packet as seen by the kernel.  If we later output
     *   to another device without any modifications this will cause us to
     *   insert a new tag since the original one was stripped off by the
     *   VLAN device.
     * - Tunnel metadata as received is retained in 'flow'.  This allows
     *   tunnel metadata matching also in later tables.
     *   Since a kernel action for setting the tunnel metadata will only be
     *   generated with actual tunnel output, changing the tunnel metadata
     *   values in 'flow' (such as tun_id) will only have effect with a later
     *   tunnel output action.
     * - Tunnel 'base_flow' is completely cleared since that is what the
     *   kernel does.  If we wish to maintain the original values an action
     *   needs to be generated. */

    ctx.xin = xin;
    ctx.xout = xout;
    ctx.xout->slow = 0;
    ctx.xout->has_learn = false;
    ctx.xout->has_normal = false;
    ctx.xout->has_fin_timeout = false;
    ctx.xout->nf_output_iface = NF_OUT_DROP;
    ctx.xout->mirrors = 0;

    xout->odp_actions = xin->odp_actions;
    if (!xout->odp_actions) {
        xout->odp_actions = &xout->odp_actions_buf;
        ofpbuf_use_stub(xout->odp_actions, xout->odp_actions_stub,
                        sizeof xout->odp_actions_stub);
    }
    ofpbuf_reserve(xout->odp_actions, NL_A_U32_SIZE);

    ctx.xbridge = xbridge_lookup(xcfg, xin->ofproto);
    if (!ctx.xbridge) {
        return;
    }

    ctx.rule = xin->rule;

    ctx.base_flow = *flow;
    memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
    ctx.orig_tunnel_ip_dst = flow->tunnel.ip_dst;

    flow_wildcards_init_catchall(wc);
    memset(&wc->masks.in_port, 0xff, sizeof wc->masks.in_port);
    memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
    if (is_ip_any(flow)) {
        wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
    }
    is_icmp = is_icmpv4(flow) || is_icmpv6(flow);

    tnl_may_send = tnl_xlate_init(&ctx.base_flow, flow, wc);
    if (ctx.xbridge->netflow) {
        netflow_mask_wc(flow, wc);
    }

    ctx.recurse = 0;
    ctx.resubmits = 0;
    ctx.in_group = false;
    ctx.in_action_set = false;
    ctx.orig_skb_priority = flow->skb_priority;
    ctx.table_id = 0;
    ctx.exit = false;
    ctx.use_recirc = false;
    ctx.was_mpls = false;

    if (!xin->ofpacts && !ctx.rule) {
        rule = rule_dpif_lookup(ctx.xbridge->ofproto, flow,
                                xin->skip_wildcards ? NULL : wc,
                                ctx.xin->xcache != NULL,
                                ctx.xin->resubmit_stats, &ctx.table_id);
        if (ctx.xin->resubmit_stats) {
            rule_dpif_credit_stats(rule, ctx.xin->resubmit_stats);
        }
        if (ctx.xin->xcache) {
            struct xc_entry *entry;

            entry = xlate_cache_add_entry(ctx.xin->xcache, XC_RULE);
            entry->u.rule = rule;
        }
        ctx.rule = rule;

        if (OVS_UNLIKELY(ctx.xin->resubmit_hook)) {
            ctx.xin->resubmit_hook(ctx.xin, rule, 0);
        }
    }
    xout->fail_open = ctx.rule && rule_dpif_is_fail_open(ctx.rule);

    if (xin->ofpacts) {
        ofpacts = xin->ofpacts;
        ofpacts_len = xin->ofpacts_len;
    } else if (ctx.rule) {
        const struct rule_actions *actions = rule_dpif_get_actions(ctx.rule);

        ofpacts = actions->ofpacts;
        ofpacts_len = actions->ofpacts_len;
    } else {
        OVS_NOT_REACHED();
    }

    ofpbuf_use_stub(&ctx.stack, ctx.init_stack, sizeof ctx.init_stack);

    ctx.action_set_has_group = false;
    ofpbuf_use_stub(&ctx.action_set,
                    ctx.action_set_stub, sizeof ctx.action_set_stub);

    if (mbridge_has_mirrors(ctx.xbridge->mbridge)) {
        /* Do this conditionally because the copy is expensive enough that it
         * shows up in profiles. */
        orig_flow = *flow;
    }

    in_port = get_ofp_port(ctx.xbridge, flow->in_port.ofp_port);
    if (in_port && in_port->is_tunnel) {
        if (ctx.xin->resubmit_stats) {
            netdev_vport_inc_rx(in_port->netdev, ctx.xin->resubmit_stats);
            if (in_port->bfd) {
                bfd_account_rx(in_port->bfd, ctx.xin->resubmit_stats);
            }
        }
        if (ctx.xin->xcache) {
            struct xc_entry *entry;

            entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETDEV);
            entry->u.dev.rx = netdev_ref(in_port->netdev);
            entry->u.dev.bfd = bfd_ref(in_port->bfd);
        }
    }

    special = process_special(&ctx, flow, in_port, ctx.xin->packet);
    if (special) {
        ctx.xout->slow |= special;
    } else {
        size_t sample_actions_len;

        if (flow->in_port.ofp_port
            != vsp_realdev_to_vlandev(ctx.xbridge->ofproto,
                                      flow->in_port.ofp_port,
                                      flow->vlan_tci)) {
            ctx.base_flow.vlan_tci = 0;
        }

        add_sflow_action(&ctx);
        add_ipfix_action(&ctx);
        sample_actions_len = ofpbuf_size(ctx.xout->odp_actions);
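        /* Remember where the sample actions end; if forwarding turns out to
         * be disabled below, the action buffer is truncated back to this
         * point, keeping the sFlow/IPFIX sample actions while discarding the
         * forwarding actions. */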

        if (tnl_may_send && (!in_port || may_receive(in_port, &ctx))) {
            do_xlate_actions(ofpacts, ofpacts_len, &ctx);

            /* We've let OFPP_NORMAL and the learning action look at the
             * packet, so drop it now if forwarding is disabled. */
            if (in_port && (!xport_stp_forward_state(in_port) ||
                            !xport_rstp_forward_state(in_port))) {
                ofpbuf_set_size(ctx.xout->odp_actions, sample_actions_len);
            }
        }

        if (ofpbuf_size(&ctx.action_set)) {
            xlate_action_set(&ctx);
        }

        if (ctx.xbridge->has_in_band
            && in_band_must_output_to_local_port(flow)
            && !actions_output_to_local_port(&ctx)) {
            compose_output_action(&ctx, OFPP_LOCAL);
        }

        fix_sflow_action(&ctx);

        if (mbridge_has_mirrors(ctx.xbridge->mbridge)) {
            add_mirror_actions(&ctx, &orig_flow);
        }
    }

    if (nl_attr_oversized(ofpbuf_size(ctx.xout->odp_actions))) {
        /* These datapath actions are too big for a Netlink attribute, so we
         * can't hand them to the kernel directly.  dpif_execute() can execute
         * them one by one with help, so just mark the result as SLOW_ACTION to
         * prevent the flow from being installed. */
        COVERAGE_INC(xlate_actions_oversize);
        ctx.xout->slow |= SLOW_ACTION;
    } else if (too_many_output_actions(ctx.xout->odp_actions)) {
        COVERAGE_INC(xlate_actions_too_many_output);
        ctx.xout->slow |= SLOW_ACTION;
    }

    if (mbridge_has_mirrors(ctx.xbridge->mbridge)) {
        if (ctx.xin->resubmit_stats) {
            mirror_update_stats(ctx.xbridge->mbridge, xout->mirrors,
                                ctx.xin->resubmit_stats->n_packets,
                                ctx.xin->resubmit_stats->n_bytes);
        }
        if (ctx.xin->xcache) {
            struct xc_entry *entry;

            entry = xlate_cache_add_entry(ctx.xin->xcache, XC_MIRROR);
            entry->u.mirror.mbridge = mbridge_ref(ctx.xbridge->mbridge);
            entry->u.mirror.mirrors = xout->mirrors;
        }
    }

    if (ctx.xbridge->netflow) {
        /* Only update netflow if we don't have controller flow.  We don't
         * report NetFlow expiration messages for such facets because they
         * are just part of the control logic for the network, not real
         * traffic. */
        if (ofpacts_len == 0
            || ofpacts->type != OFPACT_CONTROLLER
            || ofpact_next(ofpacts) < ofpact_end(ofpacts, ofpacts_len)) {
            if (ctx.xin->resubmit_stats) {
                netflow_flow_update(ctx.xbridge->netflow, flow,
                                    xout->nf_output_iface,
                                    ctx.xin->resubmit_stats);
            }
            if (ctx.xin->xcache) {
                struct xc_entry *entry;

                entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETFLOW);
                entry->u.nf.netflow = netflow_ref(ctx.xbridge->netflow);
                entry->u.nf.flow = xmemdup(flow, sizeof *flow);
                entry->u.nf.iface = xout->nf_output_iface;
            }
        }
    }

    ofpbuf_uninit(&ctx.stack);
    ofpbuf_uninit(&ctx.action_set);

    /* Clear the metadata and register wildcard masks, because we won't
     * use non-header fields as part of the cache. */
    flow_wildcards_clear_non_packet_fields(wc);

    /* ICMPv4 and ICMPv6 have 8-bit "type" and "code" fields.  struct flow uses
     * the low 8 bits of the 16-bit tp_src and tp_dst members to represent
     * these fields.  The datapath interface, on the other hand, represents
     * them with just 8 bits each.  This means that if the high 8 bits of the
     * masks for these fields somehow become set, then they will get chopped
     * off by a round trip through the datapath, and revalidation will spot
     * that as an inconsistency and delete the flow.  Avoid the problem here by
     * making sure that only the low 8 bits of either field can be unwildcarded
     * for ICMP.
     */
    if (is_icmp) {
        wc->masks.tp_src &= htons(UINT8_MAX);
        wc->masks.tp_dst &= htons(UINT8_MAX);
    }
}

/* Sends 'packet' out 'ofport'.
 * May modify 'packet'.
 * Returns 0 if successful, otherwise a positive errno value. */
int
xlate_send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet)
{
    struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    struct xport *xport;
    struct ofpact_output output;
    struct flow flow;

    ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
    /* Use OFPP_NONE as the in_port to avoid special packet processing. */
    flow_extract(packet, NULL, &flow);
    flow.in_port.ofp_port = OFPP_NONE;

    xport = xport_lookup(xcfg, ofport);
    if (!xport) {
        return EINVAL;
    }
    output.port = xport->ofp_port;
    output.max_len = 0;

    return ofproto_dpif_execute_actions(xport->xbridge->ofproto, &flow, NULL,
                                        &output.ofpact, sizeof output,
                                        packet);
}

struct xlate_cache *
xlate_cache_new(void)
{
    struct xlate_cache *xcache = xmalloc(sizeof *xcache);

    ofpbuf_init(&xcache->entries, 512);
    return xcache;
}

static struct xc_entry *
xlate_cache_add_entry(struct xlate_cache *xcache, enum xc_type type)
{
    struct xc_entry *entry;

    entry = ofpbuf_put_zeros(&xcache->entries, sizeof *entry);
    entry->type = type;

    return entry;
}

static void
xlate_cache_netdev(struct xc_entry *entry, const struct dpif_flow_stats *stats)
{
    if (entry->u.dev.tx) {
        netdev_vport_inc_tx(entry->u.dev.tx, stats);
    }
    if (entry->u.dev.rx) {
        netdev_vport_inc_rx(entry->u.dev.rx, stats);
    }
    if (entry->u.dev.bfd) {
        bfd_account_rx(entry->u.dev.bfd, stats);
    }
}

static void
xlate_cache_normal(struct ofproto_dpif *ofproto, struct flow *flow, int vlan)
{
    struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    struct xbridge *xbridge;
    struct xbundle *xbundle;
    struct flow_wildcards wc;

    xbridge = xbridge_lookup(xcfg, ofproto);
    if (!xbridge) {
        return;
    }

    xbundle = lookup_input_bundle(xbridge, flow->in_port.ofp_port, false,
                                  NULL);
    if (!xbundle) {
        return;
    }

    update_learning_table(xbridge, flow, &wc, vlan, xbundle);
}

/* Push stats and perform side effects of flow translation. */
void
xlate_push_stats(struct xlate_cache *xcache,
                 const struct dpif_flow_stats *stats)
{
    struct xc_entry *entry;
    struct ofpbuf entries = xcache->entries;
    uint8_t dmac[ETH_ADDR_LEN];

    if (!stats->n_packets) {
        return;
    }

    XC_ENTRY_FOR_EACH (entry, entries, xcache) {
        switch (entry->type) {
        case XC_RULE:
            rule_dpif_credit_stats(entry->u.rule, stats);
            break;
        case XC_BOND:
            bond_account(entry->u.bond.bond, entry->u.bond.flow,
                         entry->u.bond.vid, stats->n_bytes);
            break;
        case XC_NETDEV:
            xlate_cache_netdev(entry, stats);
            break;
        case XC_NETFLOW:
            netflow_flow_update(entry->u.nf.netflow, entry->u.nf.flow,
                                entry->u.nf.iface, stats);
            break;
        case XC_MIRROR:
            mirror_update_stats(entry->u.mirror.mbridge,
                                entry->u.mirror.mirrors,
                                stats->n_packets, stats->n_bytes);
            break;
        case XC_LEARN:
            ofproto_dpif_flow_mod(entry->u.learn.ofproto, entry->u.learn.fm);
            break;
        case XC_NORMAL:
            xlate_cache_normal(entry->u.normal.ofproto, entry->u.normal.flow,
                               entry->u.normal.vlan);
            break;
        case XC_FIN_TIMEOUT:
            xlate_fin_timeout__(entry->u.fin.rule, stats->tcp_flags,
                                entry->u.fin.idle, entry->u.fin.hard);
            break;
        case XC_GROUP:
            group_dpif_credit_stats(entry->u.group.group, entry->u.group.bucket,
                                    stats);
            break;
        case XC_TNL_ARP:
            /* Lookup arp to avoid arp timeout. */
            tnl_arp_lookup(entry->u.tnl_arp_cache.br_name,
                           entry->u.tnl_arp_cache.d_ip, dmac);
            break;
        default:
            OVS_NOT_REACHED();
        }
    }
}
4775xlate_dev_unref(struct xc_entry *entry)
4776{
4777 if (entry->u.dev.tx) {
4778 netdev_close(entry->u.dev.tx);
4779 }
4780 if (entry->u.dev.rx) {
4781 netdev_close(entry->u.dev.rx);
4782 }
4783 if (entry->u.dev.bfd) {
4784 bfd_unref(entry->u.dev.bfd);
4785 }
4786}
4787
4788static void
4789xlate_cache_clear_netflow(struct netflow *netflow, struct flow *flow)
4790{
b256dc52
JS
4791 netflow_flow_clear(netflow, flow);
4792 netflow_unref(netflow);
4793 free(flow);
4794}

void
xlate_cache_clear(struct xlate_cache *xcache)
{
    struct xc_entry *entry;
    struct ofpbuf entries;

    if (!xcache) {
        return;
    }

    XC_ENTRY_FOR_EACH (entry, entries, xcache) {
        switch (entry->type) {
        case XC_RULE:
            rule_dpif_unref(entry->u.rule);
            break;
        case XC_BOND:
            free(entry->u.bond.flow);
            bond_unref(entry->u.bond.bond);
            break;
        case XC_NETDEV:
            xlate_dev_unref(entry);
            break;
        case XC_NETFLOW:
            xlate_cache_clear_netflow(entry->u.nf.netflow, entry->u.nf.flow);
            break;
        case XC_MIRROR:
            mbridge_unref(entry->u.mirror.mbridge);
            break;
        case XC_LEARN:
            free(entry->u.learn.fm);
            ofpbuf_delete(entry->u.learn.ofpacts);
            break;
        case XC_NORMAL:
            free(entry->u.normal.flow);
            break;
        case XC_FIN_TIMEOUT:
            /* 'u.fin.rule' is always already held as a XC_RULE, which
             * has already released its reference above. */
            break;
        case XC_GROUP:
            group_dpif_unref(entry->u.group.group);
            break;
        case XC_TNL_ARP:
            break;
        default:
            OVS_NOT_REACHED();
        }
    }

    ofpbuf_clear(&xcache->entries);
}

void
xlate_cache_delete(struct xlate_cache *xcache)
{
    xlate_cache_clear(xcache);
    ofpbuf_uninit(&xcache->entries);
    free(xcache);
}