/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */

#include <config.h>

#include "ofproto/ofproto-dpif-xlate.h"

#include <errno.h>

#include "bfd.h"
#include "bitmap.h"
#include "bond.h"
#include "bundle.h"
#include "byte-order.h"
#include "cfm.h"
#include "connmgr.h"
#include "coverage.h"
#include "dpif.h"
#include "dynamic-string.h"
#include "in-band.h"
#include "lacp.h"
#include "learn.h"
#include "list.h"
#include "mac-learning.h"
#include "meta-flow.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "netlink.h"
#include "nx-match.h"
#include "odp-execute.h"
#include "ofp-actions.h"
#include "ofproto/ofproto-dpif-ipfix.h"
#include "ofproto/ofproto-dpif-mirror.h"
#include "ofproto/ofproto-dpif-monitor.h"
#include "ofproto/ofproto-dpif-sflow.h"
#include "ofproto/ofproto-dpif.h"
#include "ofproto/ofproto-provider.h"
#include "tunnel.h"
#include "vlog.h"

COVERAGE_DEFINE(xlate_actions);
COVERAGE_DEFINE(xlate_actions_oversize);
COVERAGE_DEFINE(xlate_actions_mpls_overflow);

VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);

/* Maximum depth of flow table recursion (due to resubmit actions) in a
 * flow translation. */
#define MAX_RESUBMIT_RECURSION 64
#define MAX_INTERNAL_RESUBMITS 1   /* Max resubmits allowed using rules in
                                      internal table. */

/* Maximum number of resubmit actions in a flow translation, whether they are
 * recursive or not. */
#define MAX_RESUBMITS (MAX_RESUBMIT_RECURSION * MAX_RESUBMIT_RECURSION)

struct xbridge {
    struct hmap_node hmap_node;   /* Node in global 'xbridges' map. */
    struct ofproto_dpif *ofproto; /* Key in global 'xbridges' map. */

    struct list xbundles;         /* Owned xbundles. */
    struct hmap xports;           /* Indexed by ofp_port. */

    char *name;                   /* Name used in log messages. */
    struct dpif *dpif;            /* Datapath interface. */
    struct mac_learning *ml;      /* Mac learning handle. */
    struct mbridge *mbridge;      /* Mirroring. */
    struct dpif_sflow *sflow;     /* SFlow handle, or null. */
    struct dpif_ipfix *ipfix;     /* Ipfix handle, or null. */
    struct netflow *netflow;      /* Netflow handle, or null. */
    struct stp *stp;              /* STP or null if disabled. */

    /* Special rules installed by ofproto-dpif. */
    struct rule_dpif *miss_rule;
    struct rule_dpif *no_packet_in_rule;

    enum ofp_config_flags frag;   /* Fragmentation handling. */
    bool has_in_band;             /* Bridge has in band control? */
    bool forward_bpdu;            /* Bridge forwards STP BPDUs? */

    /* True if the datapath supports recirculation. */
    bool enable_recirc;

    /* True if the datapath supports variable-length
     * OVS_USERSPACE_ATTR_USERDATA in OVS_ACTION_ATTR_USERSPACE actions.
     * False if the datapath supports only 8-byte (or shorter) userdata. */
    bool variable_length_userdata;

    /* Number of MPLS label stack entries that the datapath supports
     * in matches. */
    size_t max_mpls_depth;
};

struct xbundle {
    struct hmap_node hmap_node;    /* In global 'xbundles' map. */
    struct ofbundle *ofbundle;     /* Key in global 'xbundles' map. */

    struct list list_node;         /* In parent 'xbridges' list. */
    struct xbridge *xbridge;       /* Parent xbridge. */

    struct list xports;            /* Contains "struct xport"s. */

    char *name;                    /* Name used in log messages. */
    struct bond *bond;             /* Nonnull iff more than one port. */
    struct lacp *lacp;             /* LACP handle or null. */

    enum port_vlan_mode vlan_mode; /* VLAN mode. */
    int vlan;                      /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;         /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                    * NULL if all VLANs are trunked. */
    bool use_priority_tags;        /* Use 802.1p tag for frames in VLAN 0? */
    bool floodable;                /* No port has OFPUTIL_PC_NO_FLOOD set? */
};

struct xport {
    struct hmap_node hmap_node;      /* Node in global 'xports' map. */
    struct ofport_dpif *ofport;      /* Key in global 'xports' map. */

    struct hmap_node ofp_node;       /* Node in parent xbridge 'xports' map. */
    ofp_port_t ofp_port;             /* Key in parent xbridge 'xports' map. */

    odp_port_t odp_port;             /* Datapath port number or ODPP_NONE. */

    struct list bundle_node;         /* In parent xbundle (if it exists). */
    struct xbundle *xbundle;         /* Parent xbundle or null. */

    struct netdev *netdev;           /* 'ofport''s netdev. */

    struct xbridge *xbridge;         /* Parent bridge. */
    struct xport *peer;              /* Patch port peer or null. */

    enum ofputil_port_config config; /* OpenFlow port configuration. */
    enum ofputil_port_state state;   /* OpenFlow port state. */
    int stp_port_no;                 /* STP port number or -1 if not in use. */

    struct hmap skb_priorities;      /* Map of 'skb_priority_to_dscp's. */

    bool may_enable;                 /* May be enabled in bonds. */
    bool is_tunnel;                  /* Is a tunnel port. */

    struct cfm *cfm;                 /* CFM handle or null. */
    struct bfd *bfd;                 /* BFD handle or null. */
};

struct xlate_ctx {
    struct xlate_in *xin;
    struct xlate_out *xout;

    const struct xbridge *xbridge;

    /* Flow at the last commit. */
    struct flow base_flow;

    /* Tunnel IP destination address as received.  This is stored separately
     * as the base_flow.tunnel is cleared on init to reflect the datapath
     * behavior.  Used to make sure not to send tunneled output to ourselves,
     * which might lead to an infinite loop.  This could happen easily
     * if a tunnel is marked as 'remote_ip=flow', and the flow does not
     * actually set the tun_dst field. */
    ovs_be32 orig_tunnel_ip_dst;

    /* Stack for the push and pop actions.  Each stack element is of type
     * "union mf_subvalue". */
    union mf_subvalue init_stack[1024 / sizeof(union mf_subvalue)];
    struct ofpbuf stack;

    /* The rule that we are currently translating, or NULL. */
    struct rule_dpif *rule;

    /* Resubmit statistics, via xlate_table_action(). */
    int recurse;                /* Current resubmit nesting depth. */
    int resubmits;              /* Total number of resubmits. */
    bool in_group;              /* Currently translating ofgroup, if true. */

    uint32_t orig_skb_priority; /* Priority when packet arrived. */
    uint8_t table_id;           /* OpenFlow table ID where flow was found. */
    uint32_t sflow_n_outputs;   /* Number of output ports. */
    odp_port_t sflow_odp_port;  /* Output port for composing sFlow action. */
    uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
    bool exit;                  /* No further actions should be processed. */

    bool use_recirc;            /* Should generate recirc? */
    struct xlate_recirc recirc; /* Information used for generating
                                 * recirculation actions */

    /* OpenFlow 1.1+ action set.
     *
     * 'action_set' accumulates "struct ofpact"s added by OFPACT_WRITE_ACTIONS.
     * When translation is otherwise complete, ofpacts_execute_action_set()
     * converts it to a set of "struct ofpact"s that can be translated into
     * datapath actions. */
    struct ofpbuf action_set;   /* Action set. */
    uint64_t action_set_stub[1024 / 8];
};

/* A controller may use OFPP_NONE as the ingress port to indicate that
 * a packet did not arrive on a "real" port.  'ofpp_none_bundle' exists for
 * when an input bundle is needed for validation (e.g., mirroring or
 * OFPP_NORMAL processing).  It is not connected to an 'ofproto' and does not
 * have any 'port' structs, so care must be taken when dealing with it. */
static struct xbundle ofpp_none_bundle = {
    .name      = "OFPP_NONE",
    .vlan_mode = PORT_VLAN_TRUNK
};

/* Node in 'xport''s 'skb_priorities' map.  Used to maintain a map from
 * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
 * traffic egressing the 'ofport' with that priority should be marked with. */
struct skb_priority_to_dscp {
    struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'skb_priorities'. */
    uint32_t skb_priority;      /* Priority of this queue (see struct flow). */

    uint8_t dscp;               /* DSCP bits to mark outgoing traffic with. */
};
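
/* A minimal usage sketch (not from the original source), in the spirit of
 * dscp_from_skb_priority() declared later in this file; 'xport' and 'flow'
 * stand in for values a caller would already have:
 *
 *     uint8_t dscp;
 *
 *     if (dscp_from_skb_priority(xport, flow->skb_priority, &dscp)) {
 *         ...rewrite the packet's IP DSCP bits to 'dscp' before output...
 *     }
 */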

enum xc_type {
    XC_RULE,
    XC_BOND,
    XC_NETDEV,
    XC_NETFLOW,
    XC_MIRROR,
    XC_LEARN,
    XC_NORMAL,
    XC_FIN_TIMEOUT,
    XC_GROUP,
};

/* xlate_cache entries hold enough information to perform the side effects of
 * xlate_actions() for a rule, without needing to perform rule translation
 * from scratch. The primary usage of these is to submit statistics to objects
 * that a flow relates to, although they may be used for other effects as well
 * (for instance, refreshing hard timeouts for learned flows). */
struct xc_entry {
    enum xc_type type;
    union {
        struct rule_dpif *rule;
        struct {
            struct netdev *tx;
            struct netdev *rx;
            struct bfd *bfd;
        } dev;
        struct {
            struct netflow *netflow;
            struct flow *flow;
            ofp_port_t iface;
        } nf;
        struct {
            struct mbridge *mbridge;
            mirror_mask_t mirrors;
        } mirror;
        struct {
            struct bond *bond;
            struct flow *flow;
            uint16_t vid;
        } bond;
        struct {
            struct ofproto_dpif *ofproto;
            struct ofputil_flow_mod *fm;
            struct ofpbuf *ofpacts;
        } learn;
        struct {
            struct ofproto_dpif *ofproto;
            struct flow *flow;
            int vlan;
        } normal;
        struct {
            struct rule_dpif *rule;
            uint16_t idle;
            uint16_t hard;
        } fin;
        struct {
            struct group_dpif *group;
            struct ofputil_bucket *bucket;
        } group;
    } u;
};

#define XC_ENTRY_FOR_EACH(entry, entries, xcache)               \
    entries = xcache->entries;                                  \
    for (entry = ofpbuf_try_pull(&entries, sizeof *entry);      \
         entry;                                                 \
         entry = ofpbuf_try_pull(&entries, sizeof *entry))

struct xlate_cache {
    struct ofpbuf entries;
};
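
/* A minimal sketch (not from the original source) of iterating a cache with
 * XC_ENTRY_FOR_EACH.  'xcache' is assumed to be a caller-provided
 * "struct xlate_cache *"; "push_rule_stats" is a hypothetical helper named
 * only for illustration:
 *
 *     struct xc_entry *entry;
 *     struct ofpbuf entries;
 *
 *     XC_ENTRY_FOR_EACH (entry, entries, xcache) {
 *         if (entry->type == XC_RULE) {
 *             push_rule_stats(entry->u.rule);
 *         }
 *     }
 */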

/* Xlate config contains hash maps of all bridges, bundles and ports.
 * Xcfgp contains the pointer to the current xlate configuration.
 * When the main thread needs to change the configuration, it copies xcfgp to
 * new_xcfg and edits new_xcfg. This enables the use of RCU locking which
 * does not block handler and revalidator threads. */
struct xlate_cfg {
    struct hmap xbridges;
    struct hmap xbundles;
    struct hmap xports;
};
static OVSRCU_TYPE(struct xlate_cfg *) xcfgp = OVSRCU_TYPE_INITIALIZER;
static struct xlate_cfg *new_xcfg = NULL;
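
/* A minimal sketch (not from the original source) of the read side of the
 * scheme described above: a handler or revalidator thread fetches the RCU
 * pointer once and uses the snapshot without locking; it remains valid until
 * a quiescent period after xlate_txn_commit() publishes a replacement.
 * 'ofproto' stands in for a caller-provided bridge:
 *
 *     struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
 *     struct xbridge *xbridge = xbridge_lookup(xcfg, ofproto);
 *
 *     if (xbridge) {
 *         ...translate against this immutable snapshot...
 *     }
 */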

static bool may_receive(const struct xport *, struct xlate_ctx *);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
                             struct xlate_ctx *);
static void xlate_normal(struct xlate_ctx *);
static void xlate_report(struct xlate_ctx *, const char *);
static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
                               uint8_t table_id, bool may_packet_in,
                               bool honor_table_miss);
static bool input_vid_is_valid(uint16_t vid, struct xbundle *, bool warn);
static uint16_t input_vid_to_vlan(const struct xbundle *, uint16_t vid);
static void output_normal(struct xlate_ctx *, const struct xbundle *,
                          uint16_t vlan);
static void compose_output_action(struct xlate_ctx *, ofp_port_t ofp_port);

static struct xbridge *xbridge_lookup(struct xlate_cfg *,
                                      const struct ofproto_dpif *);
static struct xbundle *xbundle_lookup(struct xlate_cfg *,
                                      const struct ofbundle *);
static struct xport *xport_lookup(struct xlate_cfg *,
                                  const struct ofport_dpif *);
static struct xport *get_ofp_port(const struct xbridge *, ofp_port_t ofp_port);
static struct skb_priority_to_dscp *get_skb_priority(const struct xport *,
                                                     uint32_t skb_priority);
static void clear_skb_priorities(struct xport *);
static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
                                   uint8_t *dscp);

static struct xc_entry *xlate_cache_add_entry(struct xlate_cache *xc,
                                              enum xc_type type);
static void xlate_xbridge_init(struct xlate_cfg *, struct xbridge *);
static void xlate_xbundle_init(struct xlate_cfg *, struct xbundle *);
static void xlate_xport_init(struct xlate_cfg *, struct xport *);
static void xlate_xbridge_set(struct xbridge *xbridge,
                              struct dpif *dpif,
                              struct rule_dpif *miss_rule,
                              struct rule_dpif *no_packet_in_rule,
                              const struct mac_learning *ml, struct stp *stp,
                              const struct mbridge *mbridge,
                              const struct dpif_sflow *sflow,
                              const struct dpif_ipfix *ipfix,
                              const struct netflow *netflow,
                              enum ofp_config_flags frag,
                              bool forward_bpdu, bool has_in_band,
                              bool enable_recirc,
                              bool variable_length_userdata,
                              size_t max_mpls_depth);
static void xlate_xbundle_set(struct xbundle *xbundle,
                              enum port_vlan_mode vlan_mode, int vlan,
                              unsigned long *trunks, bool use_priority_tags,
                              const struct bond *bond, const struct lacp *lacp,
                              bool floodable);
static void xlate_xport_set(struct xport *xport, odp_port_t odp_port,
                            const struct netdev *netdev, const struct cfm *cfm,
                            const struct bfd *bfd, int stp_port_no,
                            enum ofputil_port_config config,
                            enum ofputil_port_state state, bool is_tunnel,
                            bool may_enable);
static void xlate_xbridge_remove(struct xlate_cfg *, struct xbridge *);
static void xlate_xbundle_remove(struct xlate_cfg *, struct xbundle *);
static void xlate_xport_remove(struct xlate_cfg *, struct xport *);
static void xlate_xbridge_copy(struct xbridge *);
static void xlate_xbundle_copy(struct xbridge *, struct xbundle *);
static void xlate_xport_copy(struct xbridge *, struct xbundle *,
                             struct xport *);
static void xlate_xcfg_free(struct xlate_cfg *);


static void
xlate_xbridge_init(struct xlate_cfg *xcfg, struct xbridge *xbridge)
{
    list_init(&xbridge->xbundles);
    hmap_init(&xbridge->xports);
    hmap_insert(&xcfg->xbridges, &xbridge->hmap_node,
                hash_pointer(xbridge->ofproto, 0));
}

static void
xlate_xbundle_init(struct xlate_cfg *xcfg, struct xbundle *xbundle)
{
    list_init(&xbundle->xports);
    list_insert(&xbundle->xbridge->xbundles, &xbundle->list_node);
    hmap_insert(&xcfg->xbundles, &xbundle->hmap_node,
                hash_pointer(xbundle->ofbundle, 0));
}

static void
xlate_xport_init(struct xlate_cfg *xcfg, struct xport *xport)
{
    hmap_init(&xport->skb_priorities);
    hmap_insert(&xcfg->xports, &xport->hmap_node,
                hash_pointer(xport->ofport, 0));
    hmap_insert(&xport->xbridge->xports, &xport->ofp_node,
                hash_ofp_port(xport->ofp_port));
}

static void
xlate_xbridge_set(struct xbridge *xbridge,
                  struct dpif *dpif,
                  struct rule_dpif *miss_rule,
                  struct rule_dpif *no_packet_in_rule,
                  const struct mac_learning *ml, struct stp *stp,
                  const struct mbridge *mbridge,
                  const struct dpif_sflow *sflow,
                  const struct dpif_ipfix *ipfix,
                  const struct netflow *netflow, enum ofp_config_flags frag,
                  bool forward_bpdu, bool has_in_band,
                  bool enable_recirc,
                  bool variable_length_userdata,
                  size_t max_mpls_depth)
{
    if (xbridge->ml != ml) {
        mac_learning_unref(xbridge->ml);
        xbridge->ml = mac_learning_ref(ml);
    }

    if (xbridge->mbridge != mbridge) {
        mbridge_unref(xbridge->mbridge);
        xbridge->mbridge = mbridge_ref(mbridge);
    }

    if (xbridge->sflow != sflow) {
        dpif_sflow_unref(xbridge->sflow);
        xbridge->sflow = dpif_sflow_ref(sflow);
    }

    if (xbridge->ipfix != ipfix) {
        dpif_ipfix_unref(xbridge->ipfix);
        xbridge->ipfix = dpif_ipfix_ref(ipfix);
    }

    if (xbridge->stp != stp) {
        stp_unref(xbridge->stp);
        xbridge->stp = stp_ref(stp);
    }

    if (xbridge->netflow != netflow) {
        netflow_unref(xbridge->netflow);
        xbridge->netflow = netflow_ref(netflow);
    }

    xbridge->dpif = dpif;
    xbridge->forward_bpdu = forward_bpdu;
    xbridge->has_in_band = has_in_band;
    xbridge->frag = frag;
    xbridge->miss_rule = miss_rule;
    xbridge->no_packet_in_rule = no_packet_in_rule;
    xbridge->enable_recirc = enable_recirc;
    xbridge->variable_length_userdata = variable_length_userdata;
    xbridge->max_mpls_depth = max_mpls_depth;
}

static void
xlate_xbundle_set(struct xbundle *xbundle,
                  enum port_vlan_mode vlan_mode, int vlan,
                  unsigned long *trunks, bool use_priority_tags,
                  const struct bond *bond, const struct lacp *lacp,
                  bool floodable)
{
    ovs_assert(xbundle->xbridge);

    xbundle->vlan_mode = vlan_mode;
    xbundle->vlan = vlan;
    xbundle->trunks = trunks;
    xbundle->use_priority_tags = use_priority_tags;
    xbundle->floodable = floodable;

    if (xbundle->bond != bond) {
        bond_unref(xbundle->bond);
        xbundle->bond = bond_ref(bond);
    }

    if (xbundle->lacp != lacp) {
        lacp_unref(xbundle->lacp);
        xbundle->lacp = lacp_ref(lacp);
    }
}

static void
xlate_xport_set(struct xport *xport, odp_port_t odp_port,
                const struct netdev *netdev, const struct cfm *cfm,
                const struct bfd *bfd, int stp_port_no,
                enum ofputil_port_config config, enum ofputil_port_state state,
                bool is_tunnel, bool may_enable)
{
    xport->config = config;
    xport->state = state;
    xport->stp_port_no = stp_port_no;
    xport->is_tunnel = is_tunnel;
    xport->may_enable = may_enable;
    xport->odp_port = odp_port;

    if (xport->cfm != cfm) {
        cfm_unref(xport->cfm);
        xport->cfm = cfm_ref(cfm);
    }

    if (xport->bfd != bfd) {
        bfd_unref(xport->bfd);
        xport->bfd = bfd_ref(bfd);
    }

    if (xport->netdev != netdev) {
        netdev_close(xport->netdev);
        xport->netdev = netdev_ref(netdev);
    }
}

static void
xlate_xbridge_copy(struct xbridge *xbridge)
{
    struct xbundle *xbundle;
    struct xport *xport;
    struct xbridge *new_xbridge = xzalloc(sizeof *xbridge);
    new_xbridge->ofproto = xbridge->ofproto;
    new_xbridge->name = xstrdup(xbridge->name);
    xlate_xbridge_init(new_xcfg, new_xbridge);

    xlate_xbridge_set(new_xbridge,
                      xbridge->dpif, xbridge->miss_rule,
                      xbridge->no_packet_in_rule, xbridge->ml, xbridge->stp,
                      xbridge->mbridge, xbridge->sflow, xbridge->ipfix,
                      xbridge->netflow, xbridge->frag, xbridge->forward_bpdu,
                      xbridge->has_in_band, xbridge->enable_recirc,
                      xbridge->variable_length_userdata,
                      xbridge->max_mpls_depth);
    LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
        xlate_xbundle_copy(new_xbridge, xbundle);
    }

    /* Copy xports which are not part of an xbundle. */
    HMAP_FOR_EACH (xport, ofp_node, &xbridge->xports) {
        if (!xport->xbundle) {
            xlate_xport_copy(new_xbridge, NULL, xport);
        }
    }
}

static void
xlate_xbundle_copy(struct xbridge *xbridge, struct xbundle *xbundle)
{
    struct xport *xport;
    struct xbundle *new_xbundle = xzalloc(sizeof *xbundle);
    new_xbundle->ofbundle = xbundle->ofbundle;
    new_xbundle->xbridge = xbridge;
    new_xbundle->name = xstrdup(xbundle->name);
    xlate_xbundle_init(new_xcfg, new_xbundle);

    xlate_xbundle_set(new_xbundle, xbundle->vlan_mode,
                      xbundle->vlan, xbundle->trunks,
                      xbundle->use_priority_tags, xbundle->bond, xbundle->lacp,
                      xbundle->floodable);
    LIST_FOR_EACH (xport, bundle_node, &xbundle->xports) {
        xlate_xport_copy(xbridge, new_xbundle, xport);
    }
}

static void
xlate_xport_copy(struct xbridge *xbridge, struct xbundle *xbundle,
                 struct xport *xport)
{
    struct skb_priority_to_dscp *pdscp, *new_pdscp;
    struct xport *new_xport = xzalloc(sizeof *xport);
    new_xport->ofport = xport->ofport;
    new_xport->ofp_port = xport->ofp_port;
    new_xport->xbridge = xbridge;
    xlate_xport_init(new_xcfg, new_xport);

    xlate_xport_set(new_xport, xport->odp_port, xport->netdev, xport->cfm,
                    xport->bfd, xport->stp_port_no, xport->config, xport->state,
                    xport->is_tunnel, xport->may_enable);

    if (xport->peer) {
        struct xport *peer = xport_lookup(new_xcfg, xport->peer->ofport);
        if (peer) {
            new_xport->peer = peer;
            new_xport->peer->peer = new_xport;
        }
    }

    if (xbundle) {
        new_xport->xbundle = xbundle;
        list_insert(&new_xport->xbundle->xports, &new_xport->bundle_node);
    }

    HMAP_FOR_EACH (pdscp, hmap_node, &xport->skb_priorities) {
        new_pdscp = xmalloc(sizeof *pdscp);
        new_pdscp->skb_priority = pdscp->skb_priority;
        new_pdscp->dscp = pdscp->dscp;
        hmap_insert(&new_xport->skb_priorities, &new_pdscp->hmap_node,
                    hash_int(new_pdscp->skb_priority, 0));
    }
}

/* Sets the current xlate configuration to new_xcfg and frees the old xlate
 * configuration in xcfgp.
 *
 * This needs to be called after editing the xlate configuration.
 *
 * Functions that edit the new xlate configuration are
 * xlate_<ofproto/bundle/ofport>_set and xlate_<ofproto/bundle/ofport>_remove.
 *
 * A sample workflow:
 *
 *     xlate_txn_start();
 *     ...
 *     edit_xlate_configuration();
 *     ...
 *     xlate_txn_commit(); */
void
xlate_txn_commit(void)
{
    struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);

    ovsrcu_set(&xcfgp, new_xcfg);
    ovsrcu_postpone(xlate_xcfg_free, xcfg);

    new_xcfg = NULL;
}
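
/* A concrete instance of the sample workflow above (a sketch, not from the
 * original source), as the main thread might run it; arguments to the _set
 * functions defined later in this file are abbreviated:
 *
 *     xlate_txn_start();
 *     xlate_ofproto_set(ofproto, name, dpif, ...);
 *     xlate_bundle_set(ofproto, ofbundle, ...);
 *     xlate_txn_commit();
 */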

/* Copies the current xlate configuration in xcfgp to new_xcfg.
 *
 * This needs to be called prior to editing the xlate configuration. */
void
xlate_txn_start(void)
{
    struct xbridge *xbridge;
    struct xlate_cfg *xcfg;

    ovs_assert(!new_xcfg);

    new_xcfg = xmalloc(sizeof *new_xcfg);
    hmap_init(&new_xcfg->xbridges);
    hmap_init(&new_xcfg->xbundles);
    hmap_init(&new_xcfg->xports);

    xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    if (!xcfg) {
        return;
    }

    HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
        xlate_xbridge_copy(xbridge);
    }
}


static void
xlate_xcfg_free(struct xlate_cfg *xcfg)
{
    struct xbridge *xbridge, *next_xbridge;

    if (!xcfg) {
        return;
    }

    HMAP_FOR_EACH_SAFE (xbridge, next_xbridge, hmap_node, &xcfg->xbridges) {
        xlate_xbridge_remove(xcfg, xbridge);
    }

    hmap_destroy(&xcfg->xbridges);
    hmap_destroy(&xcfg->xbundles);
    hmap_destroy(&xcfg->xports);
    free(xcfg);
}

void
xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
                  struct dpif *dpif, struct rule_dpif *miss_rule,
                  struct rule_dpif *no_packet_in_rule,
                  const struct mac_learning *ml, struct stp *stp,
                  const struct mbridge *mbridge,
                  const struct dpif_sflow *sflow,
                  const struct dpif_ipfix *ipfix,
                  const struct netflow *netflow, enum ofp_config_flags frag,
                  bool forward_bpdu, bool has_in_band,
                  bool enable_recirc,
                  bool variable_length_userdata,
                  size_t max_mpls_depth)
{
    struct xbridge *xbridge;

    ovs_assert(new_xcfg);

    xbridge = xbridge_lookup(new_xcfg, ofproto);
    if (!xbridge) {
        xbridge = xzalloc(sizeof *xbridge);
        xbridge->ofproto = ofproto;

        xlate_xbridge_init(new_xcfg, xbridge);
    }

    free(xbridge->name);
    xbridge->name = xstrdup(name);

    xlate_xbridge_set(xbridge, dpif, miss_rule, no_packet_in_rule, ml, stp,
                      mbridge, sflow, ipfix, netflow, frag, forward_bpdu,
                      has_in_band, enable_recirc, variable_length_userdata,
                      max_mpls_depth);
}

static void
xlate_xbridge_remove(struct xlate_cfg *xcfg, struct xbridge *xbridge)
{
    struct xbundle *xbundle, *next_xbundle;
    struct xport *xport, *next_xport;

    if (!xbridge) {
        return;
    }

    HMAP_FOR_EACH_SAFE (xport, next_xport, ofp_node, &xbridge->xports) {
        xlate_xport_remove(xcfg, xport);
    }

    LIST_FOR_EACH_SAFE (xbundle, next_xbundle, list_node, &xbridge->xbundles) {
        xlate_xbundle_remove(xcfg, xbundle);
    }

    hmap_remove(&xcfg->xbridges, &xbridge->hmap_node);
    mac_learning_unref(xbridge->ml);
    mbridge_unref(xbridge->mbridge);
    dpif_sflow_unref(xbridge->sflow);
    dpif_ipfix_unref(xbridge->ipfix);
    stp_unref(xbridge->stp);
    hmap_destroy(&xbridge->xports);
    free(xbridge->name);
    free(xbridge);
}

void
xlate_remove_ofproto(struct ofproto_dpif *ofproto)
{
    struct xbridge *xbridge;

    ovs_assert(new_xcfg);

    xbridge = xbridge_lookup(new_xcfg, ofproto);
    xlate_xbridge_remove(new_xcfg, xbridge);
}

void
xlate_bundle_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
                 const char *name, enum port_vlan_mode vlan_mode, int vlan,
                 unsigned long *trunks, bool use_priority_tags,
                 const struct bond *bond, const struct lacp *lacp,
                 bool floodable)
{
    struct xbundle *xbundle;

    ovs_assert(new_xcfg);

    xbundle = xbundle_lookup(new_xcfg, ofbundle);
    if (!xbundle) {
        xbundle = xzalloc(sizeof *xbundle);
        xbundle->ofbundle = ofbundle;
        xbundle->xbridge = xbridge_lookup(new_xcfg, ofproto);

        xlate_xbundle_init(new_xcfg, xbundle);
    }

    free(xbundle->name);
    xbundle->name = xstrdup(name);

    xlate_xbundle_set(xbundle, vlan_mode, vlan, trunks,
                      use_priority_tags, bond, lacp, floodable);
}

static void
xlate_xbundle_remove(struct xlate_cfg *xcfg, struct xbundle *xbundle)
{
    struct xport *xport, *next;

    if (!xbundle) {
        return;
    }

    LIST_FOR_EACH_SAFE (xport, next, bundle_node, &xbundle->xports) {
        list_remove(&xport->bundle_node);
        xport->xbundle = NULL;
    }

    hmap_remove(&xcfg->xbundles, &xbundle->hmap_node);
    list_remove(&xbundle->list_node);
    bond_unref(xbundle->bond);
    lacp_unref(xbundle->lacp);
    free(xbundle->name);
    free(xbundle);
}

void
xlate_bundle_remove(struct ofbundle *ofbundle)
{
    struct xbundle *xbundle;

    ovs_assert(new_xcfg);

    xbundle = xbundle_lookup(new_xcfg, ofbundle);
    xlate_xbundle_remove(new_xcfg, xbundle);
}

void
xlate_ofport_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
                 struct ofport_dpif *ofport, ofp_port_t ofp_port,
                 odp_port_t odp_port, const struct netdev *netdev,
                 const struct cfm *cfm, const struct bfd *bfd,
                 struct ofport_dpif *peer, int stp_port_no,
                 const struct ofproto_port_queue *qdscp_list, size_t n_qdscp,
                 enum ofputil_port_config config,
                 enum ofputil_port_state state, bool is_tunnel,
                 bool may_enable)
{
    size_t i;
    struct xport *xport;

    ovs_assert(new_xcfg);

    xport = xport_lookup(new_xcfg, ofport);
    if (!xport) {
        xport = xzalloc(sizeof *xport);
        xport->ofport = ofport;
        xport->xbridge = xbridge_lookup(new_xcfg, ofproto);
        xport->ofp_port = ofp_port;

        xlate_xport_init(new_xcfg, xport);
    }

    ovs_assert(xport->ofp_port == ofp_port);

    xlate_xport_set(xport, odp_port, netdev, cfm, bfd, stp_port_no, config,
                    state, is_tunnel, may_enable);

    if (xport->peer) {
        xport->peer->peer = NULL;
    }
    xport->peer = xport_lookup(new_xcfg, peer);
    if (xport->peer) {
        xport->peer->peer = xport;
    }

    if (xport->xbundle) {
        list_remove(&xport->bundle_node);
    }
    xport->xbundle = xbundle_lookup(new_xcfg, ofbundle);
    if (xport->xbundle) {
        list_insert(&xport->xbundle->xports, &xport->bundle_node);
    }

    clear_skb_priorities(xport);
    for (i = 0; i < n_qdscp; i++) {
        struct skb_priority_to_dscp *pdscp;
        uint32_t skb_priority;

        if (dpif_queue_to_priority(xport->xbridge->dpif, qdscp_list[i].queue,
                                   &skb_priority)) {
            continue;
        }

        pdscp = xmalloc(sizeof *pdscp);
        pdscp->skb_priority = skb_priority;
        pdscp->dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
        hmap_insert(&xport->skb_priorities, &pdscp->hmap_node,
                    hash_int(pdscp->skb_priority, 0));
    }
}

static void
xlate_xport_remove(struct xlate_cfg *xcfg, struct xport *xport)
{
    if (!xport) {
        return;
    }

    if (xport->peer) {
        xport->peer->peer = NULL;
        xport->peer = NULL;
    }

    if (xport->xbundle) {
        list_remove(&xport->bundle_node);
    }

    clear_skb_priorities(xport);
    hmap_destroy(&xport->skb_priorities);

    hmap_remove(&xcfg->xports, &xport->hmap_node);
    hmap_remove(&xport->xbridge->xports, &xport->ofp_node);

    netdev_close(xport->netdev);
    cfm_unref(xport->cfm);
    bfd_unref(xport->bfd);
    free(xport);
}

void
xlate_ofport_remove(struct ofport_dpif *ofport)
{
    struct xport *xport;

    ovs_assert(new_xcfg);

    xport = xport_lookup(new_xcfg, ofport);
    xlate_xport_remove(new_xcfg, xport);
}

/* Given a datapath, packet, and flow metadata ('backer', 'packet', and 'key'
 * respectively), populates 'flow' with the result of odp_flow_key_to_flow().
 * Optionally populates 'ofproto' with the ofproto_dpif, 'odp_in_port' with
 * the datapath in_port on which 'packet' ingressed, and 'ipfix', 'sflow', and
 * 'netflow' with the appropriate handles for those protocols if they're
 * enabled.  Caller is responsible for unrefing them.
 *
 * If 'ofproto' is nonnull, requires 'flow''s in_port to exist.  Otherwise sets
 * 'flow''s in_port to OFPP_NONE.
 *
 * This function does post-processing on data returned from
 * odp_flow_key_to_flow() to help make VLAN splinters transparent to the rest
 * of the upcall processing logic.  In particular, if the extracted in_port is
 * a VLAN splinter port, it replaces flow->in_port by the "real" port, sets
 * flow->vlan_tci correctly for the VLAN of the VLAN splinter port, and pushes
 * a VLAN header onto 'packet' (if it is nonnull).
 *
 * Similarly, this function also includes some logic to help with tunnels.  It
 * may modify 'flow' as necessary to make the tunneling implementation
 * transparent to the upcall processing logic.
 *
 * Returns 0 if successful, ENODEV if the parsed flow has no associated ofport,
 * or some other positive errno if there are other problems. */
int
xlate_receive(const struct dpif_backer *backer, struct ofpbuf *packet,
              const struct nlattr *key, size_t key_len, struct flow *flow,
              struct ofproto_dpif **ofproto, struct dpif_ipfix **ipfix,
              struct dpif_sflow **sflow, struct netflow **netflow,
              odp_port_t *odp_in_port)
{
    struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
    int error = ENODEV;
    const struct xport *xport;

    if (odp_flow_key_to_flow(key, key_len, flow) == ODP_FIT_ERROR) {
        error = EINVAL;
        return error;
    }

    if (odp_in_port) {
        *odp_in_port = flow->in_port.odp_port;
    }

    xport = xport_lookup(xcfg, tnl_port_should_receive(flow)
                         ? tnl_port_receive(flow)
                         : odp_port_to_ofport(backer, flow->in_port.odp_port));

    flow->in_port.ofp_port = xport ? xport->ofp_port : OFPP_NONE;
    if (!xport) {
        return error;
    }

    if (vsp_adjust_flow(xport->xbridge->ofproto, flow)) {
        if (packet) {
            /* Make the packet resemble the flow, so that it gets sent to
             * an OpenFlow controller properly, so that it looks correct
             * for sFlow, and so that flow_extract() will get the correct
             * vlan_tci if it is called on 'packet'. */
            eth_push_vlan(packet, htons(ETH_TYPE_VLAN), flow->vlan_tci);
        }
    }
    error = 0;

    if (ofproto) {
        *ofproto = xport->xbridge->ofproto;
    }

    if (ipfix) {
        *ipfix = dpif_ipfix_ref(xport->xbridge->ipfix);
    }

    if (sflow) {
        *sflow = dpif_sflow_ref(xport->xbridge->sflow);
    }

    if (netflow) {
        *netflow = netflow_ref(xport->xbridge->netflow);
    }
    return error;
}

static struct xbridge *
xbridge_lookup(struct xlate_cfg *xcfg, const struct ofproto_dpif *ofproto)
{
    struct hmap *xbridges;
    struct xbridge *xbridge;

    if (!ofproto || !xcfg) {
        return NULL;
    }

    xbridges = &xcfg->xbridges;

    HMAP_FOR_EACH_IN_BUCKET (xbridge, hmap_node, hash_pointer(ofproto, 0),
                             xbridges) {
        if (xbridge->ofproto == ofproto) {
            return xbridge;
        }
    }
    return NULL;
}

static struct xbundle *
xbundle_lookup(struct xlate_cfg *xcfg, const struct ofbundle *ofbundle)
{
    struct hmap *xbundles;
    struct xbundle *xbundle;

    if (!ofbundle || !xcfg) {
        return NULL;
    }

    xbundles = &xcfg->xbundles;

    HMAP_FOR_EACH_IN_BUCKET (xbundle, hmap_node, hash_pointer(ofbundle, 0),
                             xbundles) {
        if (xbundle->ofbundle == ofbundle) {
            return xbundle;
        }
    }
    return NULL;
}

static struct xport *
xport_lookup(struct xlate_cfg *xcfg, const struct ofport_dpif *ofport)
{
    struct hmap *xports;
    struct xport *xport;

    if (!ofport || !xcfg) {
        return NULL;
    }

    xports = &xcfg->xports;

    HMAP_FOR_EACH_IN_BUCKET (xport, hmap_node, hash_pointer(ofport, 0),
                             xports) {
        if (xport->ofport == ofport) {
            return xport;
        }
    }
    return NULL;
}

static struct stp_port *
xport_get_stp_port(const struct xport *xport)
{
    return xport->xbridge->stp && xport->stp_port_no != -1
           ? stp_get_port(xport->xbridge->stp, xport->stp_port_no)
           : NULL;
}

static bool
xport_stp_learn_state(const struct xport *xport)
{
    struct stp_port *sp = xport_get_stp_port(xport);
    return stp_learn_in_state(sp ? stp_port_get_state(sp) : STP_DISABLED);
}

static bool
xport_stp_forward_state(const struct xport *xport)
{
    struct stp_port *sp = xport_get_stp_port(xport);
    return stp_forward_in_state(sp ? stp_port_get_state(sp) : STP_DISABLED);
}

static bool
xport_stp_listen_state(const struct xport *xport)
{
    struct stp_port *sp = xport_get_stp_port(xport);
    return stp_listen_in_state(sp ? stp_port_get_state(sp) : STP_DISABLED);
}

/* Returns true if STP should process 'flow'.  Sets fields in 'wc' that
 * were used to make the determination. */
static bool
stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
{
    /* is_stp() also checks dl_type, but dl_type is always set in 'wc'. */
    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
    return is_stp(flow);
}

static void
stp_process_packet(const struct xport *xport, const struct ofpbuf *packet)
{
    struct stp_port *sp = xport_get_stp_port(xport);
    struct ofpbuf payload = *packet;
    struct eth_header *eth = ofpbuf_data(&payload);

    /* Sink packets on ports that have STP disabled when the bridge has
     * STP enabled. */
    if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
        return;
    }

    /* Trim off padding on payload. */
    if (ofpbuf_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
        ofpbuf_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
    }

    if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
        stp_received_bpdu(sp, ofpbuf_data(&payload), ofpbuf_size(&payload));
    }
}

static struct xport *
get_ofp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
{
    struct xport *xport;

    HMAP_FOR_EACH_IN_BUCKET (xport, ofp_node, hash_ofp_port(ofp_port),
                             &xbridge->xports) {
        if (xport->ofp_port == ofp_port) {
            return xport;
        }
    }
    return NULL;
}

static odp_port_t
ofp_port_to_odp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
{
    const struct xport *xport = get_ofp_port(xbridge, ofp_port);
    return xport ? xport->odp_port : ODPP_NONE;
}

static bool
odp_port_is_alive(const struct xlate_ctx *ctx, ofp_port_t ofp_port)
{
    struct xport *xport;

    xport = get_ofp_port(ctx->xbridge, ofp_port);
    if (!xport || xport->config & OFPUTIL_PC_PORT_DOWN ||
        xport->state & OFPUTIL_PS_LINK_DOWN) {
        return false;
    }

    return true;
}

static struct ofputil_bucket *
group_first_live_bucket(const struct xlate_ctx *, const struct group_dpif *,
                        int depth);

static bool
group_is_alive(const struct xlate_ctx *ctx, uint32_t group_id, int depth)
{
    struct group_dpif *group;

    if (group_dpif_lookup(ctx->xbridge->ofproto, group_id, &group)) {
        struct ofputil_bucket *bucket;

        bucket = group_first_live_bucket(ctx, group, depth);
        group_dpif_unref(group);
        return bucket != NULL;
    }

    return false;
}

#define MAX_LIVENESS_RECURSION 128 /* Arbitrary limit */

static bool
bucket_is_alive(const struct xlate_ctx *ctx,
                struct ofputil_bucket *bucket, int depth)
{
    if (depth >= MAX_LIVENESS_RECURSION) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);

        VLOG_WARN_RL(&rl, "bucket chaining exceeded %d links",
                     MAX_LIVENESS_RECURSION);
        return false;
    }

    return (!ofputil_bucket_has_liveness(bucket)
            || (bucket->watch_port != OFPP_ANY
                && odp_port_is_alive(ctx, bucket->watch_port))
            || (bucket->watch_group != OFPG_ANY
                && group_is_alive(ctx, bucket->watch_group, depth + 1)));
}

static struct ofputil_bucket *
group_first_live_bucket(const struct xlate_ctx *ctx,
                        const struct group_dpif *group, int depth)
{
    struct ofputil_bucket *bucket;
    const struct list *buckets;

    group_dpif_get_buckets(group, &buckets);
    LIST_FOR_EACH (bucket, list_node, buckets) {
        if (bucket_is_alive(ctx, bucket, depth)) {
            return bucket;
        }
    }

    return NULL;
}

static struct ofputil_bucket *
group_best_live_bucket(const struct xlate_ctx *ctx,
                       const struct group_dpif *group,
                       uint32_t basis)
{
    struct ofputil_bucket *best_bucket = NULL;
    uint32_t best_score = 0;
    int i = 0;

    struct ofputil_bucket *bucket;
    const struct list *buckets;

    group_dpif_get_buckets(group, &buckets);
    LIST_FOR_EACH (bucket, list_node, buckets) {
        if (bucket_is_alive(ctx, bucket, 0)) {
            uint32_t score = (hash_int(i, basis) & 0xffff) * bucket->weight;
            if (score >= best_score) {
                best_bucket = bucket;
                best_score = score;
            }
        }
        i++;
    }

    return best_bucket;
}
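
/* Illustration of the selection above (assumed, illustrative numbers): each
 * live bucket i is scored as (hash_int(i, basis) & 0xffff) * weight, and the
 * highest score wins.  With two live buckets of weights 100 and 200, the
 * second bucket is chosen whenever its 16-bit hash slice is at least half of
 * the first one's, so selection is roughly weight-proportional across flows;
 * the ">=" comparison means a later bucket wins exact ties. */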

static bool
xbundle_trunks_vlan(const struct xbundle *bundle, uint16_t vlan)
{
    return (bundle->vlan_mode != PORT_VLAN_ACCESS
            && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
}

static bool
xbundle_includes_vlan(const struct xbundle *xbundle, uint16_t vlan)
{
    return vlan == xbundle->vlan || xbundle_trunks_vlan(xbundle, vlan);
}

static mirror_mask_t
xbundle_mirror_out(const struct xbridge *xbridge, struct xbundle *xbundle)
{
    return xbundle != &ofpp_none_bundle
           ? mirror_bundle_out(xbridge->mbridge, xbundle->ofbundle)
           : 0;
}

static mirror_mask_t
xbundle_mirror_src(const struct xbridge *xbridge, struct xbundle *xbundle)
{
    return xbundle != &ofpp_none_bundle
           ? mirror_bundle_src(xbridge->mbridge, xbundle->ofbundle)
           : 0;
}

static mirror_mask_t
xbundle_mirror_dst(const struct xbridge *xbridge, struct xbundle *xbundle)
{
    return xbundle != &ofpp_none_bundle
           ? mirror_bundle_dst(xbridge->mbridge, xbundle->ofbundle)
           : 0;
}

static struct xbundle *
lookup_input_bundle(const struct xbridge *xbridge, ofp_port_t in_port,
                    bool warn, struct xport **in_xportp)
{
    struct xport *xport;

    /* Find the port and bundle for the received packet. */
    xport = get_ofp_port(xbridge, in_port);
    if (in_xportp) {
        *in_xportp = xport;
    }
    if (xport && xport->xbundle) {
        return xport->xbundle;
    }

    /* Special-case OFPP_NONE (OF1.0) and OFPP_CONTROLLER (OF1.1+),
     * which a controller may use as the ingress port for traffic that
     * it is sourcing. */
    if (in_port == OFPP_CONTROLLER || in_port == OFPP_NONE) {
        return &ofpp_none_bundle;
    }

    /* Odd.  A few possible reasons here:
     *
     * - We deleted a port but there are still a few packets queued up
     *   from it.
     *
     * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
     *   we don't know about.
     *
     * - The ofproto client didn't configure the port as part of a bundle.
     *   This is particularly likely to happen if a packet was received on the
     *   port after it was created, but before the client had a chance to
     *   configure its bundle.
     */
    if (warn) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
                     "port %"PRIu16, xbridge->name, in_port);
    }
    return NULL;
}

static void
add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow)
{
    const struct xbridge *xbridge = ctx->xbridge;
    mirror_mask_t mirrors;
    struct xbundle *in_xbundle;
    uint16_t vlan;
    uint16_t vid;

    mirrors = ctx->xout->mirrors;
    ctx->xout->mirrors = 0;

    in_xbundle = lookup_input_bundle(xbridge, orig_flow->in_port.ofp_port,
                                     ctx->xin->packet != NULL, NULL);
    if (!in_xbundle) {
        return;
    }
    mirrors |= xbundle_mirror_src(xbridge, in_xbundle);

    /* Drop frames on bundles reserved for mirroring. */
    if (xbundle_mirror_out(xbridge, in_xbundle)) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ctx->xbridge->name, in_xbundle->name);
        }
        ofpbuf_clear(&ctx->xout->odp_actions);
        return;
    }

    /* Check VLAN. */
    vid = vlan_tci_to_vid(orig_flow->vlan_tci);
    if (!input_vid_is_valid(vid, in_xbundle, ctx->xin->packet != NULL)) {
        return;
    }
    vlan = input_vid_to_vlan(in_xbundle, vid);

    if (!mirrors) {
        return;
    }

    /* Restore the original packet before adding the mirror actions. */
    ctx->xin->flow = *orig_flow;

    while (mirrors) {
        mirror_mask_t dup_mirrors;
        struct ofbundle *out;
        unsigned long *vlans;
        bool vlan_mirrored;
        bool has_mirror;
        int out_vlan;

        has_mirror = mirror_get(xbridge->mbridge, raw_ctz(mirrors),
                                &vlans, &dup_mirrors, &out, &out_vlan);
        ovs_assert(has_mirror);

        if (vlans) {
            ctx->xout->wc.masks.vlan_tci |= htons(VLAN_CFI | VLAN_VID_MASK);
        }
        vlan_mirrored = !vlans || bitmap_is_set(vlans, vlan);
        free(vlans);

        if (!vlan_mirrored) {
            mirrors = zero_rightmost_1bit(mirrors);
            continue;
        }

        mirrors &= ~dup_mirrors;
        ctx->xout->mirrors |= dup_mirrors;
        if (out) {
            struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
            struct xbundle *out_xbundle = xbundle_lookup(xcfg, out);
            if (out_xbundle) {
                output_normal(ctx, out_xbundle, vlan);
            }
        } else if (vlan != out_vlan
                   && !eth_addr_is_reserved(orig_flow->dl_dst)) {
            struct xbundle *xbundle;

            LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
                if (xbundle_includes_vlan(xbundle, out_vlan)
                    && !xbundle_mirror_out(xbridge, xbundle)) {
                    output_normal(ctx, xbundle, out_vlan);
                }
            }
        }
    }
}

/* Given 'vid', the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), and 'in_xbundle',
 * the bundle on which the packet was received, returns the VLAN to which the
 * packet belongs.
 *
 * Both 'vid' and the return value are in the range 0...4095. */
static uint16_t
input_vid_to_vlan(const struct xbundle *in_xbundle, uint16_t vid)
{
    switch (in_xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return in_xbundle->vlan;

    case PORT_VLAN_TRUNK:
        return vid;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        return vid ? vid : in_xbundle->vlan;

    default:
        OVS_NOT_REACHED();
    }
}
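
/* Illustration with assumed example values: on an access port with
 * vlan = 10, every packet maps to VLAN 10; on a trunk, vid 20 maps to
 * VLAN 20; on a native-tagged or native-untagged port with vlan = 10, an
 * untagged packet (vid 0) maps to VLAN 10 while vid 20 maps to VLAN 20. */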

/* Checks whether a packet with the given 'vid' may ingress on 'in_xbundle'.
 * If so, returns true.  Otherwise, returns false and, if 'warn' is true, logs
 * a warning.
 *
 * 'vid' should be the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), in the range
 * 0...4095. */
static bool
input_vid_is_valid(uint16_t vid, struct xbundle *in_xbundle, bool warn)
{
    /* Allow any VID on the OFPP_NONE port. */
    if (in_xbundle == &ofpp_none_bundle) {
        return true;
    }

    switch (in_xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        if (vid) {
            if (warn) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "dropping VLAN %"PRIu16" tagged "
                             "packet received on port %s configured as VLAN "
                             "%"PRIu16" access port", vid, in_xbundle->name,
                             in_xbundle->vlan);
            }
            return false;
        }
        return true;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        if (!vid) {
            /* Port must always carry its native VLAN. */
            return true;
        }
        /* Fall through. */
    case PORT_VLAN_TRUNK:
        if (!xbundle_includes_vlan(in_xbundle, vid)) {
            if (warn) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "dropping VLAN %"PRIu16" packet "
                             "received on port %s not configured for trunking "
                             "VLAN %"PRIu16, vid, in_xbundle->name, vid);
            }
            return false;
        }
        return true;

    default:
        OVS_NOT_REACHED();
    }
}

/* Given 'vlan', the VLAN that a packet belongs to, and
 * 'out_xbundle', a bundle on which the packet is to be output, returns the VID
 * that should be included in the 802.1Q header.  (If the return value is 0,
 * then the 802.1Q header should only be included in the packet if there is a
 * nonzero PCP.)
 *
 * Both 'vlan' and the return value are in the range 0...4095. */
static uint16_t
output_vlan_to_vid(const struct xbundle *out_xbundle, uint16_t vlan)
{
    switch (out_xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return 0;

    case PORT_VLAN_TRUNK:
    case PORT_VLAN_NATIVE_TAGGED:
        return vlan;

    case PORT_VLAN_NATIVE_UNTAGGED:
        return vlan == out_xbundle->vlan ? 0 : vlan;

    default:
        OVS_NOT_REACHED();
    }
}
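
/* Illustration with assumed example values, mirroring the input mapping
 * above: a native-untagged port with vlan = 10 emits VLAN 10 traffic
 * untagged (VID 0) and VLAN 20 traffic tagged with VID 20; an access port
 * always emits untagged; trunk and native-tagged ports use the VLAN number
 * itself as the VID. */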

static void
output_normal(struct xlate_ctx *ctx, const struct xbundle *out_xbundle,
              uint16_t vlan)
{
    ovs_be16 *flow_tci = &ctx->xin->flow.vlan_tci;
    uint16_t vid;
    ovs_be16 tci, old_tci;
    struct xport *xport;

    vid = output_vlan_to_vid(out_xbundle, vlan);
    if (list_is_empty(&out_xbundle->xports)) {
        /* Partially configured bundle with no slaves.  Drop the packet. */
        return;
    } else if (!out_xbundle->bond) {
        ctx->use_recirc = false;
        xport = CONTAINER_OF(list_front(&out_xbundle->xports), struct xport,
                             bundle_node);
    } else {
        struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
        struct flow_wildcards *wc = &ctx->xout->wc;
        struct xlate_recirc *xr = &ctx->recirc;
        struct ofport_dpif *ofport;

        if (ctx->xbridge->enable_recirc) {
            ctx->use_recirc = bond_may_recirc(
                out_xbundle->bond, &xr->recirc_id, &xr->hash_basis);

            if (ctx->use_recirc) {
                /* Only TCP mode uses recirculation. */
                xr->hash_alg = OVS_HASH_ALG_L4;
                bond_update_post_recirc_rules(out_xbundle->bond, false);

                /* Recirculation does not require unmasking hash fields. */
                wc = NULL;
            }
        }

        ofport = bond_choose_output_slave(out_xbundle->bond,
                                          &ctx->xin->flow, wc, vid);
        xport = xport_lookup(xcfg, ofport);

        if (!xport) {
            /* No slaves enabled, so drop packet. */
            return;
        }

        /* If ctx->use_recirc is set, the main thread will handle stats
         * accounting for this bond. */
        if (!ctx->use_recirc) {
            if (ctx->xin->resubmit_stats) {
                bond_account(out_xbundle->bond, &ctx->xin->flow, vid,
                             ctx->xin->resubmit_stats->n_bytes);
            }
            if (ctx->xin->xcache) {
                struct xc_entry *entry;
                struct flow *flow;

                flow = &ctx->xin->flow;
                entry = xlate_cache_add_entry(ctx->xin->xcache, XC_BOND);
                entry->u.bond.bond = bond_ref(out_xbundle->bond);
                entry->u.bond.flow = xmemdup(flow, sizeof *flow);
                entry->u.bond.vid = vid;
            }
        }
    }

    old_tci = *flow_tci;
    tci = htons(vid);
    if (tci || out_xbundle->use_priority_tags) {
        tci |= *flow_tci & htons(VLAN_PCP_MASK);
        if (tci) {
            tci |= htons(VLAN_CFI);
        }
    }
    *flow_tci = tci;

    compose_output_action(ctx, xport->ofp_port);
    *flow_tci = old_tci;
}
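
/* Illustration (assumed values): the TCI composed above is VID | PCP | CFI.
 * For instance, vid = 10 with an incoming priority of 5 yields
 * tci = htons(10 | (5 << 13) | VLAN_CFI), while vid = 0 without priority
 * tagging leaves tci == 0, i.e. no 802.1Q header is emitted at all. */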

/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
 * migration.  Older Citrix-patched Linux DomU used gratuitous ARP replies to
 * indicate this; newer upstream kernels use gratuitous ARP requests. */
static bool
is_gratuitous_arp(const struct flow *flow, struct flow_wildcards *wc)
{
    if (flow->dl_type != htons(ETH_TYPE_ARP)) {
        return false;
    }

    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
    if (!eth_addr_is_broadcast(flow->dl_dst)) {
        return false;
    }

    memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
    if (flow->nw_proto == ARP_OP_REPLY) {
        return true;
    } else if (flow->nw_proto == ARP_OP_REQUEST) {
        memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
        memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);

        return flow->nw_src == flow->nw_dst;
    } else {
        return false;
    }
}

/* Determines whether packets in 'flow' within 'xbridge' should be forwarded or
 * dropped.  Returns true if they may be forwarded, false if they should be
 * dropped.
 *
 * 'in_port' must be the xport that corresponds to flow->in_port.
 * 'in_port' must be part of a bundle (e.g. in_port->bundle must be nonnull).
 *
 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
 * returned by input_vid_to_vlan().  It must be a valid VLAN for 'in_port', as
 * checked by input_vid_is_valid().
 */
1641 static bool
1642 is_admissible(struct xlate_ctx *ctx, struct xport *in_port,
1643 uint16_t vlan)
1644 {
1645 struct xbundle *in_xbundle = in_port->xbundle;
1646 const struct xbridge *xbridge = ctx->xbridge;
1647 struct flow *flow = &ctx->xin->flow;
1648
1649 /* Drop frames for reserved multicast addresses
1650 * only if forward_bpdu option is absent. */
1651 if (!xbridge->forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
1652 xlate_report(ctx, "packet has reserved destination MAC, dropping");
1653 return false;
1654 }
1655
1656 if (in_xbundle->bond) {
1657 struct mac_entry *mac;
1658
1659 switch (bond_check_admissibility(in_xbundle->bond, in_port->ofport,
1660 flow->dl_dst)) {
1661 case BV_ACCEPT:
1662 break;
1663
1664 case BV_DROP:
1665 xlate_report(ctx, "bonding refused admissibility, dropping");
1666 return false;
1667
1668 case BV_DROP_IF_MOVED:
1669 ovs_rwlock_rdlock(&xbridge->ml->rwlock);
1670 mac = mac_learning_lookup(xbridge->ml, flow->dl_src, vlan);
1671 if (mac && mac->port.p != in_xbundle->ofbundle &&
1672 (!is_gratuitous_arp(flow, &ctx->xout->wc)
1673 || mac_entry_is_grat_arp_locked(mac))) {
1674 ovs_rwlock_unlock(&xbridge->ml->rwlock);
1675 xlate_report(ctx, "SLB bond thinks this packet looped back, "
1676 "dropping");
1677 return false;
1678 }
1679 ovs_rwlock_unlock(&xbridge->ml->rwlock);
1680 break;
1681 }
1682 }
1683
1684 return true;
1685 }
1686
1687 /* Checks whether a MAC learning update is necessary for MAC learning table
1688 * 'ml' given that a packet matching 'flow' was received on 'in_xbundle' in
1689 * 'vlan'.
1690 *
1691 * Most packets processed through the MAC learning table do not actually
1692 * change it in any way. This function requires only a read lock on the MAC
1693 * learning table, so it is much cheaper in this common case.
1694 *
1695 * Keep the code here synchronized with that in update_learning_table__()
1696 * below. */
1697 static bool
1698 is_mac_learning_update_needed(const struct mac_learning *ml,
1699 const struct flow *flow,
1700 struct flow_wildcards *wc,
1701 int vlan, struct xbundle *in_xbundle)
1702 OVS_REQ_RDLOCK(ml->rwlock)
1703 {
1704 struct mac_entry *mac;
1705
1706 if (!mac_learning_may_learn(ml, flow->dl_src, vlan)) {
1707 return false;
1708 }
1709
1710 mac = mac_learning_lookup(ml, flow->dl_src, vlan);
1711 if (!mac || mac_entry_age(ml, mac)) {
1712 return true;
1713 }
1714
1715 if (is_gratuitous_arp(flow, wc)) {
1716 /* We don't want to learn from gratuitous ARP packets that are
1717 * reflected back over bond slaves so we lock the learning table. */
1718 if (!in_xbundle->bond) {
1719 return true;
1720 } else if (mac_entry_is_grat_arp_locked(mac)) {
1721 return false;
1722 }
1723 }
1724
1725 return mac->port.p != in_xbundle->ofbundle;
1726 }
1727
1728
1729 /* Updates MAC learning table 'ml' given that a packet matching 'flow' was
1730 * received on 'in_xbundle' in 'vlan'.
1731 *
1732 * This code repeats all the checks in is_mac_learning_update_needed() because
1733 * the lock was released between there and here and thus the MAC learning state
1734 * could have changed.
1735 *
1736 * Keep the code here synchronized with that in is_mac_learning_update_needed()
1737 * above. */
1738 static void
1739 update_learning_table__(const struct xbridge *xbridge,
1740 const struct flow *flow, struct flow_wildcards *wc,
1741 int vlan, struct xbundle *in_xbundle)
1742 OVS_REQ_WRLOCK(xbridge->ml->rwlock)
1743 {
1744 struct mac_entry *mac;
1745
1746 if (!mac_learning_may_learn(xbridge->ml, flow->dl_src, vlan)) {
1747 return;
1748 }
1749
1750 mac = mac_learning_insert(xbridge->ml, flow->dl_src, vlan);
1751 if (is_gratuitous_arp(flow, wc)) {
1752 /* We don't want to learn from gratuitous ARP packets that are
1753 * reflected back over bond slaves so we lock the learning table. */
1754 if (!in_xbundle->bond) {
1755 mac_entry_set_grat_arp_lock(mac);
1756 } else if (mac_entry_is_grat_arp_locked(mac)) {
1757 return;
1758 }
1759 }
1760
1761 if (mac->port.p != in_xbundle->ofbundle) {
1762 /* The log messages here could actually be useful in debugging,
1763 * so keep the rate limit relatively high. */
1764 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
1765
1766 VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
1767 "on port %s in VLAN %d",
1768 xbridge->name, ETH_ADDR_ARGS(flow->dl_src),
1769 in_xbundle->name, vlan);
1770
1771 mac->port.p = in_xbundle->ofbundle;
1772 mac_learning_changed(xbridge->ml);
1773 }
1774 }
1775
1776 static void
1777 update_learning_table(const struct xbridge *xbridge,
1778 const struct flow *flow, struct flow_wildcards *wc,
1779 int vlan, struct xbundle *in_xbundle)
1780 {
1781 bool need_update;
1782
1783 /* Don't learn the OFPP_NONE port. */
1784 if (in_xbundle == &ofpp_none_bundle) {
1785 return;
1786 }
1787
1788 /* First try the common case: no change to MAC learning table. */
1789 ovs_rwlock_rdlock(&xbridge->ml->rwlock);
1790 need_update = is_mac_learning_update_needed(xbridge->ml, flow, wc, vlan,
1791 in_xbundle);
1792 ovs_rwlock_unlock(&xbridge->ml->rwlock);
1793
1794 if (need_update) {
1795 /* Slow path: MAC learning table might need an update. */
1796 ovs_rwlock_wrlock(&xbridge->ml->rwlock);
1797 update_learning_table__(xbridge, flow, wc, vlan, in_xbundle);
1798 ovs_rwlock_unlock(&xbridge->ml->rwlock);
1799 }
1800 }
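/* [Editor's note] The three functions above implement an optimistic
 * read-then-write locking pattern: check under a cheap shared lock, and only
 * if an update looks necessary retake the lock exclusively, repeating every
 * check because the state may have changed in between.  Below is a minimal
 * standalone sketch of that pattern using plain POSIX rwlocks; all names in
 * it are hypothetical stand-ins, not OVS APIs. */
#if 0
#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-ins for is_mac_learning_update_needed() / update_learning_table__(). */
static bool update_needed(void) { return true; }  /* Read-only check. */
static void do_update(void) { }                   /* Re-checks, then mutates. */

static void
update(void)
{
    bool need_update;

    /* Fast path: most calls only need the shared read lock. */
    pthread_rwlock_rdlock(&table_lock);
    need_update = update_needed();
    pthread_rwlock_unlock(&table_lock);

    if (need_update) {
        /* Slow path: state may have changed between unlock and wrlock,
         * so do_update() must repeat all of update_needed()'s checks. */
        pthread_rwlock_wrlock(&table_lock);
        do_update();
        pthread_rwlock_unlock(&table_lock);
    }
}
#endif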
1801
1802 static void
1803 xlate_normal_flood(struct xlate_ctx *ctx, struct xbundle *in_xbundle,
1804 uint16_t vlan)
1805 {
1806 struct xbundle *xbundle;
1807
1808 LIST_FOR_EACH (xbundle, list_node, &ctx->xbridge->xbundles) {
1809 if (xbundle != in_xbundle
1810 && xbundle_includes_vlan(xbundle, vlan)
1811 && xbundle->floodable
1812 && !xbundle_mirror_out(ctx->xbridge, xbundle)) {
1813 output_normal(ctx, xbundle, vlan);
1814 }
1815 }
1816 ctx->xout->nf_output_iface = NF_OUT_FLOOD;
1817 }
1818
1819 static void
1820 xlate_normal(struct xlate_ctx *ctx)
1821 {
1822 struct flow_wildcards *wc = &ctx->xout->wc;
1823 struct flow *flow = &ctx->xin->flow;
1824 struct xbundle *in_xbundle;
1825 struct xport *in_port;
1826 struct mac_entry *mac;
1827 void *mac_port;
1828 uint16_t vlan;
1829 uint16_t vid;
1830
1831 ctx->xout->has_normal = true;
1832
1833 memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
1834 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
1835 wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
1836
1837 in_xbundle = lookup_input_bundle(ctx->xbridge, flow->in_port.ofp_port,
1838 ctx->xin->packet != NULL, &in_port);
1839 if (!in_xbundle) {
1840 xlate_report(ctx, "no input bundle, dropping");
1841 return;
1842 }
1843
1844 /* Drop malformed frames. */
1845 if (flow->dl_type == htons(ETH_TYPE_VLAN) &&
1846 !(flow->vlan_tci & htons(VLAN_CFI))) {
1847 if (ctx->xin->packet != NULL) {
1848 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
1849 VLOG_WARN_RL(&rl, "bridge %s: dropping packet with partial "
1850 "VLAN tag received on port %s",
1851 ctx->xbridge->name, in_xbundle->name);
1852 }
1853 xlate_report(ctx, "partial VLAN tag, dropping");
1854 return;
1855 }
1856
1857 /* Drop frames on bundles reserved for mirroring. */
1858 if (xbundle_mirror_out(ctx->xbridge, in_xbundle)) {
1859 if (ctx->xin->packet != NULL) {
1860 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
1861 VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
1862 "%s, which is reserved exclusively for mirroring",
1863 ctx->xbridge->name, in_xbundle->name);
1864 }
1865 xlate_report(ctx, "input port is mirror output port, dropping");
1866 return;
1867 }
1868
1869 /* Check VLAN. */
1870 vid = vlan_tci_to_vid(flow->vlan_tci);
1871 if (!input_vid_is_valid(vid, in_xbundle, ctx->xin->packet != NULL)) {
1872 xlate_report(ctx, "disallowed VLAN VID for this input port, dropping");
1873 return;
1874 }
1875 vlan = input_vid_to_vlan(in_xbundle, vid);
1876
1877 /* Check other admissibility requirements. */
1878 if (in_port && !is_admissible(ctx, in_port, vlan)) {
1879 return;
1880 }
1881
1882 /* Learn source MAC. */
1883 if (ctx->xin->may_learn) {
1884 update_learning_table(ctx->xbridge, flow, wc, vlan, in_xbundle);
1885 }
1886 if (ctx->xin->xcache) {
1887 struct xc_entry *entry;
1888
1889 /* Save enough info to update mac learning table later. */
1890 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NORMAL);
1891 entry->u.normal.ofproto = ctx->xbridge->ofproto;
1892 entry->u.normal.flow = xmemdup(flow, sizeof *flow);
1893 entry->u.normal.vlan = vlan;
1894 }
1895
1896 /* Determine output bundle. */
1897 ovs_rwlock_rdlock(&ctx->xbridge->ml->rwlock);
1898 mac = mac_learning_lookup(ctx->xbridge->ml, flow->dl_dst, vlan);
1899 mac_port = mac ? mac->port.p : NULL;
1900 ovs_rwlock_unlock(&ctx->xbridge->ml->rwlock);
1901
1902 if (mac_port) {
1903 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
1904 struct xbundle *mac_xbundle = xbundle_lookup(xcfg, mac_port);
1905 if (mac_xbundle && mac_xbundle != in_xbundle) {
1906 xlate_report(ctx, "forwarding to learned port");
1907 output_normal(ctx, mac_xbundle, vlan);
1908 } else if (!mac_xbundle) {
1909 xlate_report(ctx, "learned port is unknown, dropping");
1910 } else {
1911 xlate_report(ctx, "learned port is input port, dropping");
1912 }
1913 } else {
1914 xlate_report(ctx, "no learned MAC for destination, flooding");
1915 xlate_normal_flood(ctx, in_xbundle, vlan);
1916 }
1917 }
1918
1919 /* Compose SAMPLE action for sFlow or IPFIX. The given probability is
1920 * the number of packets out of UINT32_MAX to sample. The given
1921 * cookie is passed back in the callback for each sampled packet.
1922 */
1923 static size_t
1924 compose_sample_action(const struct xbridge *xbridge,
1925 struct ofpbuf *odp_actions,
1926 const struct flow *flow,
1927 const uint32_t probability,
1928 const union user_action_cookie *cookie,
1929 const size_t cookie_size)
1930 {
1931 size_t sample_offset, actions_offset;
1932 odp_port_t odp_port;
1933 int cookie_offset;
1934 uint32_t pid;
1935
1936 sample_offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SAMPLE);
1937
1938 nl_msg_put_u32(odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);
1939
1940 actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);
1941
1942 odp_port = ofp_port_to_odp_port(xbridge, flow->in_port.ofp_port);
1943 pid = dpif_port_get_pid(xbridge->dpif, odp_port,
1944 flow_hash_5tuple(flow, 0));
1945 cookie_offset = odp_put_userspace_action(pid, cookie, cookie_size,
1946 odp_actions);
1947
1948 nl_msg_end_nested(odp_actions, actions_offset);
1949 nl_msg_end_nested(odp_actions, sample_offset);
1950 return cookie_offset;
1951 }
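/* [Editor's note] compose_sample_action() above relies on the
 * start-nested/end-nested idiom: a nested attribute's total length is not
 * known until its contents have been serialized, so the header is written
 * first and its length field backpatched afterwards via the offset returned
 * by the "start" call.  A minimal standalone sketch of that idiom with a
 * flat byte buffer and a simplified 4-byte TLV header (real netlink headers
 * and alignment rules differ; all names here are hypothetical): */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct buf { uint8_t data[512]; size_t len; };

static size_t
start_nested(struct buf *b, uint16_t type)
{
    size_t offset = b->len;
    uint16_t hdr[2] = { 0, type };     /* Length 0 for now; patched later. */
    memcpy(b->data + b->len, hdr, sizeof hdr);
    b->len += sizeof hdr;
    return offset;                     /* Pass this back to end_nested(). */
}

static void
end_nested(struct buf *b, size_t offset)
{
    uint16_t total = (uint16_t) (b->len - offset);
    memcpy(b->data + offset, &total, sizeof total);   /* Backpatch length. */
}
#endif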
1952
1953 static void
1954 compose_sflow_cookie(const struct xbridge *xbridge, ovs_be16 vlan_tci,
1955 odp_port_t odp_port, unsigned int n_outputs,
1956 union user_action_cookie *cookie)
1957 {
1958 int ifindex;
1959
1960 cookie->type = USER_ACTION_COOKIE_SFLOW;
1961 cookie->sflow.vlan_tci = vlan_tci;
1962
1963 /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
1964 * port information") for the interpretation of cookie->output. */
1965 switch (n_outputs) {
1966 case 0:
1967 /* 0x40000000 | 256 means "packet dropped for unknown reason". */
1968 cookie->sflow.output = 0x40000000 | 256;
1969 break;
1970
1971 case 1:
1972 ifindex = dpif_sflow_odp_port_to_ifindex(xbridge->sflow, odp_port);
1973 if (ifindex) {
1974 cookie->sflow.output = ifindex;
1975 break;
1976 }
1977 /* Fall through. */
1978 default:
1979 /* 0x80000000 means "multiple output ports". */
1980 cookie->sflow.output = 0x80000000 | n_outputs;
1981 break;
1982 }
1983 }
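/* [Editor's note] The encoding chosen by compose_sflow_cookie() above follows
 * the sFlow v5 "Input/output port information" text it cites: format
 * 0x40000000 with value 256 means "dropped for unknown reason", a bare
 * ifindex means one known output port, and format 0x80000000 carries the
 * number of output ports.  A standalone restatement of that mapping, where
 * 'ifindex_for' is a hypothetical stand-in for
 * dpif_sflow_odp_port_to_ifindex(): */
#if 0
#include <stdint.h>

static uint32_t
ifindex_for(uint32_t port)
{
    (void) port;
    return 0;                             /* Stand-in: "unknown ifindex". */
}

static uint32_t
sflow_output_encoding(unsigned int n_outputs, uint32_t port)
{
    if (n_outputs == 0) {
        return 0x40000000 | 256;          /* Dropped for unknown reason. */
    } else if (n_outputs == 1) {
        uint32_t ifindex = ifindex_for(port);
        if (ifindex) {
            return ifindex;               /* Single known output port. */
        }
    }
    return 0x80000000 | n_outputs;        /* Multiple/unresolved outputs. */
}
#endif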
1984
1985 /* Compose SAMPLE action for sFlow bridge sampling. */
1986 static size_t
1987 compose_sflow_action(const struct xbridge *xbridge,
1988 struct ofpbuf *odp_actions,
1989 const struct flow *flow,
1990 odp_port_t odp_port)
1991 {
1992 uint32_t probability;
1993 union user_action_cookie cookie;
1994
1995 if (!xbridge->sflow || flow->in_port.ofp_port == OFPP_NONE) {
1996 return 0;
1997 }
1998
1999 probability = dpif_sflow_get_probability(xbridge->sflow);
2000 compose_sflow_cookie(xbridge, htons(0), odp_port,
2001 odp_port == ODPP_NONE ? 0 : 1, &cookie);
2002
2003 return compose_sample_action(xbridge, odp_actions, flow, probability,
2004 &cookie, sizeof cookie.sflow);
2005 }
2006
2007 static void
2008 compose_flow_sample_cookie(uint16_t probability, uint32_t collector_set_id,
2009 uint32_t obs_domain_id, uint32_t obs_point_id,
2010 union user_action_cookie *cookie)
2011 {
2012 cookie->type = USER_ACTION_COOKIE_FLOW_SAMPLE;
2013 cookie->flow_sample.probability = probability;
2014 cookie->flow_sample.collector_set_id = collector_set_id;
2015 cookie->flow_sample.obs_domain_id = obs_domain_id;
2016 cookie->flow_sample.obs_point_id = obs_point_id;
2017 }
2018
2019 static void
2020 compose_ipfix_cookie(union user_action_cookie *cookie)
2021 {
2022 cookie->type = USER_ACTION_COOKIE_IPFIX;
2023 }
2024
2025 /* Compose SAMPLE action for IPFIX bridge sampling. */
2026 static void
2027 compose_ipfix_action(const struct xbridge *xbridge,
2028 struct ofpbuf *odp_actions,
2029 const struct flow *flow)
2030 {
2031 uint32_t probability;
2032 union user_action_cookie cookie;
2033
2034 if (!xbridge->ipfix || flow->in_port.ofp_port == OFPP_NONE) {
2035 return;
2036 }
2037
2038 probability = dpif_ipfix_get_bridge_exporter_probability(xbridge->ipfix);
2039 compose_ipfix_cookie(&cookie);
2040
2041 compose_sample_action(xbridge, odp_actions, flow, probability,
2042 &cookie, sizeof cookie.ipfix);
2043 }
2044
2045 /* The SAMPLE action for sFlow must be the first action in any given list
2046 * of actions. We do not yet have all the information needed to build it,
2047 * so we build it as completely as possible now; fix_sflow_action() patches it up. */
2048 static void
2049 add_sflow_action(struct xlate_ctx *ctx)
2050 {
2051 ctx->user_cookie_offset = compose_sflow_action(ctx->xbridge,
2052 &ctx->xout->odp_actions,
2053 &ctx->xin->flow, ODPP_NONE);
2054 ctx->sflow_odp_port = 0;
2055 ctx->sflow_n_outputs = 0;
2056 }
2057
2058 /* The SAMPLE action for IPFIX must be the first or second action in any
2059 * given list of actions, after the SAMPLE action for sFlow if one is present. */
2060 static void
2061 add_ipfix_action(struct xlate_ctx *ctx)
2062 {
2063 compose_ipfix_action(ctx->xbridge, &ctx->xout->odp_actions,
2064 &ctx->xin->flow);
2065 }
2066
2067 /* Fixes up the SAMPLE action according to data collected while composing
2068 * ODP actions. We rewrite the SAMPLE action's OVS_SAMPLE_ATTR_ACTIONS
2069 * attribute, i.e. the nested USERSPACE action's user-cookie, which sFlow requires. */
2070 static void
2071 fix_sflow_action(struct xlate_ctx *ctx)
2072 {
2073 const struct flow *base = &ctx->base_flow;
2074 union user_action_cookie *cookie;
2075
2076 if (!ctx->user_cookie_offset) {
2077 return;
2078 }
2079
2080 cookie = ofpbuf_at(&ctx->xout->odp_actions, ctx->user_cookie_offset,
2081 sizeof cookie->sflow);
2082 ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
2083
2084 compose_sflow_cookie(ctx->xbridge, base->vlan_tci,
2085 ctx->sflow_odp_port, ctx->sflow_n_outputs, cookie);
2086 }
2087
2088 static enum slow_path_reason
2089 process_special(struct xlate_ctx *ctx, const struct flow *flow,
2090 const struct xport *xport, const struct ofpbuf *packet)
2091 {
2092 struct flow_wildcards *wc = &ctx->xout->wc;
2093 const struct xbridge *xbridge = ctx->xbridge;
2094
2095 if (!xport) {
2096 return 0;
2097 } else if (xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc)) {
2098 if (packet) {
2099 cfm_process_heartbeat(xport->cfm, packet);
2100 }
2101 return SLOW_CFM;
2102 } else if (xport->bfd && bfd_should_process_flow(xport->bfd, flow, wc)) {
2103 if (packet) {
2104 bfd_process_packet(xport->bfd, flow, packet);
2105 /* If a POLL was received, immediately send FINAL back. */
2106 if (bfd_should_send_packet(xport->bfd)) {
2107 ofproto_dpif_monitor_port_send_soon(xport->ofport);
2108 }
2109 }
2110 return SLOW_BFD;
2111 } else if (xport->xbundle && xport->xbundle->lacp
2112 && flow->dl_type == htons(ETH_TYPE_LACP)) {
2113 if (packet) {
2114 lacp_process_packet(xport->xbundle->lacp, xport->ofport, packet);
2115 }
2116 return SLOW_LACP;
2117 } else if (xbridge->stp && stp_should_process_flow(flow, wc)) {
2118 if (packet) {
2119 stp_process_packet(xport, packet);
2120 }
2121 return SLOW_STP;
2122 } else {
2123 return 0;
2124 }
2125 }
2126
2127 static void
2128 compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
2129 bool check_stp)
2130 {
2131 const struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
2132 struct flow_wildcards *wc = &ctx->xout->wc;
2133 struct flow *flow = &ctx->xin->flow;
2134 ovs_be16 flow_vlan_tci;
2135 uint32_t flow_pkt_mark;
2136 uint8_t flow_nw_tos;
2137 odp_port_t out_port, odp_port;
2138 uint8_t dscp;
2139
2140 /* If 'struct flow' gets additional metadata, we'll need to zero it out
2141 * before traversing a patch port. */
2142 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 26);
2143
2144 if (!xport) {
2145 xlate_report(ctx, "Nonexistent output port");
2146 return;
2147 } else if (xport->config & OFPUTIL_PC_NO_FWD) {
2148 xlate_report(ctx, "OFPPC_NO_FWD set, skipping output");
2149 return;
2150 } else if (check_stp) {
2151 if (is_stp(&ctx->base_flow)) {
2152 if (!xport_stp_listen_state(xport)) {
2153 xlate_report(ctx, "STP not in listening state, "
2154 "skipping bpdu output");
2155 return;
2156 }
2157 } else if (!xport_stp_forward_state(xport)) {
2158 xlate_report(ctx, "STP not in forwarding state, "
2159 "skipping output");
2160 return;
2161 }
2162 }
2163
2164 if (mbridge_has_mirrors(ctx->xbridge->mbridge) && xport->xbundle) {
2165 ctx->xout->mirrors |= xbundle_mirror_dst(xport->xbundle->xbridge,
2166 xport->xbundle);
2167 }
2168
2169 if (xport->peer) {
2170 const struct xport *peer = xport->peer;
2171 struct flow old_flow = ctx->xin->flow;
2172 enum slow_path_reason special;
2173
2174 ctx->xbridge = peer->xbridge;
2175 flow->in_port.ofp_port = peer->ofp_port;
2176 flow->metadata = htonll(0);
2177 memset(&flow->tunnel, 0, sizeof flow->tunnel);
2178 memset(flow->regs, 0, sizeof flow->regs);
2179
2180 special = process_special(ctx, &ctx->xin->flow, peer,
2181 ctx->xin->packet);
2182 if (special) {
2183 ctx->xout->slow |= special;
2184 } else if (may_receive(peer, ctx)) {
2185 if (xport_stp_forward_state(peer)) {
2186 xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true);
2187 } else {
2188 /* Forwarding is disabled by STP. Let OFPP_NORMAL and the
2189 * learning action look at the packet, then drop it. */
2190 struct flow old_base_flow = ctx->base_flow;
2191 size_t old_size = ofpbuf_size(&ctx->xout->odp_actions);
2192 mirror_mask_t old_mirrors = ctx->xout->mirrors;
2193 xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true);
2194 ctx->xout->mirrors = old_mirrors;
2195 ctx->base_flow = old_base_flow;
2196 ofpbuf_set_size(&ctx->xout->odp_actions, old_size);
2197 }
2198 }
2199
2200 ctx->xin->flow = old_flow;
2201 ctx->xbridge = xport->xbridge;
2202
2203 if (ctx->xin->resubmit_stats) {
2204 netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
2205 netdev_vport_inc_rx(peer->netdev, ctx->xin->resubmit_stats);
2206 if (peer->bfd) {
2207 bfd_account_rx(peer->bfd, ctx->xin->resubmit_stats);
2208 }
2209 }
2210 if (ctx->xin->xcache) {
2211 struct xc_entry *entry;
2212
2213 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
2214 entry->u.dev.tx = netdev_ref(xport->netdev);
2215 entry->u.dev.rx = netdev_ref(peer->netdev);
2216 entry->u.dev.bfd = bfd_ref(peer->bfd);
2217 }
2218 return;
2219 }
2220
2221 flow_vlan_tci = flow->vlan_tci;
2222 flow_pkt_mark = flow->pkt_mark;
2223 flow_nw_tos = flow->nw_tos;
2224
2225 if (dscp_from_skb_priority(xport, flow->skb_priority, &dscp)) {
2226 wc->masks.nw_tos |= IP_DSCP_MASK;
2227 flow->nw_tos &= ~IP_DSCP_MASK;
2228 flow->nw_tos |= dscp;
2229 }
2230
2231 if (xport->is_tunnel) {
2232 /* Save tunnel metadata so that changes made due to
2233 * the Logical (tunnel) Port are not visible for any further
2234 * matches, while explicit set actions on tunnel metadata are.
2235 */
2236 struct flow_tnl flow_tnl = flow->tunnel;
2237 odp_port = tnl_port_send(xport->ofport, flow, &ctx->xout->wc);
2238 if (odp_port == ODPP_NONE) {
2239 xlate_report(ctx, "Tunneling decided against output");
2240 goto out; /* restore flow_nw_tos */
2241 }
2242 if (flow->tunnel.ip_dst == ctx->orig_tunnel_ip_dst) {
2243 xlate_report(ctx, "Not tunneling to our own address");
2244 goto out; /* restore flow_nw_tos */
2245 }
2246 if (ctx->xin->resubmit_stats) {
2247 netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
2248 }
2249 if (ctx->xin->xcache) {
2250 struct xc_entry *entry;
2251
2252 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
2253 entry->u.dev.tx = netdev_ref(xport->netdev);
2254 }
2255 out_port = odp_port;
2256 commit_odp_tunnel_action(flow, &ctx->base_flow,
2257 &ctx->xout->odp_actions);
2258 flow->tunnel = flow_tnl; /* Restore tunnel metadata */
2259 } else {
2260 odp_port = xport->odp_port;
2261 out_port = odp_port;
2262 if (ofproto_has_vlan_splinters(ctx->xbridge->ofproto)) {
2263 ofp_port_t vlandev_port;
2264
2265 wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
2266 vlandev_port = vsp_realdev_to_vlandev(ctx->xbridge->ofproto,
2267 ofp_port, flow->vlan_tci);
2268 if (vlandev_port != ofp_port) {
2269 out_port = ofp_port_to_odp_port(ctx->xbridge, vlandev_port);
2270 flow->vlan_tci = htons(0);
2271 }
2272 }
2273 }
2274
2275 if (out_port != ODPP_NONE) {
2276 ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
2277 &ctx->xout->odp_actions,
2278 &ctx->xout->wc);
2279
2280 if (ctx->use_recirc) {
2281 struct ovs_action_hash *act_hash;
2282 struct xlate_recirc *xr = &ctx->recirc;
2283
2284 /* Hash action. */
2285 act_hash = nl_msg_put_unspec_uninit(&ctx->xout->odp_actions,
2286 OVS_ACTION_ATTR_HASH,
2287 sizeof *act_hash);
2288 act_hash->hash_alg = xr->hash_alg;
2289 act_hash->hash_basis = xr->hash_basis;
2290
2291 /* Recirc action. */
2292 nl_msg_put_u32(&ctx->xout->odp_actions, OVS_ACTION_ATTR_RECIRC,
2293 xr->recirc_id);
2294 } else {
2295 nl_msg_put_odp_port(&ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT,
2296 out_port);
2297 }
2298
2299 ctx->sflow_odp_port = odp_port;
2300 ctx->sflow_n_outputs++;
2301 ctx->xout->nf_output_iface = ofp_port;
2302 }
2303
2304 out:
2305 /* Restore flow */
2306 flow->vlan_tci = flow_vlan_tci;
2307 flow->pkt_mark = flow_pkt_mark;
2308 flow->nw_tos = flow_nw_tos;
2309 }
2310
2311 static void
2312 compose_output_action(struct xlate_ctx *ctx, ofp_port_t ofp_port)
2313 {
2314 compose_output_action__(ctx, ofp_port, true);
2315 }
2316
2317 static void
2318 xlate_recursively(struct xlate_ctx *ctx, struct rule_dpif *rule)
2319 {
2320 struct rule_dpif *old_rule = ctx->rule;
2321 const struct rule_actions *actions;
2322
2323 if (ctx->xin->resubmit_stats) {
2324 rule_dpif_credit_stats(rule, ctx->xin->resubmit_stats);
2325 }
2326
2327 ctx->resubmits++;
2328 ctx->recurse++;
2329 ctx->rule = rule;
2330 actions = rule_dpif_get_actions(rule);
2331 do_xlate_actions(actions->ofpacts, actions->ofpacts_len, ctx);
2332 ctx->rule = old_rule;
2333 ctx->recurse--;
2334 }
2335
2336 static bool
2337 xlate_resubmit_resource_check(struct xlate_ctx *ctx)
2338 {
2339 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
2340
2341 if (ctx->recurse >= MAX_RESUBMIT_RECURSION + MAX_INTERNAL_RESUBMITS) {
2342 VLOG_ERR_RL(&rl, "resubmit actions recursed over %d times",
2343 MAX_RESUBMIT_RECURSION);
2344 } else if (ctx->resubmits >= MAX_RESUBMITS + MAX_INTERNAL_RESUBMITS) {
2345 VLOG_ERR_RL(&rl, "over %d resubmit actions", MAX_RESUBMITS);
2346 } else if (ofpbuf_size(&ctx->xout->odp_actions) > UINT16_MAX) {
2347 VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of actions");
2348 } else if (ofpbuf_size(&ctx->stack) >= 65536) {
2349 VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of stack");
2350 } else {
2351 return true;
2352 }
2353
2354 return false;
2355 }
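/* [Editor's note] With MAX_RESUBMIT_RECURSION == 64, the MAX_RESUBMITS bound
 * checked above works out to 64 * 64 = 4096 total resubmits per translation,
 * on top of the 64 kB caps on the generated datapath actions and on the
 * NXAST_STACK_PUSH stack. */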
2356
2357 static void
2358 xlate_table_action(struct xlate_ctx *ctx, ofp_port_t in_port, uint8_t table_id,
2359 bool may_packet_in, bool honor_table_miss)
2360 {
2361 if (xlate_resubmit_resource_check(ctx)) {
2362 ofp_port_t old_in_port = ctx->xin->flow.in_port.ofp_port;
2363 bool skip_wildcards = ctx->xin->skip_wildcards;
2364 uint8_t old_table_id = ctx->table_id;
2365 struct rule_dpif *rule;
2366 enum rule_dpif_lookup_verdict verdict;
2367 enum ofputil_port_config config = 0;
2368
2369 ctx->table_id = table_id;
2370
2371 /* Look up a flow with 'in_port' as the input port. Then restore the
2372 * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will
2373 * have surprising behavior). */
2374 ctx->xin->flow.in_port.ofp_port = in_port;
2375 verdict = rule_dpif_lookup_from_table(ctx->xbridge->ofproto,
2376 &ctx->xin->flow,
2377 !skip_wildcards
2378 ? &ctx->xout->wc : NULL,
2379 honor_table_miss,
2380 &ctx->table_id, &rule,
2381 ctx->xin->xcache != NULL,
2382 ctx->xin->resubmit_stats);
2383 ctx->xin->flow.in_port.ofp_port = old_in_port;
2384
2385 if (ctx->xin->resubmit_hook) {
2386 ctx->xin->resubmit_hook(ctx->xin, rule, ctx->recurse);
2387 }
2388
2389 switch (verdict) {
2390 case RULE_DPIF_LOOKUP_VERDICT_MATCH:
2391 goto match;
2392 case RULE_DPIF_LOOKUP_VERDICT_CONTROLLER:
2393 if (may_packet_in) {
2394 struct xport *xport;
2395
2396 xport = get_ofp_port(ctx->xbridge,
2397 ctx->xin->flow.in_port.ofp_port);
2398 config = xport ? xport->config : 0;
2399 break;
2400 }
2401 /* Fall through to drop */
2402 case RULE_DPIF_LOOKUP_VERDICT_DROP:
2403 config = OFPUTIL_PC_NO_PACKET_IN;
2404 break;
2405 case RULE_DPIF_LOOKUP_VERDICT_DEFAULT:
2406 if (!ofproto_dpif_wants_packet_in_on_miss(ctx->xbridge->ofproto)) {
2407 config = OFPUTIL_PC_NO_PACKET_IN;
2408 }
2409 break;
2410 default:
2411 OVS_NOT_REACHED();
2412 }
2413
2414 choose_miss_rule(config, ctx->xbridge->miss_rule,
2415 ctx->xbridge->no_packet_in_rule, &rule,
2416 ctx->xin->xcache != NULL);
2417
2418 match:
2419 if (rule) {
2420 /* Fill in the cache entry here instead of xlate_recursively
2421 * to make the reference counting more explicit. We take a
2422 * reference in the lookups above if we are going to cache the
2423 * rule. */
2424 if (ctx->xin->xcache) {
2425 struct xc_entry *entry;
2426
2427 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_RULE);
2428 entry->u.rule = rule;
2429 }
2430 xlate_recursively(ctx, rule);
2431 }
2432
2433 ctx->table_id = old_table_id;
2434 return;
2435 }
2436
2437 ctx->exit = true;
2438 }
2439
2440 static void
2441 xlate_group_stats(struct xlate_ctx *ctx, struct group_dpif *group,
2442 struct ofputil_bucket *bucket)
2443 {
2444 if (ctx->xin->resubmit_stats) {
2445 group_dpif_credit_stats(group, bucket, ctx->xin->resubmit_stats);
2446 }
2447 if (ctx->xin->xcache) {
2448 struct xc_entry *entry;
2449
2450 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_GROUP);
2451 entry->u.group.group = group_dpif_ref(group);
2452 entry->u.group.bucket = bucket;
2453 }
2454 }
2455
2456 static void
2457 xlate_group_bucket(struct xlate_ctx *ctx, struct ofputil_bucket *bucket)
2458 {
2459 uint64_t action_list_stub[1024 / 8];
2460 struct ofpbuf action_list, action_set;
2461
2462 ofpbuf_use_const(&action_set, bucket->ofpacts, bucket->ofpacts_len);
2463 ofpbuf_use_stub(&action_list, action_list_stub, sizeof action_list_stub);
2464
2465 ofpacts_execute_action_set(&action_list, &action_set);
2466 ctx->recurse++;
2467 do_xlate_actions(ofpbuf_data(&action_list), ofpbuf_size(&action_list), ctx);
2468 ctx->recurse--;
2469
2470 ofpbuf_uninit(&action_set);
2471 ofpbuf_uninit(&action_list);
2472 }
2473
2474 static void
2475 xlate_all_group(struct xlate_ctx *ctx, struct group_dpif *group)
2476 {
2477 struct ofputil_bucket *bucket;
2478 const struct list *buckets;
2479 struct flow old_flow = ctx->xin->flow;
2480
2481 group_dpif_get_buckets(group, &buckets);
2482
2483 LIST_FOR_EACH (bucket, list_node, buckets) {
2484 xlate_group_bucket(ctx, bucket);
2485 /* Roll back flow to previous state.
2486 * This is equivalent to cloning the packet for each bucket.
2487 *
2488 * As a side effect any subsequently applied actions will
2489 * also effectively be applied to a clone of the packet taken
2490 * just before applying the all or indirect group. */
2491 ctx->xin->flow = old_flow;
2492 }
2493 xlate_group_stats(ctx, group, NULL);
2494 }
2495
2496 static void
2497 xlate_ff_group(struct xlate_ctx *ctx, struct group_dpif *group)
2498 {
2499 struct ofputil_bucket *bucket;
2500
2501 bucket = group_first_live_bucket(ctx, group, 0);
2502 if (bucket) {
2503 xlate_group_bucket(ctx, bucket);
2504 xlate_group_stats(ctx, group, bucket);
2505 }
2506 }
2507
2508 static void
2509 xlate_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
2510 {
2511 struct flow_wildcards *wc = &ctx->xout->wc;
2512 struct ofputil_bucket *bucket;
2513 uint32_t basis;
2514
2515 basis = hash_mac(ctx->xin->flow.dl_dst, 0, 0);
2516 bucket = group_best_live_bucket(ctx, group, basis);
2517 if (bucket) {
2518 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
2519 xlate_group_bucket(ctx, bucket);
2520 xlate_group_stats(ctx, group, bucket);
2521 }
2522 }
2523
2524 static void
2525 xlate_group_action__(struct xlate_ctx *ctx, struct group_dpif *group)
2526 {
2527 ctx->in_group = true;
2528
2529 switch (group_dpif_get_type(group)) {
2530 case OFPGT11_ALL:
2531 case OFPGT11_INDIRECT:
2532 xlate_all_group(ctx, group);
2533 break;
2534 case OFPGT11_SELECT:
2535 xlate_select_group(ctx, group);
2536 break;
2537 case OFPGT11_FF:
2538 xlate_ff_group(ctx, group);
2539 break;
2540 default:
2541 OVS_NOT_REACHED();
2542 }
2543 group_dpif_unref(group);
2544
2545 ctx->in_group = false;
2546 }
2547
2548 static bool
2549 xlate_group_resource_check(struct xlate_ctx *ctx)
2550 {
2551 if (!xlate_resubmit_resource_check(ctx)) {
2552 return false;
2553 } else if (ctx->in_group) {
2554 /* Prevent nested translation of OpenFlow groups.
2555 *
2556 * OpenFlow allows this restriction. We enforce this restriction only
2557 * because, with the current architecture, we would otherwise have to
2558 * take a possibly recursive read lock on the ofgroup rwlock, which is
2559 * unsafe given that POSIX allows taking a read lock to block if there
2560 * is a thread blocked on taking the write lock. Other solutions
2561 * without this restriction are also possible, but seem unwarranted
2562 * given the current limited use of groups. */
2563 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
2564
2565 VLOG_ERR_RL(&rl, "cannot recursively translate OpenFlow group");
2566 return false;
2567 } else {
2568 return true;
2569 }
2570 }
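/* [Editor's note] The deadlock the comment above guards against: POSIX allows
 * a read-lock attempt to block behind a queued writer (to avoid writer
 * starvation), so re-taking a read lock recursively can self-deadlock:
 *
 *     thread A: rdlock(L)         granted
 *     thread B: wrlock(L)         queued, waiting for A
 *     thread A: rdlock(L) again   may block behind B  =>  deadlock
 *
 * Refusing nested group translation means the ofgroup rwlock is never
 * re-taken by the same thread. */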
2571
2572 static bool
2573 xlate_group_action(struct xlate_ctx *ctx, uint32_t group_id)
2574 {
2575 if (xlate_group_resource_check(ctx)) {
2576 struct group_dpif *group;
2577 bool got_group;
2578
2579 got_group = group_dpif_lookup(ctx->xbridge->ofproto, group_id, &group);
2580 if (got_group) {
2581 xlate_group_action__(ctx, group);
2582 } else {
2583 return true;
2584 }
2585 }
2586
2587 return false;
2588 }
2589
2590 static void
2591 xlate_ofpact_resubmit(struct xlate_ctx *ctx,
2592 const struct ofpact_resubmit *resubmit)
2593 {
2594 ofp_port_t in_port;
2595 uint8_t table_id;
2596 bool may_packet_in = false;
2597 bool honor_table_miss = false;
2598
2599 if (ctx->rule && rule_dpif_is_internal(ctx->rule)) {
2600 /* Still allow missed packets to be sent to the controller
2601 * if resubmitting from an internal table. */
2602 may_packet_in = true;
2603 honor_table_miss = true;
2604 }
2605
2606 in_port = resubmit->in_port;
2607 if (in_port == OFPP_IN_PORT) {
2608 in_port = ctx->xin->flow.in_port.ofp_port;
2609 }
2610
2611 table_id = resubmit->table_id;
2612 if (table_id == 255) {
2613 table_id = ctx->table_id;
2614 }
2615
2616 xlate_table_action(ctx, in_port, table_id, may_packet_in,
2617 honor_table_miss);
2618 }
2619
2620 static void
2621 flood_packets(struct xlate_ctx *ctx, bool all)
2622 {
2623 const struct xport *xport;
2624
2625 HMAP_FOR_EACH (xport, ofp_node, &ctx->xbridge->xports) {
2626 if (xport->ofp_port == ctx->xin->flow.in_port.ofp_port) {
2627 continue;
2628 }
2629
2630 if (all) {
2631 compose_output_action__(ctx, xport->ofp_port, false);
2632 } else if (!(xport->config & OFPUTIL_PC_NO_FLOOD)) {
2633 compose_output_action(ctx, xport->ofp_port);
2634 }
2635 }
2636
2637 ctx->xout->nf_output_iface = NF_OUT_FLOOD;
2638 }
2639
2640 static void
2641 execute_controller_action(struct xlate_ctx *ctx, int len,
2642 enum ofp_packet_in_reason reason,
2643 uint16_t controller_id)
2644 {
2645 struct ofproto_packet_in *pin;
2646 struct ofpbuf *packet;
2647 struct pkt_metadata md = PKT_METADATA_INITIALIZER(0);
2648
2649 ctx->xout->slow |= SLOW_CONTROLLER;
2650 if (!ctx->xin->packet) {
2651 return;
2652 }
2653
2654 packet = ofpbuf_clone(ctx->xin->packet);
2655
2656 ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
2657 &ctx->xout->odp_actions,
2658 &ctx->xout->wc);
2659
2660 odp_execute_actions(NULL, packet, false, &md,
2661 ofpbuf_data(&ctx->xout->odp_actions),
2662 ofpbuf_size(&ctx->xout->odp_actions), NULL);
2663
2664 pin = xmalloc(sizeof *pin);
2665 pin->up.packet_len = ofpbuf_size(packet);
2666 pin->up.packet = ofpbuf_steal_data(packet);
2667 pin->up.reason = reason;
2668 pin->up.table_id = ctx->table_id;
2669 pin->up.cookie = (ctx->rule
2670 ? rule_dpif_get_flow_cookie(ctx->rule)
2671 : OVS_BE64_MAX);
2672
2673 flow_get_metadata(&ctx->xin->flow, &pin->up.fmd);
2674
2675 pin->controller_id = controller_id;
2676 pin->send_len = len;
2677 /* If the rule is a table-miss rule then this is
2678 * a table miss handled by a table-miss rule.
2679 *
2680 * Otherwise, if the rule is internal and has a controller action
2681 * (the latter being implied by the rule being processed here),
2682 * then this is a table miss handled without a table-miss rule.
2683 *
2684 * Otherwise this is not a table miss. */
2685 pin->miss_type = OFPROTO_PACKET_IN_NO_MISS;
2686 if (ctx->rule) {
2687 if (rule_dpif_is_table_miss(ctx->rule)) {
2688 pin->miss_type = OFPROTO_PACKET_IN_MISS_FLOW;
2689 } else if (rule_dpif_is_internal(ctx->rule)) {
2690 pin->miss_type = OFPROTO_PACKET_IN_MISS_WITHOUT_FLOW;
2691 }
2692 }
2693 ofproto_dpif_send_packet_in(ctx->xbridge->ofproto, pin);
2694 ofpbuf_delete(packet);
2695 }
2696
2697 static void
2698 compose_mpls_push_action(struct xlate_ctx *ctx, struct ofpact_push_mpls *mpls)
2699 {
2700 struct flow_wildcards *wc = &ctx->xout->wc;
2701 struct flow *flow = &ctx->xin->flow;
2702 int n;
2703
2704 ovs_assert(eth_type_mpls(mpls->ethertype));
2705
2706 n = flow_count_mpls_labels(flow, wc);
2707 if (!n) {
2708 ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
2709 &ctx->xout->odp_actions,
2710 &ctx->xout->wc);
2711 } else if (n >= FLOW_MAX_MPLS_LABELS) {
2712 if (ctx->xin->packet != NULL) {
2713 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2714 VLOG_WARN_RL(&rl, "bridge %s: dropping packet on which an "
2715 "MPLS push action can't be performed as it would "
2716 "have more MPLS LSEs than the %d supported.",
2717 ctx->xbridge->name, FLOW_MAX_MPLS_LABELS);
2718 }
2719 ctx->exit = true;
2720 return;
2721 } else if (n >= ctx->xbridge->max_mpls_depth) {
2722 COVERAGE_INC(xlate_actions_mpls_overflow);
2723 ctx->xout->slow |= SLOW_ACTION;
2724 }
2725
2726 flow_push_mpls(flow, n, mpls->ethertype, wc);
2727 }
2728
2729 static void
2730 compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
2731 {
2732 struct flow_wildcards *wc = &ctx->xout->wc;
2733 struct flow *flow = &ctx->xin->flow;
2734 int n = flow_count_mpls_labels(flow, wc);
2735
2736 if (!flow_pop_mpls(flow, n, eth_type, wc) && n >= FLOW_MAX_MPLS_LABELS) {
2737 if (ctx->xin->packet != NULL) {
2738 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2739 VLOG_WARN_RL(&rl, "bridge %s: dropping packet on which an "
2740 "MPLS pop action can't be performed as it has "
2741 "more MPLS LSEs than the %d supported.",
2742 ctx->xbridge->name, FLOW_MAX_MPLS_LABELS);
2743 }
2744 ctx->exit = true;
2745 ofpbuf_clear(&ctx->xout->odp_actions);
2746 }
2747 }
2748
2749 static bool
2750 compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
2751 {
2752 struct flow *flow = &ctx->xin->flow;
2753
2754 if (!is_ip_any(flow)) {
2755 return false;
2756 }
2757
2758 ctx->xout->wc.masks.nw_ttl = 0xff;
2759 if (flow->nw_ttl > 1) {
2760 flow->nw_ttl--;
2761 return false;
2762 } else {
2763 size_t i;
2764
2765 for (i = 0; i < ids->n_controllers; i++) {
2766 execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
2767 ids->cnt_ids[i]);
2768 }
2769
2770 /* Stop processing for current table. */
2771 return true;
2772 }
2773 }
2774
2775 static void
2776 compose_set_mpls_label_action(struct xlate_ctx *ctx, ovs_be32 label)
2777 {
2778 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
2779 ctx->xout->wc.masks.mpls_lse[0] |= htonl(MPLS_LABEL_MASK);
2780 set_mpls_lse_label(&ctx->xin->flow.mpls_lse[0], label);
2781 }
2782 }
2783
2784 static void
2785 compose_set_mpls_tc_action(struct xlate_ctx *ctx, uint8_t tc)
2786 {
2787 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
2788 ctx->xout->wc.masks.mpls_lse[0] |= htonl(MPLS_TC_MASK);
2789 set_mpls_lse_tc(&ctx->xin->flow.mpls_lse[0], tc);
2790 }
2791 }
2792
2793 static void
2794 compose_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
2795 {
2796 if (eth_type_mpls(ctx->xin->flow.dl_type)) {
2797 ctx->xout->wc.masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
2798 set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse[0], ttl);
2799 }
2800 }
2801
2802 static bool
2803 compose_dec_mpls_ttl_action(struct xlate_ctx *ctx)
2804 {
2805 struct flow *flow = &ctx->xin->flow;
2806 uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse[0]);
2807 struct flow_wildcards *wc = &ctx->xout->wc;
2808
2809 memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
2810 if (eth_type_mpls(flow->dl_type)) {
2811 if (ttl > 1) {
2812 ttl--;
2813 set_mpls_lse_ttl(&flow->mpls_lse[0], ttl);
2814 return false;
2815 } else {
2816 execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0);
2817
2818 /* Stop processing for current table. */
2819 return true;
2820 }
2821 } else {
2822 return true;
2823 }
2824 }
2825
2826 static void
2827 xlate_output_action(struct xlate_ctx *ctx,
2828 ofp_port_t port, uint16_t max_len, bool may_packet_in)
2829 {
2830 ofp_port_t prev_nf_output_iface = ctx->xout->nf_output_iface;
2831
2832 ctx->xout->nf_output_iface = NF_OUT_DROP;
2833
2834 switch (port) {
2835 case OFPP_IN_PORT:
2836 compose_output_action(ctx, ctx->xin->flow.in_port.ofp_port);
2837 break;
2838 case OFPP_TABLE:
2839 xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
2840 0, may_packet_in, true);
2841 break;
2842 case OFPP_NORMAL:
2843 xlate_normal(ctx);
2844 break;
2845 case OFPP_FLOOD:
2846 flood_packets(ctx, false);
2847 break;
2848 case OFPP_ALL:
2849 flood_packets(ctx, true);
2850 break;
2851 case OFPP_CONTROLLER:
2852 execute_controller_action(ctx, max_len, OFPR_ACTION, 0);
2853 break;
2854 case OFPP_NONE:
2855 break;
2856 case OFPP_LOCAL:
2857 default:
2858 if (port != ctx->xin->flow.in_port.ofp_port) {
2859 compose_output_action(ctx, port);
2860 } else {
2861 xlate_report(ctx, "skipping output to input port");
2862 }
2863 break;
2864 }
2865
2866 if (prev_nf_output_iface == NF_OUT_FLOOD) {
2867 ctx->xout->nf_output_iface = NF_OUT_FLOOD;
2868 } else if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
2869 ctx->xout->nf_output_iface = prev_nf_output_iface;
2870 } else if (prev_nf_output_iface != NF_OUT_DROP &&
2871 ctx->xout->nf_output_iface != NF_OUT_FLOOD) {
2872 ctx->xout->nf_output_iface = NF_OUT_MULTI;
2873 }
2874 }
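/* [Editor's note] The NetFlow bookkeeping at the end of xlate_output_action()
 * above merges successive outputs into a single reported interface: FLOOD is
 * sticky, a DROP result keeps the previous value, and two distinct real
 * outputs collapse to "multi".  A standalone sketch of the same merge rule,
 * where the OUT_* constants are hypothetical stand-ins for NF_OUT_*: */
#if 0
#include <stdint.h>

#define OUT_DROP  0xfffffffe
#define OUT_MULTI 0xfffffffd
#define OUT_FLOOD 0xfffffffc

static uint32_t
merge_nf_iface(uint32_t prev, uint32_t cur)
{
    if (prev == OUT_FLOOD) {
        return OUT_FLOOD;           /* Flooding dominates everything else. */
    } else if (cur == OUT_DROP) {
        return prev;                /* This action produced no new output. */
    } else if (prev != OUT_DROP && cur != OUT_FLOOD) {
        return OUT_MULTI;           /* Two distinct outputs => "multi". */
    } else {
        return cur;
    }
}
#endif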
2875
2876 static void
2877 xlate_output_reg_action(struct xlate_ctx *ctx,
2878 const struct ofpact_output_reg *or)
2879 {
2880 uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow);
2881 if (port <= UINT16_MAX) {
2882 union mf_subvalue value;
2883
2884 memset(&value, 0xff, sizeof value);
2885 mf_write_subfield_flow(&or->src, &value, &ctx->xout->wc.masks);
2886 xlate_output_action(ctx, u16_to_ofp(port),
2887 or->max_len, false);
2888 }
2889 }
2890
2891 static void
2892 xlate_enqueue_action(struct xlate_ctx *ctx,
2893 const struct ofpact_enqueue *enqueue)
2894 {
2895 ofp_port_t ofp_port = enqueue->port;
2896 uint32_t queue_id = enqueue->queue;
2897 uint32_t flow_priority, priority;
2898 int error;
2899
2900 /* Translate queue to priority. */
2901 error = dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &priority);
2902 if (error) {
2903 /* Fall back to ordinary output action. */
2904 xlate_output_action(ctx, enqueue->port, 0, false);
2905 return;
2906 }
2907
2908 /* Check output port. */
2909 if (ofp_port == OFPP_IN_PORT) {
2910 ofp_port = ctx->xin->flow.in_port.ofp_port;
2911 } else if (ofp_port == ctx->xin->flow.in_port.ofp_port) {
2912 return;
2913 }
2914
2915 /* Add datapath actions. */
2916 flow_priority = ctx->xin->flow.skb_priority;
2917 ctx->xin->flow.skb_priority = priority;
2918 compose_output_action(ctx, ofp_port);
2919 ctx->xin->flow.skb_priority = flow_priority;
2920
2921 /* Update NetFlow output port. */
2922 if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
2923 ctx->xout->nf_output_iface = ofp_port;
2924 } else if (ctx->xout->nf_output_iface != NF_OUT_FLOOD) {
2925 ctx->xout->nf_output_iface = NF_OUT_MULTI;
2926 }
2927 }
2928
2929 static void
2930 xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id)
2931 {
2932 uint32_t skb_priority;
2933
2934 if (!dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &skb_priority)) {
2935 ctx->xin->flow.skb_priority = skb_priority;
2936 } else {
2937 /* Couldn't translate queue to a priority. Nothing to do. A warning
2938 * has already been logged. */
2939 }
2940 }
2941
2942 static bool
2943 slave_enabled_cb(ofp_port_t ofp_port, void *xbridge_)
2944 {
2945 const struct xbridge *xbridge = xbridge_;
2946 struct xport *port;
2947
2948 switch (ofp_port) {
2949 case OFPP_IN_PORT:
2950 case OFPP_TABLE:
2951 case OFPP_NORMAL:
2952 case OFPP_FLOOD:
2953 case OFPP_ALL:
2954 case OFPP_NONE:
2955 return true;
2956 case OFPP_CONTROLLER: /* Not supported by the bundle action. */
2957 return false;
2958 default:
2959 port = get_ofp_port(xbridge, ofp_port);
2960 return port ? port->may_enable : false;
2961 }
2962 }
2963
2964 static void
2965 xlate_bundle_action(struct xlate_ctx *ctx,
2966 const struct ofpact_bundle *bundle)
2967 {
2968 ofp_port_t port;
2969
2970 port = bundle_execute(bundle, &ctx->xin->flow, &ctx->xout->wc,
2971 slave_enabled_cb,
2972 CONST_CAST(struct xbridge *, ctx->xbridge));
2973 if (bundle->dst.field) {
2974 nxm_reg_load(&bundle->dst, ofp_to_u16(port), &ctx->xin->flow,
2975 &ctx->xout->wc);
2976 } else {
2977 xlate_output_action(ctx, port, 0, false);
2978 }
2979 }
2980
2981 static void
2982 xlate_learn_action__(struct xlate_ctx *ctx, const struct ofpact_learn *learn,
2983 struct ofputil_flow_mod *fm, struct ofpbuf *ofpacts)
2984 {
2985 learn_execute(learn, &ctx->xin->flow, fm, ofpacts);
2986 if (ctx->xin->may_learn) {
2987 ofproto_dpif_flow_mod(ctx->xbridge->ofproto, fm);
2988 }
2989 }
2990
2991 static void
2992 xlate_learn_action(struct xlate_ctx *ctx, const struct ofpact_learn *learn)
2993 {
2994 ctx->xout->has_learn = true;
2995 learn_mask(learn, &ctx->xout->wc);
2996
2997 if (ctx->xin->xcache) {
2998 struct xc_entry *entry;
2999
3000 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_LEARN);
3001 entry->u.learn.ofproto = ctx->xbridge->ofproto;
3002 entry->u.learn.fm = xmalloc(sizeof *entry->u.learn.fm);
3003 entry->u.learn.ofpacts = ofpbuf_new(64);
3004 xlate_learn_action__(ctx, learn, entry->u.learn.fm,
3005 entry->u.learn.ofpacts);
3006 } else if (ctx->xin->may_learn) {
3007 uint64_t ofpacts_stub[1024 / 8];
3008 struct ofputil_flow_mod fm;
3009 struct ofpbuf ofpacts;
3010
3011 ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
3012 xlate_learn_action__(ctx, learn, &fm, &ofpacts);
3013 ofpbuf_uninit(&ofpacts);
3014 }
3015 }
3016
3017 static void
3018 xlate_fin_timeout__(struct rule_dpif *rule, uint16_t tcp_flags,
3019 uint16_t idle_timeout, uint16_t hard_timeout)
3020 {
3021 if (tcp_flags & (TCP_FIN | TCP_RST)) {
3022 rule_dpif_reduce_timeouts(rule, idle_timeout, hard_timeout);
3023 }
3024 }
3025
3026 static void
3027 xlate_fin_timeout(struct xlate_ctx *ctx,
3028 const struct ofpact_fin_timeout *oft)
3029 {
3030 if (ctx->rule) {
3031 xlate_fin_timeout__(ctx->rule, ctx->xin->tcp_flags,
3032 oft->fin_idle_timeout, oft->fin_hard_timeout);
3033 if (ctx->xin->xcache) {
3034 struct xc_entry *entry;
3035
3036 entry = xlate_cache_add_entry(ctx->xin->xcache, XC_FIN_TIMEOUT);
3037 /* XC_RULE already holds a reference on the rule, none is taken
3038 * here. */
3039 entry->u.fin.rule = ctx->rule;
3040 entry->u.fin.idle = oft->fin_idle_timeout;
3041 entry->u.fin.hard = oft->fin_hard_timeout;
3042 }
3043 }
3044 }
3045
3046 static void
3047 xlate_sample_action(struct xlate_ctx *ctx,
3048 const struct ofpact_sample *os)
3049 {
3050 union user_action_cookie cookie;
3051 /* Scale the probability from 16-bit to 32-bit while representing
3052 * the same percentage. */
3053 uint32_t probability = (os->probability << 16) | os->probability;
3054
3055 if (!ctx->xbridge->variable_length_userdata) {
3056 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
3057
3058 VLOG_ERR_RL(&rl, "ignoring NXAST_SAMPLE action because datapath "
3059 "lacks support (needs Linux 3.10+ or kernel module from "
3060 "OVS 1.11+)");
3061 return;
3062 }
3063
3064 ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
3065 &ctx->xout->odp_actions,
3066 &ctx->xout->wc);
3067
3068 compose_flow_sample_cookie(os->probability, os->collector_set_id,
3069 os->obs_domain_id, os->obs_point_id, &cookie);
3070 compose_sample_action(ctx->xbridge, &ctx->xout->odp_actions, &ctx->xin->flow,
3071 probability, &cookie, sizeof cookie.flow_sample);
3072 }
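/* [Editor's note] Why '(os->probability << 16) | os->probability' above
 * preserves the sampling ratio: (p << 16) | p == p * 65537 and
 * UINT32_MAX == 65535 * 65537, so p / 65535 == (p * 65537) / 4294967295
 * exactly.  A standalone check (hypothetical helper, not an OVS API): */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t
scale_probability(uint16_t p)
{
    return ((uint32_t) p << 16) | p;          /* == p * 65537 */
}

static void
check_scaling(void)
{
    assert(scale_probability(0) == 0);                    /* Never sample. */
    assert(scale_probability(UINT16_MAX) == UINT32_MAX);  /* Always sample. */
    assert(scale_probability(0x8000) == 0x80008000);      /* ~50% either way. */
}
#endif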
3073
3074 static bool
3075 may_receive(const struct xport *xport, struct xlate_ctx *ctx)
3076 {
3077 if (xport->config & (is_stp(&ctx->xin->flow)
3078 ? OFPUTIL_PC_NO_RECV_STP
3079 : OFPUTIL_PC_NO_RECV)) {
3080 return false;
3081 }
3082
3083 /* Only drop packets here if both forwarding and learning are
3084 * disabled. If just learning is enabled, we need to have
3085 * OFPP_NORMAL and the learning action have a look at the packet
3086 * before we can drop it. */
3087 if (!xport_stp_forward_state(xport) && !xport_stp_learn_state(xport)) {
3088 return false;
3089 }
3090
3091 return true;
3092 }
3093
3094 static void
3095 xlate_write_actions(struct xlate_ctx *ctx, const struct ofpact *a)
3096 {
3097 struct ofpact_nest *on = ofpact_get_WRITE_ACTIONS(a);
3098 ofpbuf_put(&ctx->action_set, on->actions, ofpact_nest_get_action_len(on));
3099 ofpact_pad(&ctx->action_set);
3100 }
3101
3102 static void
3103 xlate_action_set(struct xlate_ctx *ctx)
3104 {
3105 uint64_t action_list_stub[1024 / 8]; /* 1024 bytes: divisor is sizeof(uint64_t). */
3106 struct ofpbuf action_list;
3107
3108 ofpbuf_use_stub(&action_list, action_list_stub, sizeof action_list_stub);
3109 ofpacts_execute_action_set(&action_list, &ctx->action_set);
3110 do_xlate_actions(ofpbuf_data(&action_list), ofpbuf_size(&action_list), ctx);
3111 ofpbuf_uninit(&action_list);
3112 }
3113
3114 static void
3115 do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
3116 struct xlate_ctx *ctx)
3117 {
3118 struct flow_wildcards *wc = &ctx->xout->wc;
3119 struct flow *flow = &ctx->xin->flow;
3120 const struct ofpact *a;
3121
3122 /* dl_type already in the mask, not set below. */
3123
3124 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
3125 struct ofpact_controller *controller;
3126 const struct ofpact_metadata *metadata;
3127 const struct ofpact_set_field *set_field;
3128 const struct mf_field *mf;
3129
3130 if (ctx->exit) {
3131 break;
3132 }
3133
3134 switch (a->type) {
3135 case OFPACT_OUTPUT:
3136 xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
3137 ofpact_get_OUTPUT(a)->max_len, true);
3138 break;
3139
3140 case OFPACT_GROUP:
3141 if (xlate_group_action(ctx, ofpact_get_GROUP(a)->group_id)) {
3142 return;
3143 }
3144 break;
3145
3146 case OFPACT_CONTROLLER:
3147 controller = ofpact_get_CONTROLLER(a);
3148 execute_controller_action(ctx, controller->max_len,
3149 controller->reason,
3150 controller->controller_id);
3151 break;
3152
3153 case OFPACT_ENQUEUE:
3154 xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a));
3155 break;
3156
3157 case OFPACT_SET_VLAN_VID:
3158 wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
3159 if (flow->vlan_tci & htons(VLAN_CFI) ||
3160 ofpact_get_SET_VLAN_VID(a)->push_vlan_if_needed) {
3161 flow->vlan_tci &= ~htons(VLAN_VID_MASK);
3162 flow->vlan_tci |= (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid)
3163 | htons(VLAN_CFI));
3164 }
3165 break;
3166
3167 case OFPACT_SET_VLAN_PCP:
3168 wc->masks.vlan_tci |= htons(VLAN_PCP_MASK | VLAN_CFI);
3169 if (flow->vlan_tci & htons(VLAN_CFI) ||
3170 ofpact_get_SET_VLAN_PCP(a)->push_vlan_if_needed) {
3171 flow->vlan_tci &= ~htons(VLAN_PCP_MASK);
3172 flow->vlan_tci |= htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp
3173 << VLAN_PCP_SHIFT) | VLAN_CFI);
3174 }
3175 break;
3176
3177 case OFPACT_STRIP_VLAN:
3178 memset(&wc->masks.vlan_tci, 0xff, sizeof wc->masks.vlan_tci);
3179 flow->vlan_tci = htons(0);
3180 break;
3181
3182 case OFPACT_PUSH_VLAN:
3183 /* XXX 802.1AD(QinQ) */
3184 memset(&wc->masks.vlan_tci, 0xff, sizeof wc->masks.vlan_tci);
3185 flow->vlan_tci = htons(VLAN_CFI);
3186 break;
3187
3188 case OFPACT_SET_ETH_SRC:
3189 memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
3190 memcpy(flow->dl_src, ofpact_get_SET_ETH_SRC(a)->mac, ETH_ADDR_LEN);
3191 break;
3192
3193 case OFPACT_SET_ETH_DST:
3194 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
3195 memcpy(flow->dl_dst, ofpact_get_SET_ETH_DST(a)->mac, ETH_ADDR_LEN);
3196 break;
3197
3198 case OFPACT_SET_IPV4_SRC:
3199 if (flow->dl_type == htons(ETH_TYPE_IP)) {
3200 memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
3201 flow->nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
3202 }
3203 break;
3204
3205 case OFPACT_SET_IPV4_DST:
3206 if (flow->dl_type == htons(ETH_TYPE_IP)) {
3207 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
3208 flow->nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
3209 }
3210 break;
3211
3212 case OFPACT_SET_IP_DSCP:
3213 if (is_ip_any(flow)) {
3214 wc->masks.nw_tos |= IP_DSCP_MASK;
3215 flow->nw_tos &= ~IP_DSCP_MASK;
3216 flow->nw_tos |= ofpact_get_SET_IP_DSCP(a)->dscp;
3217 }
3218 break;
3219
3220 case OFPACT_SET_IP_ECN:
3221 if (is_ip_any(flow)) {
3222 wc->masks.nw_tos |= IP_ECN_MASK;
3223 flow->nw_tos &= ~IP_ECN_MASK;
3224 flow->nw_tos |= ofpact_get_SET_IP_ECN(a)->ecn;
3225 }
3226 break;
3227
3228 case OFPACT_SET_IP_TTL:
3229 if (is_ip_any(flow)) {
3230 wc->masks.nw_ttl = 0xff;
3231 flow->nw_ttl = ofpact_get_SET_IP_TTL(a)->ttl;
3232 }
3233 break;
3234
3235 case OFPACT_SET_L4_SRC_PORT:
3236 if (is_ip_any(flow)) {
3237 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
3238 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
3239 flow->tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
3240 }
3241 break;
3242
3243 case OFPACT_SET_L4_DST_PORT:
3244 if (is_ip_any(flow)) {
3245 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
3246 memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
3247 flow->tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
3248 }
3249 break;
3250
3251 case OFPACT_RESUBMIT:
3252 xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a));
3253 break;
3254
3255 case OFPACT_SET_TUNNEL:
3256 flow->tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
3257 break;
3258
3259 case OFPACT_SET_QUEUE:
3260 xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);
3261 break;
3262
3263 case OFPACT_POP_QUEUE:
3264 flow->skb_priority = ctx->orig_skb_priority;
3265 break;
3266
3267 case OFPACT_REG_MOVE:
3268 nxm_execute_reg_move(ofpact_get_REG_MOVE(a), flow, wc);
3269 break;
3270
3271 case OFPACT_REG_LOAD:
3272 nxm_execute_reg_load(ofpact_get_REG_LOAD(a), flow, wc);
3273 break;
3274
3275 case OFPACT_SET_FIELD:
3276 set_field = ofpact_get_SET_FIELD(a);
3277 mf = set_field->field;
3278
3279 /* The set-field action only ever overwrites the packet's outermost
3280 * applicable header fields. Do nothing if no such header exists. */
3281 if (mf->id == MFF_VLAN_VID) {
3282 wc->masks.vlan_tci |= htons(VLAN_CFI);
3283 if (!(flow->vlan_tci & htons(VLAN_CFI))) {
3284 break;
3285 }
3286 } else if ((mf->id == MFF_MPLS_LABEL || mf->id == MFF_MPLS_TC)
3287 /* 'dl_type' is already unwildcarded. */
3288 && !eth_type_mpls(flow->dl_type)) {
3289 break;
3290 }
3291
3292 mf_mask_field_and_prereqs(mf, &wc->masks);
3293 mf_set_flow_value(mf, &set_field->value, flow);
3294 break;
3295
3296 case OFPACT_STACK_PUSH:
3297 nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), flow, wc,
3298 &ctx->stack);
3299 break;
3300
3301 case OFPACT_STACK_POP:
3302 nxm_execute_stack_pop(ofpact_get_STACK_POP(a), flow, wc,
3303 &ctx->stack);
3304 break;
3305
3306 case OFPACT_PUSH_MPLS:
3307 compose_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a));
3308 break;
3309
3310 case OFPACT_POP_MPLS:
3311 compose_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
3312 break;
3313
3314 case OFPACT_SET_MPLS_LABEL:
3315 compose_set_mpls_label_action(
3316 ctx, ofpact_get_SET_MPLS_LABEL(a)->label);
3317 break;
3318
3319 case OFPACT_SET_MPLS_TC:
3320 compose_set_mpls_tc_action(ctx, ofpact_get_SET_MPLS_TC(a)->tc);
3321 break;
3322
3323 case OFPACT_SET_MPLS_TTL:
3324 compose_set_mpls_ttl_action(ctx, ofpact_get_SET_MPLS_TTL(a)->ttl);
3325 break;
3326
3327 case OFPACT_DEC_MPLS_TTL:
3328 if (compose_dec_mpls_ttl_action(ctx)) {
3329 return;
3330 }
3331 break;
3332
3333 case OFPACT_DEC_TTL:
3334 wc->masks.nw_ttl = 0xff;
3335 if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
3336 return;
3337 }
3338 break;
3339
3340 case OFPACT_NOTE:
3341 /* Nothing to do. */
3342 break;
3343
3344 case OFPACT_MULTIPATH:
3345 multipath_execute(ofpact_get_MULTIPATH(a), flow, wc);
3346 break;
3347
3348 case OFPACT_BUNDLE:
3349 xlate_bundle_action(ctx, ofpact_get_BUNDLE(a));
3350 break;
3351
3352 case OFPACT_OUTPUT_REG:
3353 xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a));
3354 break;
3355
3356 case OFPACT_LEARN:
3357 xlate_learn_action(ctx, ofpact_get_LEARN(a));
3358 break;
3359
3360 case OFPACT_EXIT:
3361 ctx->exit = true;
3362 break;
3363
3364 case OFPACT_FIN_TIMEOUT:
3365 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
3366 ctx->xout->has_fin_timeout = true;
3367 xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
3368 break;
3369
3370 case OFPACT_CLEAR_ACTIONS:
3371 ofpbuf_clear(&ctx->action_set);
3372 break;
3373
3374 case OFPACT_WRITE_ACTIONS:
3375 xlate_write_actions(ctx, a);
3376 break;
3377
3378 case OFPACT_WRITE_METADATA:
3379 metadata = ofpact_get_WRITE_METADATA(a);
3380 flow->metadata &= ~metadata->mask;
3381 flow->metadata |= metadata->metadata & metadata->mask;
3382 break;
3383
3384 case OFPACT_METER:
3385 /* Not implemented yet. */
3386 break;
3387
3388 case OFPACT_GOTO_TABLE: {
3389 struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);
3390
3391 ovs_assert(ctx->table_id < ogt->table_id);
3392 xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
3393 ogt->table_id, true, true);
3394 break;
3395 }
3396
3397 case OFPACT_SAMPLE:
3398 xlate_sample_action(ctx, ofpact_get_SAMPLE(a));
3399 break;
3400 }
3401 }
3402 }
3403
3404 void
3405 xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
3406 const struct flow *flow, struct rule_dpif *rule,
3407 uint16_t tcp_flags, const struct ofpbuf *packet)
3408 {
3409 xin->ofproto = ofproto;
3410 xin->flow = *flow;
3411 xin->packet = packet;
3412 xin->may_learn = packet != NULL;
3413 xin->rule = rule;
3414 xin->xcache = NULL;
3415 xin->ofpacts = NULL;
3416 xin->ofpacts_len = 0;
3417 xin->tcp_flags = tcp_flags;
3418 xin->resubmit_hook = NULL;
3419 xin->report_hook = NULL;
3420 xin->resubmit_stats = NULL;
3421 xin->skip_wildcards = false;
3422 }
3423
3424 void
3425 xlate_out_uninit(struct xlate_out *xout)
3426 {
3427 if (xout) {
3428 ofpbuf_uninit(&xout->odp_actions);
3429 }
3430 }
3431
3432 /* Translates the flow and actions in 'xin' into datapath actions, as
3433 * xlate_actions() does, then discards the datapath actions themselves. */
3434 void
3435 xlate_actions_for_side_effects(struct xlate_in *xin)
3436 {
3437 struct xlate_out xout;
3438
3439 xlate_actions(xin, &xout);
3440 xlate_out_uninit(&xout);
3441 }
3442
3443 static void
3444 xlate_report(struct xlate_ctx *ctx, const char *s)
3445 {
3446 if (ctx->xin->report_hook) {
3447 ctx->xin->report_hook(ctx->xin, s, ctx->recurse);
3448 }
3449 }
3450
3451 void
3452 xlate_out_copy(struct xlate_out *dst, const struct xlate_out *src)
3453 {
3454 dst->wc = src->wc;
3455 dst->slow = src->slow;
3456 dst->has_learn = src->has_learn;
3457 dst->has_normal = src->has_normal;
3458 dst->has_fin_timeout = src->has_fin_timeout;
3459 dst->nf_output_iface = src->nf_output_iface;
3460 dst->mirrors = src->mirrors;
3461
3462 ofpbuf_use_stub(&dst->odp_actions, dst->odp_actions_stub,
3463 sizeof dst->odp_actions_stub);
3464 ofpbuf_put(&dst->odp_actions, ofpbuf_data(&src->odp_actions),
3465 ofpbuf_size(&src->odp_actions));
3466 }
3467
3468 static struct skb_priority_to_dscp *
3469 get_skb_priority(const struct xport *xport, uint32_t skb_priority)
3470 {
3471 struct skb_priority_to_dscp *pdscp;
3472 uint32_t hash;
3473
3474 hash = hash_int(skb_priority, 0);
3475 HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &xport->skb_priorities) {
3476 if (pdscp->skb_priority == skb_priority) {
3477 return pdscp;
3478 }
3479 }
3480 return NULL;
3481 }
3482
3483 static bool
3484 dscp_from_skb_priority(const struct xport *xport, uint32_t skb_priority,
3485 uint8_t *dscp)
3486 {
3487 struct skb_priority_to_dscp *pdscp = get_skb_priority(xport, skb_priority);
3488 *dscp = pdscp ? pdscp->dscp : 0;
3489 return pdscp != NULL;
3490 }
3491
3492 static void
3493 clear_skb_priorities(struct xport *xport)
3494 {
3495 struct skb_priority_to_dscp *pdscp, *next;
3496
3497 HMAP_FOR_EACH_SAFE (pdscp, next, hmap_node, &xport->skb_priorities) {
3498 hmap_remove(&xport->skb_priorities, &pdscp->hmap_node);
3499 free(pdscp);
3500 }
3501 }
3502
3503 static bool
3504 actions_output_to_local_port(const struct xlate_ctx *ctx)
3505 {
3506 odp_port_t local_odp_port = ofp_port_to_odp_port(ctx->xbridge, OFPP_LOCAL);
3507 const struct nlattr *a;
3508 unsigned int left;
3509
3510 NL_ATTR_FOR_EACH_UNSAFE (a, left, ofpbuf_data(&ctx->xout->odp_actions),
3511 ofpbuf_size(&ctx->xout->odp_actions)) {
3512 if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT
3513 && nl_attr_get_odp_port(a) == local_odp_port) {
3514 return true;
3515 }
3516 }
3517 return false;
3518 }
3519
3520 /* Translates the flow and actions in 'xin' into datapath actions in
3521 * 'xout->odp_actions'.
3522 *
3523 * The caller must take responsibility for eventually freeing 'xout', with
3524 * xlate_out_uninit(). */
3525 void
3526 xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
3527 {
3528 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
3529 struct flow_wildcards *wc = &xout->wc;
3530 struct flow *flow = &xin->flow;
3531 struct rule_dpif *rule = NULL;
3532
3533 const struct rule_actions *actions = NULL;
3534 enum slow_path_reason special;
3535 const struct ofpact *ofpacts;
3536 struct xport *in_port;
3537 struct flow orig_flow;
3538 struct xlate_ctx ctx;
3539 size_t ofpacts_len;
3540 bool tnl_may_send;
3541 bool is_icmp;
3542
3543 COVERAGE_INC(xlate_actions);
3544
3545 /* Flow initialization rules:
3546 * - 'base_flow' must match the kernel's view of the packet at the
3547 * time that action processing starts. 'flow' represents any
3548 * transformations we wish to make through actions.
3549 * - By default 'base_flow' and 'flow' are the same since the input
3550 * packet matches the output before any actions are applied.
3551 * - When using VLAN splinters, 'base_flow''s VLAN is set to the value
3552 * of the received packet as seen by the kernel. If we later output
3553 * to another device without any modifications this will cause us to
3554 * insert a new tag since the original one was stripped off by the
3555 * VLAN device.
3556 * - Tunnel metadata as received is retained in 'flow'. This allows
3557 * tunnel metadata matching also in later tables.
3558 * Since a kernel action for setting the tunnel metadata will only be
3559 * generated with actual tunnel output, changing the tunnel metadata
3560 * values in 'flow' (such as tun_id) will only have effect with a later
3561 * tunnel output action.
3562 * - Tunnel 'base_flow' is completely cleared since that is what the
3563 * kernel does. If we wish to maintain the original values an action
3564 * needs to be generated. */
3565
3566 ctx.xin = xin;
3567 ctx.xout = xout;
3568 ctx.xout->slow = 0;
3569 ctx.xout->has_learn = false;
3570 ctx.xout->has_normal = false;
3571 ctx.xout->has_fin_timeout = false;
3572 ctx.xout->nf_output_iface = NF_OUT_DROP;
3573 ctx.xout->mirrors = 0;
3574 ofpbuf_use_stub(&ctx.xout->odp_actions, ctx.xout->odp_actions_stub,
3575 sizeof ctx.xout->odp_actions_stub);
3576 ofpbuf_reserve(&ctx.xout->odp_actions, NL_A_U32_SIZE);
3577
3578 ctx.xbridge = xbridge_lookup(xcfg, xin->ofproto);
3579 if (!ctx.xbridge) {
3580 return;
3581 }
3582
3583 ctx.rule = xin->rule;
3584
3585 ctx.base_flow = *flow;
3586 memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
3587 ctx.orig_tunnel_ip_dst = flow->tunnel.ip_dst;
3588
3589 flow_wildcards_init_catchall(wc);
3590 memset(&wc->masks.in_port, 0xff, sizeof wc->masks.in_port);
3591 memset(&wc->masks.skb_priority, 0xff, sizeof wc->masks.skb_priority);
3592 memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
3593 if (is_ip_any(flow)) {
3594 wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
3595 }
3596 is_icmp = is_icmpv4(flow) || is_icmpv6(flow);
3597
3598 tnl_may_send = tnl_xlate_init(&ctx.base_flow, flow, wc);
3599 if (ctx.xbridge->netflow) {
3600 netflow_mask_wc(flow, wc);
3601 }
3602
3603 ctx.recurse = 0;
3604 ctx.resubmits = 0;
3605 ctx.in_group = false;
3606 ctx.orig_skb_priority = flow->skb_priority;
3607 ctx.table_id = 0;
3608 ctx.exit = false;
3609 ctx.use_recirc = false;
3610
3611 if (!xin->ofpacts && !ctx.rule) {
3612 ctx.table_id = rule_dpif_lookup(ctx.xbridge->ofproto, flow,
3613 !xin->skip_wildcards ? wc : NULL,
3614 &rule, ctx.xin->xcache != NULL,
3615 ctx.xin->resubmit_stats);
3616 if (ctx.xin->resubmit_stats) {
3617 rule_dpif_credit_stats(rule, ctx.xin->resubmit_stats);
3618 }
3619 if (ctx.xin->xcache) {
3620 struct xc_entry *entry;
3621
3622 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_RULE);
3623 entry->u.rule = rule;
3624 }
3625 ctx.rule = rule;
3626 }
3627 xout->fail_open = ctx.rule && rule_dpif_is_fail_open(ctx.rule);
3628
3629 if (xin->ofpacts) {
3630 ofpacts = xin->ofpacts;
3631 ofpacts_len = xin->ofpacts_len;
3632 } else if (ctx.rule) {
3633 actions = rule_dpif_get_actions(ctx.rule);
3634 ofpacts = actions->ofpacts;
3635 ofpacts_len = actions->ofpacts_len;
3636 } else {
3637 OVS_NOT_REACHED();
3638 }
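/* Action source precedence: explicit 'xin->ofpacts' from the caller win over
 * the actions of the rule that was found or supplied; by this point one of
 * the two must exist. */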
3639
3640 ofpbuf_use_stub(&ctx.stack, ctx.init_stack, sizeof ctx.init_stack);
3641 ofpbuf_use_stub(&ctx.action_set,
3642 ctx.action_set_stub, sizeof ctx.action_set_stub);
3643
3644 if (mbridge_has_mirrors(ctx.xbridge->mbridge)) {
3645 /* Do this conditionally because the copy is expensive enough that it
3646 * shows up in profiles. */
3647 orig_flow = *flow;
3648 }
3649
3650 if (flow->nw_frag & FLOW_NW_FRAG_ANY) {
3651 switch (ctx.xbridge->frag) {
3652 case OFPC_FRAG_NORMAL:
3653 /* We must pretend that transport ports are unavailable. */
3654 flow->tp_src = ctx.base_flow.tp_src = htons(0);
3655 flow->tp_dst = ctx.base_flow.tp_dst = htons(0);
3656 break;
3657
3658 case OFPC_FRAG_DROP:
3659 return;
3660
3661 case OFPC_FRAG_REASM:
3662 OVS_NOT_REACHED();
3663
3664 case OFPC_FRAG_NX_MATCH:
3665 /* Nothing to do. */
3666 break;
3667
3668 case OFPC_INVALID_TTL_TO_CONTROLLER:
3669 OVS_NOT_REACHED();
3670 }
3671 }
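/* Example of OFPC_FRAG_NORMAL above: with the transport ports zeroed, an IP
 * fragment can still match rules such as "ip" or "tcp", but never a rule
 * that requires a specific port such as tp_dst=80. */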
3672
3673 in_port = get_ofp_port(ctx.xbridge, flow->in_port.ofp_port);
3674 if (in_port && in_port->is_tunnel) {
3675 if (ctx.xin->resubmit_stats) {
3676 netdev_vport_inc_rx(in_port->netdev, ctx.xin->resubmit_stats);
3677 if (in_port->bfd) {
3678 bfd_account_rx(in_port->bfd, ctx.xin->resubmit_stats);
3679 }
3680 }
3681 if (ctx.xin->xcache) {
3682 struct xc_entry *entry;
3683
3684 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETDEV);
3685 entry->u.dev.rx = netdev_ref(in_port->netdev);
3686 entry->u.dev.bfd = bfd_ref(in_port->bfd);
3687 }
3688 }
3689
3690 special = process_special(&ctx, flow, in_port, ctx.xin->packet);
3691 if (special) {
3692 ctx.xout->slow |= special;
3693 } else {
3694 size_t sample_actions_len;
3695
3696 if (flow->in_port.ofp_port
3697 != vsp_realdev_to_vlandev(ctx.xbridge->ofproto,
3698 flow->in_port.ofp_port,
3699 flow->vlan_tci)) {
3700 ctx.base_flow.vlan_tci = 0;
3701 }
3702
3703 add_sflow_action(&ctx);
3704 add_ipfix_action(&ctx);
3705 sample_actions_len = ofpbuf_size(&ctx.xout->odp_actions);
3706
3707 if (tnl_may_send && (!in_port || may_receive(in_port, &ctx))) {
3708 do_xlate_actions(ofpacts, ofpacts_len, &ctx);
3709
3710 /* We've let OFPP_NORMAL and the learning action look at the
3711 * packet, so drop it now if forwarding is disabled. */
3712 if (in_port && !xport_stp_forward_state(in_port)) {
3713 ofpbuf_set_size(&ctx.xout->odp_actions, sample_actions_len);
3714 }
3715 }
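/* Truncating odp_actions back to 'sample_actions_len' above discards every
 * action composed by do_xlate_actions() while keeping the sFlow/IPFIX
 * sampling actions added before it. */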
3716
3717 if (ofpbuf_size(&ctx.action_set)) {
3718 xlate_action_set(&ctx);
3719 }
3720
3721 if (ctx.xbridge->has_in_band
3722 && in_band_must_output_to_local_port(flow)
3723 && !actions_output_to_local_port(&ctx)) {
3724 compose_output_action(&ctx, OFPP_LOCAL);
3725 }
3726
3727 fix_sflow_action(&ctx);
3728
3729 if (mbridge_has_mirrors(ctx.xbridge->mbridge)) {
3730 add_mirror_actions(&ctx, &orig_flow);
3731 }
3732 }
3733
3734 if (nl_attr_oversized(ofpbuf_size(&ctx.xout->odp_actions))) {
3735 /* These datapath actions are too big for a Netlink attribute, so we
3736 * can't hand them to the kernel directly. dpif_execute() can execute
3737 * them one by one with userspace help, so just mark the result as
3738 * SLOW_ACTION to prevent the flow from being installed. */
3739 COVERAGE_INC(xlate_actions_oversize);
3740 ctx.xout->slow |= SLOW_ACTION;
3741 }
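/* A Netlink attribute's length field is 16 bits wide, so a single attribute
 * cannot describe more than roughly 64 kB of actions; nl_attr_oversized()
 * checks the action list against that limit. */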
3742
3743 if (mbridge_has_mirrors(ctx.xbridge->mbridge)) {
3744 if (ctx.xin->resubmit_stats) {
3745 mirror_update_stats(ctx.xbridge->mbridge, xout->mirrors,
3746 ctx.xin->resubmit_stats->n_packets,
3747 ctx.xin->resubmit_stats->n_bytes);
3748 }
3749 if (ctx.xin->xcache) {
3750 struct xc_entry *entry;
3751
3752 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_MIRROR);
3753 entry->u.mirror.mbridge = mbridge_ref(ctx.xbridge->mbridge);
3754 entry->u.mirror.mirrors = xout->mirrors;
3755 }
3756 }
3757
3758 if (ctx.xbridge->netflow) {
3759 /* Only update NetFlow if this is not a controller flow, i.e. a flow
3760 * whose actions are exactly one "controller" action. We don't report
3761 * NetFlow expiration messages for such flows because they are just
3762 * part of the control logic for the network, not real traffic. */
3763 if (ofpacts_len == 0
3764 || ofpacts->type != OFPACT_CONTROLLER
3765 || ofpact_next(ofpacts) < ofpact_end(ofpacts, ofpacts_len)) {
3766 if (ctx.xin->resubmit_stats) {
3767 netflow_flow_update(ctx.xbridge->netflow, flow,
3768 xout->nf_output_iface,
3769 ctx.xin->resubmit_stats);
3770 }
3771 if (ctx.xin->xcache) {
3772 struct xc_entry *entry;
3773
3774 entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETFLOW);
3775 entry->u.nf.netflow = netflow_ref(ctx.xbridge->netflow);
3776 entry->u.nf.flow = xmemdup(flow, sizeof *flow);
3777 entry->u.nf.iface = xout->nf_output_iface;
3778 }
3779 }
3780 }
3781
3782 ofpbuf_uninit(&ctx.stack);
3783 ofpbuf_uninit(&ctx.action_set);
3784
3785 /* Clear the metadata and register wildcard masks, because we won't
3786 * use non-header fields as part of the cache. */
3787 flow_wildcards_clear_non_packet_fields(wc);
3788
3789 /* ICMPv4 and ICMPv6 have 8-bit "type" and "code" fields. struct flow uses
3790 * the low 8 bits of the 16-bit tp_src and tp_dst members to represent
3791 * these fields. The datapath interface, on the other hand, represents
3792 * them with just 8 bits each. This means that if the high 8 bits of the
3793 * masks for these fields somehow become set, then they will get chopped
3794 * off by a round trip through the datapath, and revalidation will spot
3795 * that as an inconsistency and delete the flow. Avoid the problem here by
3796 * making sure that only the low 8 bits of either field can be unwildcarded
3797 * for ICMP.
3798 */
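/* For example, an exact-match mask of htons(0xffff) on tp_src is narrowed
 * here to htons(0x00ff), the widest mask the datapath can faithfully
 * report back for an ICMP type. */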
3799 if (is_icmp) {
3800 wc->masks.tp_src &= htons(UINT8_MAX);
3801 wc->masks.tp_dst &= htons(UINT8_MAX);
3802 }
3803 }
3804
3805 /* Sends 'packet' out 'ofport'.
3806 * May modify 'packet'.
3807 * Returns 0 if successful, otherwise a positive errno value. */
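/* Caller sketch (hypothetical; assumes 'ofport' came from ofproto-dpif and
 * 'packet' holds a complete Ethernet frame):
 *
 *     int error = xlate_send_packet(ofport, packet);
 *     if (error) {
 *         VLOG_WARN("failed to send packet: %s", ovs_strerror(error));
 *     }
 */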
3808 int
3809 xlate_send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet)
3810 {
3811 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
3812 struct xport *xport;
3813 struct ofpact_output output;
3814 struct flow flow;
3815
3816 ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
3817 /* Use OFPP_NONE as the in_port to avoid special packet processing. */
3818 flow_extract(packet, NULL, &flow);
3819 flow.in_port.ofp_port = OFPP_NONE;
3820
3821 xport = xport_lookup(xcfg, ofport);
3822 if (!xport) {
3823 return EINVAL;
3824 }
3825 output.port = xport->ofp_port;
3826 output.max_len = 0;
3827
3828 return ofproto_dpif_execute_actions(xport->xbridge->ofproto, &flow, NULL,
3829 &output.ofpact, sizeof output,
3830 packet);
3831 }
3832
3833 struct xlate_cache *
3834 xlate_cache_new(void)
3835 {
3836 struct xlate_cache *xcache = xmalloc(sizeof *xcache);
3837
3838 ofpbuf_init(&xcache->entries, 512);
3839 return xcache;
3840 }
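/* Typical lifecycle of a translation cache (a sketch, not a prescription):
 *
 *     struct xlate_cache *xcache = xlate_cache_new();
 *
 *     xin.xcache = xcache;    (entries are recorded during xlate_actions())
 *     ...
 *     xlate_push_stats(xcache, may_learn, &stats);   (replay side effects)
 *     ...
 *     xlate_cache_delete(xcache);   (or xlate_cache_clear() for reuse)
 */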
3841
3842 static struct xc_entry *
3843 xlate_cache_add_entry(struct xlate_cache *xcache, enum xc_type type)
3844 {
3845 struct xc_entry *entry;
3846
3847 entry = ofpbuf_put_zeros(&xcache->entries, sizeof *entry);
3848 entry->type = type;
3849
3850 return entry;
3851 }
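/* Entries are appended contiguously to 'xcache->entries' and later walked in
 * insertion order by XC_ENTRY_FOR_EACH in xlate_push_stats() and
 * xlate_cache_clear(). */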
3852
3853 static void
3854 xlate_cache_netdev(struct xc_entry *entry, const struct dpif_flow_stats *stats)
3855 {
3856 if (entry->u.dev.tx) {
3857 netdev_vport_inc_tx(entry->u.dev.tx, stats);
3858 }
3859 if (entry->u.dev.rx) {
3860 netdev_vport_inc_rx(entry->u.dev.rx, stats);
3861 }
3862 if (entry->u.dev.bfd) {
3863 bfd_account_rx(entry->u.dev.bfd, stats);
3864 }
3865 }
3866
3867 static void
3868 xlate_cache_normal(struct ofproto_dpif *ofproto, struct flow *flow, int vlan)
3869 {
3870 struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
3871 struct xbridge *xbridge;
3872 struct xbundle *xbundle;
3873 struct flow_wildcards wc;
3874
3875 xbridge = xbridge_lookup(xcfg, ofproto);
3876 if (!xbridge) {
3877 return;
3878 }
3879
3880 xbundle = lookup_input_bundle(xbridge, flow->in_port.ofp_port, false,
3881 NULL);
3882 if (!xbundle) {
3883 return;
3884 }
3885
3886 update_learning_table(xbridge, flow, &wc, vlan, xbundle);
3887 }
3888
3889 /* Pushes 'stats' into each entry of 'xcache', replaying the side effects of flow translation; learning is replayed only if 'may_learn' is true. */
3890 void
3891 xlate_push_stats(struct xlate_cache *xcache, bool may_learn,
3892 const struct dpif_flow_stats *stats)
3893 {
3894 struct xc_entry *entry;
3895 struct ofpbuf entries = xcache->entries;
3896
3897 XC_ENTRY_FOR_EACH (entry, entries, xcache) {
3898 switch (entry->type) {
3899 case XC_RULE:
3900 rule_dpif_credit_stats(entry->u.rule, stats);
3901 break;
3902 case XC_BOND:
3903 bond_account(entry->u.bond.bond, entry->u.bond.flow,
3904 entry->u.bond.vid, stats->n_bytes);
3905 break;
3906 case XC_NETDEV:
3907 xlate_cache_netdev(entry, stats);
3908 break;
3909 case XC_NETFLOW:
3910 netflow_flow_update(entry->u.nf.netflow, entry->u.nf.flow,
3911 entry->u.nf.iface, stats);
3912 break;
3913 case XC_MIRROR:
3914 mirror_update_stats(entry->u.mirror.mbridge,
3915 entry->u.mirror.mirrors,
3916 stats->n_packets, stats->n_bytes);
3917 break;
3918 case XC_LEARN:
3919 if (may_learn) {
3920 ofproto_dpif_flow_mod(entry->u.learn.ofproto,
3921 entry->u.learn.fm);
3922 }
3923 break;
3924 case XC_NORMAL:
3925 xlate_cache_normal(entry->u.normal.ofproto, entry->u.normal.flow,
3926 entry->u.normal.vlan);
3927 break;
3928 case XC_FIN_TIMEOUT:
3929 xlate_fin_timeout__(entry->u.fin.rule, stats->tcp_flags,
3930 entry->u.fin.idle, entry->u.fin.hard);
3931 break;
3932 case XC_GROUP:
3933 group_dpif_credit_stats(entry->u.group.group, entry->u.group.bucket,
3934 stats);
3935 break;
3936 default:
3937 OVS_NOT_REACHED();
3938 }
3939 }
3940 }
3941
3942 static void
3943 xlate_dev_unref(struct xc_entry *entry)
3944 {
3945 if (entry->u.dev.tx) {
3946 netdev_close(entry->u.dev.tx);
3947 }
3948 if (entry->u.dev.rx) {
3949 netdev_close(entry->u.dev.rx);
3950 }
3951 if (entry->u.dev.bfd) {
3952 bfd_unref(entry->u.dev.bfd);
3953 }
3954 }
3955
3956 static void
3957 xlate_cache_clear_netflow(struct netflow *netflow, struct flow *flow)
3958 {
3959 netflow_flow_clear(netflow, flow);
3960 netflow_unref(netflow);
3961 free(flow);
3962 }
3963
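/* Releases the references held by 'xcache''s entries and empties the entry
 * buffer, leaving 'xcache' ready for reuse. A null 'xcache' is allowed and
 * is a no-op. */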
3964 void
3965 xlate_cache_clear(struct xlate_cache *xcache)
3966 {
3967 struct xc_entry *entry;
3968 struct ofpbuf entries;
3969
3970 if (!xcache) {
3971 return;
3972 }
3973
3974 XC_ENTRY_FOR_EACH (entry, entries, xcache) {
3975 switch (entry->type) {
3976 case XC_RULE:
3977 rule_dpif_unref(entry->u.rule);
3978 break;
3979 case XC_BOND:
3980 free(entry->u.bond.flow);
3981 bond_unref(entry->u.bond.bond);
3982 break;
3983 case XC_NETDEV:
3984 xlate_dev_unref(entry);
3985 break;
3986 case XC_NETFLOW:
3987 xlate_cache_clear_netflow(entry->u.nf.netflow, entry->u.nf.flow);
3988 break;
3989 case XC_MIRROR:
3990 mbridge_unref(entry->u.mirror.mbridge);
3991 break;
3992 case XC_LEARN:
3993 free(entry->u.learn.fm);
3994 ofpbuf_delete(entry->u.learn.ofpacts);
3995 break;
3996 case XC_NORMAL:
3997 free(entry->u.normal.flow);
3998 break;
3999 case XC_FIN_TIMEOUT:
4000 /* 'u.fin.rule' is always also held as an XC_RULE, which
4001 * has already released its reference above. */
4002 break;
4003 case XC_GROUP:
4004 group_dpif_unref(entry->u.group.group);
4005 break;
4006 default:
4007 OVS_NOT_REACHED();
4008 }
4009 }
4010
4011 ofpbuf_clear(&xcache->entries);
4012 }
4013
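/* Frees 'xcache' and all of the references that it holds. To empty the
 * cache but keep it for reuse, use xlate_cache_clear() directly instead. */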
4014 void
4015 xlate_cache_delete(struct xlate_cache *xcache)
4016 {
4017 xlate_cache_clear(xcache);
4018 ofpbuf_uninit(&xcache->entries);
4019 free(xcache);
4020 }