/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>

#include "ofproto/ofproto-provider.h"

#include <errno.h>

#include "bfd.h"
#include "bond.h"
#include "bundle.h"
#include "byte-order.h"
#include "connmgr.h"
#include "coverage.h"
#include "cfm.h"
#include "dpif.h"
#include "dynamic-string.h"
#include "fail-open.h"
#include "hmapx.h"
#include "lacp.h"
#include "learn.h"
#include "mac-learning.h"
#include "meta-flow.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "netdev.h"
#include "netlink.h"
#include "nx-match.h"
#include "odp-util.h"
#include "odp-execute.h"
#include "ofp-util.h"
#include "ofpbuf.h"
#include "ofp-actions.h"
#include "ofp-parse.h"
#include "ofp-print.h"
#include "ofproto-dpif-governor.h"
#include "ofproto-dpif-ipfix.h"
#include "ofproto-dpif-sflow.h"
#include "poll-loop.h"
#include "simap.h"
#include "smap.h"
#include "timer.h"
#include "tunnel.h"
#include "unaligned.h"
#include "unixctl.h"
#include "vlan-bitmap.h"
#include "vlog.h"

VLOG_DEFINE_THIS_MODULE(ofproto_dpif);

COVERAGE_DEFINE(ofproto_dpif_expired);
COVERAGE_DEFINE(ofproto_dpif_xlate);
COVERAGE_DEFINE(facet_changed_rule);
COVERAGE_DEFINE(facet_revalidate);
COVERAGE_DEFINE(facet_unexpected);
COVERAGE_DEFINE(facet_suppress);

/* Maximum depth of flow table recursion (due to resubmit actions) in a
 * flow translation. */
#define MAX_RESUBMIT_RECURSION 64

/* Number of implemented OpenFlow tables. */
enum { N_TABLES = 255 };
enum { TBL_INTERNAL = N_TABLES - 1 };    /* Used for internal hidden rules. */
BUILD_ASSERT_DECL(N_TABLES >= 2 && N_TABLES <= 255);

struct ofport_dpif;
struct ofproto_dpif;
struct flow_miss;
struct facet;

struct rule_dpif {
    struct rule up;

    /* These statistics:
     *
     *   - Do include packets and bytes from facets that have been deleted or
     *     whose own statistics have been folded into the rule.
     *
     *   - Do include packets and bytes sent "by hand" that were accounted to
     *     the rule without any facet being involved (this is a rare corner
     *     case in rule_execute()).
     *
     *   - Do not include packets or bytes that can be obtained from any
     *     facet's packet_count or byte_count member or that can be obtained
     *     from the datapath by, e.g., dpif_flow_get() for any subfacet.
     */
    uint64_t packet_count;      /* Number of packets received. */
    uint64_t byte_count;        /* Number of bytes received. */

    tag_type tag;               /* Caches rule_calculate_tag() result. */

    struct list facets;         /* List of "struct facet"s. */
};

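/* Illustrative sketch, not part of the original file: given the ownership
 * described above, a rough reading of a rule's total traffic is its own
 * counters plus those of its live facets (the authoritative accounting is
 * done by rule_get_stats() and facet_push_stats() below):
 *
 *     uint64_t packets = rule->packet_count;
 *     uint64_t bytes = rule->byte_count;
 *     struct facet *facet;
 *     LIST_FOR_EACH (facet, list_node, &rule->facets) {
 *         packets += facet->packet_count;
 *         bytes += facet->byte_count;
 *     }
 */
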
static struct rule_dpif *rule_dpif_cast(const struct rule *rule)
{
    return rule ? CONTAINER_OF(rule, struct rule_dpif, up) : NULL;
}

static struct rule_dpif *rule_dpif_lookup(struct ofproto_dpif *,
                                          const struct flow *,
                                          struct flow_wildcards *wc);
static struct rule_dpif *rule_dpif_lookup__(struct ofproto_dpif *,
                                            const struct flow *,
                                            struct flow_wildcards *wc,
                                            uint8_t table);
static struct rule_dpif *rule_dpif_miss_rule(struct ofproto_dpif *ofproto,
                                             const struct flow *flow);

static void rule_get_stats(struct rule *, uint64_t *packets, uint64_t *bytes);
static void rule_credit_stats(struct rule_dpif *,
                              const struct dpif_flow_stats *);
static tag_type rule_calculate_tag(const struct flow *,
                                   const struct minimask *, uint32_t basis);
static void rule_invalidate(const struct rule_dpif *);

#define MAX_MIRRORS 32
typedef uint32_t mirror_mask_t;
#define MIRROR_MASK_C(X) UINT32_C(X)
BUILD_ASSERT_DECL(sizeof(mirror_mask_t) * CHAR_BIT >= MAX_MIRRORS);
struct ofmirror {
    struct ofproto_dpif *ofproto; /* Owning ofproto. */
    size_t idx;                 /* In ofproto's "mirrors" array. */
    void *aux;                  /* Key supplied by ofproto's client. */
    char *name;                 /* Identifier for log messages. */

    /* Selection criteria. */
    struct hmapx srcs;          /* Contains "struct ofbundle *"s. */
    struct hmapx dsts;          /* Contains "struct ofbundle *"s. */
    unsigned long *vlans;       /* Bitmap of chosen VLANs, NULL selects all. */

    /* Output (exactly one of out == NULL and out_vlan == -1 is true). */
    struct ofbundle *out;       /* Output port or NULL. */
    int out_vlan;               /* Output VLAN or -1. */
    mirror_mask_t dup_mirrors;  /* Bitmap of mirrors with the same output. */

    /* Counters. */
    int64_t packet_count;       /* Number of packets sent. */
    int64_t byte_count;         /* Number of bytes sent. */
};

static void mirror_destroy(struct ofmirror *);
static void update_mirror_stats(struct ofproto_dpif *ofproto,
                                mirror_mask_t mirrors,
                                uint64_t packets, uint64_t bytes);

struct ofbundle {
    struct hmap_node hmap_node; /* In struct ofproto's "bundles" hmap. */
    struct ofproto_dpif *ofproto; /* Owning ofproto. */
    void *aux;                  /* Key supplied by ofproto's client. */
    char *name;                 /* Identifier for log messages. */

    /* Configuration. */
    struct list ports;          /* Contains "struct ofport"s. */
    enum port_vlan_mode vlan_mode; /* VLAN mode. */
    int vlan;                   /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;      /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                 * NULL if all VLANs are trunked. */
    struct lacp *lacp;          /* LACP if LACP is enabled, otherwise NULL. */
    struct bond *bond;          /* Nonnull iff more than one port. */
    bool use_priority_tags;     /* Use 802.1p tag for frames in VLAN 0? */

    /* Status. */
    bool floodable;             /* True if no port has OFPUTIL_PC_NO_FLOOD set. */

    /* Port mirroring info. */
    mirror_mask_t src_mirrors;  /* Mirrors triggered when packet received. */
    mirror_mask_t dst_mirrors;  /* Mirrors triggered when packet sent. */
    mirror_mask_t mirror_out;   /* Mirrors that output to this bundle. */
};

static void bundle_remove(struct ofport *);
static void bundle_update(struct ofbundle *);
static void bundle_destroy(struct ofbundle *);
static void bundle_del_port(struct ofport_dpif *);
static void bundle_run(struct ofbundle *);
static void bundle_wait(struct ofbundle *);
static struct ofbundle *lookup_input_bundle(const struct ofproto_dpif *,
                                            uint16_t in_port, bool warn,
                                            struct ofport_dpif **in_ofportp);

/* A controller may use OFPP_NONE as the ingress port to indicate that
 * a packet did not arrive on a "real" port.  'ofpp_none_bundle' exists for
 * when an input bundle is needed for validation (e.g., mirroring or
 * OFPP_NORMAL processing).  It is not connected to an 'ofproto', nor does
 * it have any 'port' structs, so care must be taken when dealing with it. */
static struct ofbundle ofpp_none_bundle = {
    .name      = "OFPP_NONE",
    .vlan_mode = PORT_VLAN_TRUNK
};

static void stp_run(struct ofproto_dpif *ofproto);
static void stp_wait(struct ofproto_dpif *ofproto);
static int set_stp_port(struct ofport *,
                        const struct ofproto_port_stp_settings *);

static bool ofbundle_includes_vlan(const struct ofbundle *, uint16_t vlan);

struct xlate_ctx;

/* Initial values of fields of the packet that may be changed during
 * flow processing and needed later. */
struct initial_vals {
    /* This is the value of vlan_tci in the packet as actually received from
     * the dpif.  This is the same as the facet's flow.vlan_tci unless the
     * packet was received via a VLAN splinter.  In that case, this value is
     * 0 (because the packet as actually received from the dpif had no 802.1Q
     * tag) but the facet's flow.vlan_tci is set to the VLAN that the splinter
     * represents.
     *
     * This member should be removed when the VLAN splinters feature is no
     * longer needed. */
    ovs_be16 vlan_tci;
};

struct xlate_out {
    /* Wildcards relevant in translation.  Any fields that were used to
     * calculate the action must be set for caching and kernel
     * wildcarding to work.  For example, if the flow lookup involved
     * performing the "normal" action on IPv4 and ARP packets, 'wc'
     * would have the 'in_port' (always set), 'dl_type' (flow match),
     * 'vlan_tci' (normal action), and 'dl_dst' (normal action) fields
     * set. */
    struct flow_wildcards wc;

    tag_type tags;              /* Tags associated with actions. */
    enum slow_path_reason slow; /* 0 if fast path may be used. */
    bool has_learn;             /* Actions include NXAST_LEARN? */
    bool has_normal;            /* Actions output to OFPP_NORMAL? */
    bool has_fin_timeout;       /* Actions include NXAST_FIN_TIMEOUT? */
    uint16_t nf_output_iface;   /* Output interface index for NetFlow. */
    mirror_mask_t mirrors;      /* Bitmap of associated mirrors. */

    uint64_t odp_actions_stub[256 / 8];
    struct ofpbuf odp_actions;
};

struct xlate_in {
    struct ofproto_dpif *ofproto;

    /* Flow to which the OpenFlow actions apply.  xlate_actions() will modify
     * this flow when actions change header fields. */
    struct flow flow;

    struct initial_vals initial_vals;

    /* The packet corresponding to 'flow', or a null pointer if we are
     * revalidating without a packet to refer to. */
    const struct ofpbuf *packet;

    /* Should OFPP_NORMAL update the MAC learning table?  Should "learn"
     * actions update the flow table?
     *
     * We want to update these tables if we are actually processing a packet,
     * or if we are accounting for packets that the datapath has processed, but
     * not if we are just revalidating. */
    bool may_learn;

    /* The rule initiating translation or NULL. */
    struct rule_dpif *rule;

    /* The actions to translate.  If 'rule' is not NULL, these may be NULL. */
    const struct ofpact *ofpacts;
    size_t ofpacts_len;

    /* Union of the set of TCP flags seen so far in this flow.  (Used only by
     * NXAST_FIN_TIMEOUT.  Set to zero to avoid updating rules' timeouts.) */
    uint8_t tcp_flags;

    /* If nonnull, flow translation calls this function just before executing a
     * resubmit or OFPP_TABLE action.  In addition, disables logging of traces
     * when the recursion depth is exceeded.
     *
     * 'rule' is the rule being submitted into.  It will be null if the
     * resubmit or OFPP_TABLE action didn't find a matching rule.
     *
     * This is normally null so the client has to set it manually after
     * calling xlate_in_init(). */
    void (*resubmit_hook)(struct xlate_ctx *, struct rule_dpif *rule);

    /* If nonnull, flow translation calls this function to report some
     * significant decision, e.g. to explain why OFPP_NORMAL translation
     * dropped a packet. */
    void (*report_hook)(struct xlate_ctx *, const char *s);

    /* If nonnull, flow translation credits the specified statistics to each
     * rule reached through a resubmit or OFPP_TABLE action.
     *
     * This is normally null so the client has to set it manually after
     * calling xlate_in_init(). */
    const struct dpif_flow_stats *resubmit_stats;
};

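/* Illustrative usage sketch, not part of the original file: a typical caller
 * pairs these structures with the helpers declared below ('my_stats' is a
 * hypothetical variable name):
 *
 *     struct xlate_in xin;
 *     struct xlate_out xout;
 *
 *     xlate_in_init(&xin, ofproto, &flow, &initial_vals, rule, tcp_flags,
 *                   packet);
 *     xin.resubmit_stats = &my_stats;      (optional; see the comment above)
 *     xlate_actions(&xin, &xout);
 *     ...use xout.odp_actions...
 *     xlate_out_uninit(&xout);
 */
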
/* Context used by xlate_actions() and its callees. */
struct xlate_ctx {
    struct xlate_in *xin;
    struct xlate_out *xout;

    struct ofproto_dpif *ofproto;

    /* Flow at the last commit. */
    struct flow base_flow;

    /* Tunnel IP destination address as received.  This is stored separately
     * as the base_flow.tunnel is cleared on init to reflect the datapath
     * behavior.  Used to make sure not to send tunneled output to ourselves,
     * which might lead to an infinite loop.  This could happen easily
     * if a tunnel is marked as 'remote_ip=flow', and the flow does not
     * actually set the tun_dst field. */
    ovs_be32 orig_tunnel_ip_dst;

    /* Stack for the push and pop actions.  Each stack element is of type
     * "union mf_subvalue". */
    union mf_subvalue init_stack[1024 / sizeof(union mf_subvalue)];
    struct ofpbuf stack;

    /* The rule that we are currently translating, or NULL. */
    struct rule_dpif *rule;

    int recurse;                /* Recursion level, via xlate_table_action. */
    bool max_resubmit_trigger;  /* Recursed too deeply during translation. */
    uint32_t orig_skb_priority; /* Priority when packet arrived. */
    uint8_t table_id;           /* OpenFlow table ID where flow was found. */
    uint32_t sflow_n_outputs;   /* Number of output ports. */
    uint32_t sflow_odp_port;    /* Output port for composing sFlow action. */
    uint16_t user_cookie_offset; /* Used for user_action_cookie fixup. */
    bool exit;                  /* No further actions should be processed. */
};

static void xlate_in_init(struct xlate_in *, struct ofproto_dpif *,
                          const struct flow *, const struct initial_vals *,
                          struct rule_dpif *, uint8_t tcp_flags,
                          const struct ofpbuf *);

static void xlate_out_uninit(struct xlate_out *);

static void xlate_actions(struct xlate_in *, struct xlate_out *);

static void xlate_actions_for_side_effects(struct xlate_in *);

static void xlate_table_action(struct xlate_ctx *, uint16_t in_port,
                               uint8_t table_id, bool may_packet_in);

static size_t put_userspace_action(const struct ofproto_dpif *,
                                   struct ofpbuf *odp_actions,
                                   const struct flow *,
                                   const union user_action_cookie *,
                                   const size_t);

static void compose_slow_path(const struct ofproto_dpif *, const struct flow *,
                              enum slow_path_reason,
                              uint64_t *stub, size_t stub_size,
                              const struct nlattr **actionsp,
                              size_t *actions_lenp);

static void xlate_report(struct xlate_ctx *ctx, const char *s);

static void xlate_out_copy(struct xlate_out *dst, const struct xlate_out *src);

/* A subfacet (see "struct subfacet" below) has three possible installation
 * states:
 *
 *   - SF_NOT_INSTALLED: Not installed in the datapath.  This will only be the
 *     case just after the subfacet is created, just before the subfacet is
 *     destroyed, or if the datapath returns an error when we try to install a
 *     subfacet.
 *
 *   - SF_FAST_PATH: The subfacet's actions are installed in the datapath.
 *
 *   - SF_SLOW_PATH: An action that sends every packet for the subfacet through
 *     ofproto_dpif is installed in the datapath.
 */
enum subfacet_path {
    SF_NOT_INSTALLED,           /* No datapath flow for this subfacet. */
    SF_FAST_PATH,               /* Full actions are installed. */
    SF_SLOW_PATH,               /* Send-to-userspace action is installed. */
};

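/* Illustrative sketch, not part of the original code: installation code can
 * derive the desired path from the translation result, roughly:
 *
 *     enum subfacet_path want_path;
 *     want_path = facet->xout.slow ? SF_SLOW_PATH : SF_FAST_PATH;
 *
 * When SF_SLOW_PATH is wanted, compose_slow_path() (declared above) builds the
 * send-to-userspace action that is installed in place of the full actions. */
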
/* A dpif flow and actions associated with a facet.
 *
 * See also the large comment on struct facet. */
struct subfacet {
    /* Owners. */
    struct hmap_node hmap_node; /* In struct ofproto_dpif 'subfacets' list. */
    struct list list_node;      /* In struct facet's 'facets' list. */
    struct facet *facet;        /* Owning facet. */
    struct dpif_backer *backer; /* Owning backer. */

    enum odp_key_fitness key_fitness;
    struct nlattr *key;
    int key_len;

    long long int used;         /* Time last used; time created if not used. */
    long long int created;      /* Time created. */

    uint64_t dp_packet_count;   /* Last known packet count in the datapath. */
    uint64_t dp_byte_count;     /* Last known byte count in the datapath. */

    enum subfacet_path path;    /* Installed in datapath? */
};

#define SUBFACET_DESTROY_MAX_BATCH 50

static struct subfacet *subfacet_create(struct facet *, struct flow_miss *miss,
                                        long long int now);
static struct subfacet *subfacet_find(struct dpif_backer *,
                                      const struct nlattr *key, size_t key_len,
                                      uint32_t key_hash);
static void subfacet_destroy(struct subfacet *);
static void subfacet_destroy__(struct subfacet *);
static void subfacet_destroy_batch(struct dpif_backer *,
                                   struct subfacet **, int n);
static void subfacet_reset_dp_stats(struct subfacet *,
                                    struct dpif_flow_stats *);
static void subfacet_update_stats(struct subfacet *,
                                  const struct dpif_flow_stats *);
static int subfacet_install(struct subfacet *,
                            const struct ofpbuf *odp_actions,
                            struct dpif_flow_stats *);
static void subfacet_uninstall(struct subfacet *);

/* A unique, non-overlapping instantiation of an OpenFlow flow.
 *
 * A facet associates a "struct flow", which represents the Open vSwitch
 * userspace idea of an exact-match flow, with one or more subfacets.
 * While the facet is created based on an exact-match flow, it is stored
 * within the ofproto based on the wildcards that could be expressed
 * based on the flow table and other configuration.  (See the 'wc'
 * description in "struct xlate_out" for more details.)
 *
 * Each subfacet tracks the datapath's idea of the flow equivalent to
 * the facet.  When the kernel module (or other dpif implementation) and
 * Open vSwitch userspace agree on the definition of a flow key, there
 * is exactly one subfacet per facet.  If the dpif implementation
 * supports more-specific flow matching than userspace, however, a facet
 * can have more than one subfacet.  Examples include the dpif
 * implementation not supporting the same wildcards as userspace or some
 * distinction in flow that userspace simply doesn't understand.
 *
 * Flow expiration works in terms of subfacets, so a facet must have at
 * least one subfacet or it will never expire, leaking memory. */
struct facet {
    /* Owners. */
    struct hmap_node hmap_node;  /* In owning ofproto's 'facets' hmap. */
    struct list list_node;       /* In owning rule's 'facets' list. */
    struct rule_dpif *rule;      /* Owning rule. */

    /* Owned data. */
    struct list subfacets;
    long long int used;          /* Time last used; time created if not used. */

    /* Key. */
    struct flow flow;            /* Flow of the creating subfacet. */
    struct cls_rule cr;          /* In 'ofproto_dpif's facets classifier. */

    /* These statistics:
     *
     *   - Do include packets and bytes sent "by hand", e.g. with
     *     dpif_execute().
     *
     *   - Do include packets and bytes that were obtained from the datapath
     *     when a subfacet's statistics were reset (e.g. dpif_flow_put() with
     *     DPIF_FP_ZERO_STATS).
     *
     *   - Do not include packets or bytes that can be obtained from the
     *     datapath for any existing subfacet.
     */
    uint64_t packet_count;       /* Number of packets received. */
    uint64_t byte_count;         /* Number of bytes received. */

    /* Resubmit statistics. */
    uint64_t prev_packet_count;  /* Number of packets from last stats push. */
    uint64_t prev_byte_count;    /* Number of bytes from last stats push. */
    long long int prev_used;     /* Used time from last stats push. */

    /* Accounting. */
    uint64_t accounted_bytes;    /* Bytes processed by facet_account(). */
    struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
    uint8_t tcp_flags;           /* TCP flags seen for this 'rule'. */

    struct xlate_out xout;

    /* Initial values of the packet that may be needed later. */
    struct initial_vals initial_vals;

    /* Storage for a single subfacet, to reduce malloc() time and space
     * overhead.  (A facet always has at least one subfacet and in the common
     * case has exactly one subfacet.  However, 'one_subfacet' may not
     * always be valid, since it could have been removed after newer
     * subfacets were pushed onto the 'subfacets' list.) */
    struct subfacet one_subfacet;

    long long int learn_rl;      /* Rate limiter for facet_learn(). */
};

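/* Illustrative sketch, not part of the original file: a facet's subfacets are
 * reached through the 'list_node' links declared in struct subfacet above,
 * e.g. to total the datapath counters:
 *
 *     struct subfacet *subfacet;
 *     uint64_t dp_packets = 0;
 *     LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
 *         dp_packets += subfacet->dp_packet_count;
 *     }
 */
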
static struct facet *facet_create(const struct flow_miss *, struct rule_dpif *,
                                  struct xlate_out *,
                                  struct dpif_flow_stats *);
static void facet_remove(struct facet *);
static void facet_free(struct facet *);

static struct facet *facet_find(struct ofproto_dpif *, const struct flow *);
static struct facet *facet_lookup_valid(struct ofproto_dpif *,
                                        const struct flow *);
static bool facet_revalidate(struct facet *);
static bool facet_check_consistency(struct facet *);

static void facet_flush_stats(struct facet *);

static void facet_reset_counters(struct facet *);
static void facet_push_stats(struct facet *, bool may_learn);
static void facet_learn(struct facet *);
static void facet_account(struct facet *);
static void push_all_stats(void);

static bool facet_is_controller_flow(struct facet *);

struct ofport_dpif {
    struct hmap_node odp_port_node; /* In dpif_backer's "odp_to_ofport_map". */
    struct ofport up;

    uint32_t odp_port;
    struct ofbundle *bundle;    /* Bundle that contains this port, if any. */
    struct list bundle_node;    /* In struct ofbundle's "ports" list. */
    struct cfm *cfm;            /* Connectivity Fault Management, if any. */
    struct bfd *bfd;            /* BFD, if any. */
    tag_type tag;               /* Tag associated with this port. */
    bool may_enable;            /* May be enabled in bonds. */
    long long int carrier_seq;  /* Carrier status changes. */
    struct tnl_port *tnl_port;  /* Tunnel handle, or null. */

    /* Spanning tree. */
    struct stp_port *stp_port;  /* Spanning Tree Protocol, if any. */
    enum stp_state stp_state;   /* Always STP_DISABLED if STP not in use. */
    long long int stp_state_entered;

    struct hmap priorities;     /* Map of attached 'priority_to_dscp's. */

    /* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
     *
     * This is deprecated.  It is only for compatibility with broken device
     * drivers in old versions of Linux that do not properly support VLANs when
     * VLAN devices are not used.  When broken device drivers are no longer in
     * widespread use, we will delete these interfaces. */
    uint16_t realdev_ofp_port;
    int vlandev_vid;
};

/* Node in 'ofport_dpif''s 'priorities' map.  Used to maintain a map from
 * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
 * traffic egressing the 'ofport' with that priority should be marked with. */
struct priority_to_dscp {
    struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'priorities' map. */
    uint32_t priority;          /* Priority of this queue (see struct flow). */

    uint8_t dscp;               /* DSCP bits to mark outgoing traffic with. */
};

/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
 *
 * This is deprecated.  It is only for compatibility with broken device drivers
 * in old versions of Linux that do not properly support VLANs when VLAN
 * devices are not used.  When broken device drivers are no longer in
 * widespread use, we will delete these interfaces. */
struct vlan_splinter {
    struct hmap_node realdev_vid_node;
    struct hmap_node vlandev_node;
    uint16_t realdev_ofp_port;
    uint16_t vlandev_ofp_port;
    int vid;
};

static uint16_t vsp_realdev_to_vlandev(const struct ofproto_dpif *,
                                       uint16_t realdev_ofp_port,
                                       ovs_be16 vlan_tci);
static bool vsp_adjust_flow(const struct ofproto_dpif *, struct flow *);
static void vsp_remove(struct ofport_dpif *);
static void vsp_add(struct ofport_dpif *, uint16_t realdev_ofp_port, int vid);

static uint32_t ofp_port_to_odp_port(const struct ofproto_dpif *,
                                     uint16_t ofp_port);
static uint16_t odp_port_to_ofp_port(const struct ofproto_dpif *,
                                     uint32_t odp_port);

static struct ofport_dpif *
ofport_dpif_cast(const struct ofport *ofport)
{
    return ofport ? CONTAINER_OF(ofport, struct ofport_dpif, up) : NULL;
}

static void port_run(struct ofport_dpif *);
static void port_run_fast(struct ofport_dpif *);
static void port_wait(struct ofport_dpif *);
static int set_bfd(struct ofport *, const struct smap *);
static int set_cfm(struct ofport *, const struct cfm_settings *);
static void ofport_clear_priorities(struct ofport_dpif *);
static void run_fast_rl(void);

struct dpif_completion {
    struct list list_node;
    struct ofoperation *op;
};

/* Extra information about a classifier table.
 * Currently used just for optimized flow revalidation. */
struct table_dpif {
    /* If either of these is nonnull, then this table has a form that allows
     * flows to be tagged to avoid revalidating most flows for the most common
     * kinds of flow table changes. */
    struct cls_table *catchall_table; /* Table that wildcards all fields. */
    struct cls_table *other_table;    /* Table with any other wildcard set. */
    uint32_t basis;                   /* Keeps each table's tags separate. */
};

/* Reasons that we might need to revalidate every facet, and corresponding
 * coverage counters.
 *
 * A value of 0 means that there is no need to revalidate.
 *
 * It would be nice to have some cleaner way to integrate with coverage
 * counters, but with only a few reasons I guess this is good enough for
 * now. */
enum revalidate_reason {
    REV_RECONFIGURE = 1,       /* Switch configuration changed. */
    REV_STP,                   /* Spanning tree protocol port status change. */
    REV_PORT_TOGGLED,          /* Port enabled or disabled by CFM, LACP, ... */
    REV_FLOW_TABLE,            /* Flow table changed. */
    REV_INCONSISTENCY          /* Facet self-check failed. */
};
COVERAGE_DEFINE(rev_reconfigure);
COVERAGE_DEFINE(rev_stp);
COVERAGE_DEFINE(rev_port_toggled);
COVERAGE_DEFINE(rev_flow_table);
COVERAGE_DEFINE(rev_inconsistency);

/* Drop keys are odp flow keys which have drop flows installed in the kernel.
 * These are datapath flows which have no associated ofproto; if they did, we
 * would use facets. */
struct drop_key {
    struct hmap_node hmap_node;
    struct nlattr *key;
    size_t key_len;
};

struct avg_subfacet_rates {
    double add_rate;     /* Moving average of new flows created per minute. */
    double del_rate;     /* Moving average of flows deleted per minute. */
};

/* All datapaths of a given type share a single dpif backer instance. */
struct dpif_backer {
    char *type;
    int refcount;
    struct dpif *dpif;
    struct timer next_expiration;
    struct hmap odp_to_ofport_map; /* ODP port to ofport mapping. */

    struct simap tnl_backers;      /* Set of dpif ports backing tunnels. */

    /* Facet revalidation flags applying to facets which use this backer. */
    enum revalidate_reason need_revalidate; /* Revalidate every facet. */
    struct tag_set revalidate_set; /* Revalidate only matching facets. */

    struct hmap drop_keys;         /* Set of dropped odp keys. */
    bool recv_set_enable;          /* Enables or disables receiving packets. */

    struct hmap subfacets;
    struct governor *governor;

    /* Subfacet statistics.
     *
     * These keep track of the total number of subfacets added and deleted and
     * flow life span.  They are useful for computing the flow rates stats
     * exposed via "ovs-appctl dpif/show".  The goal is to learn about
     * traffic patterns in ways that we can use later to improve Open vSwitch
     * performance in new situations. */
    long long int created;           /* Time when it is created. */
    unsigned max_n_subfacet;         /* Maximum number of flows. */
    unsigned avg_n_subfacet;         /* Average number of flows. */
    long long int avg_subfacet_life; /* Average life span of subfacets. */

    /* The average number of subfacets... */
    struct avg_subfacet_rates hourly;   /* ...over the last hour. */
    struct avg_subfacet_rates daily;    /* ...over the last day. */
    struct avg_subfacet_rates lifetime; /* ...over the switch lifetime. */
    long long int last_minute;          /* Last time 'hourly' was updated. */

    /* Number of subfacets added or deleted since 'last_minute'. */
    unsigned subfacet_add_count;
    unsigned subfacet_del_count;

    /* Number of subfacets added or deleted from 'created' to 'last_minute.' */
    unsigned long long int total_subfacet_add_count;
    unsigned long long int total_subfacet_del_count;
};

/* All existing dpif_backer instances, indexed by ofproto->up.type. */
static struct shash all_dpif_backers = SHASH_INITIALIZER(&all_dpif_backers);

static void drop_key_clear(struct dpif_backer *);
static struct ofport_dpif *
odp_port_to_ofport(const struct dpif_backer *, uint32_t odp_port);
static void update_moving_averages(struct dpif_backer *backer);

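/* Illustrative sketch, not part of the original file: because all datapaths of
 * one type share a backer, lookups go through 'all_dpif_backers' keyed by the
 * datapath type string, as type_run() below does:
 *
 *     struct dpif_backer *backer = shash_find_data(&all_dpif_backers, type);
 *     if (backer) {
 *         ...backer->dpif is the shared datapath handle...
 *     }
 */
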
struct ofproto_dpif {
    struct hmap_node all_ofproto_dpifs_node; /* In 'all_ofproto_dpifs'. */
    struct ofproto up;
    struct dpif_backer *backer;

    /* Special OpenFlow rules. */
    struct rule_dpif *miss_rule; /* Sends flow table misses to controller. */
    struct rule_dpif *no_packet_in_rule; /* Drops flow table misses. */
    struct rule_dpif *drop_frags_rule; /* Used in OFPC_FRAG_DROP mode. */

    /* Bridging. */
    struct netflow *netflow;
    struct dpif_sflow *sflow;
    struct dpif_ipfix *ipfix;
    struct hmap bundles;        /* Contains "struct ofbundle"s. */
    struct mac_learning *ml;
    struct ofmirror *mirrors[MAX_MIRRORS];
    bool has_mirrors;
    bool has_bonded_bundles;

    /* Facets. */
    struct classifier facets;   /* Contains 'struct facet's. */
    long long int consistency_rl;

    /* Revalidation. */
    struct table_dpif tables[N_TABLES];

    /* Support for debugging async flow mods. */
    struct list completions;

    bool has_bundle_action;     /* True when the first bundle action appears. */
    struct netdev_stats stats;  /* To account packets generated and consumed in
                                 * userspace. */

    /* Spanning tree. */
    struct stp *stp;
    long long int stp_last_tick;

    /* VLAN splinters. */
    struct hmap realdev_vid_map; /* (realdev,vid) -> vlandev. */
    struct hmap vlandev_map;     /* vlandev -> (realdev,vid). */

    /* Ports. */
    struct sset ports;          /* Set of standard port names. */
    struct sset ghost_ports;    /* Ports with no datapath port. */
    struct sset port_poll_set;  /* Queued names for port_poll() reply. */
    int port_poll_errno;        /* Last errno for port_poll() reply. */

    /* Per ofproto's dpif stats. */
    uint64_t n_hit;
    uint64_t n_missed;
};

/* Defer flow mod completion until "ovs-appctl ofproto/unclog"?  (Useful only
 * for debugging the asynchronous flow_mod implementation.) */
static bool clogged;

/* All existing ofproto_dpif instances, indexed by ->up.name. */
static struct hmap all_ofproto_dpifs = HMAP_INITIALIZER(&all_ofproto_dpifs);

static void ofproto_dpif_unixctl_init(void);

static struct ofproto_dpif *
ofproto_dpif_cast(const struct ofproto *ofproto)
{
    ovs_assert(ofproto->ofproto_class == &ofproto_dpif_class);
    return CONTAINER_OF(ofproto, struct ofproto_dpif, up);
}

static struct ofport_dpif *get_ofp_port(const struct ofproto_dpif *,
                                        uint16_t ofp_port);
static struct ofport_dpif *get_odp_port(const struct ofproto_dpif *,
                                        uint32_t odp_port);
static void ofproto_trace(struct ofproto_dpif *, const struct flow *,
                          const struct ofpbuf *,
                          const struct initial_vals *, struct ds *);

/* Packet processing. */
static void update_learning_table(struct ofproto_dpif *, const struct flow *,
                                  struct flow_wildcards *, int vlan,
                                  struct ofbundle *);
/* Upcalls. */
#define FLOW_MISS_MAX_BATCH 50
static int handle_upcalls(struct dpif_backer *, unsigned int max_batch);

/* Flow expiration. */
static int expire(struct dpif_backer *);

/* NetFlow. */
static void send_netflow_active_timeouts(struct ofproto_dpif *);

/* Utilities. */
static int send_packet(const struct ofport_dpif *, struct ofpbuf *packet);
static size_t compose_sflow_action(const struct ofproto_dpif *,
                                   struct ofpbuf *odp_actions,
                                   const struct flow *, uint32_t odp_port);
static void compose_ipfix_action(const struct ofproto_dpif *,
                                 struct ofpbuf *odp_actions,
                                 const struct flow *);
static void add_mirror_actions(struct xlate_ctx *ctx,
                               const struct flow *flow);
/* Global variables. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

/* Initial mappings of ports to bridges. */
static struct shash init_ofp_ports = SHASH_INITIALIZER(&init_ofp_ports);

/* Factory functions. */

static void
init(const struct shash *iface_hints)
{
    struct shash_node *node;

    /* Make a local copy, since we don't own 'iface_hints' elements. */
    SHASH_FOR_EACH(node, iface_hints) {
        const struct iface_hint *orig_hint = node->data;
        struct iface_hint *new_hint = xmalloc(sizeof *new_hint);

        new_hint->br_name = xstrdup(orig_hint->br_name);
        new_hint->br_type = xstrdup(orig_hint->br_type);
        new_hint->ofp_port = orig_hint->ofp_port;

        shash_add(&init_ofp_ports, node->name, new_hint);
    }
}

static void
enumerate_types(struct sset *types)
{
    dp_enumerate_types(types);
}

static int
enumerate_names(const char *type, struct sset *names)
{
    struct ofproto_dpif *ofproto;

    sset_clear(names);
    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        if (strcmp(type, ofproto->up.type)) {
            continue;
        }
        sset_add(names, ofproto->up.name);
    }

    return 0;
}

static int
del(const char *type, const char *name)
{
    struct dpif *dpif;
    int error;

    error = dpif_open(name, type, &dpif);
    if (!error) {
        error = dpif_delete(dpif);
        dpif_close(dpif);
    }
    return error;
}

static const char *
port_open_type(const char *datapath_type, const char *port_type)
{
    return dpif_port_open_type(datapath_type, port_type);
}

/* Type functions. */

static struct ofproto_dpif *
lookup_ofproto_dpif_by_port_name(const char *name)
{
    struct ofproto_dpif *ofproto;

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        if (sset_contains(&ofproto->ports, name)) {
            return ofproto;
        }
    }

    return NULL;
}

static int
type_run(const char *type)
{
    static long long int push_timer = LLONG_MIN;
    struct dpif_backer *backer;
    char *devname;
    int error;

    backer = shash_find_data(&all_dpif_backers, type);
    if (!backer) {
        /* This is not necessarily a problem, since backers are only
         * created on demand. */
        return 0;
    }

    dpif_run(backer->dpif);

    /* The most natural place to push facet statistics is when they're pulled
     * from the datapath.  However, when there are many flows in the datapath,
     * this expensive operation can occur so frequently that it reduces our
     * ability to quickly set up flows.  To reduce the cost, we push statistics
     * here instead. */
    if (time_msec() > push_timer) {
        push_timer = time_msec() + 2000;
        push_all_stats();
    }

    /* If vswitchd started with other_config:flow_restore_wait set as "true",
     * and the configuration has now changed to "false", enable receiving
     * packets from the datapath. */
    if (!backer->recv_set_enable && !ofproto_get_flow_restore_wait()) {
        backer->recv_set_enable = true;

        error = dpif_recv_set(backer->dpif, backer->recv_set_enable);
        if (error) {
            VLOG_ERR("Failed to enable receiving packets in dpif.");
            return error;
        }
        dpif_flow_flush(backer->dpif);
        backer->need_revalidate = REV_RECONFIGURE;
    }

    if (backer->need_revalidate
        || !tag_set_is_empty(&backer->revalidate_set)) {
        struct tag_set revalidate_set = backer->revalidate_set;
        bool need_revalidate = backer->need_revalidate;
        struct ofproto_dpif *ofproto;
        struct simap_node *node;
        struct simap tmp_backers;

        /* Handle tunnel garbage collection. */
        simap_init(&tmp_backers);
        simap_swap(&backer->tnl_backers, &tmp_backers);

        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            struct ofport_dpif *iter;

            if (backer != ofproto->backer) {
                continue;
            }

            HMAP_FOR_EACH (iter, up.hmap_node, &ofproto->up.ports) {
                char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
                const char *dp_port;

                if (!iter->tnl_port) {
                    continue;
                }

                dp_port = netdev_vport_get_dpif_port(iter->up.netdev,
                                                     namebuf, sizeof namebuf);
                node = simap_find(&tmp_backers, dp_port);
                if (node) {
                    simap_put(&backer->tnl_backers, dp_port, node->data);
                    simap_delete(&tmp_backers, node);
                    node = simap_find(&backer->tnl_backers, dp_port);
                } else {
                    node = simap_find(&backer->tnl_backers, dp_port);
                    if (!node) {
                        uint32_t odp_port = UINT32_MAX;

                        if (!dpif_port_add(backer->dpif, iter->up.netdev,
                                           &odp_port)) {
                            simap_put(&backer->tnl_backers, dp_port, odp_port);
                            node = simap_find(&backer->tnl_backers, dp_port);
                        }
                    }
                }

                iter->odp_port = node ? node->data : OVSP_NONE;
                if (tnl_port_reconfigure(&iter->up, iter->odp_port,
                                         &iter->tnl_port)) {
                    backer->need_revalidate = REV_RECONFIGURE;
                }
            }
        }

        SIMAP_FOR_EACH (node, &tmp_backers) {
            dpif_port_del(backer->dpif, node->data);
        }
        simap_destroy(&tmp_backers);

        switch (backer->need_revalidate) {
        case REV_RECONFIGURE:   COVERAGE_INC(rev_reconfigure);   break;
        case REV_STP:           COVERAGE_INC(rev_stp);           break;
        case REV_PORT_TOGGLED:  COVERAGE_INC(rev_port_toggled);  break;
        case REV_FLOW_TABLE:    COVERAGE_INC(rev_flow_table);    break;
        case REV_INCONSISTENCY: COVERAGE_INC(rev_inconsistency); break;
        }

        if (backer->need_revalidate) {
            /* Clear the drop_keys in case we should now be accepting some
             * formerly dropped flows. */
            drop_key_clear(backer);
        }

        /* Clear the revalidation flags. */
        tag_set_init(&backer->revalidate_set);
        backer->need_revalidate = 0;

        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            struct facet *facet, *next;
            struct cls_cursor cursor;

            if (ofproto->backer != backer) {
                continue;
            }

            cls_cursor_init(&cursor, &ofproto->facets, NULL);
            CLS_CURSOR_FOR_EACH_SAFE (facet, next, cr, &cursor) {
                if (need_revalidate
                    || tag_set_intersects(&revalidate_set, facet->xout.tags)) {
                    facet_revalidate(facet);
                    run_fast_rl();
                }
            }
        }
    }

    if (!backer->recv_set_enable) {
        /* Wake up before a max of 1000ms. */
        timer_set_duration(&backer->next_expiration, 1000);
    } else if (timer_expired(&backer->next_expiration)) {
        int delay = expire(backer);
        timer_set_duration(&backer->next_expiration, delay);
    }

    /* Check for port changes in the dpif. */
    while ((error = dpif_port_poll(backer->dpif, &devname)) == 0) {
        struct ofproto_dpif *ofproto;
        struct dpif_port port;

        /* Don't report on the datapath's device. */
        if (!strcmp(devname, dpif_base_name(backer->dpif))) {
            goto next;
        }

        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
                       &all_ofproto_dpifs) {
            if (simap_contains(&ofproto->backer->tnl_backers, devname)) {
                goto next;
            }
        }

        ofproto = lookup_ofproto_dpif_by_port_name(devname);
        if (dpif_port_query_by_name(backer->dpif, devname, &port)) {
            /* The port was removed.  If we know the datapath,
             * report it through poll_set().  If we don't, it may be
             * notifying us of a removal we initiated, so ignore it.
             * If there's a pending ENOBUFS, let it stand, since
             * everything will be reevaluated. */
            if (ofproto && ofproto->port_poll_errno != ENOBUFS) {
                sset_add(&ofproto->port_poll_set, devname);
                ofproto->port_poll_errno = 0;
            }
        } else if (!ofproto) {
            /* The port was added, but we don't know with which
             * ofproto we should associate it.  Delete it. */
            dpif_port_del(backer->dpif, port.port_no);
        }
        dpif_port_destroy(&port);

    next:
        free(devname);
    }

    if (error != EAGAIN) {
        struct ofproto_dpif *ofproto;

        /* There was some sort of error, so propagate it to all
         * ofprotos that use this backer. */
        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
                       &all_ofproto_dpifs) {
            if (ofproto->backer == backer) {
                sset_clear(&ofproto->port_poll_set);
                ofproto->port_poll_errno = error;
            }
        }
    }

    if (backer->governor) {
        size_t n_subfacets;

        governor_run(backer->governor);

        /* If the governor has shrunk to its minimum size and the number of
         * subfacets has dwindled, then drop the governor entirely.
         *
         * For hysteresis, the number of subfacets to drop the governor is
         * smaller than the number needed to trigger its creation. */
        n_subfacets = hmap_count(&backer->subfacets);
        if (n_subfacets * 4 < flow_eviction_threshold
            && governor_is_idle(backer->governor)) {
            governor_destroy(backer->governor);
            backer->governor = NULL;
        }
    }

    return 0;
}

static int
dpif_backer_run_fast(struct dpif_backer *backer, int max_batch)
{
    unsigned int work;

    /* If recv_set_enable is false, we should not handle upcalls. */
    if (!backer->recv_set_enable) {
        return 0;
    }

    /* Handle one or more batches of upcalls, until there's nothing left to do
     * or until we do a fixed total amount of work.
     *
     * We do work in batches because it can be much cheaper to set up a number
     * of flows and fire off their packets all at once.  We do multiple batches
     * because in some cases handling a packet can cause another packet to be
     * queued almost immediately as part of the return flow.  Both
     * optimizations can make major improvements on some benchmarks and
     * presumably for real traffic as well. */
    work = 0;
    while (work < max_batch) {
        int retval = handle_upcalls(backer, max_batch - work);
        if (retval <= 0) {
            return -retval;
        }
        work += retval;
    }

    return 0;
}

static int
type_run_fast(const char *type)
{
    struct dpif_backer *backer;

    backer = shash_find_data(&all_dpif_backers, type);
    if (!backer) {
        /* This is not necessarily a problem, since backers are only
         * created on demand. */
        return 0;
    }

    return dpif_backer_run_fast(backer, FLOW_MISS_MAX_BATCH);
}

static void
run_fast_rl(void)
{
    static long long int port_rl = LLONG_MIN;
    static unsigned int backer_rl = 0;

    if (time_msec() >= port_rl) {
        struct ofproto_dpif *ofproto;
        struct ofport_dpif *ofport;

        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {

            HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
                port_run_fast(ofport);
            }
        }
        port_rl = time_msec() + 200;
    }

    /* XXX: We have to be careful not to do too much work in this function.  If
     * we call dpif_backer_run_fast() too often, or with too large a batch,
     * performance improves significantly, but at a cost.  It's possible for
     * the number of flows in the datapath to increase without bound, and for
     * poll loops to take 10s of seconds.  The correct solution to this
     * problem, long term, is to separate flow miss handling into its own
     * thread so it isn't affected by revalidations and expirations.  Until
     * then, this is the best we can do. */
    if (++backer_rl >= 10) {
        struct shash_node *node;

        backer_rl = 0;
        SHASH_FOR_EACH (node, &all_dpif_backers) {
            dpif_backer_run_fast(node->data, 1);
        }
    }
}

static void
type_wait(const char *type)
{
    struct dpif_backer *backer;

    backer = shash_find_data(&all_dpif_backers, type);
    if (!backer) {
        /* This is not necessarily a problem, since backers are only
         * created on demand. */
        return;
    }

    if (backer->governor) {
        governor_wait(backer->governor);
    }

    timer_wait(&backer->next_expiration);
}

/* Basic life-cycle. */

static int add_internal_flows(struct ofproto_dpif *);

static struct ofproto *
alloc(void)
{
    struct ofproto_dpif *ofproto = xmalloc(sizeof *ofproto);
    return &ofproto->up;
}

static void
dealloc(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    free(ofproto);
}

static void
close_dpif_backer(struct dpif_backer *backer)
{
    struct shash_node *node;

    ovs_assert(backer->refcount > 0);

    if (--backer->refcount) {
        return;
    }

    drop_key_clear(backer);
    hmap_destroy(&backer->drop_keys);

    simap_destroy(&backer->tnl_backers);
    hmap_destroy(&backer->odp_to_ofport_map);
    node = shash_find(&all_dpif_backers, backer->type);
    free(backer->type);
    shash_delete(&all_dpif_backers, node);
    dpif_close(backer->dpif);

    ovs_assert(hmap_is_empty(&backer->subfacets));
    hmap_destroy(&backer->subfacets);
    governor_destroy(backer->governor);

    free(backer);
}

1275 | /* Datapath port slated for removal from datapath. */ | |
1276 | struct odp_garbage { | |
1277 | struct list list_node; | |
1278 | uint32_t odp_port; | |
1279 | }; | |
1280 | ||
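/* Opens, creating it if necessary, the dpif backer that serves datapaths of
 * 'type' and stores it in '*backerp'. Backers are shared by reference count
 * among all ofprotos of the same type; when a backer is created for the
 * first time, any stale datapaths of that type are deleted and datapath
 * ports that no bridge expects are removed. Returns 0 if successful,
 * otherwise a positive errno value. */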
1281 | static int | |
1282 | open_dpif_backer(const char *type, struct dpif_backer **backerp) | |
1283 | { | |
1284 | struct dpif_backer *backer; | |
1285 | struct dpif_port_dump port_dump; | |
1286 | struct dpif_port port; | |
1287 | struct shash_node *node; | |
1288 | struct list garbage_list; | |
1289 | struct odp_garbage *garbage, *next; | |
1290 | struct sset names; | |
1291 | char *backer_name; | |
1292 | const char *name; | |
1293 | int error; | |
1294 | ||
1295 | backer = shash_find_data(&all_dpif_backers, type); | |
1296 | if (backer) { | |
1297 | backer->refcount++; | |
1298 | *backerp = backer; | |
1299 | return 0; | |
1300 | } | |
1301 | ||
1302 | backer_name = xasprintf("ovs-%s", type); | |
1303 | ||
1304 | /* Remove any existing datapaths, since we assume we're the only | |
1305 | * userspace controlling the datapath. */ | |
1306 | sset_init(&names); | |
1307 | dp_enumerate_names(type, &names); | |
1308 | SSET_FOR_EACH(name, &names) { | |
1309 | struct dpif *old_dpif; | |
1310 | ||
1311 | /* Don't remove our backer if it exists. */ | |
1312 | if (!strcmp(name, backer_name)) { | |
1313 | continue; | |
1314 | } | |
1315 | ||
1316 | if (dpif_open(name, type, &old_dpif)) { | |
1317 | VLOG_WARN("couldn't open old datapath %s to remove it", name); | |
1318 | } else { | |
1319 | dpif_delete(old_dpif); | |
1320 | dpif_close(old_dpif); | |
1321 | } | |
1322 | } | |
1323 | sset_destroy(&names); | |
1324 | ||
1325 | backer = xmalloc(sizeof *backer); | |
1326 | ||
1327 | error = dpif_create_and_open(backer_name, type, &backer->dpif); | |
1328 | free(backer_name); | |
1329 | if (error) { | |
1330 | VLOG_ERR("failed to open datapath of type %s: %s", type, | |
1331 | strerror(error)); | |
4c1b1289 | 1332 | free(backer); |
acf60855 JP |
1333 | return error; |
1334 | } | |
1335 | ||
1336 | backer->type = xstrdup(type); | |
04d08d54 | 1337 | backer->governor = NULL; |
acf60855 JP |
1338 | backer->refcount = 1; |
1339 | hmap_init(&backer->odp_to_ofport_map); | |
8f73d537 | 1340 | hmap_init(&backer->drop_keys); |
04d08d54 | 1341 | hmap_init(&backer->subfacets); |
acf60855 | 1342 | timer_set_duration(&backer->next_expiration, 1000); |
2cc3c58e | 1343 | backer->need_revalidate = 0; |
7d82ab2e | 1344 | simap_init(&backer->tnl_backers); |
2cc3c58e | 1345 | tag_set_init(&backer->revalidate_set); |
40358701 | 1346 | backer->recv_set_enable = !ofproto_get_flow_restore_wait(); |
acf60855 JP |
1347 | *backerp = backer; |
1348 | ||
40358701 GS |
1349 | if (backer->recv_set_enable) { |
1350 | dpif_flow_flush(backer->dpif); | |
1351 | } | |
acf60855 JP |
1352 | |
1353 | /* Loop through the ports already on the datapath and remove any | |
1354 | * that we don't need anymore. */ | |
1355 | list_init(&garbage_list); | |
1356 | dpif_port_dump_start(&port_dump, backer->dpif); | |
1357 | while (dpif_port_dump_next(&port_dump, &port)) { | |
1358 | node = shash_find(&init_ofp_ports, port.name); | |
1359 | if (!node && strcmp(port.name, dpif_base_name(backer->dpif))) { | |
1360 | garbage = xmalloc(sizeof *garbage); | |
1361 | garbage->odp_port = port.port_no; | |
1362 | list_push_front(&garbage_list, &garbage->list_node); | |
1363 | } | |
1364 | } | |
1365 | dpif_port_dump_done(&port_dump); | |
1366 | ||
1367 | LIST_FOR_EACH_SAFE (garbage, next, list_node, &garbage_list) { | |
1368 | dpif_port_del(backer->dpif, garbage->odp_port); | |
1369 | list_remove(&garbage->list_node); | |
1370 | free(garbage); | |
1371 | } | |
1372 | ||
1373 | shash_add(&all_dpif_backers, type, backer); | |
1374 | ||
40358701 | 1375 | error = dpif_recv_set(backer->dpif, backer->recv_set_enable); |
acf60855 JP |
1376 | if (error) { |
1377 | VLOG_ERR("failed to listen on datapath of type %s: %s", | |
1378 | type, strerror(error)); | |
1379 | close_dpif_backer(backer); | |
1380 | return error; | |
1381 | } | |
1382 | ||
dc54ef36 EJ |
1383 | backer->max_n_subfacet = 0; |
1384 | backer->created = time_msec(); | |
1385 | backer->last_minute = backer->created; | |
1386 | memset(&backer->hourly, 0, sizeof backer->hourly); | |
1387 | memset(&backer->daily, 0, sizeof backer->daily); | |
1388 | memset(&backer->lifetime, 0, sizeof backer->lifetime); | |
1389 | backer->subfacet_add_count = 0; | |
1390 | backer->subfacet_del_count = 0; | |
1391 | backer->total_subfacet_add_count = 0; | |
1392 | backer->total_subfacet_del_count = 0; | |
1393 | backer->avg_n_subfacet = 0; | |
1394 | backer->avg_subfacet_life = 0; | |
1395 | ||
acf60855 JP |
1396 | return error; |
1397 | } | |
1398 | ||
abe529af | 1399 | static int |
0f5f95a9 | 1400 | construct(struct ofproto *ofproto_) |
abe529af BP |
1401 | { |
1402 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
acf60855 | 1403 | struct shash_node *node, *next; |
91858960 | 1404 | int max_ports; |
abe529af BP |
1405 | int error; |
1406 | int i; | |
1407 | ||
acf60855 | 1408 | error = open_dpif_backer(ofproto->up.type, &ofproto->backer); |
abe529af | 1409 | if (error) { |
abe529af BP |
1410 | return error; |
1411 | } | |
1412 | ||
acf60855 | 1413 | max_ports = dpif_get_max_ports(ofproto->backer->dpif); |
91858960 BP |
1414 | ofproto_init_max_ports(ofproto_, MIN(max_ports, OFPP_MAX)); |
1415 | ||
abe529af BP |
1416 | ofproto->netflow = NULL; |
1417 | ofproto->sflow = NULL; | |
29089a54 | 1418 | ofproto->ipfix = NULL; |
21f7563c | 1419 | ofproto->stp = NULL; |
abe529af | 1420 | hmap_init(&ofproto->bundles); |
e764773c | 1421 | ofproto->ml = mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME); |
abe529af BP |
1422 | for (i = 0; i < MAX_MIRRORS; i++) { |
1423 | ofproto->mirrors[i] = NULL; | |
1424 | } | |
1425 | ofproto->has_bonded_bundles = false; | |
1426 | ||
bcd2633a | 1427 | classifier_init(&ofproto->facets); |
4d2a0f39 | 1428 | ofproto->consistency_rl = LLONG_MIN; |
54a9cbc9 BP |
1429 | |
1430 | for (i = 0; i < N_TABLES; i++) { | |
1431 | struct table_dpif *table = &ofproto->tables[i]; | |
1432 | ||
1433 | table->catchall_table = NULL; | |
1434 | table->other_table = NULL; | |
1435 | table->basis = random_uint32(); | |
1436 | } | |
abe529af | 1437 | |
7ee20df1 BP |
1438 | list_init(&ofproto->completions); |
1439 | ||
abe529af BP |
1440 | ofproto_dpif_unixctl_init(); |
1441 | ||
ccb7c863 | 1442 | ofproto->has_mirrors = false; |
daff3353 EJ |
1443 | ofproto->has_bundle_action = false; |
1444 | ||
52a90c29 BP |
1445 | hmap_init(&ofproto->vlandev_map); |
1446 | hmap_init(&ofproto->realdev_vid_map); | |
1447 | ||
acf60855 | 1448 | sset_init(&ofproto->ports); |
0a740f48 | 1449 | sset_init(&ofproto->ghost_ports); |
acf60855 JP |
1450 | sset_init(&ofproto->port_poll_set); |
1451 | ofproto->port_poll_errno = 0; | |
1452 | ||
1453 | SHASH_FOR_EACH_SAFE (node, next, &init_ofp_ports) { | |
4f9e08a5 | 1454 | struct iface_hint *iface_hint = node->data; |
acf60855 JP |
1455 | |
1456 | if (!strcmp(iface_hint->br_name, ofproto->up.name)) { | |
1457 | /* Check if the datapath already has this port. */ | |
1458 | if (dpif_port_exists(ofproto->backer->dpif, node->name)) { | |
1459 | sset_add(&ofproto->ports, node->name); | |
1460 | } | |
1461 | ||
1462 | free(iface_hint->br_name); | |
1463 | free(iface_hint->br_type); | |
4f9e08a5 | 1464 | free(iface_hint); |
acf60855 JP |
1465 | shash_delete(&init_ofp_ports, node); |
1466 | } | |
1467 | } | |
e1b1d06a | 1468 | |
b44a10b7 BP |
1469 | hmap_insert(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node, |
1470 | hash_string(ofproto->up.name, 0)); | |
6527c598 | 1471 | memset(&ofproto->stats, 0, sizeof ofproto->stats); |
0f5f95a9 BP |
1472 | |
1473 | ofproto_init_tables(ofproto_, N_TABLES); | |
c57b2226 BP |
1474 | error = add_internal_flows(ofproto); |
1475 | ofproto->up.tables[TBL_INTERNAL].flags = OFTABLE_HIDDEN | OFTABLE_READONLY; | |
1476 | ||
735d7efb AZ |
1477 | ofproto->n_hit = 0; |
1478 | ofproto->n_missed = 0; | |
1479 | ||
c57b2226 BP |
1480 | return error; |
1481 | } | |
1482 | ||
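/* Adds a hidden flow to the internal table (TBL_INTERNAL) that matches on
 * register 0 == 'id' and executes 'ofpacts'. On success, stores the new rule
 * in '*rulep' and returns 0; on failure, returns the OpenFlow error reported
 * by ofproto_flow_mod(). */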
1483 | static int | |
1484 | add_internal_flow(struct ofproto_dpif *ofproto, int id, | |
f25d0cf3 | 1485 | const struct ofpbuf *ofpacts, struct rule_dpif **rulep) |
c57b2226 BP |
1486 | { |
1487 | struct ofputil_flow_mod fm; | |
1488 | int error; | |
1489 | ||
81a76618 BP |
1490 | match_init_catchall(&fm.match); |
1491 | fm.priority = 0; | |
1492 | match_set_reg(&fm.match, 0, id); | |
623e1caf | 1493 | fm.new_cookie = htonll(0); |
c57b2226 BP |
1494 | fm.cookie = htonll(0); |
1495 | fm.cookie_mask = htonll(0); | |
1496 | fm.table_id = TBL_INTERNAL; | |
1497 | fm.command = OFPFC_ADD; | |
1498 | fm.idle_timeout = 0; | |
1499 | fm.hard_timeout = 0; | |
1500 | fm.buffer_id = 0; | |
1501 | fm.out_port = 0; | |
1502 | fm.flags = 0; | |
f25d0cf3 BP |
1503 | fm.ofpacts = ofpacts->data; |
1504 | fm.ofpacts_len = ofpacts->size; | |
c57b2226 BP |
1505 | |
1506 | error = ofproto_flow_mod(&ofproto->up, &fm); | |
1507 | if (error) { | |
1508 | VLOG_ERR_RL(&rl, "failed to add internal flow %d (%s)", | |
1509 | id, ofperr_to_string(error)); | |
1510 | return error; | |
1511 | } | |
1512 | ||
bcd2633a | 1513 | *rulep = rule_dpif_lookup__(ofproto, &fm.match.flow, NULL, TBL_INTERNAL); |
cb22974d | 1514 | ovs_assert(*rulep != NULL); |
0f5f95a9 | 1515 | |
abe529af BP |
1516 | return 0; |
1517 | } | |
1518 | ||
c57b2226 BP |
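/* Installs the internal rules that implement default behavior: 'miss_rule'
 * sends unmatched packets to the controller, while 'no_packet_in_rule' and
 * 'drop_frags_rule' have no actions and therefore drop packets. */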
1519 | static int |
1520 | add_internal_flows(struct ofproto_dpif *ofproto) | |
1521 | { | |
f25d0cf3 BP |
1522 | struct ofpact_controller *controller; |
1523 | uint64_t ofpacts_stub[128 / 8]; | |
1524 | struct ofpbuf ofpacts; | |
c57b2226 BP |
1525 | int error; |
1526 | int id; | |
1527 | ||
f25d0cf3 | 1528 | ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub); |
c57b2226 BP |
1529 | id = 1; |
1530 | ||
f25d0cf3 BP |
1531 | controller = ofpact_put_CONTROLLER(&ofpacts); |
1532 | controller->max_len = UINT16_MAX; | |
1533 | controller->controller_id = 0; | |
1534 | controller->reason = OFPR_NO_MATCH; | |
1535 | ofpact_pad(&ofpacts); | |
1536 | ||
1537 | error = add_internal_flow(ofproto, id++, &ofpacts, &ofproto->miss_rule); | |
c57b2226 BP |
1538 | if (error) { |
1539 | return error; | |
1540 | } | |
1541 | ||
f25d0cf3 BP |
1542 | ofpbuf_clear(&ofpacts); |
1543 | error = add_internal_flow(ofproto, id++, &ofpacts, | |
c57b2226 | 1544 | &ofproto->no_packet_in_rule); |
7fd51d39 BP |
1545 | if (error) { |
1546 | return error; | |
1547 | } | |
1548 | ||
1549 | error = add_internal_flow(ofproto, id++, &ofpacts, | |
1550 | &ofproto->drop_frags_rule); | |
c57b2226 BP |
1551 | return error; |
1552 | } | |
1553 | ||
7ee20df1 BP |
1554 | static void |
1555 | complete_operations(struct ofproto_dpif *ofproto) | |
1556 | { | |
1557 | struct dpif_completion *c, *next; | |
1558 | ||
1559 | LIST_FOR_EACH_SAFE (c, next, list_node, &ofproto->completions) { | |
1560 | ofoperation_complete(c->op, 0); | |
1561 | list_remove(&c->list_node); | |
1562 | free(c); | |
1563 | } | |
1564 | } | |
1565 | ||
abe529af BP |
1566 | static void |
1567 | destruct(struct ofproto *ofproto_) | |
1568 | { | |
1569 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
7ee20df1 | 1570 | struct rule_dpif *rule, *next_rule; |
d0918789 | 1571 | struct oftable *table; |
abe529af BP |
1572 | int i; |
1573 | ||
b44a10b7 | 1574 | hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node); |
7ee20df1 BP |
1575 | complete_operations(ofproto); |
1576 | ||
0697b5c3 BP |
1577 | OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) { |
1578 | struct cls_cursor cursor; | |
1579 | ||
d0918789 | 1580 | cls_cursor_init(&cursor, &table->cls, NULL); |
0697b5c3 BP |
1581 | CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) { |
1582 | ofproto_rule_destroy(&rule->up); | |
1583 | } | |
7ee20df1 BP |
1584 | } |
1585 | ||
abe529af BP |
1586 | for (i = 0; i < MAX_MIRRORS; i++) { |
1587 | mirror_destroy(ofproto->mirrors[i]); | |
1588 | } | |
1589 | ||
1590 | netflow_destroy(ofproto->netflow); | |
bae473fe | 1591 | dpif_sflow_destroy(ofproto->sflow); |
abe529af BP |
1592 | hmap_destroy(&ofproto->bundles); |
1593 | mac_learning_destroy(ofproto->ml); | |
1594 | ||
bcd2633a | 1595 | classifier_destroy(&ofproto->facets); |
abe529af | 1596 | |
52a90c29 BP |
1597 | hmap_destroy(&ofproto->vlandev_map); |
1598 | hmap_destroy(&ofproto->realdev_vid_map); | |
1599 | ||
acf60855 | 1600 | sset_destroy(&ofproto->ports); |
0a740f48 | 1601 | sset_destroy(&ofproto->ghost_ports); |
acf60855 | 1602 | sset_destroy(&ofproto->port_poll_set); |
e1b1d06a | 1603 | |
acf60855 | 1604 | close_dpif_backer(ofproto->backer); |
abe529af BP |
1605 | } |
1606 | ||
1607 | static int | |
5fcc0d00 | 1608 | run_fast(struct ofproto *ofproto_) |
abe529af BP |
1609 | { |
1610 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
0aa66d6e | 1611 | struct ofport_dpif *ofport; |
abe529af | 1612 | |
40358701 GS |
1613 | /* Do not perform any periodic activity required by 'ofproto' while |
1614 | * waiting for flow restore to complete. */ | |
1615 | if (ofproto_get_flow_restore_wait()) { | |
1616 | return 0; | |
1617 | } | |
1618 | ||
0aa66d6e EJ |
1619 | HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) { |
1620 | port_run_fast(ofport); | |
1621 | } | |
1622 | ||
5fcc0d00 BP |
1623 | return 0; |
1624 | } | |
1625 | ||
1626 | static int | |
1627 | run(struct ofproto *ofproto_) | |
1628 | { | |
1629 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
1630 | struct ofport_dpif *ofport; | |
1631 | struct ofbundle *bundle; | |
1632 | int error; | |
1633 | ||
1634 | if (!clogged) { | |
1635 | complete_operations(ofproto); | |
1636 | } | |
5fcc0d00 | 1637 | |
40358701 GS |
1638 | /* Do not perform any of the periodic activity below that is required by |
1639 | * 'ofproto' while waiting for flow restore to complete. */ |
1640 | if (ofproto_get_flow_restore_wait()) { | |
1641 | return 0; | |
1642 | } | |
1643 | ||
5fcc0d00 BP |
1644 | error = run_fast(ofproto_); |
1645 | if (error) { | |
1646 | return error; | |
abe529af BP |
1647 | } |
1648 | ||
abe529af | 1649 | if (ofproto->netflow) { |
6fca1ffb BP |
1650 | if (netflow_run(ofproto->netflow)) { |
1651 | send_netflow_active_timeouts(ofproto); | |
1652 | } | |
abe529af BP |
1653 | } |
1654 | if (ofproto->sflow) { | |
bae473fe | 1655 | dpif_sflow_run(ofproto->sflow); |
abe529af BP |
1656 | } |
1657 | ||
1658 | HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) { | |
1659 | port_run(ofport); | |
1660 | } | |
1661 | HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) { | |
1662 | bundle_run(bundle); | |
1663 | } | |
1664 | ||
21f7563c | 1665 | stp_run(ofproto); |
2cc3c58e | 1666 | mac_learning_run(ofproto->ml, &ofproto->backer->revalidate_set); |
abe529af | 1667 | |
6814e51f | 1668 | /* Check the consistency of a random facet, to aid debugging. */ |
4d2a0f39 | 1669 | if (time_msec() >= ofproto->consistency_rl |
bcd2633a | 1670 | && !classifier_is_empty(&ofproto->facets) |
2cc3c58e | 1671 | && !ofproto->backer->need_revalidate) { |
bcd2633a JP |
1672 | struct cls_table *table; |
1673 | struct cls_rule *cr; | |
6814e51f BP |
1674 | struct facet *facet; |
1675 | ||
4d2a0f39 EJ |
1676 | ofproto->consistency_rl = time_msec() + 250; |
1677 | ||
bcd2633a JP |
1678 | table = CONTAINER_OF(hmap_random_node(&ofproto->facets.tables), |
1679 | struct cls_table, hmap_node); | |
1680 | cr = CONTAINER_OF(hmap_random_node(&table->rules), struct cls_rule, | |
1681 | hmap_node); | |
1682 | facet = CONTAINER_OF(cr, struct facet, cr); | |
1683 | ||
2cc3c58e | 1684 | if (!tag_set_intersects(&ofproto->backer->revalidate_set, |
bbafd73b | 1685 | facet->xout.tags)) { |
6814e51f | 1686 | if (!facet_check_consistency(facet)) { |
2cc3c58e | 1687 | ofproto->backer->need_revalidate = REV_INCONSISTENCY; |
6814e51f BP |
1688 | } |
1689 | } | |
1690 | } | |
1691 | ||
abe529af BP |
1692 | return 0; |
1693 | } | |
1694 | ||
1695 | static void | |
1696 | wait(struct ofproto *ofproto_) | |
1697 | { | |
1698 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
1699 | struct ofport_dpif *ofport; | |
1700 | struct ofbundle *bundle; | |
1701 | ||
7ee20df1 BP |
1702 | if (!clogged && !list_is_empty(&ofproto->completions)) { |
1703 | poll_immediate_wake(); | |
1704 | } | |
1705 | ||
40358701 GS |
1706 | if (ofproto_get_flow_restore_wait()) { |
1707 | return; | |
1708 | } | |
1709 | ||
acf60855 JP |
1710 | dpif_wait(ofproto->backer->dpif); |
1711 | dpif_recv_wait(ofproto->backer->dpif); | |
abe529af | 1712 | if (ofproto->sflow) { |
bae473fe | 1713 | dpif_sflow_wait(ofproto->sflow); |
abe529af | 1714 | } |
2cc3c58e | 1715 | if (!tag_set_is_empty(&ofproto->backer->revalidate_set)) { |
abe529af BP |
1716 | poll_immediate_wake(); |
1717 | } | |
1718 | HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) { | |
1719 | port_wait(ofport); | |
1720 | } | |
1721 | HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) { | |
1722 | bundle_wait(bundle); | |
1723 | } | |
6fca1ffb BP |
1724 | if (ofproto->netflow) { |
1725 | netflow_wait(ofproto->netflow); | |
1726 | } | |
1c313b88 | 1727 | mac_learning_wait(ofproto->ml); |
21f7563c | 1728 | stp_wait(ofproto); |
2cc3c58e | 1729 | if (ofproto->backer->need_revalidate) { |
abe529af BP |
1730 | /* Shouldn't happen, but if it does just go around again. */ |
1731 | VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()"); | |
1732 | poll_immediate_wake(); | |
abe529af BP |
1733 | } |
1734 | } | |
1735 | ||
0d085684 BP |
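/* Accounts the memory consumers specific to 'ofproto_' in 'usage': the
 * number of facets and the total number of subfacets across those facets. */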
1736 | static void |
1737 | get_memory_usage(const struct ofproto *ofproto_, struct simap *usage) | |
1738 | { | |
1739 | const struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
bcd2633a | 1740 | struct cls_cursor cursor; |
04d08d54 EJ |
1741 | size_t n_subfacets = 0; |
1742 | struct facet *facet; | |
0d085684 | 1743 | |
bcd2633a JP |
1744 | simap_increase(usage, "facets", classifier_count(&ofproto->facets)); |
1745 | ||
1746 | cls_cursor_init(&cursor, &ofproto->facets, NULL); | |
1747 | CLS_CURSOR_FOR_EACH (facet, cr, &cursor) { | |
04d08d54 EJ |
1748 | n_subfacets += list_size(&facet->subfacets); |
1749 | } | |
1750 | simap_increase(usage, "subfacets", n_subfacets); | |
0d085684 BP |
1751 | } |
1752 | ||
abe529af BP |
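/* Deletes every subfacet that belongs to 'ofproto_', removing installed
 * datapath flows in batches of up to SUBFACET_DESTROY_MAX_BATCH. */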
1753 | static void |
1754 | flush(struct ofproto *ofproto_) | |
1755 | { | |
1756 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
acf60855 JP |
1757 | struct subfacet *subfacet, *next_subfacet; |
1758 | struct subfacet *batch[SUBFACET_DESTROY_MAX_BATCH]; | |
1759 | int n_batch; | |
b0f7b9b5 | 1760 | |
acf60855 JP |
1761 | n_batch = 0; |
1762 | HMAP_FOR_EACH_SAFE (subfacet, next_subfacet, hmap_node, | |
04d08d54 EJ |
1763 | &ofproto->backer->subfacets) { |
1764 | if (ofproto_dpif_cast(subfacet->facet->rule->up.ofproto) != ofproto) { | |
1765 | continue; | |
1766 | } | |
1767 | ||
acf60855 JP |
1768 | if (subfacet->path != SF_NOT_INSTALLED) { |
1769 | batch[n_batch++] = subfacet; | |
1770 | if (n_batch >= SUBFACET_DESTROY_MAX_BATCH) { | |
04d08d54 | 1771 | subfacet_destroy_batch(ofproto->backer, batch, n_batch); |
acf60855 JP |
1772 | n_batch = 0; |
1773 | } | |
1774 | } else { | |
1775 | subfacet_destroy(subfacet); | |
b0f7b9b5 | 1776 | } |
abe529af | 1777 | } |
acf60855 JP |
1778 | |
1779 | if (n_batch > 0) { | |
04d08d54 | 1780 | subfacet_destroy_batch(ofproto->backer, batch, n_batch); |
acf60855 | 1781 | } |
abe529af BP |
1782 | } |
1783 | ||
6c1491fb BP |
1784 | static void |
1785 | get_features(struct ofproto *ofproto_ OVS_UNUSED, | |
9e1fd49b | 1786 | bool *arp_match_ip, enum ofputil_action_bitmap *actions) |
6c1491fb BP |
1787 | { |
1788 | *arp_match_ip = true; | |
9e1fd49b BP |
1789 | *actions = (OFPUTIL_A_OUTPUT | |
1790 | OFPUTIL_A_SET_VLAN_VID | | |
1791 | OFPUTIL_A_SET_VLAN_PCP | | |
1792 | OFPUTIL_A_STRIP_VLAN | | |
1793 | OFPUTIL_A_SET_DL_SRC | | |
1794 | OFPUTIL_A_SET_DL_DST | | |
1795 | OFPUTIL_A_SET_NW_SRC | | |
1796 | OFPUTIL_A_SET_NW_DST | | |
1797 | OFPUTIL_A_SET_NW_TOS | | |
1798 | OFPUTIL_A_SET_TP_SRC | | |
1799 | OFPUTIL_A_SET_TP_DST | | |
1800 | OFPUTIL_A_ENQUEUE); | |
6c1491fb BP |
1801 | } |
1802 | ||
1803 | static void | |
307975da | 1804 | get_tables(struct ofproto *ofproto_, struct ofp12_table_stats *ots) |
6c1491fb BP |
1805 | { |
1806 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
a8d9304d | 1807 | struct dpif_dp_stats s; |
7fd51d39 | 1808 | uint64_t n_miss, n_no_pkt_in, n_bytes, n_dropped_frags; |
7e741ffb | 1809 | uint64_t n_lookup; |
6c1491fb BP |
1810 | |
1811 | strcpy(ots->name, "classifier"); | |
1812 | ||
acf60855 | 1813 | dpif_get_dp_stats(ofproto->backer->dpif, &s); |
7e741ffb JG |
1814 | rule_get_stats(&ofproto->miss_rule->up, &n_miss, &n_bytes); |
1815 | rule_get_stats(&ofproto->no_packet_in_rule->up, &n_no_pkt_in, &n_bytes); | |
7fd51d39 | 1816 | rule_get_stats(&ofproto->drop_frags_rule->up, &n_dropped_frags, &n_bytes); |
acf60855 | 1817 | |
7fd51d39 | 1818 | n_lookup = s.n_hit + s.n_missed - n_dropped_frags; |
7e741ffb JG |
1819 | ots->lookup_count = htonll(n_lookup); |
1820 | ots->matched_count = htonll(n_lookup - n_miss - n_no_pkt_in); | |
6c1491fb BP |
1821 | } |
1822 | ||
abe529af BP |
1823 | static struct ofport * |
1824 | port_alloc(void) | |
1825 | { | |
1826 | struct ofport_dpif *port = xmalloc(sizeof *port); | |
1827 | return &port->up; | |
1828 | } | |
1829 | ||
1830 | static void | |
1831 | port_dealloc(struct ofport *port_) | |
1832 | { | |
1833 | struct ofport_dpif *port = ofport_dpif_cast(port_); | |
1834 | free(port); | |
1835 | } | |
1836 | ||
1837 | static int | |
1838 | port_construct(struct ofport *port_) | |
1839 | { | |
1840 | struct ofport_dpif *port = ofport_dpif_cast(port_); | |
1841 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto); | |
b9ad7294 | 1842 | const struct netdev *netdev = port->up.netdev; |
3aa30359 | 1843 | char namebuf[NETDEV_VPORT_NAME_BUFSIZE]; |
e1b1d06a JP |
1844 | struct dpif_port dpif_port; |
1845 | int error; | |
abe529af | 1846 | |
2cc3c58e | 1847 | ofproto->backer->need_revalidate = REV_RECONFIGURE; |
abe529af BP |
1848 | port->bundle = NULL; |
1849 | port->cfm = NULL; | |
ccc09689 | 1850 | port->bfd = NULL; |
abe529af | 1851 | port->tag = tag_create_random(); |
d5ffa7f2 | 1852 | port->may_enable = true; |
21f7563c JP |
1853 | port->stp_port = NULL; |
1854 | port->stp_state = STP_DISABLED; | |
b9ad7294 | 1855 | port->tnl_port = NULL; |
8b36f51e | 1856 | hmap_init(&port->priorities); |
52a90c29 BP |
1857 | port->realdev_ofp_port = 0; |
1858 | port->vlandev_vid = 0; | |
b9ad7294 | 1859 | port->carrier_seq = netdev_get_carrier_resets(netdev); |
abe529af | 1860 | |
b9ad7294 | 1861 | if (netdev_vport_is_patch(netdev)) { |
743cea45 NM |
1862 | /* By bailing out here, we don't submit the port to the sFlow module |
1863 | * to be considered for counter polling export. This is correct | |
1864 | * because the patch port represents an interface that sFlow considers | |
1865 | * to be "internal" to the switch as a whole, and therefore not a |
1866 | * candidate for counter polling. */ |
0a740f48 EJ |
1867 | port->odp_port = OVSP_NONE; |
1868 | return 0; | |
1869 | } | |
1870 | ||
acf60855 | 1871 | error = dpif_port_query_by_name(ofproto->backer->dpif, |
3aa30359 BP |
1872 | netdev_vport_get_dpif_port(netdev, namebuf, |
1873 | sizeof namebuf), | |
e1b1d06a JP |
1874 | &dpif_port); |
1875 | if (error) { | |
1876 | return error; | |
1877 | } | |
1878 | ||
1879 | port->odp_port = dpif_port.port_no; | |
1880 | ||
b9ad7294 EJ |
1881 | if (netdev_get_tunnel_config(netdev)) { |
1882 | port->tnl_port = tnl_port_add(&port->up, port->odp_port); | |
1883 | } else { | |
1884 | /* Sanity-check that a mapping doesn't already exist. This | |
1885 | * shouldn't happen for non-tunnel ports. */ | |
1886 | if (odp_port_to_ofp_port(ofproto, port->odp_port) != OFPP_NONE) { | |
1887 | VLOG_ERR("port %s already has an OpenFlow port number", | |
1888 | dpif_port.name); | |
da78d43d | 1889 | dpif_port_destroy(&dpif_port); |
b9ad7294 EJ |
1890 | return EBUSY; |
1891 | } | |
e1b1d06a | 1892 | |
b9ad7294 EJ |
1893 | hmap_insert(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node, |
1894 | hash_int(port->odp_port, 0)); | |
1895 | } | |
da78d43d | 1896 | dpif_port_destroy(&dpif_port); |
e1b1d06a | 1897 | |
abe529af | 1898 | if (ofproto->sflow) { |
e1b1d06a | 1899 | dpif_sflow_add_port(ofproto->sflow, port_, port->odp_port); |
abe529af BP |
1900 | } |
1901 | ||
1902 | return 0; | |
1903 | } | |
1904 | ||
1905 | static void | |
1906 | port_destruct(struct ofport *port_) | |
1907 | { | |
1908 | struct ofport_dpif *port = ofport_dpif_cast(port_); | |
1909 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto); | |
02f8d646 | 1910 | const char *devname = netdev_get_name(port->up.netdev); |
3aa30359 BP |
1911 | char namebuf[NETDEV_VPORT_NAME_BUFSIZE]; |
1912 | const char *dp_port_name; | |
abe529af | 1913 | |
3aa30359 BP |
1914 | dp_port_name = netdev_vport_get_dpif_port(port->up.netdev, namebuf, |
1915 | sizeof namebuf); | |
a614d823 | 1916 | if (dpif_port_exists(ofproto->backer->dpif, dp_port_name)) { |
acf60855 JP |
1917 | /* The underlying device is still there, so delete it. This |
1918 | * happens when the ofproto is being destroyed, since the caller | |
1919 | * assumes that removal of attached ports will happen as part of | |
1920 | * destruction. */ | |
a614d823 KM |
1921 | if (!port->tnl_port) { |
1922 | dpif_port_del(ofproto->backer->dpif, port->odp_port); | |
1923 | } | |
1924 | ofproto->backer->need_revalidate = REV_RECONFIGURE; | |
acf60855 JP |
1925 | } |
1926 | ||
b9ad7294 | 1927 | if (port->odp_port != OVSP_NONE && !port->tnl_port) { |
0a740f48 EJ |
1928 | hmap_remove(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node); |
1929 | } | |
1930 | ||
b9ad7294 | 1931 | tnl_port_del(port->tnl_port); |
02f8d646 | 1932 | sset_find_and_delete(&ofproto->ports, devname); |
0a740f48 | 1933 | sset_find_and_delete(&ofproto->ghost_ports, devname); |
2cc3c58e | 1934 | ofproto->backer->need_revalidate = REV_RECONFIGURE; |
abe529af | 1935 | bundle_remove(port_); |
a5610457 | 1936 | set_cfm(port_, NULL); |
8aee94b6 | 1937 | set_bfd(port_, NULL); |
abe529af | 1938 | if (ofproto->sflow) { |
bae473fe | 1939 | dpif_sflow_del_port(ofproto->sflow, port->odp_port); |
abe529af | 1940 | } |
8b36f51e EJ |
1941 | |
1942 | ofport_clear_priorities(port); | |
1943 | hmap_destroy(&port->priorities); | |
abe529af BP |
1944 | } |
1945 | ||
1946 | static void | |
1947 | port_modified(struct ofport *port_) | |
1948 | { | |
1949 | struct ofport_dpif *port = ofport_dpif_cast(port_); | |
1950 | ||
1951 | if (port->bundle && port->bundle->bond) { | |
1952 | bond_slave_set_netdev(port->bundle->bond, port, port->up.netdev); | |
1953 | } | |
9d46b444 EJ |
1954 | |
1955 | if (port->cfm) { | |
1956 | cfm_set_netdev(port->cfm, port->up.netdev); | |
1957 | } | |
abe529af BP |
1958 | } |
1959 | ||
1960 | static void | |
9e1fd49b | 1961 | port_reconfigured(struct ofport *port_, enum ofputil_port_config old_config) |
abe529af BP |
1962 | { |
1963 | struct ofport_dpif *port = ofport_dpif_cast(port_); | |
1964 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto); | |
9e1fd49b | 1965 | enum ofputil_port_config changed = old_config ^ port->up.pp.config; |
abe529af | 1966 | |
9e1fd49b | 1967 | if (changed & (OFPUTIL_PC_NO_RECV | OFPUTIL_PC_NO_RECV_STP | |
c57b2226 BP |
1968 | OFPUTIL_PC_NO_FWD | OFPUTIL_PC_NO_FLOOD | |
1969 | OFPUTIL_PC_NO_PACKET_IN)) { | |
2cc3c58e | 1970 | ofproto->backer->need_revalidate = REV_RECONFIGURE; |
7bde8dd8 | 1971 | |
9e1fd49b | 1972 | if (changed & OFPUTIL_PC_NO_FLOOD && port->bundle) { |
7bde8dd8 JP |
1973 | bundle_update(port->bundle); |
1974 | } | |
abe529af BP |
1975 | } |
1976 | } | |
1977 | ||
1978 | static int | |
1979 | set_sflow(struct ofproto *ofproto_, | |
1980 | const struct ofproto_sflow_options *sflow_options) | |
1981 | { | |
1982 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
bae473fe | 1983 | struct dpif_sflow *ds = ofproto->sflow; |
6ff686f2 | 1984 | |
abe529af | 1985 | if (sflow_options) { |
bae473fe | 1986 | if (!ds) { |
abe529af BP |
1987 | struct ofport_dpif *ofport; |
1988 | ||
4213f19d | 1989 | ds = ofproto->sflow = dpif_sflow_create(); |
abe529af | 1990 | HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) { |
e1b1d06a | 1991 | dpif_sflow_add_port(ds, &ofport->up, ofport->odp_port); |
abe529af | 1992 | } |
2cc3c58e | 1993 | ofproto->backer->need_revalidate = REV_RECONFIGURE; |
abe529af | 1994 | } |
bae473fe | 1995 | dpif_sflow_set_options(ds, sflow_options); |
abe529af | 1996 | } else { |
6ff686f2 PS |
1997 | if (ds) { |
1998 | dpif_sflow_destroy(ds); | |
2cc3c58e | 1999 | ofproto->backer->need_revalidate = REV_RECONFIGURE; |
6ff686f2 PS |
2000 | ofproto->sflow = NULL; |
2001 | } | |
abe529af BP |
2002 | } |
2003 | return 0; | |
2004 | } | |
2005 | ||
29089a54 RL |
2006 | static int |
2007 | set_ipfix( | |
2008 | struct ofproto *ofproto_, | |
2009 | const struct ofproto_ipfix_bridge_exporter_options *bridge_exporter_options, | |
2010 | const struct ofproto_ipfix_flow_exporter_options *flow_exporters_options, | |
2011 | size_t n_flow_exporters_options) | |
2012 | { | |
2013 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
2014 | struct dpif_ipfix *di = ofproto->ipfix; | |
2015 | ||
2016 | if (bridge_exporter_options || flow_exporters_options) { | |
2017 | if (!di) { | |
2018 | di = ofproto->ipfix = dpif_ipfix_create(); | |
2019 | } | |
2020 | dpif_ipfix_set_options( | |
2021 | di, bridge_exporter_options, flow_exporters_options, | |
2022 | n_flow_exporters_options); | |
2023 | } else { | |
2024 | if (di) { | |
2025 | dpif_ipfix_destroy(di); | |
2026 | ofproto->ipfix = NULL; | |
2027 | } | |
2028 | } | |
2029 | return 0; | |
2030 | } | |
2031 | ||
abe529af | 2032 | static int |
a5610457 | 2033 | set_cfm(struct ofport *ofport_, const struct cfm_settings *s) |
abe529af BP |
2034 | { |
2035 | struct ofport_dpif *ofport = ofport_dpif_cast(ofport_); | |
2036 | int error; | |
2037 | ||
a5610457 | 2038 | if (!s) { |
abe529af BP |
2039 | error = 0; |
2040 | } else { | |
2041 | if (!ofport->cfm) { | |
8c977421 EJ |
2042 | struct ofproto_dpif *ofproto; |
2043 | ||
2044 | ofproto = ofproto_dpif_cast(ofport->up.ofproto); | |
2cc3c58e | 2045 | ofproto->backer->need_revalidate = REV_RECONFIGURE; |
90967e95 | 2046 | ofport->cfm = cfm_create(ofport->up.netdev); |
abe529af BP |
2047 | } |
2048 | ||
a5610457 | 2049 | if (cfm_configure(ofport->cfm, s)) { |
abe529af BP |
2050 | return 0; |
2051 | } | |
2052 | ||
2053 | error = EINVAL; | |
2054 | } | |
2055 | cfm_destroy(ofport->cfm); | |
2056 | ofport->cfm = NULL; | |
2057 | return error; | |
2058 | } | |
2059 | ||
9a9e3786 BP |
2060 | static bool |
2061 | get_cfm_status(const struct ofport *ofport_, | |
2062 | struct ofproto_cfm_status *status) | |
1de11730 EJ |
2063 | { |
2064 | struct ofport_dpif *ofport = ofport_dpif_cast(ofport_); | |
2065 | ||
2066 | if (ofport->cfm) { | |
9a9e3786 BP |
2067 | status->faults = cfm_get_fault(ofport->cfm); |
2068 | status->remote_opstate = cfm_get_opup(ofport->cfm); | |
2069 | status->health = cfm_get_health(ofport->cfm); | |
2070 | cfm_get_remote_mpids(ofport->cfm, &status->rmps, &status->n_rmps); | |
2071 | return true; | |
1de11730 | 2072 | } else { |
9a9e3786 | 2073 | return false; |
1de11730 EJ |
2074 | } |
2075 | } | |
ccc09689 EJ |
2076 | |
2077 | static int | |
2078 | set_bfd(struct ofport *ofport_, const struct smap *cfg) | |
2079 | { | |
2080 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport_->ofproto); | |
2081 | struct ofport_dpif *ofport = ofport_dpif_cast(ofport_); | |
2082 | struct bfd *old; | |
2083 | ||
2084 | old = ofport->bfd; | |
2085 | ofport->bfd = bfd_configure(old, netdev_get_name(ofport->up.netdev), cfg); | |
2086 | if (ofport->bfd != old) { | |
2087 | ofproto->backer->need_revalidate = REV_RECONFIGURE; | |
2088 | } | |
2089 | ||
2090 | return 0; | |
2091 | } | |
2092 | ||
2093 | static int | |
2094 | get_bfd_status(struct ofport *ofport_, struct smap *smap) | |
2095 | { | |
2096 | struct ofport_dpif *ofport = ofport_dpif_cast(ofport_); | |
2097 | ||
2098 | if (ofport->bfd) { | |
2099 | bfd_get_status(ofport->bfd, smap); | |
2100 | return 0; | |
2101 | } else { | |
2102 | return ENOENT; | |
2103 | } | |
2104 | } | |
abe529af | 2105 | \f |
21f7563c JP |
2106 | /* Spanning Tree. */ |
2107 | ||
2108 | static void | |
2109 | send_bpdu_cb(struct ofpbuf *pkt, int port_num, void *ofproto_) | |
2110 | { | |
2111 | struct ofproto_dpif *ofproto = ofproto_; | |
2112 | struct stp_port *sp = stp_get_port(ofproto->stp, port_num); | |
2113 | struct ofport_dpif *ofport; | |
2114 | ||
2115 | ofport = stp_port_get_aux(sp); | |
2116 | if (!ofport) { | |
2117 | VLOG_WARN_RL(&rl, "%s: cannot send BPDU on unknown port %d", | |
2118 | ofproto->up.name, port_num); | |
2119 | } else { | |
2120 | struct eth_header *eth = pkt->l2; | |
2121 | ||
2122 | netdev_get_etheraddr(ofport->up.netdev, eth->eth_src); | |
2123 | if (eth_addr_is_zero(eth->eth_src)) { | |
2124 | VLOG_WARN_RL(&rl, "%s: cannot send BPDU on port %d " | |
2125 | "with unknown MAC", ofproto->up.name, port_num); | |
2126 | } else { | |
97d6520b | 2127 | send_packet(ofport, pkt); |
21f7563c JP |
2128 | } |
2129 | } | |
2130 | ofpbuf_delete(pkt); | |
2131 | } | |
2132 | ||
2133 | /* Configures STP on 'ofproto_' using the settings defined in 's'. */ | |
2134 | static int | |
2135 | set_stp(struct ofproto *ofproto_, const struct ofproto_stp_settings *s) | |
2136 | { | |
2137 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
2138 | ||
2139 | /* Only revalidate flows if the configuration changed. */ | |
2140 | if (!s != !ofproto->stp) { | |
2cc3c58e | 2141 | ofproto->backer->need_revalidate = REV_RECONFIGURE; |
21f7563c JP |
2142 | } |
2143 | ||
2144 | if (s) { | |
2145 | if (!ofproto->stp) { | |
2146 | ofproto->stp = stp_create(ofproto_->name, s->system_id, | |
2147 | send_bpdu_cb, ofproto); | |
2148 | ofproto->stp_last_tick = time_msec(); | |
2149 | } | |
2150 | ||
2151 | stp_set_bridge_id(ofproto->stp, s->system_id); | |
2152 | stp_set_bridge_priority(ofproto->stp, s->priority); | |
2153 | stp_set_hello_time(ofproto->stp, s->hello_time); | |
2154 | stp_set_max_age(ofproto->stp, s->max_age); | |
2155 | stp_set_forward_delay(ofproto->stp, s->fwd_delay); | |
2156 | } else { | |
851bf71d EJ |
2157 | struct ofport *ofport; |
2158 | ||
2159 | HMAP_FOR_EACH (ofport, hmap_node, &ofproto->up.ports) { | |
2160 | set_stp_port(ofport, NULL); | |
2161 | } | |
2162 | ||
21f7563c JP |
2163 | stp_destroy(ofproto->stp); |
2164 | ofproto->stp = NULL; | |
2165 | } | |
2166 | ||
2167 | return 0; | |
2168 | } | |
2169 | ||
2170 | static int | |
2171 | get_stp_status(struct ofproto *ofproto_, struct ofproto_stp_status *s) | |
2172 | { | |
2173 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
2174 | ||
2175 | if (ofproto->stp) { | |
2176 | s->enabled = true; | |
2177 | s->bridge_id = stp_get_bridge_id(ofproto->stp); | |
2178 | s->designated_root = stp_get_designated_root(ofproto->stp); | |
2179 | s->root_path_cost = stp_get_root_path_cost(ofproto->stp); | |
2180 | } else { | |
2181 | s->enabled = false; | |
2182 | } | |
2183 | ||
2184 | return 0; | |
2185 | } | |
2186 | ||
2187 | static void | |
2188 | update_stp_port_state(struct ofport_dpif *ofport) | |
2189 | { | |
2190 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto); | |
2191 | enum stp_state state; | |
2192 | ||
2193 | /* Figure out new state. */ | |
2194 | state = ofport->stp_port ? stp_port_get_state(ofport->stp_port) | |
2195 | : STP_DISABLED; | |
2196 | ||
2197 | /* Update state. */ | |
2198 | if (ofport->stp_state != state) { | |
9e1fd49b | 2199 | enum ofputil_port_state of_state; |
21f7563c JP |
2200 | bool fwd_change; |
2201 | ||
2202 | VLOG_DBG_RL(&rl, "port %s: STP state changed from %s to %s", | |
2203 | netdev_get_name(ofport->up.netdev), | |
2204 | stp_state_name(ofport->stp_state), | |
2205 | stp_state_name(state)); | |
2206 | if (stp_learn_in_state(ofport->stp_state) | |
2207 | != stp_learn_in_state(state)) { | |
2208 | /* xxx Learning action flows should also be flushed. */ | |
2cc3c58e EJ |
2209 | mac_learning_flush(ofproto->ml, |
2210 | &ofproto->backer->revalidate_set); | |
21f7563c JP |
2211 | } |
2212 | fwd_change = stp_forward_in_state(ofport->stp_state) | |
2213 | != stp_forward_in_state(state); | |
2214 | ||
2cc3c58e | 2215 | ofproto->backer->need_revalidate = REV_STP; |
21f7563c JP |
2216 | ofport->stp_state = state; |
2217 | ofport->stp_state_entered = time_msec(); | |
2218 | ||
b308140a | 2219 | if (fwd_change && ofport->bundle) { |
21f7563c JP |
2220 | bundle_update(ofport->bundle); |
2221 | } | |
2222 | ||
2223 | /* Update the STP state bits in the OpenFlow port description. */ | |
9e1fd49b BP |
2224 | of_state = ofport->up.pp.state & ~OFPUTIL_PS_STP_MASK; |
2225 | of_state |= (state == STP_LISTENING ? OFPUTIL_PS_STP_LISTEN | |
2226 | : state == STP_LEARNING ? OFPUTIL_PS_STP_LEARN | |
2227 | : state == STP_FORWARDING ? OFPUTIL_PS_STP_FORWARD | |
2228 | : state == STP_BLOCKING ? OFPUTIL_PS_STP_BLOCK | |
2229 | : 0); | |
21f7563c JP |
2230 | ofproto_port_set_state(&ofport->up, of_state); |
2231 | } | |
2232 | } | |
2233 | ||
2234 | /* Configures STP on 'ofport_' using the settings defined in 's'. The | |
2235 | * caller is responsible for assigning STP port numbers and ensuring | |
2236 | * there are no duplicates. */ | |
2237 | static int | |
2238 | set_stp_port(struct ofport *ofport_, | |
2239 | const struct ofproto_port_stp_settings *s) | |
2240 | { | |
2241 | struct ofport_dpif *ofport = ofport_dpif_cast(ofport_); | |
2242 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto); | |
2243 | struct stp_port *sp = ofport->stp_port; | |
2244 | ||
2245 | if (!s || !s->enable) { | |
2246 | if (sp) { | |
2247 | ofport->stp_port = NULL; | |
2248 | stp_port_disable(sp); | |
ecd12731 | 2249 | update_stp_port_state(ofport); |
21f7563c JP |
2250 | } |
2251 | return 0; | |
2252 | } else if (sp && stp_port_no(sp) != s->port_num | |
2253 | && ofport == stp_port_get_aux(sp)) { | |
2254 | /* The port-id changed, so disable the old one if it's not | |
2255 | * already in use by another port. */ | |
2256 | stp_port_disable(sp); | |
2257 | } | |
2258 | ||
2259 | sp = ofport->stp_port = stp_get_port(ofproto->stp, s->port_num); | |
2260 | stp_port_enable(sp); | |
2261 | ||
2262 | stp_port_set_aux(sp, ofport); | |
2263 | stp_port_set_priority(sp, s->priority); | |
2264 | stp_port_set_path_cost(sp, s->path_cost); | |
2265 | ||
2266 | update_stp_port_state(ofport); | |
2267 | ||
2268 | return 0; | |
2269 | } | |
2270 | ||
2271 | static int | |
2272 | get_stp_port_status(struct ofport *ofport_, | |
2273 | struct ofproto_port_stp_status *s) | |
2274 | { | |
2275 | struct ofport_dpif *ofport = ofport_dpif_cast(ofport_); | |
2276 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto); | |
2277 | struct stp_port *sp = ofport->stp_port; | |
2278 | ||
2279 | if (!ofproto->stp || !sp) { | |
2280 | s->enabled = false; | |
2281 | return 0; | |
2282 | } | |
2283 | ||
2284 | s->enabled = true; | |
2285 | s->port_id = stp_port_get_id(sp); | |
2286 | s->state = stp_port_get_state(sp); | |
2287 | s->sec_in_state = (time_msec() - ofport->stp_state_entered) / 1000; | |
2288 | s->role = stp_port_get_role(sp); | |
80740385 | 2289 | stp_port_get_counts(sp, &s->tx_count, &s->rx_count, &s->error_count); |
21f7563c JP |
2290 | |
2291 | return 0; | |
2292 | } | |
2293 | ||
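/* Advances the STP state machine by the time elapsed since the last tick,
 * propagates any resulting port state changes, and flushes the MAC learning
 * table when STP requests it. */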
2294 | static void | |
2295 | stp_run(struct ofproto_dpif *ofproto) | |
2296 | { | |
2297 | if (ofproto->stp) { | |
2298 | long long int now = time_msec(); | |
2299 | long long int elapsed = now - ofproto->stp_last_tick; | |
2300 | struct stp_port *sp; | |
2301 | ||
2302 | if (elapsed > 0) { | |
2303 | stp_tick(ofproto->stp, MIN(INT_MAX, elapsed)); | |
2304 | ofproto->stp_last_tick = now; | |
2305 | } | |
2306 | while (stp_get_changed_port(ofproto->stp, &sp)) { | |
2307 | struct ofport_dpif *ofport = stp_port_get_aux(sp); | |
2308 | ||
2309 | if (ofport) { | |
2310 | update_stp_port_state(ofport); | |
2311 | } | |
2312 | } | |
6ae50723 EJ |
2313 | |
2314 | if (stp_check_and_reset_fdb_flush(ofproto->stp)) { | |
2cc3c58e | 2315 | mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set); |
6ae50723 | 2316 | } |
21f7563c JP |
2317 | } |
2318 | } | |
2319 | ||
2320 | static void | |
2321 | stp_wait(struct ofproto_dpif *ofproto) | |
2322 | { | |
2323 | if (ofproto->stp) { | |
2324 | poll_timer_wait(1000); | |
2325 | } | |
2326 | } | |
2327 | ||
2328 | /* Returns true if STP should process 'flow'. */ | |
2329 | static bool | |
2330 | stp_should_process_flow(const struct flow *flow) | |
2331 | { | |
2332 | return eth_addr_equals(flow->dl_dst, eth_addr_stp); | |
2333 | } | |
2334 | ||
2335 | static void | |
2336 | stp_process_packet(const struct ofport_dpif *ofport, | |
2337 | const struct ofpbuf *packet) | |
2338 | { | |
2339 | struct ofpbuf payload = *packet; | |
2340 | struct eth_header *eth = payload.data; | |
2341 | struct stp_port *sp = ofport->stp_port; | |
2342 | ||
2343 | /* Sink packets on ports that have STP disabled when the bridge has | |
2344 | * STP enabled. */ | |
2345 | if (!sp || stp_port_get_state(sp) == STP_DISABLED) { | |
2346 | return; | |
2347 | } | |
2348 | ||
2349 | /* Trim off padding on payload. */ | |
c573540b BP |
2350 | if (payload.size > ntohs(eth->eth_type) + ETH_HEADER_LEN) { |
2351 | payload.size = ntohs(eth->eth_type) + ETH_HEADER_LEN; | |
21f7563c JP |
2352 | } |
2353 | ||
2354 | if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) { | |
2355 | stp_received_bpdu(sp, payload.data, payload.size); | |
2356 | } | |
2357 | } | |
2358 | \f | |
8b36f51e EJ |
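/* Returns the priority_to_dscp entry for 'priority' within 'ofport', or NULL
 * if there is none. */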
2359 | static struct priority_to_dscp * |
2360 | get_priority(const struct ofport_dpif *ofport, uint32_t priority) | |
2361 | { | |
2362 | struct priority_to_dscp *pdscp; | |
2363 | uint32_t hash; | |
2364 | ||
2365 | hash = hash_int(priority, 0); | |
2366 | HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &ofport->priorities) { | |
2367 | if (pdscp->priority == priority) { | |
2368 | return pdscp; | |
2369 | } | |
2370 | } | |
2371 | return NULL; | |
2372 | } | |
2373 | ||
2374 | static void | |
2375 | ofport_clear_priorities(struct ofport_dpif *ofport) | |
2376 | { | |
2377 | struct priority_to_dscp *pdscp, *next; | |
2378 | ||
2379 | HMAP_FOR_EACH_SAFE (pdscp, next, hmap_node, &ofport->priorities) { | |
2380 | hmap_remove(&ofport->priorities, &pdscp->hmap_node); | |
2381 | free(pdscp); | |
2382 | } | |
2383 | } | |
2384 | ||
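/* Configures QoS marking for 'ofport_': maps each OpenFlow queue in
 * 'qdscp_list' to the datapath priority that implements it and records the
 * DSCP value to mark its packets with, forcing revalidation whenever the
 * mapping changes. */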
2385 | static int | |
2386 | set_queues(struct ofport *ofport_, | |
2387 | const struct ofproto_port_queue *qdscp_list, | |
2388 | size_t n_qdscp) | |
2389 | { | |
2390 | struct ofport_dpif *ofport = ofport_dpif_cast(ofport_); | |
2391 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto); | |
2392 | struct hmap new = HMAP_INITIALIZER(&new); | |
2393 | size_t i; | |
2394 | ||
2395 | for (i = 0; i < n_qdscp; i++) { | |
2396 | struct priority_to_dscp *pdscp; | |
2397 | uint32_t priority; | |
2398 | uint8_t dscp; | |
2399 | ||
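/* DSCP occupies the upper six bits of the IP ToS byte, so shift the
 * configured value into place and mask off the ECN bits. */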
2400 | dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK; | |
acf60855 | 2401 | if (dpif_queue_to_priority(ofproto->backer->dpif, qdscp_list[i].queue, |
8b36f51e EJ |
2402 | &priority)) { |
2403 | continue; | |
2404 | } | |
2405 | ||
2406 | pdscp = get_priority(ofport, priority); | |
2407 | if (pdscp) { | |
2408 | hmap_remove(&ofport->priorities, &pdscp->hmap_node); | |
2409 | } else { | |
2410 | pdscp = xmalloc(sizeof *pdscp); | |
2411 | pdscp->priority = priority; | |
2412 | pdscp->dscp = dscp; | |
2cc3c58e | 2413 | ofproto->backer->need_revalidate = REV_RECONFIGURE; |
8b36f51e EJ |
2414 | } |
2415 | ||
2416 | if (pdscp->dscp != dscp) { | |
2417 | pdscp->dscp = dscp; | |
2cc3c58e | 2418 | ofproto->backer->need_revalidate = REV_RECONFIGURE; |
8b36f51e EJ |
2419 | } |
2420 | ||
2421 | hmap_insert(&new, &pdscp->hmap_node, hash_int(pdscp->priority, 0)); | |
2422 | } | |
2423 | ||
2424 | if (!hmap_is_empty(&ofport->priorities)) { | |
2425 | ofport_clear_priorities(ofport); | |
2cc3c58e | 2426 | ofproto->backer->need_revalidate = REV_RECONFIGURE; |
8b36f51e EJ |
2427 | } |
2428 | ||
2429 | hmap_swap(&new, &ofport->priorities); | |
2430 | hmap_destroy(&new); | |
2431 | ||
2432 | return 0; | |
2433 | } | |
2434 | \f | |
abe529af BP |
2435 | /* Bundles. */ |
2436 | ||
b44a10b7 BP |
2437 | /* Expires all MAC learning entries associated with 'bundle' and forces its |
2438 | * ofproto to revalidate every flow. | |
2439 | * | |
2440 | * Normally MAC learning entries are removed only from the ofproto associated | |
2441 | * with 'bundle', but if 'all_ofprotos' is true, then the MAC learning entries | |
2442 | * are removed from every ofproto. When patch ports and SLB bonds are in |
2443 | * use, a VM migration happens, and the gratuitous ARPs are somehow lost, |
2444 | * this avoids a MAC_ENTRY_IDLE_TIME delay before the migrated VM can |
2445 | * communicate with the host from which it migrated. */ |
abe529af | 2446 | static void |
b44a10b7 | 2447 | bundle_flush_macs(struct ofbundle *bundle, bool all_ofprotos) |
abe529af BP |
2448 | { |
2449 | struct ofproto_dpif *ofproto = bundle->ofproto; | |
2450 | struct mac_learning *ml = ofproto->ml; | |
2451 | struct mac_entry *mac, *next_mac; | |
2452 | ||
2cc3c58e | 2453 | ofproto->backer->need_revalidate = REV_RECONFIGURE; |
abe529af BP |
2454 | LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) { |
2455 | if (mac->port.p == bundle) { | |
b44a10b7 BP |
2456 | if (all_ofprotos) { |
2457 | struct ofproto_dpif *o; | |
2458 | ||
2459 | HMAP_FOR_EACH (o, all_ofproto_dpifs_node, &all_ofproto_dpifs) { | |
2460 | if (o != ofproto) { | |
2461 | struct mac_entry *e; | |
2462 | ||
2463 | e = mac_learning_lookup(o->ml, mac->mac, mac->vlan, | |
2464 | NULL); | |
2465 | if (e) { | |
b44a10b7 BP |
2466 | mac_learning_expire(o->ml, e); |
2467 | } | |
2468 | } | |
2469 | } | |
2470 | } | |
2471 | ||
abe529af BP |
2472 | mac_learning_expire(ml, mac); |
2473 | } | |
2474 | } | |
2475 | } | |
2476 | ||
2477 | static struct ofbundle * | |
2478 | bundle_lookup(const struct ofproto_dpif *ofproto, void *aux) | |
2479 | { | |
2480 | struct ofbundle *bundle; | |
2481 | ||
2482 | HMAP_FOR_EACH_IN_BUCKET (bundle, hmap_node, hash_pointer(aux, 0), | |
2483 | &ofproto->bundles) { | |
2484 | if (bundle->aux == aux) { | |
2485 | return bundle; | |
2486 | } | |
2487 | } | |
2488 | return NULL; | |
2489 | } | |
2490 | ||
2491 | /* Looks up each of the 'n_auxes' pointers in 'auxes' as bundles and adds the | |
2492 | * ones that are found to 'bundles'. */ | |
2493 | static void | |
2494 | bundle_lookup_multiple(struct ofproto_dpif *ofproto, | |
2495 | void **auxes, size_t n_auxes, | |
2496 | struct hmapx *bundles) | |
2497 | { | |
2498 | size_t i; | |
2499 | ||
2500 | hmapx_init(bundles); | |
2501 | for (i = 0; i < n_auxes; i++) { | |
2502 | struct ofbundle *bundle = bundle_lookup(ofproto, auxes[i]); | |
2503 | if (bundle) { | |
2504 | hmapx_add(bundles, bundle); | |
2505 | } | |
2506 | } | |
2507 | } | |
2508 | ||
7bde8dd8 JP |
2509 | static void |
2510 | bundle_update(struct ofbundle *bundle) | |
2511 | { | |
2512 | struct ofport_dpif *port; | |
2513 | ||
2514 | bundle->floodable = true; | |
2515 | LIST_FOR_EACH (port, bundle_node, &bundle->ports) { | |
9e1fd49b BP |
2516 | if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD |
2517 | || !stp_forward_in_state(port->stp_state)) { | |
7bde8dd8 JP |
2518 | bundle->floodable = false; |
2519 | break; | |
2520 | } | |
2521 | } | |
2522 | } | |
2523 | ||
abe529af BP |
2524 | static void |
2525 | bundle_del_port(struct ofport_dpif *port) | |
2526 | { | |
2527 | struct ofbundle *bundle = port->bundle; | |
2528 | ||
2cc3c58e | 2529 | bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE; |
6f77f4ae | 2530 | |
abe529af BP |
2531 | list_remove(&port->bundle_node); |
2532 | port->bundle = NULL; | |
2533 | ||
2534 | if (bundle->lacp) { | |
2535 | lacp_slave_unregister(bundle->lacp, port); | |
2536 | } | |
2537 | if (bundle->bond) { | |
2538 | bond_slave_unregister(bundle->bond, port); | |
2539 | } | |
2540 | ||
7bde8dd8 | 2541 | bundle_update(bundle); |
abe529af BP |
2542 | } |
2543 | ||
2544 | static bool | |
213b00cf | 2545 | bundle_add_port(struct ofbundle *bundle, uint16_t ofp_port, |
df53d41c | 2546 | struct lacp_slave_settings *lacp) |
abe529af BP |
2547 | { |
2548 | struct ofport_dpif *port; | |
2549 | ||
2550 | port = get_ofp_port(bundle->ofproto, ofp_port); | |
2551 | if (!port) { | |
2552 | return false; | |
2553 | } | |
2554 | ||
2555 | if (port->bundle != bundle) { | |
2cc3c58e | 2556 | bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE; |
abe529af BP |
2557 | if (port->bundle) { |
2558 | bundle_del_port(port); | |
2559 | } | |
2560 | ||
2561 | port->bundle = bundle; | |
2562 | list_push_back(&bundle->ports, &port->bundle_node); | |
9e1fd49b BP |
2563 | if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD |
2564 | || !stp_forward_in_state(port->stp_state)) { | |
abe529af BP |
2565 | bundle->floodable = false; |
2566 | } | |
2567 | } | |
2568 | if (lacp) { | |
2cc3c58e | 2569 | bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE; |
abe529af BP |
2570 | lacp_slave_register(bundle->lacp, port, lacp); |
2571 | } | |
2572 | ||
2573 | return true; | |
2574 | } | |
2575 | ||
2576 | static void | |
2577 | bundle_destroy(struct ofbundle *bundle) | |
2578 | { | |
2579 | struct ofproto_dpif *ofproto; | |
2580 | struct ofport_dpif *port, *next_port; | |
2581 | int i; | |
2582 | ||
2583 | if (!bundle) { | |
2584 | return; | |
2585 | } | |
2586 | ||
2587 | ofproto = bundle->ofproto; | |
2588 | for (i = 0; i < MAX_MIRRORS; i++) { | |
2589 | struct ofmirror *m = ofproto->mirrors[i]; | |
2590 | if (m) { | |
2591 | if (m->out == bundle) { | |
2592 | mirror_destroy(m); | |
2593 | } else if (hmapx_find_and_delete(&m->srcs, bundle) | |
2594 | || hmapx_find_and_delete(&m->dsts, bundle)) { | |
2cc3c58e | 2595 | ofproto->backer->need_revalidate = REV_RECONFIGURE; |
abe529af BP |
2596 | } |
2597 | } | |
2598 | } | |
2599 | ||
2600 | LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) { | |
2601 | bundle_del_port(port); | |
2602 | } | |
2603 | ||
b44a10b7 | 2604 | bundle_flush_macs(bundle, true); |
abe529af BP |
2605 | hmap_remove(&ofproto->bundles, &bundle->hmap_node); |
2606 | free(bundle->name); | |
2607 | free(bundle->trunks); | |
2608 | lacp_destroy(bundle->lacp); | |
2609 | bond_destroy(bundle->bond); | |
2610 | free(bundle); | |
2611 | } | |
2612 | ||
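/* Creates, reconfigures, or, when 's' is NULL, destroys the bundle associated
 * with 'aux' on 'ofproto_', updating its LACP, slave, VLAN, and bonding
 * configuration and flushing MAC learning entries when a change requires
 * it. */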
2613 | static int | |
2614 | bundle_set(struct ofproto *ofproto_, void *aux, | |
2615 | const struct ofproto_bundle_settings *s) | |
2616 | { | |
2617 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
2618 | bool need_flush = false; | |
abe529af BP |
2619 | struct ofport_dpif *port; |
2620 | struct ofbundle *bundle; | |
ecac4ebf BP |
2621 | unsigned long *trunks; |
2622 | int vlan; | |
abe529af BP |
2623 | size_t i; |
2624 | bool ok; | |
2625 | ||
2626 | if (!s) { | |
2627 | bundle_destroy(bundle_lookup(ofproto, aux)); | |
2628 | return 0; | |
2629 | } | |
2630 | ||
cb22974d BP |
2631 | ovs_assert(s->n_slaves == 1 || s->bond != NULL); |
2632 | ovs_assert((s->lacp != NULL) == (s->lacp_slaves != NULL)); | |
abe529af BP |
2633 | |
2634 | bundle = bundle_lookup(ofproto, aux); | |
2635 | if (!bundle) { | |
2636 | bundle = xmalloc(sizeof *bundle); | |
2637 | ||
2638 | bundle->ofproto = ofproto; | |
2639 | hmap_insert(&ofproto->bundles, &bundle->hmap_node, | |
2640 | hash_pointer(aux, 0)); | |
2641 | bundle->aux = aux; | |
2642 | bundle->name = NULL; | |
2643 | ||
2644 | list_init(&bundle->ports); | |
ecac4ebf | 2645 | bundle->vlan_mode = PORT_VLAN_TRUNK; |
abe529af BP |
2646 | bundle->vlan = -1; |
2647 | bundle->trunks = NULL; | |
5e9ceccd | 2648 | bundle->use_priority_tags = s->use_priority_tags; |
abe529af BP |
2649 | bundle->lacp = NULL; |
2650 | bundle->bond = NULL; | |
2651 | ||
2652 | bundle->floodable = true; | |
2653 | ||
2654 | bundle->src_mirrors = 0; | |
2655 | bundle->dst_mirrors = 0; | |
2656 | bundle->mirror_out = 0; | |
2657 | } | |
2658 | ||
2659 | if (!bundle->name || strcmp(s->name, bundle->name)) { | |
2660 | free(bundle->name); | |
2661 | bundle->name = xstrdup(s->name); | |
2662 | } | |
2663 | ||
2664 | /* LACP. */ | |
2665 | if (s->lacp) { | |
2666 | if (!bundle->lacp) { | |
2cc3c58e | 2667 | ofproto->backer->need_revalidate = REV_RECONFIGURE; |
abe529af BP |
2668 | bundle->lacp = lacp_create(); |
2669 | } | |
2670 | lacp_configure(bundle->lacp, s->lacp); | |
2671 | } else { | |
2672 | lacp_destroy(bundle->lacp); | |
2673 | bundle->lacp = NULL; | |
2674 | } | |
2675 | ||
2676 | /* Update set of ports. */ | |
2677 | ok = true; | |
2678 | for (i = 0; i < s->n_slaves; i++) { | |
2679 | if (!bundle_add_port(bundle, s->slaves[i], | |
df53d41c | 2680 | s->lacp ? &s->lacp_slaves[i] : NULL)) { |
abe529af BP |
2681 | ok = false; |
2682 | } | |
2683 | } | |
2684 | if (!ok || list_size(&bundle->ports) != s->n_slaves) { | |
2685 | struct ofport_dpif *next_port; | |
2686 | ||
2687 | LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) { | |
2688 | for (i = 0; i < s->n_slaves; i++) { | |
56c769ab | 2689 | if (s->slaves[i] == port->up.ofp_port) { |
abe529af BP |
2690 | goto found; |
2691 | } | |
2692 | } | |
2693 | ||
2694 | bundle_del_port(port); | |
2695 | found: ; | |
2696 | } | |
2697 | } | |
cb22974d | 2698 | ovs_assert(list_size(&bundle->ports) <= s->n_slaves); |
abe529af BP |
2699 | |
2700 | if (list_is_empty(&bundle->ports)) { | |
2701 | bundle_destroy(bundle); | |
2702 | return EINVAL; | |
2703 | } | |
2704 | ||
ecac4ebf | 2705 | /* Set VLAN tagging mode */ |
5e9ceccd BP |
2706 | if (s->vlan_mode != bundle->vlan_mode |
2707 | || s->use_priority_tags != bundle->use_priority_tags) { | |
ecac4ebf | 2708 | bundle->vlan_mode = s->vlan_mode; |
5e9ceccd | 2709 | bundle->use_priority_tags = s->use_priority_tags; |
ecac4ebf BP |
2710 | need_flush = true; |
2711 | } | |
2712 | ||
abe529af | 2713 | /* Set VLAN tag. */ |
ecac4ebf BP |
2714 | vlan = (s->vlan_mode == PORT_VLAN_TRUNK ? -1 |
2715 | : s->vlan >= 0 && s->vlan <= 4095 ? s->vlan | |
2716 | : 0); | |
2717 | if (vlan != bundle->vlan) { | |
2718 | bundle->vlan = vlan; | |
abe529af BP |
2719 | need_flush = true; |
2720 | } | |
2721 | ||
2722 | /* Get trunked VLANs. */ | |
ecac4ebf BP |
2723 | switch (s->vlan_mode) { |
2724 | case PORT_VLAN_ACCESS: | |
2725 | trunks = NULL; | |
2726 | break; | |
2727 | ||
2728 | case PORT_VLAN_TRUNK: | |
ebc56baa | 2729 | trunks = CONST_CAST(unsigned long *, s->trunks); |
ecac4ebf BP |
2730 | break; |
2731 | ||
2732 | case PORT_VLAN_NATIVE_UNTAGGED: | |
2733 | case PORT_VLAN_NATIVE_TAGGED: | |
2734 | if (vlan != 0 && (!s->trunks | |
2735 | || !bitmap_is_set(s->trunks, vlan) | |
2736 | || bitmap_is_set(s->trunks, 0))) { | |
2737 | /* Force trunking the native VLAN and prohibit trunking VLAN 0. */ | |
2738 | if (s->trunks) { | |
2739 | trunks = bitmap_clone(s->trunks, 4096); | |
2740 | } else { | |
2741 | trunks = bitmap_allocate1(4096); | |
2742 | } | |
2743 | bitmap_set1(trunks, vlan); | |
2744 | bitmap_set0(trunks, 0); | |
2745 | } else { | |
ebc56baa | 2746 | trunks = CONST_CAST(unsigned long *, s->trunks); |
ecac4ebf BP |
2747 | } |
2748 | break; | |
2749 | ||
2750 | default: | |
2751 | NOT_REACHED(); | |
2752 | } | |
abe529af BP |
2753 | if (!vlan_bitmap_equal(trunks, bundle->trunks)) { |
2754 | free(bundle->trunks); | |
ecac4ebf BP |
2755 | if (trunks == s->trunks) { |
2756 | bundle->trunks = vlan_bitmap_clone(trunks); | |
2757 | } else { | |
2758 | bundle->trunks = trunks; | |
2759 | trunks = NULL; | |
2760 | } | |
abe529af BP |
2761 | need_flush = true; |
2762 | } | |
ecac4ebf BP |
2763 | if (trunks != s->trunks) { |
2764 | free(trunks); | |
2765 | } | |
abe529af BP |
2766 | |
2767 | /* Bonding. */ | |
2768 | if (!list_is_short(&bundle->ports)) { | |
2769 | bundle->ofproto->has_bonded_bundles = true; | |
2770 | if (bundle->bond) { | |
2771 | if (bond_reconfigure(bundle->bond, s->bond)) { | |
2cc3c58e | 2772 | ofproto->backer->need_revalidate = REV_RECONFIGURE; |
abe529af BP |
2773 | } |
2774 | } else { | |
2775 | bundle->bond = bond_create(s->bond); | |
2cc3c58e | 2776 | ofproto->backer->need_revalidate = REV_RECONFIGURE; |
abe529af BP |
2777 | } |
2778 | ||
2779 | LIST_FOR_EACH (port, bundle_node, &bundle->ports) { | |
df53d41c | 2780 | bond_slave_register(bundle->bond, port, port->up.netdev); |
abe529af BP |
2781 | } |
2782 | } else { | |
2783 | bond_destroy(bundle->bond); | |
2784 | bundle->bond = NULL; | |
2785 | } | |
2786 | ||
2787 | /* If we changed something that would affect MAC learning, un-learn | |
2788 | * everything on this port and force flow revalidation. */ | |
2789 | if (need_flush) { | |
b44a10b7 | 2790 | bundle_flush_macs(bundle, false); |
abe529af BP |
2791 | } |
2792 | ||
2793 | return 0; | |
2794 | } | |
2795 | ||
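A note on the trunk computation above: in the two native-VLAN modes, bundle_set() copies the configured trunk set (or starts from "all VLANs" when none is configured) and then forces the native VLAN in and VLAN 0 out. Below is a minimal sketch of just that adjustment, written as if it lived in this file; the bitmap helpers are the ones already called above, and the function name is illustrative, not part of the source.

    /* Illustrative only: the PORT_VLAN_NATIVE_* trunk adjustment in isolation. */
    static unsigned long *
    example_native_trunks(const unsigned long *cfg_trunks, int native_vlan)
    {
        /* Clone the configured trunks, or start from "all VLANs" if none. */
        unsigned long *trunks = (cfg_trunks
                                 ? bitmap_clone(cfg_trunks, 4096)
                                 : bitmap_allocate1(4096));

        bitmap_set1(trunks, native_vlan);  /* The native VLAN must be trunked. */
        bitmap_set0(trunks, 0);            /* VLAN 0 must never be trunked. */
        return trunks;                     /* Caller owns the bitmap (free()). */
    }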
2796 | static void | |
2797 | bundle_remove(struct ofport *port_) | |
2798 | { | |
2799 | struct ofport_dpif *port = ofport_dpif_cast(port_); | |
2800 | struct ofbundle *bundle = port->bundle; | |
2801 | ||
2802 | if (bundle) { | |
2803 | bundle_del_port(port); | |
2804 | if (list_is_empty(&bundle->ports)) { | |
2805 | bundle_destroy(bundle); | |
2806 | } else if (list_is_short(&bundle->ports)) { | |
2807 | bond_destroy(bundle->bond); | |
2808 | bundle->bond = NULL; | |
2809 | } | |
2810 | } | |
2811 | } | |
2812 | ||
2813 | static void | |
5f877369 | 2814 | send_pdu_cb(void *port_, const void *pdu, size_t pdu_size) |
abe529af BP |
2815 | { |
2816 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 10); | |
2817 | struct ofport_dpif *port = port_; | |
2818 | uint8_t ea[ETH_ADDR_LEN]; | |
2819 | int error; | |
2820 | ||
2821 | error = netdev_get_etheraddr(port->up.netdev, ea); | |
2822 | if (!error) { | |
abe529af | 2823 | struct ofpbuf packet; |
5f877369 | 2824 | void *packet_pdu; |
abe529af BP |
2825 | |
2826 | ofpbuf_init(&packet, 0); | |
2827 | packet_pdu = eth_compose(&packet, eth_addr_lacp, ea, ETH_TYPE_LACP, | |
5f877369 EJ |
2828 | pdu_size); |
2829 | memcpy(packet_pdu, pdu, pdu_size); | |
2830 | ||
97d6520b | 2831 | send_packet(port, &packet); |
abe529af BP |
2832 | ofpbuf_uninit(&packet); |
2833 | } else { | |
2834 | VLOG_ERR_RL(&rl, "port %s: cannot obtain Ethernet address of iface " | |
2835 | "%s (%s)", port->bundle->name, | |
2836 | netdev_get_name(port->up.netdev), strerror(error)); | |
2837 | } | |
2838 | } | |
2839 | ||
2840 | static void | |
2841 | bundle_send_learning_packets(struct ofbundle *bundle) | |
2842 | { | |
2843 | struct ofproto_dpif *ofproto = bundle->ofproto; | |
2844 | int error, n_packets, n_errors; | |
2845 | struct mac_entry *e; | |
2846 | ||
2847 | error = n_packets = n_errors = 0; | |
2848 | LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) { | |
2849 | if (e->port.p != bundle) { | |
ea131871 JG |
2850 | struct ofpbuf *learning_packet; |
2851 | struct ofport_dpif *port; | |
4dd1e3ca | 2852 | void *port_void; |
ea131871 JG |
2853 | int ret; |
2854 | ||
4dd1e3ca BP |
2855 | /* The assignment to "port" is unnecessary but makes "grep"ing for |
2856 | * struct ofport_dpif more effective. */ | |
2857 | learning_packet = bond_compose_learning_packet(bundle->bond, | |
2858 | e->mac, e->vlan, | |
2859 | &port_void); | |
2860 | port = port_void; | |
97d6520b | 2861 | ret = send_packet(port, learning_packet); |
ea131871 | 2862 | ofpbuf_delete(learning_packet); |
abe529af BP |
2863 | if (ret) { |
2864 | error = ret; | |
2865 | n_errors++; | |
2866 | } | |
2867 | n_packets++; | |
2868 | } | |
2869 | } | |
2870 | ||
2871 | if (n_errors) { | |
2872 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); | |
2873 | VLOG_WARN_RL(&rl, "bond %s: %d errors sending %d gratuitous learning " | |
2874 | "packets, last error was: %s", | |
2875 | bundle->name, n_errors, n_packets, strerror(error)); | |
2876 | } else { | |
2877 | VLOG_DBG("bond %s: sent %d gratuitous learning packets", | |
2878 | bundle->name, n_packets); | |
2879 | } | |
2880 | } | |
2881 | ||
2882 | static void | |
2883 | bundle_run(struct ofbundle *bundle) | |
2884 | { | |
2885 | if (bundle->lacp) { | |
2886 | lacp_run(bundle->lacp, send_pdu_cb); | |
2887 | } | |
2888 | if (bundle->bond) { | |
2889 | struct ofport_dpif *port; | |
2890 | ||
2891 | LIST_FOR_EACH (port, bundle_node, &bundle->ports) { | |
015e08bc | 2892 | bond_slave_set_may_enable(bundle->bond, port, port->may_enable); |
abe529af BP |
2893 | } |
2894 | ||
2cc3c58e | 2895 | bond_run(bundle->bond, &bundle->ofproto->backer->revalidate_set, |
bdebeece | 2896 | lacp_status(bundle->lacp)); |
abe529af BP |
2897 | if (bond_should_send_learning_packets(bundle->bond)) { |
2898 | bundle_send_learning_packets(bundle); | |
2899 | } | |
2900 | } | |
2901 | } | |
2902 | ||
2903 | static void | |
2904 | bundle_wait(struct ofbundle *bundle) | |
2905 | { | |
2906 | if (bundle->lacp) { | |
2907 | lacp_wait(bundle->lacp); | |
2908 | } | |
2909 | if (bundle->bond) { | |
2910 | bond_wait(bundle->bond); | |
2911 | } | |
2912 | } | |
2913 | \f | |
2914 | /* Mirrors. */ | |
2915 | ||
2916 | static int | |
2917 | mirror_scan(struct ofproto_dpif *ofproto) | |
2918 | { | |
2919 | int idx; | |
2920 | ||
2921 | for (idx = 0; idx < MAX_MIRRORS; idx++) { | |
2922 | if (!ofproto->mirrors[idx]) { | |
2923 | return idx; | |
2924 | } | |
2925 | } | |
2926 | return -1; | |
2927 | } | |
2928 | ||
2929 | static struct ofmirror * | |
2930 | mirror_lookup(struct ofproto_dpif *ofproto, void *aux) | |
2931 | { | |
2932 | int i; | |
2933 | ||
2934 | for (i = 0; i < MAX_MIRRORS; i++) { | |
2935 | struct ofmirror *mirror = ofproto->mirrors[i]; | |
2936 | if (mirror && mirror->aux == aux) { | |
2937 | return mirror; | |
2938 | } | |
2939 | } | |
2940 | ||
2941 | return NULL; | |
2942 | } | |
2943 | ||
9ba15e2a BP |
2944 | /* Update the 'dup_mirrors' member of each of the ofmirrors in 'ofproto'. */ |
2945 | static void | |
2946 | mirror_update_dups(struct ofproto_dpif *ofproto) | |
2947 | { | |
2948 | int i; | |
2949 | ||
2950 | for (i = 0; i < MAX_MIRRORS; i++) { | |
2951 | struct ofmirror *m = ofproto->mirrors[i]; | |
2952 | ||
2953 | if (m) { | |
2954 | m->dup_mirrors = MIRROR_MASK_C(1) << i; | |
2955 | } | |
2956 | } | |
2957 | ||
2958 | for (i = 0; i < MAX_MIRRORS; i++) { | |
2959 | struct ofmirror *m1 = ofproto->mirrors[i]; | |
2960 | int j; | |
2961 | ||
2962 | if (!m1) { | |
2963 | continue; | |
2964 | } | |
2965 | ||
2966 | for (j = i + 1; j < MAX_MIRRORS; j++) { | |
2967 | struct ofmirror *m2 = ofproto->mirrors[j]; | |
2968 | ||
edb0540b | 2969 | if (m2 && m1->out == m2->out && m1->out_vlan == m2->out_vlan) { |
9ba15e2a BP |
2970 | m1->dup_mirrors |= MIRROR_MASK_C(1) << j; |
2971 | m2->dup_mirrors |= m1->dup_mirrors; | |
2972 | } | |
2973 | } | |
2974 | } | |
2975 | } | |
2976 | ||
abe529af BP |
2977 | static int |
2978 | mirror_set(struct ofproto *ofproto_, void *aux, | |
2979 | const struct ofproto_mirror_settings *s) | |
2980 | { | |
2981 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
2982 | mirror_mask_t mirror_bit; | |
2983 | struct ofbundle *bundle; | |
2984 | struct ofmirror *mirror; | |
2985 | struct ofbundle *out; | |
2986 | struct hmapx srcs; /* Contains "struct ofbundle *"s. */ | |
2987 | struct hmapx dsts; /* Contains "struct ofbundle *"s. */ | |
2988 | int out_vlan; | |
2989 | ||
2990 | mirror = mirror_lookup(ofproto, aux); | |
2991 | if (!s) { | |
2992 | mirror_destroy(mirror); | |
2993 | return 0; | |
2994 | } | |
2995 | if (!mirror) { | |
2996 | int idx; | |
2997 | ||
2998 | idx = mirror_scan(ofproto); | |
2999 | if (idx < 0) { | |
3000 | VLOG_WARN("bridge %s: maximum of %d port mirrors reached, " | |
3001 | "cannot create %s", | |
3002 | ofproto->up.name, MAX_MIRRORS, s->name); | |
3003 | return EFBIG; | |
3004 | } | |
3005 | ||
3006 | mirror = ofproto->mirrors[idx] = xzalloc(sizeof *mirror); | |
3007 | mirror->ofproto = ofproto; | |
3008 | mirror->idx = idx; | |
8b28d864 | 3009 | mirror->aux = aux; |
abe529af BP |
3010 | mirror->out_vlan = -1; |
3011 | mirror->name = NULL; | |
3012 | } | |
3013 | ||
3014 | if (!mirror->name || strcmp(s->name, mirror->name)) { | |
3015 | free(mirror->name); | |
3016 | mirror->name = xstrdup(s->name); | |
3017 | } | |
3018 | ||
3019 | /* Get the new configuration. */ | |
3020 | if (s->out_bundle) { | |
3021 | out = bundle_lookup(ofproto, s->out_bundle); | |
3022 | if (!out) { | |
3023 | mirror_destroy(mirror); | |
3024 | return EINVAL; | |
3025 | } | |
3026 | out_vlan = -1; | |
3027 | } else { | |
3028 | out = NULL; | |
3029 | out_vlan = s->out_vlan; | |
3030 | } | |
3031 | bundle_lookup_multiple(ofproto, s->srcs, s->n_srcs, &srcs); | |
3032 | bundle_lookup_multiple(ofproto, s->dsts, s->n_dsts, &dsts); | |
3033 | ||
3034 | /* If the configuration has not changed, do nothing. */ | |
3035 | if (hmapx_equals(&srcs, &mirror->srcs) | |
3036 | && hmapx_equals(&dsts, &mirror->dsts) | |
3037 | && vlan_bitmap_equal(mirror->vlans, s->src_vlans) | |
3038 | && mirror->out == out | |
3039 | && mirror->out_vlan == out_vlan) | |
3040 | { | |
3041 | hmapx_destroy(&srcs); | |
3042 | hmapx_destroy(&dsts); | |
3043 | return 0; | |
3044 | } | |
3045 | ||
3046 | hmapx_swap(&srcs, &mirror->srcs); | |
3047 | hmapx_destroy(&srcs); | |
3048 | ||
3049 | hmapx_swap(&dsts, &mirror->dsts); | |
3050 | hmapx_destroy(&dsts); | |
3051 | ||
3052 | free(mirror->vlans); | |
3053 | mirror->vlans = vlan_bitmap_clone(s->src_vlans); | |
3054 | ||
3055 | mirror->out = out; | |
3056 | mirror->out_vlan = out_vlan; | |
3057 | ||
3058 | /* Update bundles. */ | |
3059 | mirror_bit = MIRROR_MASK_C(1) << mirror->idx; | |
3060 | HMAP_FOR_EACH (bundle, hmap_node, &mirror->ofproto->bundles) { | |
3061 | if (hmapx_contains(&mirror->srcs, bundle)) { | |
3062 | bundle->src_mirrors |= mirror_bit; | |
3063 | } else { | |
3064 | bundle->src_mirrors &= ~mirror_bit; | |
3065 | } | |
3066 | ||
3067 | if (hmapx_contains(&mirror->dsts, bundle)) { | |
3068 | bundle->dst_mirrors |= mirror_bit; | |
3069 | } else { | |
3070 | bundle->dst_mirrors &= ~mirror_bit; | |
3071 | } | |
3072 | ||
3073 | if (mirror->out == bundle) { | |
3074 | bundle->mirror_out |= mirror_bit; | |
3075 | } else { | |
3076 | bundle->mirror_out &= ~mirror_bit; | |
3077 | } | |
3078 | } | |
3079 | ||
2cc3c58e | 3080 | ofproto->backer->need_revalidate = REV_RECONFIGURE; |
ccb7c863 | 3081 | ofproto->has_mirrors = true; |
2cc3c58e EJ |
3082 | mac_learning_flush(ofproto->ml, |
3083 | &ofproto->backer->revalidate_set); | |
9ba15e2a | 3084 | mirror_update_dups(ofproto); |
abe529af BP |
3085 | |
3086 | return 0; | |
3087 | } | |
3088 | ||
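Each mirror owns exactly one bit, MIRROR_MASK_C(1) << idx, and mirror_set() above ORs that bit into each bundle's src_mirrors, dst_mirrors, or mirror_out, so deciding later whether a mirror applies to a bundle reduces to a mask test. A minimal sketch of reading that bookkeeping, assuming the ofbundle and ofmirror definitions earlier in this file; the helper name is illustrative.

    /* Illustrative only: how the per-bundle mirror bitmaps set above are read. */
    static bool
    example_bundle_selected_by_mirror(const struct ofbundle *bundle,
                                      const struct ofmirror *mirror)
    {
        mirror_mask_t bit = MIRROR_MASK_C(1) << mirror->idx;

        return (bundle->src_mirrors & bit)      /* Selected on ingress... */
               || (bundle->dst_mirrors & bit);  /* ...or on egress. */
    }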
3089 | static void | |
3090 | mirror_destroy(struct ofmirror *mirror) | |
3091 | { | |
3092 | struct ofproto_dpif *ofproto; | |
3093 | mirror_mask_t mirror_bit; | |
3094 | struct ofbundle *bundle; | |
ccb7c863 | 3095 | int i; |
abe529af BP |
3096 | |
3097 | if (!mirror) { | |
3098 | return; | |
3099 | } | |
3100 | ||
3101 | ofproto = mirror->ofproto; | |
2cc3c58e EJ |
3102 | ofproto->backer->need_revalidate = REV_RECONFIGURE; |
3103 | mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set); | |
abe529af BP |
3104 | |
3105 | mirror_bit = MIRROR_MASK_C(1) << mirror->idx; | |
3106 | HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) { | |
3107 | bundle->src_mirrors &= ~mirror_bit; | |
3108 | bundle->dst_mirrors &= ~mirror_bit; | |
3109 | bundle->mirror_out &= ~mirror_bit; | |
3110 | } | |
3111 | ||
3112 | hmapx_destroy(&mirror->srcs); | |
3113 | hmapx_destroy(&mirror->dsts); | |
3114 | free(mirror->vlans); | |
3115 | ||
3116 | ofproto->mirrors[mirror->idx] = NULL; | |
3117 | free(mirror->name); | |
3118 | free(mirror); | |
9ba15e2a BP |
3119 | |
3120 | mirror_update_dups(ofproto); | |
ccb7c863 BP |
3121 | |
3122 | ofproto->has_mirrors = false; | |
3123 | for (i = 0; i < MAX_MIRRORS; i++) { | |
3124 | if (ofproto->mirrors[i]) { | |
3125 | ofproto->has_mirrors = true; | |
3126 | break; | |
3127 | } | |
3128 | } | |
abe529af BP |
3129 | } |
3130 | ||
9d24de3b JP |
3131 | static int |
3132 | mirror_get_stats(struct ofproto *ofproto_, void *aux, | |
3133 | uint64_t *packets, uint64_t *bytes) | |
3134 | { | |
3135 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
3136 | struct ofmirror *mirror = mirror_lookup(ofproto, aux); | |
3137 | ||
3138 | if (!mirror) { | |
3139 | *packets = *bytes = UINT64_MAX; | |
3140 | return 0; | |
3141 | } | |
3142 | ||
8844e035 EJ |
3143 | push_all_stats(); |
3144 | ||
9d24de3b JP |
3145 | *packets = mirror->packet_count; |
3146 | *bytes = mirror->byte_count; | |
3147 | ||
3148 | return 0; | |
3149 | } | |
3150 | ||
abe529af BP |
3151 | static int |
3152 | set_flood_vlans(struct ofproto *ofproto_, unsigned long *flood_vlans) | |
3153 | { | |
3154 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
3155 | if (mac_learning_set_flood_vlans(ofproto->ml, flood_vlans)) { | |
2cc3c58e | 3156 | mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set); |
abe529af BP |
3157 | } |
3158 | return 0; | |
3159 | } | |
3160 | ||
3161 | static bool | |
b4affc74 | 3162 | is_mirror_output_bundle(const struct ofproto *ofproto_, void *aux) |
abe529af BP |
3163 | { |
3164 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
3165 | struct ofbundle *bundle = bundle_lookup(ofproto, aux); | |
3166 | return bundle && bundle->mirror_out != 0; | |
3167 | } | |
8402c74b SS |
3168 | |
3169 | static void | |
b53055f4 | 3170 | forward_bpdu_changed(struct ofproto *ofproto_) |
8402c74b SS |
3171 | { |
3172 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
2cc3c58e | 3173 | ofproto->backer->need_revalidate = REV_RECONFIGURE; |
8402c74b | 3174 | } |
e764773c BP |
3175 | |
3176 | static void | |
c4069512 BP |
3177 | set_mac_table_config(struct ofproto *ofproto_, unsigned int idle_time, |
3178 | size_t max_entries) | |
e764773c BP |
3179 | { |
3180 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
3181 | mac_learning_set_idle_time(ofproto->ml, idle_time); | |
c4069512 | 3182 | mac_learning_set_max_entries(ofproto->ml, max_entries); |
e764773c | 3183 | } |
abe529af BP |
3184 | \f |
3185 | /* Ports. */ | |
3186 | ||
3187 | static struct ofport_dpif * | |
4acbc98d | 3188 | get_ofp_port(const struct ofproto_dpif *ofproto, uint16_t ofp_port) |
abe529af | 3189 | { |
7df6a8bd BP |
3190 | struct ofport *ofport = ofproto_get_port(&ofproto->up, ofp_port); |
3191 | return ofport ? ofport_dpif_cast(ofport) : NULL; | |
abe529af BP |
3192 | } |
3193 | ||
3194 | static struct ofport_dpif * | |
4acbc98d | 3195 | get_odp_port(const struct ofproto_dpif *ofproto, uint32_t odp_port) |
abe529af | 3196 | { |
7c33b188 JR |
3197 | struct ofport_dpif *port = odp_port_to_ofport(ofproto->backer, odp_port); |
3198 | return port && &ofproto->up == port->up.ofproto ? port : NULL; | |
abe529af BP |
3199 | } |
3200 | ||
3201 | static void | |
e1b1d06a JP |
3202 | ofproto_port_from_dpif_port(struct ofproto_dpif *ofproto, |
3203 | struct ofproto_port *ofproto_port, | |
abe529af BP |
3204 | struct dpif_port *dpif_port) |
3205 | { | |
3206 | ofproto_port->name = dpif_port->name; | |
3207 | ofproto_port->type = dpif_port->type; | |
e1b1d06a | 3208 | ofproto_port->ofp_port = odp_port_to_ofp_port(ofproto, dpif_port->port_no); |
abe529af BP |
3209 | } |
3210 | ||
0a740f48 EJ |
3211 | static struct ofport_dpif * |
3212 | ofport_get_peer(const struct ofport_dpif *ofport_dpif) | |
3213 | { | |
3214 | const struct ofproto_dpif *ofproto; | |
3215 | const char *peer; | |
3216 | ||
3217 | peer = netdev_vport_patch_peer(ofport_dpif->up.netdev); | |
3218 | if (!peer) { | |
3219 | return NULL; | |
3220 | } | |
3221 | ||
3222 | HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) { | |
3223 | struct ofport *ofport; | |
3224 | ||
3225 | ofport = shash_find_data(&ofproto->up.port_by_name, peer); | |
3226 | if (ofport && ofport->ofproto->ofproto_class == &ofproto_dpif_class) { | |
3227 | return ofport_dpif_cast(ofport); | |
3228 | } | |
3229 | } | |
3230 | return NULL; | |
3231 | } | |
3232 | ||
0aa66d6e EJ |
3233 | static void |
3234 | port_run_fast(struct ofport_dpif *ofport) | |
3235 | { | |
3236 | if (ofport->cfm && cfm_should_send_ccm(ofport->cfm)) { | |
3237 | struct ofpbuf packet; | |
3238 | ||
3239 | ofpbuf_init(&packet, 0); | |
3240 | cfm_compose_ccm(ofport->cfm, &packet, ofport->up.pp.hw_addr); | |
3241 | send_packet(ofport, &packet); | |
3242 | ofpbuf_uninit(&packet); | |
3243 | } | |
ccc09689 EJ |
3244 | |
3245 | if (ofport->bfd && bfd_should_send_packet(ofport->bfd)) { | |
3246 | struct ofpbuf packet; | |
3247 | ||
3248 | ofpbuf_init(&packet, 0); | |
3249 | bfd_put_packet(ofport->bfd, &packet, ofport->up.pp.hw_addr); | |
3250 | send_packet(ofport, &packet); | |
3251 | ofpbuf_uninit(&packet); | |
3252 | } | |
0aa66d6e EJ |
3253 | } |
3254 | ||
abe529af BP |
3255 | static void |
3256 | port_run(struct ofport_dpif *ofport) | |
3257 | { | |
3e5b3fdb EJ |
3258 | long long int carrier_seq = netdev_get_carrier_resets(ofport->up.netdev); |
3259 | bool carrier_changed = carrier_seq != ofport->carrier_seq; | |
015e08bc EJ |
3260 | bool enable = netdev_get_carrier(ofport->up.netdev); |
3261 | ||
3e5b3fdb EJ |
3262 | ofport->carrier_seq = carrier_seq; |
3263 | ||
0aa66d6e | 3264 | port_run_fast(ofport); |
b9ad7294 EJ |
3265 | |
3266 | if (ofport->tnl_port | |
3267 | && tnl_port_reconfigure(&ofport->up, ofport->odp_port, | |
3268 | &ofport->tnl_port)) { | |
3269 | ofproto_dpif_cast(ofport->up.ofproto)->backer->need_revalidate = true; | |
3270 | } | |
3271 | ||
abe529af | 3272 | if (ofport->cfm) { |
4653c558 EJ |
3273 | int cfm_opup = cfm_get_opup(ofport->cfm); |
3274 | ||
abe529af | 3275 | cfm_run(ofport->cfm); |
4653c558 EJ |
3276 | enable = enable && !cfm_get_fault(ofport->cfm); |
3277 | ||
3278 | if (cfm_opup >= 0) { | |
3279 | enable = enable && cfm_opup; | |
3280 | } | |
abe529af | 3281 | } |
015e08bc | 3282 | |
ccc09689 EJ |
3283 | if (ofport->bfd) { |
3284 | bfd_run(ofport->bfd); | |
3285 | enable = enable && bfd_forwarding(ofport->bfd); | |
3286 | } | |
3287 | ||
015e08bc EJ |
3288 | if (ofport->bundle) { |
3289 | enable = enable && lacp_slave_may_enable(ofport->bundle->lacp, ofport); | |
3e5b3fdb EJ |
3290 | if (carrier_changed) { |
3291 | lacp_slave_carrier_changed(ofport->bundle->lacp, ofport); | |
3292 | } | |
015e08bc EJ |
3293 | } |
3294 | ||
daff3353 EJ |
3295 | if (ofport->may_enable != enable) { |
3296 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto); | |
3297 | ||
3298 | if (ofproto->has_bundle_action) { | |
2cc3c58e | 3299 | ofproto->backer->need_revalidate = REV_PORT_TOGGLED; |
daff3353 EJ |
3300 | } |
3301 | } | |
3302 | ||
015e08bc | 3303 | ofport->may_enable = enable; |
abe529af BP |
3304 | } |
3305 | ||
3306 | static void | |
3307 | port_wait(struct ofport_dpif *ofport) | |
3308 | { | |
3309 | if (ofport->cfm) { | |
3310 | cfm_wait(ofport->cfm); | |
3311 | } | |
ccc09689 EJ |
3312 | |
3313 | if (ofport->bfd) { | |
3314 | bfd_wait(ofport->bfd); | |
3315 | } | |
abe529af BP |
3316 | } |
3317 | ||
3318 | static int | |
3319 | port_query_by_name(const struct ofproto *ofproto_, const char *devname, | |
3320 | struct ofproto_port *ofproto_port) | |
3321 | { | |
3322 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
3323 | struct dpif_port dpif_port; | |
3324 | int error; | |
3325 | ||
0a740f48 EJ |
3326 | if (sset_contains(&ofproto->ghost_ports, devname)) { |
3327 | const char *type = netdev_get_type_from_name(devname); | |
3328 | ||
3329 | /* We may be called before ofproto->up.port_by_name is populated with | |
3330 | * the appropriate ofport. For this reason, we must get the name and | |
3331 | * type from the netdev layer directly. */ | |
3332 | if (type) { | |
3333 | const struct ofport *ofport; | |
3334 | ||
3335 | ofport = shash_find_data(&ofproto->up.port_by_name, devname); | |
3336 | ofproto_port->ofp_port = ofport ? ofport->ofp_port : OFPP_NONE; | |
3337 | ofproto_port->name = xstrdup(devname); | |
3338 | ofproto_port->type = xstrdup(type); | |
3339 | return 0; | |
3340 | } | |
3341 | return ENODEV; | |
3342 | } | |
3343 | ||
acf60855 JP |
3344 | if (!sset_contains(&ofproto->ports, devname)) { |
3345 | return ENODEV; | |
3346 | } | |
3347 | error = dpif_port_query_by_name(ofproto->backer->dpif, | |
3348 | devname, &dpif_port); | |
abe529af | 3349 | if (!error) { |
e1b1d06a | 3350 | ofproto_port_from_dpif_port(ofproto, ofproto_port, &dpif_port); |
abe529af BP |
3351 | } |
3352 | return error; | |
3353 | } | |
3354 | ||
3355 | static int | |
e1b1d06a | 3356 | port_add(struct ofproto *ofproto_, struct netdev *netdev) |
abe529af BP |
3357 | { |
3358 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
b9ad7294 | 3359 | const char *devname = netdev_get_name(netdev); |
3aa30359 BP |
3360 | char namebuf[NETDEV_VPORT_NAME_BUFSIZE]; |
3361 | const char *dp_port_name; | |
abe529af | 3362 | |
0a740f48 EJ |
3363 | if (netdev_vport_is_patch(netdev)) { |
3364 | sset_add(&ofproto->ghost_ports, netdev_get_name(netdev)); | |
3365 | return 0; | |
3366 | } | |
3367 | ||
3aa30359 | 3368 | dp_port_name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf); |
b9ad7294 | 3369 | if (!dpif_port_exists(ofproto->backer->dpif, dp_port_name)) { |
7d82ab2e KM |
3370 | uint32_t port_no = UINT32_MAX; |
3371 | int error; | |
3372 | ||
3373 | error = dpif_port_add(ofproto->backer->dpif, netdev, &port_no); | |
b9ad7294 EJ |
3374 | if (error) { |
3375 | return error; | |
3376 | } | |
7d82ab2e KM |
3377 | if (netdev_get_tunnel_config(netdev)) { |
3378 | simap_put(&ofproto->backer->tnl_backers, dp_port_name, port_no); | |
3379 | } | |
acf60855 | 3380 | } |
b9ad7294 EJ |
3381 | |
3382 | if (netdev_get_tunnel_config(netdev)) { | |
3383 | sset_add(&ofproto->ghost_ports, devname); | |
b9ad7294 EJ |
3384 | } else { |
3385 | sset_add(&ofproto->ports, devname); | |
3386 | } | |
3387 | return 0; | |
3388 | } | |
3389 | ||
abe529af BP |
3390 | static int |
3391 | port_del(struct ofproto *ofproto_, uint16_t ofp_port) | |
3392 | { | |
3393 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
b9ad7294 | 3394 | struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port); |
e1b1d06a | 3395 | int error = 0; |
abe529af | 3396 | |
b9ad7294 EJ |
3397 | if (!ofport) { |
3398 | return 0; | |
e1b1d06a | 3399 | } |
b9ad7294 EJ |
3400 | |
3401 | sset_find_and_delete(&ofproto->ghost_ports, | |
3402 | netdev_get_name(ofport->up.netdev)); | |
a614d823 KM |
3403 | ofproto->backer->need_revalidate = REV_RECONFIGURE; |
3404 | if (!ofport->tnl_port) { | |
b9ad7294 EJ |
3405 | error = dpif_port_del(ofproto->backer->dpif, ofport->odp_port); |
3406 | if (!error) { | |
abe529af BP |
3407 | /* The caller is going to close ofport->up.netdev. If this is a |
3408 | * bonded port, then the bond is using that netdev, so remove it | |
3409 | * from the bond. The client will need to reconfigure everything | |
3410 | * after deleting ports, so then the slave will get re-added. */ | |
3411 | bundle_remove(&ofport->up); | |
3412 | } | |
3413 | } | |
3414 | return error; | |
3415 | } | |
3416 | ||
6527c598 PS |
3417 | static int |
3418 | port_get_stats(const struct ofport *ofport_, struct netdev_stats *stats) | |
3419 | { | |
3420 | struct ofport_dpif *ofport = ofport_dpif_cast(ofport_); | |
3421 | int error; | |
3422 | ||
8844e035 EJ |
3423 | push_all_stats(); |
3424 | ||
6527c598 PS |
3425 | error = netdev_get_stats(ofport->up.netdev, stats); |
3426 | ||
ee382d89 | 3427 | if (!error && ofport_->ofp_port == OFPP_LOCAL) { |
6527c598 PS |
3428 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto); |
3429 | ||
3430 | /* ofproto->stats.tx_packets represents packets that we created | |
3431 | * internally and sent to some port (e.g. packets sent with | |
3432 | * send_packet()). Account for them as if they had come from | |
3433 | * OFPP_LOCAL and got forwarded. */ | |
3434 | ||
3435 | if (stats->rx_packets != UINT64_MAX) { | |
3436 | stats->rx_packets += ofproto->stats.tx_packets; | |
3437 | } | |
3438 | ||
3439 | if (stats->rx_bytes != UINT64_MAX) { | |
3440 | stats->rx_bytes += ofproto->stats.tx_bytes; | |
3441 | } | |
3442 | ||
3443 | /* ofproto->stats.rx_packets represents packets that were received on | |
3444 | * some port and that we processed internally and dropped (e.g. STP). | |
4e090bc7 | 3445 | * Account for them as if they had been forwarded to OFPP_LOCAL. */ |
6527c598 PS |
3446 | |
3447 | if (stats->tx_packets != UINT64_MAX) { | |
3448 | stats->tx_packets += ofproto->stats.rx_packets; | |
3449 | } | |
3450 | ||
3451 | if (stats->tx_bytes != UINT64_MAX) { | |
3452 | stats->tx_bytes += ofproto->stats.rx_bytes; | |
3453 | } | |
3454 | } | |
3455 | ||
3456 | return error; | |
3457 | } | |
3458 | ||
abe529af | 3459 | struct port_dump_state { |
acf60855 JP |
3460 | uint32_t bucket; |
3461 | uint32_t offset; | |
0a740f48 | 3462 | bool ghost; |
da78d43d BP |
3463 | |
3464 | struct ofproto_port port; | |
3465 | bool has_port; | |
abe529af BP |
3466 | }; |
3467 | ||
3468 | static int | |
acf60855 | 3469 | port_dump_start(const struct ofproto *ofproto_ OVS_UNUSED, void **statep) |
abe529af | 3470 | { |
0a740f48 | 3471 | *statep = xzalloc(sizeof(struct port_dump_state)); |
abe529af BP |
3472 | return 0; |
3473 | } | |
3474 | ||
3475 | static int | |
b9ad7294 | 3476 | port_dump_next(const struct ofproto *ofproto_, void *state_, |
abe529af BP |
3477 | struct ofproto_port *port) |
3478 | { | |
e1b1d06a | 3479 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); |
abe529af | 3480 | struct port_dump_state *state = state_; |
0a740f48 | 3481 | const struct sset *sset; |
acf60855 | 3482 | struct sset_node *node; |
abe529af | 3483 | |
da78d43d BP |
3484 | if (state->has_port) { |
3485 | ofproto_port_destroy(&state->port); | |
3486 | state->has_port = false; | |
3487 | } | |
0a740f48 EJ |
3488 | sset = state->ghost ? &ofproto->ghost_ports : &ofproto->ports; |
3489 | while ((node = sset_at_position(sset, &state->bucket, &state->offset))) { | |
acf60855 JP |
3490 | int error; |
3491 | ||
da78d43d BP |
3492 | error = port_query_by_name(ofproto_, node->name, &state->port); |
3493 | if (!error) { | |
3494 | *port = state->port; | |
3495 | state->has_port = true; | |
3496 | return 0; | |
3497 | } else if (error != ENODEV) { | |
acf60855 JP |
3498 | return error; |
3499 | } | |
abe529af | 3500 | } |
acf60855 | 3501 | |
0a740f48 EJ |
3502 | if (!state->ghost) { |
3503 | state->ghost = true; | |
3504 | state->bucket = 0; | |
3505 | state->offset = 0; | |
3506 | return port_dump_next(ofproto_, state_, port); | |
3507 | } | |
3508 | ||
acf60855 | 3509 | return EOF; |
abe529af BP |
3510 | } |
3511 | ||
3512 | static int | |
3513 | port_dump_done(const struct ofproto *ofproto_ OVS_UNUSED, void *state_) | |
3514 | { | |
3515 | struct port_dump_state *state = state_; | |
3516 | ||
da78d43d BP |
3517 | if (state->has_port) { |
3518 | ofproto_port_destroy(&state->port); | |
3519 | } | |
abe529af BP |
3520 | free(state); |
3521 | return 0; | |
3522 | } | |
3523 | ||
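The three callbacks above are the provider half of the port-dump interface: port_dump_next() first walks ofproto->ports, then flips to ghost_ports, and port_dump_done() frees the state either way. A hedged sketch of the consumer side, assuming the ofproto_port_dump helpers and the OFPROTO_PORT_FOR_EACH macro from the generic ofproto layer (an assumption about ofproto.h, not something defined in this file; the function name is illustrative).

    #include <stdio.h>

    /* Illustrative only: iteration that ends up invoking the
     * port_dump_start/next/done callbacks defined above. */
    static void
    example_list_ports(struct ofproto *ofproto)
    {
        struct ofproto_port_dump dump;
        struct ofproto_port ofproto_port;

        OFPROTO_PORT_FOR_EACH (&ofproto_port, &dump, ofproto) {
            printf("%s (%s)\n", ofproto_port.name, ofproto_port.type);
        }
    }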
3524 | static int | |
3525 | port_poll(const struct ofproto *ofproto_, char **devnamep) | |
3526 | { | |
3527 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
acf60855 JP |
3528 | |
3529 | if (ofproto->port_poll_errno) { | |
3530 | int error = ofproto->port_poll_errno; | |
3531 | ofproto->port_poll_errno = 0; | |
3532 | return error; | |
3533 | } | |
3534 | ||
3535 | if (sset_is_empty(&ofproto->port_poll_set)) { | |
3536 | return EAGAIN; | |
3537 | } | |
3538 | ||
3539 | *devnamep = sset_pop(&ofproto->port_poll_set); | |
3540 | return 0; | |
abe529af BP |
3541 | } |
3542 | ||
3543 | static void | |
3544 | port_poll_wait(const struct ofproto *ofproto_) | |
3545 | { | |
3546 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
acf60855 | 3547 | dpif_port_poll_wait(ofproto->backer->dpif); |
abe529af BP |
3548 | } |
3549 | ||
3550 | static int | |
3551 | port_is_lacp_current(const struct ofport *ofport_) | |
3552 | { | |
3553 | const struct ofport_dpif *ofport = ofport_dpif_cast(ofport_); | |
3554 | return (ofport->bundle && ofport->bundle->lacp | |
3555 | ? lacp_slave_is_current(ofport->bundle->lacp, ofport) | |
3556 | : -1); | |
3557 | } | |
3558 | \f | |
3559 | /* Upcall handling. */ | |
3560 | ||
501f8d1f BP |
3561 | /* Flow miss batching. |
3562 | * | |
3563 | * Some dpifs implement operations faster when you hand them off in a batch. | |
3564 | * To allow batching, "struct flow_miss" queues the dpif-related work needed | |
3565 | * for a given flow. Each "struct flow_miss" corresponds to sending one or | |
3566 | * more packets, plus possibly installing the flow in the dpif. | |
3567 | * | |
3568 | * So far we only batch the operations that affect flow setup time the most. | |
3569 | * It's possible to batch more than that, but the benefit might be minimal. */ | |
3570 | struct flow_miss { | |
3571 | struct hmap_node hmap_node; | |
acf60855 | 3572 | struct ofproto_dpif *ofproto; |
501f8d1f | 3573 | struct flow flow; |
b0f7b9b5 | 3574 | enum odp_key_fitness key_fitness; |
501f8d1f BP |
3575 | const struct nlattr *key; |
3576 | size_t key_len; | |
14f94f9a | 3577 | struct initial_vals initial_vals; |
501f8d1f | 3578 | struct list packets; |
6a7e895f | 3579 | enum dpif_upcall_type upcall_type; |
501f8d1f BP |
3580 | }; |
3581 | ||
3582 | struct flow_miss_op { | |
c2b565b5 | 3583 | struct dpif_op dpif_op; |
bbafd73b EJ |
3584 | |
3585 | uint64_t slow_stub[128 / 8]; /* Buffer for compose_slow_path() */ | |
3586 | struct xlate_out xout; | |
3587 | bool xout_garbage; /* Does 'xout' need xlate_out_uninit()? */ | |
501f8d1f BP |
3588 | }; |
3589 | ||
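The payoff of the batching described above comes at the end of miss handling: rather than one datapath call per packet, the translated operations are gathered into an array of struct dpif_op pointers and handed to the dpif in a single dpif_operate() call (see handle_miss_upcalls() below). A minimal sketch of that final step, using only calls that appear later in this section; the function name is illustrative.

    /* Illustrative only: the execute step of flow-miss batching. */
    static void
    example_execute_flow_miss_batch(struct dpif *dpif,
                                    struct flow_miss_op *ops, size_t n_ops)
    {
        struct dpif_op *dpif_ops[FLOW_MISS_MAX_BATCH * 2];
        size_t i;

        ovs_assert(n_ops <= ARRAY_SIZE(dpif_ops));
        for (i = 0; i < n_ops; i++) {
            dpif_ops[i] = &ops[i].dpif_op;      /* Gather per-miss operations. */
        }
        dpif_operate(dpif, dpif_ops, n_ops);    /* One round trip to the datapath. */
    }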
62cd7072 BP |
3590 | /* Sends an OFPT_PACKET_IN message for 'packet' of type OFPR_NO_MATCH to each |
3591 | * OpenFlow controller as necessary according to their individual | |
29ebe880 | 3592 | * configurations. */ |
62cd7072 | 3593 | static void |
a39edbd4 | 3594 | send_packet_in_miss(struct ofproto_dpif *ofproto, const struct ofpbuf *packet, |
29ebe880 | 3595 | const struct flow *flow) |
62cd7072 BP |
3596 | { |
3597 | struct ofputil_packet_in pin; | |
3598 | ||
3e3252fa EJ |
3599 | pin.packet = packet->data; |
3600 | pin.packet_len = packet->size; | |
62cd7072 | 3601 | pin.reason = OFPR_NO_MATCH; |
a7349929 | 3602 | pin.controller_id = 0; |
54834960 EJ |
3603 | |
3604 | pin.table_id = 0; | |
3605 | pin.cookie = 0; | |
3606 | ||
62cd7072 | 3607 | pin.send_len = 0; /* not used for flow table misses */ |
5d6c3af0 EJ |
3608 | |
3609 | flow_get_metadata(flow, &pin.fmd); | |
3610 | ||
d8653c38 | 3611 | connmgr_send_packet_in(ofproto->up.connmgr, &pin); |
62cd7072 BP |
3612 | } |
3613 | ||
6a7e895f | 3614 | static enum slow_path_reason |
abe529af | 3615 | process_special(struct ofproto_dpif *ofproto, const struct flow *flow, |
ffaef958 | 3616 | const struct ofport_dpif *ofport, const struct ofpbuf *packet) |
abe529af | 3617 | { |
b6e001b6 | 3618 | if (!ofport) { |
6a7e895f | 3619 | return 0; |
ffaef958 | 3620 | } else if (ofport->cfm && cfm_should_process_flow(ofport->cfm, flow)) { |
b6e001b6 | 3621 | if (packet) { |
abe529af BP |
3622 | cfm_process_heartbeat(ofport->cfm, packet); |
3623 | } | |
6a7e895f | 3624 | return SLOW_CFM; |
ccc09689 EJ |
3625 | } else if (ofport->bfd && bfd_should_process_flow(flow)) { |
3626 | if (packet) { | |
3627 | bfd_process_packet(ofport->bfd, flow, packet); | |
3628 | } | |
3629 | return SLOW_BFD; | |
b6e001b6 EJ |
3630 | } else if (ofport->bundle && ofport->bundle->lacp |
3631 | && flow->dl_type == htons(ETH_TYPE_LACP)) { | |
3632 | if (packet) { | |
3633 | lacp_process_packet(ofport->bundle->lacp, ofport, packet); | |
abe529af | 3634 | } |
6a7e895f | 3635 | return SLOW_LACP; |
21f7563c JP |
3636 | } else if (ofproto->stp && stp_should_process_flow(flow)) { |
3637 | if (packet) { | |
3638 | stp_process_packet(ofport, packet); | |
3639 | } | |
6a7e895f | 3640 | return SLOW_STP; |
ffaef958 BP |
3641 | } else { |
3642 | return 0; | |
abe529af | 3643 | } |
abe529af BP |
3644 | } |
3645 | ||
501f8d1f | 3646 | static struct flow_miss * |
ddbc5954 JP |
3647 | flow_miss_find(struct hmap *todo, const struct ofproto_dpif *ofproto, |
3648 | const struct flow *flow, uint32_t hash) | |
abe529af | 3649 | { |
501f8d1f | 3650 | struct flow_miss *miss; |
abe529af | 3651 | |
501f8d1f | 3652 | HMAP_FOR_EACH_WITH_HASH (miss, hmap_node, hash, todo) { |
ddbc5954 | 3653 | if (miss->ofproto == ofproto && flow_equal(&miss->flow, flow)) { |
501f8d1f BP |
3654 | return miss; |
3655 | } | |
3656 | } | |
abe529af | 3657 | |
b23cdad9 | 3658 | return NULL; |
501f8d1f | 3659 | } |
abe529af | 3660 | |
9d6ac44e BP |
3661 | /* Partially initializes 'op' as an "execute" operation for 'miss' and | |
3662 | * 'packet'. The caller must initialize op->dpif_op.u.execute.actions and | |
3663 | * op->dpif_op.u.execute.actions_len. If anything needs to be freed after | |
3664 | * processing the op (e.g. a copied xlate_out), the caller must also set | |
3665 | * op->xout_garbage. */ | |
501f8d1f | 3666 | static void |
9d6ac44e BP |
3667 | init_flow_miss_execute_op(struct flow_miss *miss, struct ofpbuf *packet, |
3668 | struct flow_miss_op *op) | |
501f8d1f | 3669 | { |
14f94f9a | 3670 | if (miss->flow.vlan_tci != miss->initial_vals.vlan_tci) { |
9d6ac44e BP |
3671 | /* This packet was received on a VLAN splinter port. We |
3672 | * added a VLAN to the packet to make the packet resemble | |
3673 | * the flow, but the actions were composed assuming that | |
3674 | * the packet contained no VLAN. So, we must remove the | |
3675 | * VLAN header from the packet before trying to execute the | |
3676 | * actions. */ | |
3677 | eth_pop_vlan(packet); | |
3678 | } | |
3679 | ||
bbafd73b | 3680 | op->xout_garbage = false; |
9d6ac44e BP |
3681 | op->dpif_op.type = DPIF_OP_EXECUTE; |
3682 | op->dpif_op.u.execute.key = miss->key; | |
3683 | op->dpif_op.u.execute.key_len = miss->key_len; | |
3684 | op->dpif_op.u.execute.packet = packet; | |
3685 | } | |
3686 | ||
3687 | /* Helper for handle_flow_miss_without_facet() and | |
3688 | * handle_flow_miss_with_facet(). */ | |
3689 | static void | |
3690 | handle_flow_miss_common(struct rule_dpif *rule, | |
3691 | struct ofpbuf *packet, const struct flow *flow) | |
3692 | { | |
3693 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto); | |
3694 | ||
9d6ac44e BP |
3695 | if (rule->up.cr.priority == FAIL_OPEN_PRIORITY) { |
3696 | /* | |
3697 | * Extra-special case for fail-open mode. | |
3698 | * | |
3699 | * We are in fail-open mode and the packet matched the fail-open | |
3700 | * rule, but we are connected to a controller too. We should send | |
3701 | * the packet up to the controller in the hope that it will try to | |
3702 | * set up a flow and thereby allow us to exit fail-open. | |
3703 | * | |
3704 | * See the top-level comment in fail-open.c for more information. | |
3705 | */ | |
3706 | send_packet_in_miss(ofproto, packet, flow); | |
3707 | } | |
3708 | } | |
3709 | ||
3710 | /* Figures out whether a flow that missed in 'ofproto', whose details are in | |
bcd2633a JP |
3711 | * 'miss' masked by 'wc', is likely to be worth tracking in detail in userspace |
3712 | * and (usually) installing a datapath flow. The answer is usually "yes" (a | |
3713 | * return value of true). However, for short flows the cost of bookkeeping is | |
3714 | * much higher than the benefits, so when the datapath holds a large number of | |
3715 | * flows we impose some heuristics to decide which flows are likely to be worth | |
3716 | * tracking. */ | |
9d6ac44e | 3717 | static bool |
bcd2633a | 3718 | flow_miss_should_make_facet(struct flow_miss *miss, struct flow_wildcards *wc) |
9d6ac44e | 3719 | { |
04d08d54 | 3720 | struct dpif_backer *backer = miss->ofproto->backer; |
bcd2633a | 3721 | uint32_t hash; |
04d08d54 EJ |
3722 | |
3723 | if (!backer->governor) { | |
9d6ac44e BP |
3724 | size_t n_subfacets; |
3725 | ||
04d08d54 | 3726 | n_subfacets = hmap_count(&backer->subfacets); |
380f49c4 | 3727 | if (n_subfacets * 2 <= flow_eviction_threshold) { |
9d6ac44e BP |
3728 | return true; |
3729 | } | |
3730 | ||
c985ec94 | 3731 | backer->governor = governor_create(); |
9d6ac44e BP |
3732 | } |
3733 | ||
bcd2633a | 3734 | hash = flow_hash_in_wildcards(&miss->flow, wc, 0); |
04d08d54 | 3735 | return governor_should_install_flow(backer->governor, hash, |
9d6ac44e BP |
3736 | list_size(&miss->packets)); |
3737 | } | |
3738 | ||
bcd2633a JP |
3739 | /* Handles 'miss' without creating a facet or subfacet or creating any datapath |
3740 | * flow. 'miss->flow' must have matched 'rule' and been xlated into 'xout'. | |
3741 | * May add an "execute" operation to 'ops' and increment '*n_ops'. */ | |
9d6ac44e | 3742 | static void |
bcd2633a JP |
3743 | handle_flow_miss_without_facet(struct rule_dpif *rule, struct xlate_out *xout, |
3744 | struct flow_miss *miss, | |
9d6ac44e BP |
3745 | struct flow_miss_op *ops, size_t *n_ops) |
3746 | { | |
530a1d91 | 3747 | struct ofpbuf *packet; |
2b459b83 | 3748 | |
9d6ac44e | 3749 | LIST_FOR_EACH (packet, list_node, &miss->packets) { |
abe529af | 3750 | |
9d6ac44e | 3751 | COVERAGE_INC(facet_suppress); |
501f8d1f | 3752 | |
7820e253 EJ |
3753 | handle_flow_miss_common(rule, packet, &miss->flow); |
3754 | ||
bcd2633a JP |
3755 | if (xout->slow) { |
3756 | struct xlate_in xin; | |
abe529af | 3757 | |
bcd2633a JP |
3758 | xlate_in_init(&xin, miss->ofproto, &miss->flow, |
3759 | &miss->initial_vals, rule, 0, packet); | |
3760 | xlate_actions_for_side_effects(&xin); | |
3761 | } | |
abe529af | 3762 | |
bcd2633a JP |
3763 | if (xout->odp_actions.size) { |
3764 | struct flow_miss_op *op = &ops[*n_ops]; | |
9d6ac44e BP |
3765 | struct dpif_execute *execute = &op->dpif_op.u.execute; |
3766 | ||
3767 | init_flow_miss_execute_op(miss, packet, op); | |
bcd2633a | 3768 | xlate_out_copy(&op->xout, xout); |
bbafd73b EJ |
3769 | execute->actions = op->xout.odp_actions.data; |
3770 | execute->actions_len = op->xout.odp_actions.size; | |
3771 | op->xout_garbage = true; | |
9d6ac44e BP |
3772 | |
3773 | (*n_ops)++; | |
9d6ac44e | 3774 | } |
abe529af | 3775 | } |
9d6ac44e BP |
3776 | } |
3777 | ||
3778 | /* Handles 'miss', which matches 'facet'. May add any required datapath | |
459b16a1 BP |
3779 | * operations to 'ops', incrementing '*n_ops' for each new op. |
3780 | * | |
3781 | * All of the packets in 'miss' are considered to have arrived at time 'now'. | |
3782 | * This is really important only for new facets: if we just called time_msec() | |
3783 | * here, then the new subfacet or its packets could look (occasionally) as | |
3784 | * though it was used some time after the facet was used. That can make a | |
3785 | * one-packet flow look like it has a nonzero duration, which looks odd in | |
bcd2633a JP |
3786 | * e.g. NetFlow statistics. |
3787 | * | |
3788 | * If non-null, 'stats' will be folded into 'facet'. */ | |
9d6ac44e BP |
3789 | static void |
3790 | handle_flow_miss_with_facet(struct flow_miss *miss, struct facet *facet, | |
bcd2633a | 3791 | long long int now, struct dpif_flow_stats *stats, |
9d6ac44e BP |
3792 | struct flow_miss_op *ops, size_t *n_ops) |
3793 | { | |
6a7e895f BP |
3794 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto); |
3795 | enum subfacet_path want_path; | |
9d6ac44e BP |
3796 | struct subfacet *subfacet; |
3797 | struct ofpbuf *packet; | |
abe529af | 3798 | |
a088a1ff | 3799 | subfacet = subfacet_create(facet, miss, now); |
bcd2633a JP |
3800 | want_path = facet->xout.slow ? SF_SLOW_PATH : SF_FAST_PATH; |
3801 | if (stats) { | |
3802 | subfacet_update_stats(subfacet, stats); | |
3803 | } | |
b0f7b9b5 | 3804 | |
530a1d91 | 3805 | LIST_FOR_EACH (packet, list_node, &miss->packets) { |
5fe20d5d | 3806 | struct flow_miss_op *op = &ops[*n_ops]; |
67d91f78 | 3807 | |
9d6ac44e | 3808 | handle_flow_miss_common(facet->rule, packet, &miss->flow); |
501f8d1f | 3809 | |
4dff9097 | 3810 | if (want_path != SF_FAST_PATH) { |
bbafd73b | 3811 | struct xlate_in xin; |
454a77e5 | 3812 | |
bbafd73b EJ |
3813 | xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals, |
3814 | facet->rule, 0, packet); | |
3815 | xlate_actions_for_side_effects(&xin); | |
501f8d1f | 3816 | } |
67d91f78 | 3817 | |
bbafd73b | 3818 | if (facet->xout.odp_actions.size) { |
9d6ac44e | 3819 | struct dpif_execute *execute = &op->dpif_op.u.execute; |
8338659a | 3820 | |
9d6ac44e | 3821 | init_flow_miss_execute_op(miss, packet, op); |
bbafd73b EJ |
3822 | execute->actions = facet->xout.odp_actions.data; | |
3823 | execute->actions_len = facet->xout.odp_actions.size; | |
9d6ac44e | 3824 | (*n_ops)++; |
5fe20d5d | 3825 | } |
501f8d1f BP |
3826 | } |
3827 | ||
6a7e895f | 3828 | if (miss->upcall_type == DPIF_UC_MISS || subfacet->path != want_path) { |
501f8d1f | 3829 | struct flow_miss_op *op = &ops[(*n_ops)++]; |
c2b565b5 | 3830 | struct dpif_flow_put *put = &op->dpif_op.u.flow_put; |
501f8d1f | 3831 | |
c84451a6 EJ |
3832 | subfacet->path = want_path; |
3833 | ||
bbafd73b | 3834 | op->xout_garbage = false; |
c2b565b5 | 3835 | op->dpif_op.type = DPIF_OP_FLOW_PUT; |
501f8d1f BP |
3836 | put->flags = DPIF_FP_CREATE | DPIF_FP_MODIFY; |
3837 | put->key = miss->key; | |
3838 | put->key_len = miss->key_len; | |
6a7e895f | 3839 | if (want_path == SF_FAST_PATH) { |
bbafd73b EJ |
3840 | put->actions = facet->xout.odp_actions.data; |
3841 | put->actions_len = facet->xout.odp_actions.size; | |
6a7e895f | 3842 | } else { |
bbafd73b EJ |
3843 | compose_slow_path(ofproto, &facet->flow, facet->xout.slow, |
3844 | op->slow_stub, sizeof op->slow_stub, | |
6a7e895f BP |
3845 | &put->actions, &put->actions_len); |
3846 | } | |
501f8d1f BP |
3847 | put->stats = NULL; |
3848 | } | |
3849 | } | |
3850 | ||
acf60855 JP |
3851 | /* Handles flow miss 'miss'. May add any required datapath operations |
3852 | * to 'ops', incrementing '*n_ops' for each new op. */ | |
9d6ac44e | 3853 | static void |
acf60855 JP |
3854 | handle_flow_miss(struct flow_miss *miss, struct flow_miss_op *ops, |
3855 | size_t *n_ops) | |
9d6ac44e | 3856 | { |
acf60855 | 3857 | struct ofproto_dpif *ofproto = miss->ofproto; |
bcd2633a JP |
3858 | struct dpif_flow_stats stats__; |
3859 | struct dpif_flow_stats *stats = &stats__; | |
3860 | struct ofpbuf *packet; | |
9d6ac44e | 3861 | struct facet *facet; |
459b16a1 | 3862 | long long int now; |
9d6ac44e | 3863 | |
bcd2633a JP |
3864 | now = time_msec(); |
3865 | memset(stats, 0, sizeof *stats); | |
3866 | stats->used = now; | |
3867 | LIST_FOR_EACH (packet, list_node, &miss->packets) { | |
3868 | stats->tcp_flags |= packet_get_tcp_flags(packet, &miss->flow); | |
3869 | stats->n_bytes += packet->size; | |
3870 | stats->n_packets++; | |
3871 | } | |
9d6ac44e | 3872 | |
bcd2633a | 3873 | facet = facet_lookup_valid(ofproto, &miss->flow); |
9d6ac44e | 3874 | if (!facet) { |
bcd2633a JP |
3875 | struct flow_wildcards wc; |
3876 | struct rule_dpif *rule; | |
3877 | struct xlate_out xout; | |
3878 | struct xlate_in xin; | |
3879 | ||
3880 | flow_wildcards_init_catchall(&wc); | |
3881 | rule = rule_dpif_lookup(ofproto, &miss->flow, &wc); | |
3882 | rule_credit_stats(rule, stats); | |
3883 | ||
3884 | xlate_in_init(&xin, ofproto, &miss->flow, &miss->initial_vals, rule, | |
3885 | stats->tcp_flags, NULL); | |
3886 | xin.resubmit_stats = stats; | |
3887 | xin.may_learn = true; | |
3888 | xlate_actions(&xin, &xout); | |
3889 | flow_wildcards_or(&xout.wc, &xout.wc, &wc); | |
3890 | ||
ba33dd03 EJ |
3891 | /* There does not exist a bijection between 'struct flow' and datapath |
3892 | * flow keys with fitness ODP_FIT_TOO_LITTLE. This breaks a fundamental | |
3893 | * assumption used throughout the facet and subfacet handling code. | |
3894 | * Since we have to handle these misses in userspace anyway, we simply | |
ec9f40dc | 3895 | * skip facet creation, avoiding the problem altogether. */ |
ba33dd03 | 3896 | if (miss->key_fitness == ODP_FIT_TOO_LITTLE |
bcd2633a JP |
3897 | || !flow_miss_should_make_facet(miss, &xout.wc)) { |
3898 | handle_flow_miss_without_facet(rule, &xout, miss, ops, n_ops); | |
9d6ac44e BP |
3899 | return; |
3900 | } | |
3901 | ||
bcd2633a JP |
3902 | facet = facet_create(miss, rule, &xout, stats); |
3903 | stats = NULL; | |
9d6ac44e | 3904 | } |
bcd2633a | 3905 | handle_flow_miss_with_facet(miss, facet, now, stats, ops, n_ops); |
9d6ac44e BP |
3906 | } |
3907 | ||
8f73d537 EJ |
3908 | static struct drop_key * |
3909 | drop_key_lookup(const struct dpif_backer *backer, const struct nlattr *key, | |
3910 | size_t key_len) | |
3911 | { | |
3912 | struct drop_key *drop_key; | |
3913 | ||
3914 | HMAP_FOR_EACH_WITH_HASH (drop_key, hmap_node, hash_bytes(key, key_len, 0), | |
3915 | &backer->drop_keys) { | |
3916 | if (drop_key->key_len == key_len | |
3917 | && !memcmp(drop_key->key, key, key_len)) { | |
3918 | return drop_key; | |
3919 | } | |
3920 | } | |
3921 | return NULL; | |
3922 | } | |
3923 | ||
3924 | static void | |
3925 | drop_key_clear(struct dpif_backer *backer) | |
3926 | { | |
3927 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 15); | |
3928 | struct drop_key *drop_key, *next; | |
3929 | ||
3930 | HMAP_FOR_EACH_SAFE (drop_key, next, hmap_node, &backer->drop_keys) { | |
3931 | int error; | |
3932 | ||
3933 | error = dpif_flow_del(backer->dpif, drop_key->key, drop_key->key_len, | |
3934 | NULL); | |
3935 | if (error && !VLOG_DROP_WARN(&rl)) { | |
3936 | struct ds ds = DS_EMPTY_INITIALIZER; | |
3937 | odp_flow_key_format(drop_key->key, drop_key->key_len, &ds); | |
3938 | VLOG_WARN("Failed to delete drop key (%s) (%s)", strerror(error), | |
3939 | ds_cstr(&ds)); | |
3940 | ds_destroy(&ds); | |
3941 | } | |
3942 | ||
3943 | hmap_remove(&backer->drop_keys, &drop_key->hmap_node); | |
3944 | free(drop_key->key); | |
3945 | free(drop_key); | |
3946 | } | |
3947 | } | |
3948 | ||
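drop_key_lookup() and drop_key_clear() are the two halves of a small cache of datapath flows that carry no actions, so packets from a port we cannot map to an ofproto are dropped in the kernel rather than repeatedly coming back up as misses. A minimal sketch of the install side, mirroring the logic that appears in handle_miss_upcalls() below; the function name is illustrative.

    /* Illustrative only: install a kernel drop flow for an unknown in_port. */
    static void
    example_install_drop_key(struct dpif_backer *backer,
                             const struct nlattr *key, size_t key_len)
    {
        struct drop_key *drop_key = drop_key_lookup(backer, key, key_len);

        if (!drop_key) {
            drop_key = xmalloc(sizeof *drop_key);
            drop_key->key = xmemdup(key, key_len);
            drop_key->key_len = key_len;

            hmap_insert(&backer->drop_keys, &drop_key->hmap_node,
                        hash_bytes(drop_key->key, drop_key->key_len, 0));

            /* No actions: matching packets are dropped without another upcall. */
            dpif_flow_put(backer->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY,
                          drop_key->key, drop_key->key_len, NULL, 0, NULL);
        }
    }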
e09ee259 EJ |
3949 | /* Given a datapath, packet, and flow metadata ('backer', 'packet', and 'key' |
3950 | * respectively), populates 'flow' with the result of odp_flow_key_to_flow(). | |
3951 | * Optionally, if nonnull, populates 'fitnessp' with the fitness of 'flow' as | |
3952 | * returned by odp_flow_key_to_flow(). Also, optionally populates 'ofproto' | |
3953 | * with the ofproto_dpif, and 'odp_in_port' with the datapath in_port, that | |
3954 | * 'packet' ingressed. | |
e2a6ca36 | 3955 | * |
e09ee259 EJ |
3956 | * If 'ofproto' is nonnull, requires 'flow''s in_port to exist. Otherwise sets |
3957 | * 'flow''s in_port to OFPP_NONE. | |
3958 | * | |
3959 | * This function does post-processing on data returned from | |
3960 | * odp_flow_key_to_flow() to help make VLAN splinters transparent to the rest | |
3961 | * of the upcall processing logic. In particular, if the extracted in_port is | |
3962 | * a VLAN splinter port, it replaces flow->in_port by the "real" port, sets | |
3963 | * flow->vlan_tci correctly for the VLAN of the VLAN splinter port, and pushes | |
3964 | * a VLAN header onto 'packet' (if it is nonnull). | |
3965 | * | |
c3f6c502 JP |
3966 | * Optionally, if 'initial_vals' is nonnull, sets 'initial_vals->vlan_tci' |
3967 | * to the VLAN TCI with which the packet was really received, that is, the | |
3968 | * actual VLAN TCI extracted by odp_flow_key_to_flow(). (This differs from | |
3969 | * the value returned in flow->vlan_tci only for packets received on | |
715b48aa | 3970 | * VLAN splinters.) |
e09ee259 | 3971 | * |
b9ad7294 EJ |
3972 | * Similarly, this function also includes some logic to help with tunnels. It |
3973 | * may modify 'flow' as necessary to make the tunneling implementation | |
3974 | * transparent to the upcall processing logic. | |
3975 | * | |
e09ee259 EJ |
3976 | * Returns 0 if successful, ENODEV if the parsed flow has no associated ofport, |
3977 | * or some other positive errno if there are other problems. */ | |
3978 | static int | |
3979 | ofproto_receive(const struct dpif_backer *backer, struct ofpbuf *packet, | |
3980 | const struct nlattr *key, size_t key_len, | |
3981 | struct flow *flow, enum odp_key_fitness *fitnessp, | |
3982 | struct ofproto_dpif **ofproto, uint32_t *odp_in_port, | |
14f94f9a | 3983 | struct initial_vals *initial_vals) |
e84173dc | 3984 | { |
e09ee259 EJ |
3985 | const struct ofport_dpif *port; |
3986 | enum odp_key_fitness fitness; | |
b9ad7294 | 3987 | int error = ENODEV; |
e09ee259 EJ |
3988 | |
3989 | fitness = odp_flow_key_to_flow(key, key_len, flow); | |
e84173dc | 3990 | if (fitness == ODP_FIT_ERROR) { |
e09ee259 EJ |
3991 | error = EINVAL; |
3992 | goto exit; | |
3993 | } | |
3994 | ||
14f94f9a JP |
3995 | if (initial_vals) { |
3996 | initial_vals->vlan_tci = flow->vlan_tci; | |
e84173dc | 3997 | } |
e84173dc | 3998 | |
e09ee259 EJ |
3999 | if (odp_in_port) { |
4000 | *odp_in_port = flow->in_port; | |
4001 | } | |
4002 | ||
3abe1835 BP |
4003 | port = (tnl_port_should_receive(flow) |
4004 | ? ofport_dpif_cast(tnl_port_receive(flow)) | |
4005 | : odp_port_to_ofport(backer, flow->in_port)); | |
4006 | flow->in_port = port ? port->up.ofp_port : OFPP_NONE; | |
4007 | if (!port) { | |
4008 | goto exit; | |
4009 | } | |
4010 | ||
4011 | /* XXX: Since the tunnel module is not scoped per backer, for a tunnel port | |
4012 | * it's theoretically possible that we'll receive an ofport belonging to an | |
4013 | * entirely different datapath. In practice, this can't happen because no | |
4014 | * platform has two separate datapaths that each support tunneling. */ | |
4015 | ovs_assert(ofproto_dpif_cast(port->up.ofproto)->backer == backer); | |
4016 | ||
4017 | if (vsp_adjust_flow(ofproto_dpif_cast(port->up.ofproto), flow)) { | |
4018 | if (packet) { | |
4019 | /* Make the packet resemble the flow, so that it gets sent to | |
4020 | * an OpenFlow controller properly, so that it looks correct | |
4021 | * for sFlow, and so that flow_extract() will get the correct | |
4022 | * vlan_tci if it is called on 'packet'. | |
4023 | * | |
4024 | * The allocated space inside 'packet' probably also contains | |
4025 | * 'key', that is, both 'packet' and 'key' are probably part of | |
4026 | * a struct dpif_upcall (see the large comment on that | |
4027 | * structure definition), so pushing data on 'packet' is in | |
4028 | * general not a good idea since it could overwrite 'key' or | |
4029 | * free it as a side effect. However, it's OK in this special | |
4030 | * case because we know that 'packet' is inside a Netlink | |
4031 | * attribute: pushing 4 bytes will just overwrite the 4-byte | |
4032 | * "struct nlattr", which is fine since we don't need that | |
4033 | * header anymore. */ | |
4034 | eth_push_vlan(packet, flow->vlan_tci); | |
4035 | } | |
4036 | /* We can't reproduce 'key' from 'flow'. */ | |
4037 | fitness = fitness == ODP_FIT_PERFECT ? ODP_FIT_TOO_MUCH : fitness; | |
52a90c29 | 4038 | } |
e09ee259 | 4039 | error = 0; |
52a90c29 | 4040 | |
b9ad7294 EJ |
4041 | if (ofproto) { |
4042 | *ofproto = ofproto_dpif_cast(port->up.ofproto); | |
4043 | } | |
4044 | ||
e09ee259 EJ |
4045 | exit: |
4046 | if (fitnessp) { | |
4047 | *fitnessp = fitness; | |
4048 | } | |
4049 | return error; | |
e84173dc BP |
4050 | } |
4051 | ||
501f8d1f | 4052 | static void |
acf60855 | 4053 | handle_miss_upcalls(struct dpif_backer *backer, struct dpif_upcall *upcalls, |
501f8d1f BP |
4054 | size_t n_upcalls) |
4055 | { | |
4056 | struct dpif_upcall *upcall; | |
b23cdad9 BP |
4057 | struct flow_miss *miss; |
4058 | struct flow_miss misses[FLOW_MISS_MAX_BATCH]; | |
501f8d1f | 4059 | struct flow_miss_op flow_miss_ops[FLOW_MISS_MAX_BATCH * 2]; |
c2b565b5 | 4060 | struct dpif_op *dpif_ops[FLOW_MISS_MAX_BATCH * 2]; |
501f8d1f | 4061 | struct hmap todo; |
b23cdad9 | 4062 | int n_misses; |
501f8d1f BP |
4063 | size_t n_ops; |
4064 | size_t i; | |
4065 | ||
4066 | if (!n_upcalls) { | |
4067 | return; | |
4068 | } | |
4069 | ||
4070 | /* Construct the to-do list. | |
4071 | * | |
4072 | * This just amounts to extracting the flow from each packet and sticking | |
4073 | * the packets that have the same flow in the same "flow_miss" structure so | |
4074 | * that we can process them together. */ | |
4075 | hmap_init(&todo); | |
b23cdad9 | 4076 | n_misses = 0; |
501f8d1f | 4077 | for (upcall = upcalls; upcall < &upcalls[n_upcalls]; upcall++) { |
b23cdad9 BP |
4078 | struct flow_miss *miss = &misses[n_misses]; |
4079 | struct flow_miss *existing_miss; | |
acf60855 | 4080 | struct ofproto_dpif *ofproto; |
a088a1ff | 4081 | uint32_t odp_in_port; |
1d446463 | 4082 | struct flow flow; |
b23cdad9 | 4083 | uint32_t hash; |
e09ee259 | 4084 | int error; |
501f8d1f | 4085 | |
e09ee259 EJ |
4086 | error = ofproto_receive(backer, upcall->packet, upcall->key, |
4087 | upcall->key_len, &flow, &miss->key_fitness, | |
14f94f9a | 4088 | &ofproto, &odp_in_port, &miss->initial_vals); |
e09ee259 | 4089 | if (error == ENODEV) { |
8f73d537 EJ |
4090 | struct drop_key *drop_key; |
4091 | ||
c3385516 AW |
4092 | /* Received packet on datapath port for which we couldn't |
4093 | * associate an ofproto. This can happen if a port is removed | |
4094 | * while traffic is being received. Print a rate-limited message | |
8f73d537 EJ |
4095 | * in case it happens frequently. Install a drop flow so |
4096 | * that future packets of the flow are inexpensively dropped | |
4097 | * in the kernel. */ | |
c3385516 AW |
4098 | VLOG_INFO_RL(&rl, "received packet on unassociated datapath port " |
4099 | "%"PRIu32, odp_in_port); | |
8f73d537 EJ |
4100 | |
4101 | drop_key = drop_key_lookup(backer, upcall->key, upcall->key_len); | |
4102 | if (!drop_key) { | |
4103 | drop_key = xmalloc(sizeof *drop_key); | |
4104 | drop_key->key = xmemdup(upcall->key, upcall->key_len); | |
4105 | drop_key->key_len = upcall->key_len; | |
4106 | ||
4107 | hmap_insert(&backer->drop_keys, &drop_key->hmap_node, | |
4108 | hash_bytes(drop_key->key, drop_key->key_len, 0)); | |
4109 | dpif_flow_put(backer->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY, | |
4110 | drop_key->key, drop_key->key_len, NULL, 0, NULL); | |
4111 | } | |
4112 | continue; | |
acf60855 | 4113 | } |
e09ee259 | 4114 | if (error) { |
b0f7b9b5 BP |
4115 | continue; |
4116 | } | |
735d7efb AZ |
4117 | |
4118 | ofproto->n_missed++; | |
72e8bf28 | 4119 | flow_extract(upcall->packet, flow.skb_priority, flow.skb_mark, |
1d446463 | 4120 | &flow.tunnel, flow.in_port, &miss->flow); |
501f8d1f | 4121 | |
501f8d1f | 4122 | /* Add other packets to a to-do list. */ |
b23cdad9 | 4123 | hash = flow_hash(&miss->flow, 0); |
ddbc5954 | 4124 | existing_miss = flow_miss_find(&todo, ofproto, &miss->flow, hash); |
b23cdad9 BP |
4125 | if (!existing_miss) { |
4126 | hmap_insert(&todo, &miss->hmap_node, hash); | |
acf60855 | 4127 | miss->ofproto = ofproto; |
b23cdad9 BP |
4128 | miss->key = upcall->key; |
4129 | miss->key_len = upcall->key_len; | |
6a7e895f | 4130 | miss->upcall_type = upcall->type; |
b23cdad9 BP |
4131 | list_init(&miss->packets); |
4132 | ||
4133 | n_misses++; | |
4134 | } else { | |
4135 | miss = existing_miss; | |
4136 | } | |
501f8d1f BP |
4137 | list_push_back(&miss->packets, &upcall->packet->list_node); |
4138 | } | |
4139 | ||
4140 | /* Process each element in the to-do list, constructing the set of | |
4141 | * operations to batch. */ | |
4142 | n_ops = 0; | |
33bb0caa | 4143 | HMAP_FOR_EACH (miss, hmap_node, &todo) { |
acf60855 | 4144 | handle_flow_miss(miss, flow_miss_ops, &n_ops); |
abe529af | 4145 | } |
cb22974d | 4146 | ovs_assert(n_ops <= ARRAY_SIZE(flow_miss_ops)); |
501f8d1f BP |
4147 | |
4148 | /* Execute batch. */ | |
4149 | for (i = 0; i < n_ops; i++) { | |
4150 | dpif_ops[i] = &flow_miss_ops[i].dpif_op; | |
4151 | } | |
acf60855 | 4152 | dpif_operate(backer->dpif, dpif_ops, n_ops); |
501f8d1f | 4153 | |
c84451a6 | 4154 | /* Free memory. */ |
501f8d1f | 4155 | for (i = 0; i < n_ops; i++) { |
bbafd73b EJ |
4156 | if (flow_miss_ops[i].xout_garbage) { |
4157 | xlate_out_uninit(&flow_miss_ops[i].xout); | |
4158 | } | |
501f8d1f | 4159 | } |
33bb0caa | 4160 | hmap_destroy(&todo); |
abe529af BP |
4161 | } |
4162 | ||
29089a54 RL |
4163 | static enum { SFLOW_UPCALL, MISS_UPCALL, BAD_UPCALL, FLOW_SAMPLE_UPCALL, |
4164 | IPFIX_UPCALL } | |
6a7e895f BP |
4165 | classify_upcall(const struct dpif_upcall *upcall) |
4166 | { | |
29089a54 | 4167 | size_t userdata_len; |
6a7e895f BP |
4168 | union user_action_cookie cookie; |
4169 | ||
4170 | /* First look at the upcall type. */ | |
4171 | switch (upcall->type) { | |
4172 | case DPIF_UC_ACTION: | |
4173 | break; | |
4174 | ||
4175 | case DPIF_UC_MISS: | |
4176 | return MISS_UPCALL; | |
4177 | ||
4178 | case DPIF_N_UC_TYPES: | |
4179 | default: | |
4180 | VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, upcall->type); | |
4181 | return BAD_UPCALL; | |
4182 | } | |
4183 | ||
4184 | /* "action" upcalls need a closer look. */ | |
e995e3df BP |
4185 | if (!upcall->userdata) { |
4186 | VLOG_WARN_RL(&rl, "action upcall missing cookie"); | |
4187 | return BAD_UPCALL; | |
4188 | } | |
29089a54 RL |
4189 | userdata_len = nl_attr_get_size(upcall->userdata); |
4190 | if (userdata_len < sizeof cookie.type | |
4191 | || userdata_len > sizeof cookie) { | |
e995e3df | 4192 | VLOG_WARN_RL(&rl, "action upcall cookie has unexpected size %zu", |
29089a54 | 4193 | userdata_len); |
e995e3df BP |
4194 | return BAD_UPCALL; |
4195 | } | |
29089a54 RL |
4196 | memset(&cookie, 0, sizeof cookie); |
4197 | memcpy(&cookie, nl_attr_get(upcall->userdata), userdata_len); | |
4198 | if (userdata_len == sizeof cookie.sflow | |
4199 | && cookie.type == USER_ACTION_COOKIE_SFLOW) { | |
6a7e895f | 4200 | return SFLOW_UPCALL; |
29089a54 RL |
4201 | } else if (userdata_len == sizeof cookie.slow_path |
4202 | && cookie.type == USER_ACTION_COOKIE_SLOW_PATH) { | |
6a7e895f | 4203 | return MISS_UPCALL; |
29089a54 RL |
4204 | } else if (userdata_len == sizeof cookie.flow_sample |
4205 | && cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) { | |
4206 | return FLOW_SAMPLE_UPCALL; | |
4207 | } else if (userdata_len == sizeof cookie.ipfix | |
4208 | && cookie.type == USER_ACTION_COOKIE_IPFIX) { | |
4209 | return IPFIX_UPCALL; | |
4210 | } else { | |
4211 | VLOG_WARN_RL(&rl, "invalid user cookie of type %"PRIu16 | |
4212 | " and size %zu", cookie.type, userdata_len); | |
6a7e895f BP |
4213 | return BAD_UPCALL; |
4214 | } | |
4215 | } | |
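/* For reference: handle_upcalls() below dispatches on this classification.
 * MISS_UPCALLs are batched and handed to handle_miss_upcalls(); SFLOW_UPCALL,
 * FLOW_SAMPLE_UPCALL and IPFIX_UPCALL go to handle_sflow_upcall(),
 * handle_flow_sample_upcall() and handle_ipfix_upcall() respectively; and
 * BAD_UPCALLs are discarded. */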
4216 | ||
abe529af | 4217 | static void |
acf60855 | 4218 | handle_sflow_upcall(struct dpif_backer *backer, |
6a7e895f | 4219 | const struct dpif_upcall *upcall) |
abe529af | 4220 | { |
acf60855 | 4221 | struct ofproto_dpif *ofproto; |
1673e0e4 | 4222 | union user_action_cookie cookie; |
e84173dc | 4223 | struct flow flow; |
e1b1d06a | 4224 | uint32_t odp_in_port; |
abe529af | 4225 | |
e09ee259 EJ |
4226 | if (ofproto_receive(backer, upcall->packet, upcall->key, upcall->key_len, |
4227 | &flow, NULL, &ofproto, &odp_in_port, NULL) | |
4228 | || !ofproto->sflow) { | |
e84173dc BP |
4229 | return; |
4230 | } | |
4231 | ||
29089a54 RL |
4232 | memset(&cookie, 0, sizeof cookie); |
4233 | memcpy(&cookie, nl_attr_get(upcall->userdata), sizeof cookie.sflow); | |
e1b1d06a JP |
4234 | dpif_sflow_received(ofproto->sflow, upcall->packet, &flow, |
4235 | odp_in_port, &cookie); | |
6ff686f2 PS |
4236 | } |
4237 | ||
29089a54 RL |
4238 | static void |
4239 | handle_flow_sample_upcall(struct dpif_backer *backer, | |
4240 | const struct dpif_upcall *upcall) | |
4241 | { | |
4242 | struct ofproto_dpif *ofproto; | |
4243 | union user_action_cookie cookie; | |
4244 | struct flow flow; | |
4245 | ||
4246 | if (ofproto_receive(backer, upcall->packet, upcall->key, upcall->key_len, | |
4247 | &flow, NULL, &ofproto, NULL, NULL) | |
4248 | || !ofproto->ipfix) { | |
4249 | return; | |
4250 | } | |
4251 | ||
4252 | memset(&cookie, 0, sizeof cookie); | |
4253 | memcpy(&cookie, nl_attr_get(upcall->userdata), sizeof cookie.flow_sample); | |
4254 | ||
4255 | /* The flow reflects exactly the contents of the packet. Sample | |
4256 | * the packet using it. */ | |
4257 | dpif_ipfix_flow_sample(ofproto->ipfix, upcall->packet, &flow, | |
4258 | cookie.flow_sample.collector_set_id, | |
4259 | cookie.flow_sample.probability, | |
4260 | cookie.flow_sample.obs_domain_id, | |
4261 | cookie.flow_sample.obs_point_id); | |
4262 | } | |
4263 | ||
4264 | static void | |
4265 | handle_ipfix_upcall(struct dpif_backer *backer, | |
4266 | const struct dpif_upcall *upcall) | |
4267 | { | |
4268 | struct ofproto_dpif *ofproto; | |
4269 | struct flow flow; | |
4270 | ||
4271 | if (ofproto_receive(backer, upcall->packet, upcall->key, upcall->key_len, | |
4272 | &flow, NULL, &ofproto, NULL, NULL) | |
4273 | || !ofproto->ipfix) { | |
4274 | return; | |
4275 | } | |
4276 | ||
4277 | /* The flow reflects exactly the contents of the packet. Sample | |
4278 | * the packet using it. */ | |
4279 | dpif_ipfix_bridge_sample(ofproto->ipfix, upcall->packet, &flow); | |
4280 | } | |
4281 | ||
9b16c439 | 4282 | static int |
acf60855 | 4283 | handle_upcalls(struct dpif_backer *backer, unsigned int max_batch) |
6ff686f2 | 4284 | { |
9b16c439 | 4285 | struct dpif_upcall misses[FLOW_MISS_MAX_BATCH]; |
90a7c55e BP |
4286 | struct ofpbuf miss_bufs[FLOW_MISS_MAX_BATCH]; |
4287 | uint64_t miss_buf_stubs[FLOW_MISS_MAX_BATCH][4096 / 8]; | |
4288 | int n_processed; | |
9b16c439 BP |
4289 | int n_misses; |
4290 | int i; | |
abe529af | 4291 | |
cb22974d | 4292 | ovs_assert(max_batch <= FLOW_MISS_MAX_BATCH); |
abe529af | 4293 | |
9b16c439 | 4294 | n_misses = 0; |
90a7c55e | 4295 | for (n_processed = 0; n_processed < max_batch; n_processed++) { |
9b16c439 | 4296 | struct dpif_upcall *upcall = &misses[n_misses]; |
90a7c55e | 4297 | struct ofpbuf *buf = &miss_bufs[n_misses]; |
9b16c439 BP |
4298 | int error; |
4299 | ||
90a7c55e BP |
4300 | ofpbuf_use_stub(buf, miss_buf_stubs[n_misses], |
4301 | sizeof miss_buf_stubs[n_misses]); | |
acf60855 | 4302 | error = dpif_recv(backer->dpif, upcall, buf); |
9b16c439 | 4303 | if (error) { |
90a7c55e | 4304 | ofpbuf_uninit(buf); |
9b16c439 BP |
4305 | break; |
4306 | } | |
4307 | ||
6a7e895f BP |
4308 | switch (classify_upcall(upcall)) { |
4309 | case MISS_UPCALL: | |
9b16c439 BP |
4310 | /* Handle it later. */ |
4311 | n_misses++; | |
4312 | break; | |
4313 | ||
6a7e895f | 4314 | case SFLOW_UPCALL: |
acf60855 | 4315 | handle_sflow_upcall(backer, upcall); |
6a7e895f BP |
4316 | ofpbuf_uninit(buf); |
4317 | break; | |
4318 | ||
29089a54 RL |
4319 | case FLOW_SAMPLE_UPCALL: |
4320 | handle_flow_sample_upcall(backer, upcall); | |
4321 | ofpbuf_uninit(buf); | |
4322 | break; | |
4323 | ||
4324 | case IPFIX_UPCALL: | |
4325 | handle_ipfix_upcall(backer, upcall); | |
4326 | ofpbuf_uninit(buf); | |
4327 | break; | |
4328 | ||
6a7e895f BP |
4329 | case BAD_UPCALL: |
4330 | ofpbuf_uninit(buf); | |
9b16c439 BP |
4331 | break; |
4332 | } | |
abe529af | 4333 | } |
9b16c439 | 4334 | |
6a7e895f | 4335 | /* Handle deferred MISS_UPCALL processing. */ |
acf60855 | 4336 | handle_miss_upcalls(backer, misses, n_misses); |
90a7c55e BP |
4337 | for (i = 0; i < n_misses; i++) { |
4338 | ofpbuf_uninit(&miss_bufs[i]); | |
4339 | } | |
9b16c439 | 4340 | |
90a7c55e | 4341 | return n_processed; |
abe529af BP |
4342 | } |
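/* Illustrative sketch, not part of the original file: one way a run loop
 * could use handle_upcalls()'s return value to spend a bounded per-iteration
 * budget of upcalls.  The helper name and budget size are assumptions made
 * for the example. */
static void
handle_upcalls_budget_example(struct dpif_backer *backer)
{
    int budget = 10 * FLOW_MISS_MAX_BATCH;

    while (budget > 0) {
        int n = handle_upcalls(backer, FLOW_MISS_MAX_BATCH);

        budget -= n;
        if (n < FLOW_MISS_MAX_BATCH) {
            /* Fewer upcalls than requested means the datapath's receive
             * queue is drained for now. */
            break;
        }
    }
}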
4343 | \f | |
4344 | /* Flow expiration. */ | |
4345 | ||
04d08d54 | 4346 | static int subfacet_max_idle(const struct dpif_backer *); |
acf60855 | 4347 | static void update_stats(struct dpif_backer *); |
abe529af | 4348 | static void rule_expire(struct rule_dpif *); |
04d08d54 | 4349 | static void expire_subfacets(struct dpif_backer *, int dp_max_idle); |
abe529af BP |
4350 | |
4351 | /* This function is called periodically by run(). Its job is to collect | |
4352 | * updates for the flows that have been installed into the datapath, most | |
4353 | * importantly when they last were used, and then use that information to | |
4354 | * expire flows that have not been used recently. | |
4355 | * | |
4356 | * Returns the number of milliseconds after which it should be called again. */ | |
4357 | static int | |
acf60855 | 4358 | expire(struct dpif_backer *backer) |
abe529af | 4359 | { |
acf60855 | 4360 | struct ofproto_dpif *ofproto; |
dc54ef36 | 4361 | size_t n_subfacets; |
04d08d54 | 4362 | int max_idle; |
abe529af | 4363 | |
8f73d537 EJ |
4364 | /* Periodically clear out the drop keys in an effort to keep them |
4365 | * relatively few. */ | |
4366 | drop_key_clear(backer); | |
4367 | ||
acf60855 JP |
4368 | /* Update stats for each flow in the backer. */ |
4369 | update_stats(backer); | |
abe529af | 4370 | |
04d08d54 EJ |
4371 | n_subfacets = hmap_count(&backer->subfacets); |
4372 | if (n_subfacets) { | |
9fc0165a | 4373 | struct subfacet *subfacet; |
04d08d54 | 4374 | long long int total, now; |
abe529af | 4375 | |
04d08d54 EJ |
4376 | total = 0; |
4377 | now = time_msec(); | |
4378 | HMAP_FOR_EACH (subfacet, hmap_node, &backer->subfacets) { | |
4379 | total += now - subfacet->created; | |
acf60855 | 4380 | } |
04d08d54 EJ |
4381 | backer->avg_subfacet_life += total / n_subfacets; |
4382 | } | |
4383 | backer->avg_subfacet_life /= 2; | |
0697b5c3 | 4384 | |
04d08d54 EJ |
4385 | backer->avg_n_subfacet += n_subfacets; |
4386 | backer->avg_n_subfacet /= 2; | |
4387 | ||
4388 | backer->max_n_subfacet = MAX(backer->max_n_subfacet, n_subfacets); | |
655ab909 | 4389 | |
04d08d54 EJ |
4390 | max_idle = subfacet_max_idle(backer); |
4391 | expire_subfacets(backer, max_idle); | |
acf60855 | 4392 | |
04d08d54 EJ |
4393 | HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) { |
4394 | struct rule *rule, *next_rule; | |
4395 | ||
4396 | if (ofproto->backer != backer) { | |
4397 | continue; | |
4398 | } | |
acf60855 JP |
4399 | |
4400 | /* Expire OpenFlow flows whose idle_timeout or hard_timeout | |
4401 | * has passed. */ | |
e503cc19 SH |
4402 | LIST_FOR_EACH_SAFE (rule, next_rule, expirable, |
4403 | &ofproto->up.expirable) { | |
4404 | rule_expire(rule_dpif_cast(rule)); | |
0697b5c3 | 4405 | } |
abe529af | 4406 | |
acf60855 JP |
4407 | /* All outstanding data in existing flows has been accounted for, so it's a | 
4408 | * good time to do bond rebalancing. */ | |
4409 | if (ofproto->has_bonded_bundles) { | |
4410 | struct ofbundle *bundle; | |
abe529af | 4411 | |
acf60855 JP |
4412 | HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) { |
4413 | if (bundle->bond) { | |
2cc3c58e | 4414 | bond_rebalance(bundle->bond, &backer->revalidate_set); |
acf60855 | 4415 | } |
abe529af BP |
4416 | } |
4417 | } | |
4418 | } | |
4419 | ||
acf60855 | 4420 | return MIN(max_idle, 1000); |
abe529af BP |
4421 | } |
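/* Illustrative sketch, not part of the original file: how a periodic caller
 * might consume expire()'s return value.  The 'next_expiration' timer field
 * and the helper name are assumptions made for the example. */
static void
expire_periodically_example(struct dpif_backer *backer)
{
    if (timer_expired(&backer->next_expiration)) {
        int delay = expire(backer);

        timer_set_duration(&backer->next_expiration, delay);
    }
}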
4422 | ||
a218c879 BP |
4423 | /* Updates flow table statistics given that the datapath just reported 'stats' |
4424 | * as 'subfacet''s statistics. */ | |
4425 | static void | |
4426 | update_subfacet_stats(struct subfacet *subfacet, | |
4427 | const struct dpif_flow_stats *stats) | |
4428 | { | |
4429 | struct facet *facet = subfacet->facet; | |
787f3d59 | 4430 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto); |
9dfb1f78 EJ |
4431 | struct dpif_flow_stats diff; |
4432 | ||
4433 | diff.tcp_flags = stats->tcp_flags; | |
4434 | diff.used = stats->used; | |
a218c879 BP |
4435 | |
4436 | if (stats->n_packets >= subfacet->dp_packet_count) { | |
9dfb1f78 | 4437 | diff.n_packets = stats->n_packets - subfacet->dp_packet_count; |
a218c879 BP |
4438 | } else { |
4439 | VLOG_WARN_RL(&rl, "unexpected packet count from the datapath"); | |
9dfb1f78 | 4440 | diff.n_packets = 0; |
a218c879 BP |
4441 | } |
4442 | ||
4443 | if (stats->n_bytes >= subfacet->dp_byte_count) { | |
9dfb1f78 | 4444 | diff.n_bytes = stats->n_bytes - subfacet->dp_byte_count; |
a218c879 BP |
4445 | } else { |
4446 | VLOG_WARN_RL(&rl, "unexpected byte count from datapath"); | |
9dfb1f78 | 4447 | diff.n_bytes = 0; |
a218c879 BP |
4448 | } |
4449 | ||
787f3d59 | 4450 | ofproto->n_hit += diff.n_packets; |
a218c879 BP |
4451 | subfacet->dp_packet_count = stats->n_packets; |
4452 | subfacet->dp_byte_count = stats->n_bytes; | |
9dfb1f78 | 4453 | subfacet_update_stats(subfacet, &diff); |
a218c879 | 4454 | |
a218c879 BP |
4455 | if (facet->accounted_bytes < facet->byte_count) { |
4456 | facet_learn(facet); | |
4457 | facet_account(facet); | |
4458 | facet->accounted_bytes = facet->byte_count; | |
4459 | } | |
a218c879 BP |
4460 | } |
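/* Worked example for the deltas above: if the datapath reports
 * stats->n_packets == 10 while subfacet->dp_packet_count was 7, then
 * diff.n_packets == 3 and those 3 packets are credited to the facet (and,
 * via facet_push_stats(), eventually to its rule).  If a datapath counter
 * ever appears to move backwards, the delta is treated as 0 and a warning
 * is logged instead. */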
4461 | ||
4462 | /* 'key' with length 'key_len' bytes is a flow in 'backer''s datapath that we | 
4463 | * know nothing about, or that shouldn't be installed but was anyway. Delete it. */ | 
4464 | static void | |
04d08d54 | 4465 | delete_unexpected_flow(struct dpif_backer *backer, |
a218c879 BP |
4466 | const struct nlattr *key, size_t key_len) |
4467 | { | |
4468 | if (!VLOG_DROP_WARN(&rl)) { | |
4469 | struct ds s; | |
4470 | ||
4471 | ds_init(&s); | |
4472 | odp_flow_key_format(key, key_len, &s); | |
04d08d54 | 4473 | VLOG_WARN("unexpected flow: %s", ds_cstr(&s)); |
a218c879 BP |
4474 | ds_destroy(&s); |
4475 | } | |
4476 | ||
4477 | COVERAGE_INC(facet_unexpected); | |
04d08d54 | 4478 | dpif_flow_del(backer->dpif, key, key_len, NULL); |
a218c879 BP |
4479 | } |
4480 | ||
abe529af BP |
4481 | /* Update 'packet_count', 'byte_count', and 'used' members of installed facets. |
4482 | * | |
4483 | * This function also pushes statistics updates to rules which each facet | |
4484 | * resubmits into. Generally these statistics will be accurate. However, if a | |
4485 | * facet changes the rule it resubmits into at some time in between | |
4486 | * update_stats() runs, it is possible that statistics accrued to the | |
4487 | * old rule will be incorrectly attributed to the new rule. This could be | |
4488 | * avoided by calling update_stats() whenever rules are created or | |
4489 | * deleted. However, the benefit of having perfectly accurate statistics does | 
4490 | * not justify the performance impact of making so many calls to the datapath. | 
735d7efb AZ |
4491 | * |
4492 | * In addition, this function maintains per-ofproto flow hit counts. The patch | 
4493 | * port is not treated specially: for example, a packet that ingresses on br0 | 
4494 | * and is patched into br1 increases the hit count of br0 by 1 but does not | 
4495 | * affect the hit or miss counts of br1. | 
abe529af BP |
4496 | */ |
4497 | static void | |
acf60855 | 4498 | update_stats(struct dpif_backer *backer) |
abe529af BP |
4499 | { |
4500 | const struct dpif_flow_stats *stats; | |
4501 | struct dpif_flow_dump dump; | |
4502 | const struct nlattr *key; | |
4503 | size_t key_len; | |
4504 | ||
acf60855 | 4505 | dpif_flow_dump_start(&dump, backer->dpif); |
abe529af | 4506 | while (dpif_flow_dump_next(&dump, &key, &key_len, NULL, NULL, &stats)) { |
b0f7b9b5 | 4507 | struct subfacet *subfacet; |
acf60855 | 4508 | uint32_t key_hash; |
abe529af | 4509 | |
acf60855 | 4510 | key_hash = odp_flow_key_hash(key, key_len); |
04d08d54 | 4511 | subfacet = subfacet_find(backer, key, key_len, key_hash); |
6a7e895f BP |
4512 | switch (subfacet ? subfacet->path : SF_NOT_INSTALLED) { |
4513 | case SF_FAST_PATH: | |
a218c879 | 4514 | update_subfacet_stats(subfacet, stats); |
6a7e895f BP |
4515 | break; |
4516 | ||
4517 | case SF_SLOW_PATH: | |
4518 | /* Stats are updated per-packet. */ | |
4519 | break; | |
4520 | ||
4521 | case SF_NOT_INSTALLED: | |
4522 | default: | |
04d08d54 | 4523 | delete_unexpected_flow(backer, key, key_len); |
6a7e895f | 4524 | break; |
abe529af | 4525 | } |
8fa4d1d0 | 4526 | run_fast_rl(); |
abe529af BP |
4527 | } |
4528 | dpif_flow_dump_done(&dump); | |
3af56aef | 4529 | |
dc54ef36 | 4530 | update_moving_averages(backer); |
abe529af BP |
4531 | } |
4532 | ||
4533 | /* Calculates and returns the number of milliseconds of idle time after which | |
b0f7b9b5 BP |
4534 | * subfacets should expire from the datapath. When a subfacet expires, we fold |
4535 | * its statistics into its facet, and when a facet's last subfacet expires, we | |
4536 | * fold its statistics into its rule. */ | 
abe529af | 4537 | static int |
04d08d54 | 4538 | subfacet_max_idle(const struct dpif_backer *backer) |
abe529af BP |
4539 | { |
4540 | /* | |
4541 | * Idle time histogram. | |
4542 | * | |
b0f7b9b5 BP |
4543 | * Most of the time a switch has a relatively small number of subfacets. |
4544 | * When this is the case we might as well keep statistics for all of them | |
4545 | * in userspace and cache them in the kernel datapath for performance as | 
abe529af BP |
4546 | * well. |
4547 | * | |
b0f7b9b5 | 4548 | * As the number of subfacets increases, the memory required to maintain |
abe529af | 4549 | * statistics about them in userspace and in the kernel becomes |
b0f7b9b5 BP |
4550 | * significant. However, with a large number of subfacets it is likely |
4551 | * that only a few of them are "heavy hitters" that consume a large amount | |
4552 | * of bandwidth. At this point, only heavy hitters are worth caching in | |
4553 | * the kernel and maintaining in userspace; other subfacets we can | 
4554 | * discard. | |
abe529af BP |
4555 | * |
4556 | * The technique used to compute the idle time is to build a histogram with | |
b0f7b9b5 | 4557 | * N_BUCKETS buckets whose width is BUCKET_WIDTH msecs each. Each subfacet |
abe529af BP |
4558 | * that is installed in the kernel gets dropped in the appropriate bucket. |
4559 | * After the histogram has been built, we compute the cutoff so that only | |
b0f7b9b5 | 4560 | * the most-recently-used 1% of subfacets (but at least |
380f49c4 | 4561 | * flow_eviction_threshold flows) are kept cached. At least |
b0f7b9b5 BP |
4562 | * the most-recently-used bucket of subfacets is kept, so actually an |
4563 | * arbitrary number of subfacets can be kept in any given expiration run | |
084f5290 SH |
4564 | * (though the next run will delete most of those unless they receive |
4565 | * additional data). | |
abe529af | 4566 | * |
b0f7b9b5 BP |
4567 | * This requires a second pass through the subfacets, in addition to the |
4568 | * pass made by update_stats(), because update_stats() only visits subfacets | 
4569 | * that are currently installed in the datapath. | 
abe529af BP |
4570 | */ |
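/* Worked example (illustrative; assumes BUCKET_WIDTH works out to 100 ms, so
 * N_BUCKETS == 50): a subfacet idle for 250 ms lands in bucket 2.  If
 * buckets[0] alone does not reach MAX(flow_eviction_threshold, total / 100)
 * but buckets[0] + buckets[1] does, the loop below stops with bucket == 2
 * and the function returns 200 ms, so subfacets idle for longer than 200 ms
 * are expired on this run. */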
4571 | enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) }; | |
4572 | enum { N_BUCKETS = 5000 / BUCKET_WIDTH }; | |
4573 | int buckets[N_BUCKETS] = { 0 }; | |
f11c1ef4 | 4574 | int total, subtotal, bucket; |
b0f7b9b5 | 4575 | struct subfacet *subfacet; |
abe529af BP |
4576 | long long int now; |
4577 | int i; | |
4578 | ||
04d08d54 | 4579 | total = hmap_count(&backer->subfacets); |
380f49c4 | 4580 | if (total <= flow_eviction_threshold) { |
abe529af BP |
4581 | return N_BUCKETS * BUCKET_WIDTH; |
4582 | } | |
4583 | ||
4584 | /* Build histogram. */ | |
4585 | now = time_msec(); | |
04d08d54 | 4586 | HMAP_FOR_EACH (subfacet, hmap_node, &backer->subfacets) { |
b0f7b9b5 | 4587 | long long int idle = now - subfacet->used; |
abe529af BP |
4588 | int bucket = (idle <= 0 ? 0 |
4589 | : idle >= BUCKET_WIDTH * N_BUCKETS ? N_BUCKETS - 1 | |
4590 | : (unsigned int) idle / BUCKET_WIDTH); | |
4591 | buckets[bucket]++; | |
4592 | } | |
4593 | ||
4594 | /* Find the first bucket whose flows should be expired. */ | |
f11c1ef4 SH |
4595 | subtotal = bucket = 0; |
4596 | do { | |
4597 | subtotal += buckets[bucket++]; | |
084f5290 | 4598 | } while (bucket < N_BUCKETS && |
380f49c4 | 4599 | subtotal < MAX(flow_eviction_threshold, total / 100)); |
abe529af BP |
4600 | |
4601 | if (VLOG_IS_DBG_ENABLED()) { | |
4602 | struct ds s; | |
4603 | ||
4604 | ds_init(&s); | |
4605 | ds_put_cstr(&s, "keep"); | |
4606 | for (i = 0; i < N_BUCKETS; i++) { | |
4607 | if (i == bucket) { | |
4608 | ds_put_cstr(&s, ", drop"); | |
4609 | } | |
4610 | if (buckets[i]) { | |
4611 | ds_put_format(&s, " %d:%d", i * BUCKET_WIDTH, buckets[i]); | |
4612 | } | |
4613 | } | |
04d08d54 | 4614 | VLOG_INFO("%s (msec:count)", ds_cstr(&s)); |
abe529af BP |
4615 | ds_destroy(&s); |
4616 | } | |
4617 | ||
4618 | return bucket * BUCKET_WIDTH; | |
4619 | } | |
4620 | ||
abe529af | 4621 | static void |
04d08d54 | 4622 | expire_subfacets(struct dpif_backer *backer, int dp_max_idle) |
abe529af | 4623 | { |
625b0720 BP |
4624 | /* Cutoff time for most flows. */ |
4625 | long long int normal_cutoff = time_msec() - dp_max_idle; | |
4626 | ||
4627 | /* We really want to keep flows for special protocols around, so use a more | |
4628 | * conservative cutoff. */ | |
4629 | long long int special_cutoff = time_msec() - 10000; | |
b99d3cee | 4630 | |
b0f7b9b5 | 4631 | struct subfacet *subfacet, *next_subfacet; |
1d85f9e5 | 4632 | struct subfacet *batch[SUBFACET_DESTROY_MAX_BATCH]; |
b99d3cee | 4633 | int n_batch; |
abe529af | 4634 | |
b99d3cee | 4635 | n_batch = 0; |
b0f7b9b5 | 4636 | HMAP_FOR_EACH_SAFE (subfacet, next_subfacet, hmap_node, |
04d08d54 | 4637 | &backer->subfacets) { |
625b0720 BP |
4638 | long long int cutoff; |
4639 | ||
bbafd73b EJ |
4640 | cutoff = (subfacet->facet->xout.slow & (SLOW_CFM | SLOW_BFD | SLOW_LACP |
4641 | | SLOW_STP) | |
625b0720 BP |
4642 | ? special_cutoff |
4643 | : normal_cutoff); | |
b0f7b9b5 | 4644 | if (subfacet->used < cutoff) { |
6a7e895f | 4645 | if (subfacet->path != SF_NOT_INSTALLED) { |
b99d3cee | 4646 | batch[n_batch++] = subfacet; |
1d85f9e5 | 4647 | if (n_batch >= SUBFACET_DESTROY_MAX_BATCH) { |
04d08d54 | 4648 | subfacet_destroy_batch(backer, batch, n_batch); |
b99d3cee BP |
4649 | n_batch = 0; |
4650 | } | |
4651 | } else { | |
4652 | subfacet_destroy(subfacet); | |
4653 | } | |
abe529af BP |
4654 | } |
4655 | } | |
b99d3cee BP |
4656 | |
4657 | if (n_batch > 0) { | |
04d08d54 | 4658 | subfacet_destroy_batch(backer, batch, n_batch); |
b99d3cee | 4659 | } |
abe529af BP |
4660 | } |
4661 | ||
4662 | /* If 'rule' is an OpenFlow rule that has expired according to OpenFlow rules, | 
4663 | * then delete it entirely. */ | |
4664 | static void | |
4665 | rule_expire(struct rule_dpif *rule) | |
4666 | { | |
abe529af BP |
4667 | struct facet *facet, *next_facet; |
4668 | long long int now; | |
4669 | uint8_t reason; | |
4670 | ||
e2a3d183 BP |
4671 | if (rule->up.pending) { |
4672 | /* We'll have to expire it later. */ | |
4673 | return; | |
4674 | } | |
4675 | ||
abe529af BP |
4676 | /* Has 'rule' expired? */ |
4677 | now = time_msec(); | |
4678 | if (rule->up.hard_timeout | |
308881af | 4679 | && now > rule->up.modified + rule->up.hard_timeout * 1000) { |
abe529af | 4680 | reason = OFPRR_HARD_TIMEOUT; |
8ea6ac3e | 4681 | } else if (rule->up.idle_timeout |
1745cd08 | 4682 | && now > rule->up.used + rule->up.idle_timeout * 1000) { |
abe529af BP |
4683 | reason = OFPRR_IDLE_TIMEOUT; |
4684 | } else { | |
4685 | return; | |
4686 | } | |
4687 | ||
4688 | COVERAGE_INC(ofproto_dpif_expired); | |
4689 | ||
4690 | /* Update stats. (This is a no-op if the rule expired due to an idle | |
4691 | * timeout, because that only happens when the rule has no facets left.) */ | |
4692 | LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) { | |
15baa734 | 4693 | facet_remove(facet); |
abe529af BP |
4694 | } |
4695 | ||
4696 | /* Get rid of the rule. */ | |
4697 | ofproto_rule_expire(&rule->up, reason); | |
4698 | } | |
4699 | \f | |
4700 | /* Facets. */ | |
4701 | ||
4dff9097 | 4702 | /* Creates and returns a new facet based on 'miss'. |
abe529af BP |
4703 | * |
4704 | * The caller must already have determined that no facet with an identical | |
4dff9097 | 4705 | * 'miss->flow' exists in 'miss->ofproto'. |
f3827897 | 4706 | * |
bcd2633a JP |
4707 | * 'rule' and 'xout' must have been created based on 'miss'. |
4708 | * | |
4709 | * 'facet''s statistics are initialized based on 'stats'. | 
2b459b83 | 4710 | * |
b0f7b9b5 BP |
4711 | * The facet will initially have no subfacets. The caller should create (at |
4712 | * least) one subfacet with subfacet_create(). */ | |
abe529af | 4713 | static struct facet * |
bcd2633a JP |
4714 | facet_create(const struct flow_miss *miss, struct rule_dpif *rule, |
4715 | struct xlate_out *xout, struct dpif_flow_stats *stats) | |
abe529af | 4716 | { |
4dff9097 | 4717 | struct ofproto_dpif *ofproto = miss->ofproto; |
abe529af | 4718 | struct facet *facet; |
bcd2633a | 4719 | struct match match; |
abe529af BP |
4720 | |
4721 | facet = xzalloc(sizeof *facet); | |
bcd2633a JP |
4722 | facet->packet_count = facet->prev_packet_count = stats->n_packets; |
4723 | facet->byte_count = facet->prev_byte_count = stats->n_bytes; | |
4724 | facet->tcp_flags = stats->tcp_flags; | |
4725 | facet->used = stats->used; | |
4dff9097 EJ |
4726 | facet->flow = miss->flow; |
4727 | facet->initial_vals = miss->initial_vals; | |
4dff9097 | 4728 | facet->learn_rl = time_msec() + 500; |
bcd2633a | 4729 | facet->rule = rule; |
4dff9097 | 4730 | |
4dff9097 | 4731 | list_push_back(&facet->rule->facets, &facet->list_node); |
b0f7b9b5 | 4732 | list_init(&facet->subfacets); |
abe529af BP |
4733 | netflow_flow_init(&facet->nf_flow); |
4734 | netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used); | |
4735 | ||
bcd2633a JP |
4736 | xlate_out_copy(&facet->xout, xout); |
4737 | ||
4738 | match_init(&match, &facet->flow, &facet->xout.wc); | |
4739 | cls_rule_init(&facet->cr, &match, OFP_DEFAULT_PRIORITY); | |
4740 | classifier_insert(&ofproto->facets, &facet->cr); | |
4741 | ||
bbafd73b | 4742 | facet->nf_flow.output_iface = facet->xout.nf_output_iface; |
6cf474d7 | 4743 | |
abe529af BP |
4744 | return facet; |
4745 | } | |
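/* Illustrative sketch, not part of the original file: the caller pattern
 * described above -- create the facet, then attach at least one subfacet for
 * the datapath flow that triggered 'miss'.  The helper name is an assumption
 * made for the example. */
static struct facet *
facet_create_with_subfacet_example(struct flow_miss *miss,
                                   struct rule_dpif *rule,
                                   struct xlate_out *xout,
                                   struct dpif_flow_stats *stats)
{
    struct facet *facet = facet_create(miss, rule, xout, stats);

    subfacet_create(facet, miss, time_msec());
    return facet;
}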
4746 | ||
4747 | static void | |
4748 | facet_free(struct facet *facet) | |
4749 | { | |
4dff9097 | 4750 | if (facet) { |
bbafd73b | 4751 | xlate_out_uninit(&facet->xout); |
4dff9097 EJ |
4752 | free(facet); |
4753 | } | |
abe529af BP |
4754 | } |
4755 | ||
3d9e05f8 | 4756 | /* Executes, within 'ofproto', the 'actions_len' bytes of datapath actions in | 
0a740f48 | 4757 | * 'odp_actions' on 'packet', which arrived on the flow's input port. */ | 
3d9e05f8 BP |
4758 | static bool |
4759 | execute_odp_actions(struct ofproto_dpif *ofproto, const struct flow *flow, | |
4760 | const struct nlattr *odp_actions, size_t actions_len, | |
4761 | struct ofpbuf *packet) | |
4762 | { | |
4763 | struct odputil_keybuf keybuf; | |
4764 | struct ofpbuf key; | |
4765 | int error; | |
4766 | ||
6ff686f2 | 4767 | ofpbuf_use_stack(&key, &keybuf, sizeof keybuf); |
e1b1d06a JP |
4768 | odp_flow_key_from_flow(&key, flow, |
4769 | ofp_port_to_odp_port(ofproto, flow->in_port)); | |
80e5eed9 | 4770 | |
acf60855 | 4771 | error = dpif_execute(ofproto->backer->dpif, key.data, key.size, |
6ff686f2 | 4772 | odp_actions, actions_len, packet); |
6ff686f2 | 4773 | return !error; |
abe529af BP |
4774 | } |
4775 | ||
bcd2633a | 4776 | /* Remove 'facet' from its ofproto and free up the associated memory: |
abe529af BP |
4777 | * |
4778 | * - If 'facet' was installed in the datapath, uninstalls it and updates its | |
b0f7b9b5 | 4779 | * rule's statistics, via subfacet_uninstall(). |
abe529af BP |
4780 | * |
4781 | * - Removes 'facet' from its rule and from ofproto->facets. | |
4782 | */ | |
4783 | static void | |
15baa734 | 4784 | facet_remove(struct facet *facet) |
abe529af | 4785 | { |
15baa734 | 4786 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto); |
b0f7b9b5 BP |
4787 | struct subfacet *subfacet, *next_subfacet; |
4788 | ||
cb22974d | 4789 | ovs_assert(!list_is_empty(&facet->subfacets)); |
551a2f6c BP |
4790 | |
4791 | /* First uninstall all of the subfacets to get final statistics. */ | |
4792 | LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) { | |
15baa734 | 4793 | subfacet_uninstall(subfacet); |
551a2f6c BP |
4794 | } |
4795 | ||
4796 | /* Flush the final stats to the rule. | |
4797 | * | |
4798 | * This might require us to have at least one subfacet around so that we | |
4799 | * can use its actions for accounting in facet_account(), which is why we | |
4800 | * have uninstalled but not yet destroyed the subfacets. */ | |
15baa734 | 4801 | facet_flush_stats(facet); |
551a2f6c BP |
4802 | |
4803 | /* Now we're really all done so destroy everything. */ | |
b0f7b9b5 BP |
4804 | LIST_FOR_EACH_SAFE (subfacet, next_subfacet, list_node, |
4805 | &facet->subfacets) { | |
15baa734 | 4806 | subfacet_destroy__(subfacet); |
b0f7b9b5 | 4807 | } |
bcd2633a JP |
4808 | classifier_remove(&ofproto->facets, &facet->cr); |
4809 | cls_rule_destroy(&facet->cr); | |
abe529af BP |
4810 | list_remove(&facet->list_node); |
4811 | facet_free(facet); | |
4812 | } | |
4813 | ||
3de9590b BP |
4814 | /* Feed information from 'facet' back into the learning table to keep it in |
4815 | * sync with what is actually flowing through the datapath. */ | |
abe529af | 4816 | static void |
3de9590b | 4817 | facet_learn(struct facet *facet) |
abe529af | 4818 | { |
62bd7349 | 4819 | long long int now = time_msec(); |
abe529af | 4820 | |
bbafd73b | 4821 | if (!facet->xout.has_fin_timeout && now < facet->learn_rl) { |
6cf474d7 EJ |
4822 | return; |
4823 | } | |
4824 | ||
62bd7349 | 4825 | facet->learn_rl = now + 500; |
6cf474d7 | 4826 | |
bbafd73b EJ |
4827 | if (!facet->xout.has_learn |
4828 | && !facet->xout.has_normal | |
4829 | && (!facet->xout.has_fin_timeout | |
3de9590b | 4830 | || !(facet->tcp_flags & (TCP_FIN | TCP_RST)))) { |
abe529af BP |
4831 | return; |
4832 | } | |
abe529af | 4833 | |
9dfb1f78 | 4834 | facet_push_stats(facet, true); |
3de9590b BP |
4835 | } |
4836 | ||
4837 | static void | |
4838 | facet_account(struct facet *facet) | |
4839 | { | |
4840 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto); | |
3de9590b BP |
4841 | const struct nlattr *a; |
4842 | unsigned int left; | |
4843 | ovs_be16 vlan_tci; | |
4844 | uint64_t n_bytes; | |
abe529af | 4845 | |
bbafd73b | 4846 | if (!facet->xout.has_normal || !ofproto->has_bonded_bundles) { |
abe529af BP |
4847 | return; |
4848 | } | |
3de9590b | 4849 | n_bytes = facet->byte_count - facet->accounted_bytes; |
d78be13b BP |
4850 | |
4851 | /* This loop feeds byte counters to bond_account() for rebalancing to use | |
4852 | * as a basis. We also need to track the actual VLAN on which the packet | |
4853 | * is going to be sent to ensure that it matches the one passed to | |
4854 | * bond_choose_output_slave(). (Otherwise, we will account to the wrong | |
b95fc6ba BP |
4855 | * hash bucket.) |
4856 | * | |
4857 | * We use the actions from an arbitrary subfacet because they should all | |
4858 | * be equally valid for our purpose. */ | |
d78be13b | 4859 | vlan_tci = facet->flow.vlan_tci; |
bbafd73b EJ |
4860 | NL_ATTR_FOR_EACH_UNSAFE (a, left, facet->xout.odp_actions.data, |
4861 | facet->xout.odp_actions.size) { | |
fea393b1 | 4862 | const struct ovs_action_push_vlan *vlan; |
d78be13b | 4863 | struct ofport_dpif *port; |
abe529af | 4864 | |
d78be13b | 4865 | switch (nl_attr_type(a)) { |
df2c07f4 | 4866 | case OVS_ACTION_ATTR_OUTPUT: |
abe529af BP |
4867 | port = get_odp_port(ofproto, nl_attr_get_u32(a)); |
4868 | if (port && port->bundle && port->bundle->bond) { | |
d78be13b | 4869 | bond_account(port->bundle->bond, &facet->flow, |
dc155bff | 4870 | vlan_tci_to_vid(vlan_tci), n_bytes); |
abe529af | 4871 | } |
d78be13b BP |
4872 | break; |
4873 | ||
fea393b1 BP |
4874 | case OVS_ACTION_ATTR_POP_VLAN: |
4875 | vlan_tci = htons(0); | |
d78be13b BP |
4876 | break; |
4877 | ||
fea393b1 BP |
4878 | case OVS_ACTION_ATTR_PUSH_VLAN: |
4879 | vlan = nl_attr_get(a); | |
4880 | vlan_tci = vlan->vlan_tci; | |
d78be13b | 4881 | break; |
abe529af BP |
4882 | } |
4883 | } | |
4884 | } | |
4885 | ||
abe529af BP |
4886 | /* Returns true if the only action for 'facet' is to send to the controller. |
4887 | * (We don't report NetFlow expiration messages for such facets because they | |
4888 | * are just part of the control logic for the network, not real traffic). */ | |
4889 | static bool | |
4890 | facet_is_controller_flow(struct facet *facet) | |
4891 | { | |
f25d0cf3 BP |
4892 | if (facet) { |
4893 | const struct rule *rule = &facet->rule->up; | |
4894 | const struct ofpact *ofpacts = rule->ofpacts; | |
4895 | size_t ofpacts_len = rule->ofpacts_len; | |
4896 | ||
dd30ff28 BP |
4897 | if (ofpacts_len > 0 && |
4898 | ofpacts->type == OFPACT_CONTROLLER && | |
f25d0cf3 BP |
4899 | ofpact_next(ofpacts) >= ofpact_end(ofpacts, ofpacts_len)) { |
4900 | return true; | |
4901 | } | |
4902 | } | |
4903 | return false; | |
abe529af BP |
4904 | } |
4905 | ||
4906 | /* Folds all of 'facet''s statistics into its rule. Also updates the | |
4907 | * accounting ofhook and emits a NetFlow expiration if appropriate. All of | |
4908 | * 'facet''s statistics in the datapath should have been zeroed and folded into | |
4909 | * its packet and byte counts before this function is called. */ | |
4910 | static void | |
15baa734 | 4911 | facet_flush_stats(struct facet *facet) |
abe529af | 4912 | { |
15baa734 | 4913 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto); |
b0f7b9b5 BP |
4914 | struct subfacet *subfacet; |
4915 | ||
4916 | LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) { | |
cb22974d BP |
4917 | ovs_assert(!subfacet->dp_byte_count); |
4918 | ovs_assert(!subfacet->dp_packet_count); | |
b0f7b9b5 | 4919 | } |
abe529af | 4920 | |
9dfb1f78 | 4921 | facet_push_stats(facet, false); |
3de9590b BP |
4922 | if (facet->accounted_bytes < facet->byte_count) { |
4923 | facet_account(facet); | |
4924 | facet->accounted_bytes = facet->byte_count; | |
4925 | } | |
abe529af BP |
4926 | |
4927 | if (ofproto->netflow && !facet_is_controller_flow(facet)) { | |
4928 | struct ofexpired expired; | |
4929 | expired.flow = facet->flow; | |
4930 | expired.packet_count = facet->packet_count; | |
4931 | expired.byte_count = facet->byte_count; | |
4932 | expired.used = facet->used; | |
4933 | netflow_expire(ofproto->netflow, &facet->nf_flow, &expired); | |
4934 | } | |
4935 | ||
abe529af BP |
4936 | /* Reset counters to prevent double counting if 'facet' ever gets |
4937 | * reinstalled. */ | |
bbb5d219 | 4938 | facet_reset_counters(facet); |
abe529af BP |
4939 | |
4940 | netflow_flow_clear(&facet->nf_flow); | |
0e553d9c | 4941 | facet->tcp_flags = 0; |
abe529af BP |
4942 | } |
4943 | ||
bcd2633a JP |
4944 | /* Searches 'ofproto''s table of facets for one which would be responsible for |
4945 | * 'flow'. Returns it if found, otherwise a null pointer. | |
2b459b83 | 4946 | * |
abe529af BP |
4947 | * The returned facet might need revalidation; use facet_lookup_valid() |
4948 | * instead if that is important. */ | |
4949 | static struct facet * | |
bcd2633a | 4950 | facet_find(struct ofproto_dpif *ofproto, const struct flow *flow) |
abe529af | 4951 | { |
bcd2633a JP |
4952 | struct cls_rule *cr = classifier_lookup(&ofproto->facets, flow, NULL); |
4953 | return cr ? CONTAINER_OF(cr, struct facet, cr) : NULL; | |
abe529af BP |
4954 | } |
4955 | ||
bcd2633a JP |
4956 | /* Searches 'ofproto''s table of facets for one that covers | 
4957 | * 'flow'. Returns it if found, otherwise a null pointer. | |
2b459b83 | 4958 | * |
abe529af BP |
4959 | * The returned facet is guaranteed to be valid. */ |
4960 | static struct facet * | |
bcd2633a | 4961 | facet_lookup_valid(struct ofproto_dpif *ofproto, const struct flow *flow) |
abe529af | 4962 | { |
c57b2226 | 4963 | struct facet *facet; |
abe529af | 4964 | |
bcd2633a | 4965 | facet = facet_find(ofproto, flow); |
abe529af | 4966 | if (facet |
2cc3c58e EJ |
4967 | && (ofproto->backer->need_revalidate |
4968 | || tag_set_intersects(&ofproto->backer->revalidate_set, | |
bbafd73b | 4969 | facet->xout.tags)) |
5bf64ade | 4970 | && !facet_revalidate(facet)) { |
0305ce1f | 4971 | return NULL; |
abe529af BP |
4972 | } |
4973 | ||
4974 | return facet; | |
4975 | } | |
4976 | ||
6814e51f BP |
4977 | static bool |
4978 | facet_check_consistency(struct facet *facet) | |
4979 | { | |
4980 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 15); | |
4981 | ||
4982 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto); | |
4983 | ||
bbafd73b EJ |
4984 | struct xlate_out xout; |
4985 | struct xlate_in xin; | |
050ac423 | 4986 | |
6814e51f | 4987 | struct rule_dpif *rule; |
6814e51f BP |
4988 | bool ok; |
4989 | ||
4990 | /* Check the rule for consistency. */ | |
bcd2633a | 4991 | rule = rule_dpif_lookup(ofproto, &facet->flow, NULL); |
4dff9097 EJ |
4992 | if (rule != facet->rule) { |
4993 | if (!VLOG_DROP_WARN(&rl)) { | |
4994 | struct ds s = DS_EMPTY_INITIALIZER; | |
6814e51f | 4995 | |
c53e1132 BP |
4996 | flow_format(&s, &facet->flow); |
4997 | ds_put_format(&s, ": facet associated with wrong rule (was " | |
4998 | "table=%"PRIu8",", facet->rule->up.table_id); | |
4999 | cls_rule_format(&facet->rule->up.cr, &s); | |
5000 | ds_put_format(&s, ") (should have been table=%"PRIu8",", | |
5001 | rule->up.table_id); | |
5002 | cls_rule_format(&rule->up.cr, &s); | |
c0a71f4e | 5003 | ds_put_char(&s, ')'); |
6814e51f | 5004 | |
c0a71f4e | 5005 | VLOG_WARN("%s", ds_cstr(&s)); |
c53e1132 BP |
5006 | ds_destroy(&s); |
5007 | } | |
4dff9097 | 5008 | return false; |
6814e51f BP |
5009 | } |
5010 | ||
5011 | /* Check the datapath actions for consistency. */ | |
bbafd73b EJ |
5012 | xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals, rule, |
5013 | 0, NULL); | |
5014 | xlate_actions(&xin, &xout); | |
6814e51f | 5015 | |
bbafd73b EJ |
5016 | ok = ofpbuf_equal(&facet->xout.odp_actions, &xout.odp_actions) |
5017 | && facet->xout.slow == xout.slow; | |
4dff9097 EJ |
5018 | if (!ok && !VLOG_DROP_WARN(&rl)) { |
5019 | struct ds s = DS_EMPTY_INITIALIZER; | |
c53e1132 | 5020 | |
4dff9097 EJ |
5021 | flow_format(&s, &facet->flow); |
5022 | ds_put_cstr(&s, ": inconsistency in facet"); | |
c53e1132 | 5023 | |
bbafd73b | 5024 | if (!ofpbuf_equal(&facet->xout.odp_actions, &xout.odp_actions)) { |
9616614b | 5025 | ds_put_cstr(&s, " (actions were: "); |
bbafd73b EJ |
5026 | format_odp_actions(&s, facet->xout.odp_actions.data, |
5027 | facet->xout.odp_actions.size); | |
9616614b | 5028 | ds_put_cstr(&s, ") (correct actions: "); |
bbafd73b EJ |
5029 | format_odp_actions(&s, xout.odp_actions.data, |
5030 | xout.odp_actions.size); | |
c0a71f4e | 5031 | ds_put_char(&s, ')'); |
6814e51f | 5032 | } |
4dff9097 | 5033 | |
bbafd73b EJ |
5034 | if (facet->xout.slow != xout.slow) { |
5035 | ds_put_format(&s, " slow path incorrect. should be %d", xout.slow); | |
4dff9097 EJ |
5036 | } |
5037 | ||
c0a71f4e | 5038 | VLOG_WARN("%s", ds_cstr(&s)); |
9616614b | 5039 | ds_destroy(&s); |
6814e51f | 5040 | } |
bbafd73b | 5041 | xlate_out_uninit(&xout); |
6814e51f BP |
5042 | |
5043 | return ok; | |
5044 | } | |
5045 | ||
15baa734 | 5046 | /* Re-searches the classifier for 'facet': |
abe529af BP |
5047 | * |
5048 | * - If the rule found is different from 'facet''s current rule, moves | |
5049 | * 'facet' to the new rule and recompiles its actions. | |
5050 | * | |
5051 | * - If the rule found is the same as 'facet''s current rule, leaves 'facet' | |
f231418e EJ |
5052 | * where it is and recompiles its actions anyway. |
5053 | * | |
5054 | * - If any of 'facet''s subfacets correspond to a new flow according to | |
5bf64ade EJ |
5055 | * ofproto_receive(), 'facet' is removed. |
5056 | * | |
5057 | * Returns true if 'facet' is still valid. False if 'facet' was removed. */ | |
5058 | static bool | |
15baa734 | 5059 | facet_revalidate(struct facet *facet) |
abe529af | 5060 | { |
15baa734 | 5061 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto); |
abe529af | 5062 | struct rule_dpif *new_rule; |
b0f7b9b5 | 5063 | struct subfacet *subfacet; |
bcd2633a | 5064 | struct flow_wildcards wc; |
bbafd73b EJ |
5065 | struct xlate_out xout; |
5066 | struct xlate_in xin; | |
abe529af BP |
5067 | |
5068 | COVERAGE_INC(facet_revalidate); | |
5069 | ||
f231418e EJ |
5070 | /* Check that child subfacets still correspond to this facet. Tunnel |
5071 | * configuration changes could cause a subfacet's OpenFlow in_port to | |
5072 | * change. */ | |
5073 | LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) { | |
5074 | struct ofproto_dpif *recv_ofproto; | |
5075 | struct flow recv_flow; | |
5076 | int error; | |
5077 | ||
5078 | error = ofproto_receive(ofproto->backer, NULL, subfacet->key, | |
5079 | subfacet->key_len, &recv_flow, NULL, | |
5080 | &recv_ofproto, NULL, NULL); | |
5081 | if (error | |
5082 | || recv_ofproto != ofproto | |
bcd2633a | 5083 | || facet != facet_find(ofproto, &recv_flow)) { |
f231418e | 5084 | facet_remove(facet); |
5bf64ade | 5085 | return false; |
f231418e EJ |
5086 | } |
5087 | } | |
5088 | ||
bcd2633a JP |
5089 | flow_wildcards_init_catchall(&wc); |
5090 | new_rule = rule_dpif_lookup(ofproto, &facet->flow, &wc); | |
abe529af | 5091 | |
df2c07f4 | 5092 | /* Calculate new datapath actions. |
abe529af BP |
5093 | * |
5094 | * We do not modify any 'facet' state yet, because we might need to, e.g., | |
5095 | * emit a NetFlow expiration and, if so, we need to have the old state | |
5096 | * around to properly compose it. */ | |
bbafd73b EJ |
5097 | xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals, new_rule, |
5098 | 0, NULL); | |
5099 | xlate_actions(&xin, &xout); | |
bcd2633a | 5100 | flow_wildcards_or(&xout.wc, &xout.wc, &wc); |
4dff9097 EJ |
5101 | |
5102 | /* A facet's slow path reason should only change under dramatic | |
5103 | * circumstances. Rather than try to update everything, it's simpler to | |
bcd2633a JP |
5104 | * remove the facet and start over. |
5105 | * | |
5106 | * More importantly, if a facet's wildcards change, it will be relatively | |
5107 | * difficult to figure out if its subfacets still belong to it, and if not | |
5108 | * which facet they may belong to. Again, to avoid the complexity, we | |
5109 | * simply give up instead. */ | |
5110 | if (facet->xout.slow != xout.slow | |
5111 | || memcmp(&facet->xout.wc, &xout.wc, sizeof xout.wc)) { | |
4dff9097 | 5112 | facet_remove(facet); |
bbafd73b | 5113 | xlate_out_uninit(&xout); |
4dff9097 EJ |
5114 | return false; |
5115 | } | |
6a7e895f | 5116 | |
bbafd73b | 5117 | if (!ofpbuf_equal(&facet->xout.odp_actions, &xout.odp_actions)) { |
4dff9097 EJ |
5118 | LIST_FOR_EACH(subfacet, list_node, &facet->subfacets) { |
5119 | if (subfacet->path == SF_FAST_PATH) { | |
5120 | struct dpif_flow_stats stats; | |
b95fc6ba | 5121 | |
bbafd73b | 5122 | subfacet_install(subfacet, &xout.odp_actions, &stats); |
4dff9097 | 5123 | subfacet_update_stats(subfacet, &stats); |
b95fc6ba | 5124 | } |
abe529af | 5125 | } |
b95fc6ba | 5126 | |
15baa734 | 5127 | facet_flush_stats(facet); |
4dff9097 | 5128 | |
bbafd73b EJ |
5129 | ofpbuf_clear(&facet->xout.odp_actions); |
5130 | ofpbuf_put(&facet->xout.odp_actions, xout.odp_actions.data, | |
5131 | xout.odp_actions.size); | |
abe529af BP |
5132 | } |
5133 | ||
5134 | /* Update 'facet' now that we've taken care of all the old state. */ | |
bbafd73b EJ |
5135 | facet->xout.tags = xout.tags; |
5136 | facet->xout.slow = xout.slow; | |
5137 | facet->xout.has_learn = xout.has_learn; | |
5138 | facet->xout.has_normal = xout.has_normal; | |
5139 | facet->xout.has_fin_timeout = xout.has_fin_timeout; | |
5140 | facet->xout.nf_output_iface = xout.nf_output_iface; | |
5141 | facet->xout.mirrors = xout.mirrors; | |
5142 | facet->nf_flow.output_iface = facet->xout.nf_output_iface; | |
6a7e895f | 5143 | |
abe529af BP |
5144 | if (facet->rule != new_rule) { |
5145 | COVERAGE_INC(facet_changed_rule); | |
5146 | list_remove(&facet->list_node); | |
5147 | list_push_back(&new_rule->facets, &facet->list_node); | |
5148 | facet->rule = new_rule; | |
5149 | facet->used = new_rule->up.created; | |
9d24de3b | 5150 | facet->prev_used = facet->used; |
abe529af | 5151 | } |
5bf64ade | 5152 | |
bbafd73b | 5153 | xlate_out_uninit(&xout); |
5bf64ade | 5154 | return true; |
abe529af BP |
5155 | } |
5156 | ||
bbb5d219 EJ |
5157 | static void |
5158 | facet_reset_counters(struct facet *facet) | |
5159 | { | |
5160 | facet->packet_count = 0; | |
5161 | facet->byte_count = 0; | |
9d24de3b JP |
5162 | facet->prev_packet_count = 0; |
5163 | facet->prev_byte_count = 0; | |
bbb5d219 EJ |
5164 | facet->accounted_bytes = 0; |
5165 | } | |
5166 | ||
abe529af | 5167 | static void |
9dfb1f78 | 5168 | facet_push_stats(struct facet *facet, bool may_learn) |
abe529af | 5169 | { |
112bc5f4 | 5170 | struct dpif_flow_stats stats; |
abe529af | 5171 | |
cb22974d BP |
5172 | ovs_assert(facet->packet_count >= facet->prev_packet_count); |
5173 | ovs_assert(facet->byte_count >= facet->prev_byte_count); | |
5174 | ovs_assert(facet->used >= facet->prev_used); | |
abe529af | 5175 | |
112bc5f4 BP |
5176 | stats.n_packets = facet->packet_count - facet->prev_packet_count; |
5177 | stats.n_bytes = facet->byte_count - facet->prev_byte_count; | |
5178 | stats.used = facet->used; | |
9dfb1f78 EJ |
5179 | stats.tcp_flags = facet->tcp_flags; |
5180 | ||
5181 | if (may_learn || stats.n_packets || facet->used > facet->prev_used) { | |
5182 | struct ofproto_dpif *ofproto = | |
5183 | ofproto_dpif_cast(facet->rule->up.ofproto); | |
5184 | ||
5185 | struct ofport_dpif *in_port; | |
5186 | struct xlate_in xin; | |
abe529af | 5187 | |
9d24de3b JP |
5188 | facet->prev_packet_count = facet->packet_count; |
5189 | facet->prev_byte_count = facet->byte_count; | |
5190 | facet->prev_used = facet->used; | |
abe529af | 5191 | |
9dfb1f78 EJ |
5192 | in_port = get_ofp_port(ofproto, facet->flow.in_port); |
5193 | if (in_port && in_port->tnl_port) { | |
5194 | netdev_vport_inc_rx(in_port->up.netdev, &stats); | |
5195 | } | |
9d24de3b | 5196 | |
9dfb1f78 EJ |
5197 | rule_credit_stats(facet->rule, &stats); |
5198 | netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, | |
5199 | facet->used); | |
5200 | netflow_flow_update_flags(&facet->nf_flow, facet->tcp_flags); | |
5201 | update_mirror_stats(ofproto, facet->xout.mirrors, stats.n_packets, | |
bbafd73b | 5202 | stats.n_bytes); |
9dfb1f78 EJ |
5203 | |
5204 | xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals, | |
5205 | facet->rule, stats.tcp_flags, NULL); | |
5206 | xin.resubmit_stats = &stats; | |
5207 | xin.may_learn = may_learn; | |
5208 | xlate_actions_for_side_effects(&xin); | |
abe529af BP |
5209 | } |
5210 | } | |
5211 | ||
8844e035 | 5212 | static void |
4464589e | 5213 | push_all_stats__(bool run_fast) |
8844e035 EJ |
5214 | { |
5215 | static long long int rl = LLONG_MIN; | |
5216 | struct ofproto_dpif *ofproto; | |
5217 | ||
5218 | if (time_msec() < rl) { | |
5219 | return; | |
5220 | } | |
5221 | ||
5222 | HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) { | |
bcd2633a | 5223 | struct cls_cursor cursor; |
8844e035 EJ |
5224 | struct facet *facet; |
5225 | ||
bcd2633a JP |
5226 | cls_cursor_init(&cursor, &ofproto->facets, NULL); |
5227 | CLS_CURSOR_FOR_EACH (facet, cr, &cursor) { | |
9dfb1f78 | 5228 | facet_push_stats(facet, false); |
4464589e EJ |
5229 | if (run_fast) { |
5230 | run_fast_rl(); | |
5231 | } | |
8844e035 EJ |
5232 | } |
5233 | } | |
5234 | ||
5235 | rl = time_msec() + 100; | |
5236 | } | |
5237 | ||
4464589e EJ |
5238 | static void |
5239 | push_all_stats(void) | |
5240 | { | |
5241 | push_all_stats__(true); | |
5242 | } | |
5243 | ||
abe529af | 5244 | static void |
112bc5f4 | 5245 | rule_credit_stats(struct rule_dpif *rule, const struct dpif_flow_stats *stats) |
abe529af | 5246 | { |
112bc5f4 BP |
5247 | rule->packet_count += stats->n_packets; |
5248 | rule->byte_count += stats->n_bytes; | |
5249 | ofproto_rule_update_used(&rule->up, stats->used); | |
abe529af | 5250 | } |
abe529af | 5251 | \f |
b0f7b9b5 BP |
5252 | /* Subfacets. */ |
5253 | ||
5254 | static struct subfacet * | |
04d08d54 EJ |
5255 | subfacet_find(struct dpif_backer *backer, const struct nlattr *key, |
5256 | size_t key_len, uint32_t key_hash) | |
b0f7b9b5 BP |
5257 | { |
5258 | struct subfacet *subfacet; | |
5259 | ||
5260 | HMAP_FOR_EACH_WITH_HASH (subfacet, hmap_node, key_hash, | |
04d08d54 | 5261 | &backer->subfacets) { |
9566abf9 EJ |
5262 | if (subfacet->key_len == key_len |
5263 | && !memcmp(key, subfacet->key, key_len)) { | |
b0f7b9b5 BP |
5264 | return subfacet; |
5265 | } | |
5266 | } | |
5267 | ||
5268 | return NULL; | |
5269 | } | |
5270 | ||
5271 | /* Searches 'facet' for a subfacet matching the 'key_fitness', 'key', and | 
a088a1ff JP |
5272 | * 'key_len' members of 'miss'. Returns the | 
5273 | * existing subfacet if there is one, otherwise creates and returns a | |
4dff9097 | 5274 | * new subfacet. */ |
b0f7b9b5 | 5275 | static struct subfacet * |
a088a1ff JP |
5276 | subfacet_create(struct facet *facet, struct flow_miss *miss, |
5277 | long long int now) | |
b0f7b9b5 | 5278 | { |
04d08d54 | 5279 | struct dpif_backer *backer = miss->ofproto->backer; |
a088a1ff JP |
5280 | enum odp_key_fitness key_fitness = miss->key_fitness; |
5281 | const struct nlattr *key = miss->key; | |
5282 | size_t key_len = miss->key_len; | |
5283 | uint32_t key_hash; | |
b0f7b9b5 BP |
5284 | struct subfacet *subfacet; |
5285 | ||
a088a1ff JP |
5286 | key_hash = odp_flow_key_hash(key, key_len); |
5287 | ||
3b145dd7 BP |
5288 | if (list_is_empty(&facet->subfacets)) { |
5289 | subfacet = &facet->one_subfacet; | |
5290 | } else { | |
04d08d54 | 5291 | subfacet = subfacet_find(backer, key, key_len, key_hash); |
3b145dd7 BP |
5292 | if (subfacet) { |
5293 | if (subfacet->facet == facet) { | |
5294 | return subfacet; | |
5295 | } | |
5296 | ||
5297 | /* This shouldn't happen. */ | |
5298 | VLOG_ERR_RL(&rl, "subfacet with wrong facet"); | |
5299 | subfacet_destroy(subfacet); | |
b0f7b9b5 BP |
5300 | } |
5301 | ||
3b145dd7 | 5302 | subfacet = xmalloc(sizeof *subfacet); |
b0f7b9b5 BP |
5303 | } |
5304 | ||
04d08d54 | 5305 | hmap_insert(&backer->subfacets, &subfacet->hmap_node, key_hash); |
b0f7b9b5 BP |
5306 | list_push_back(&facet->subfacets, &subfacet->list_node); |
5307 | subfacet->facet = facet; | |
b0f7b9b5 | 5308 | subfacet->key_fitness = key_fitness; |
9566abf9 EJ |
5309 | subfacet->key = xmemdup(key, key_len); |
5310 | subfacet->key_len = key_len; | |
459b16a1 | 5311 | subfacet->used = now; |
655ab909 | 5312 | subfacet->created = now; |
26cd7e34 BP |
5313 | subfacet->dp_packet_count = 0; |
5314 | subfacet->dp_byte_count = 0; | |
6a7e895f | 5315 | subfacet->path = SF_NOT_INSTALLED; |
04d08d54 | 5316 | subfacet->backer = backer; |
b0f7b9b5 | 5317 | |
04d08d54 | 5318 | backer->subfacet_add_count++; |
b0f7b9b5 BP |
5319 | return subfacet; |
5320 | } | |
5321 | ||
b0f7b9b5 BP |
5322 | /* Uninstalls 'subfacet' from the datapath, if it is installed, removes it from |
5323 | * its facet within 'ofproto', and frees it. */ | |
5324 | static void | |
15baa734 | 5325 | subfacet_destroy__(struct subfacet *subfacet) |
b0f7b9b5 | 5326 | { |
15baa734 BP |
5327 | struct facet *facet = subfacet->facet; |
5328 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto); | |
5329 | ||
655ab909 | 5330 | /* Update ofproto stats before uninstalling the subfacet. */ | 
dc54ef36 | 5331 | ofproto->backer->subfacet_del_count++; |
655ab909 | 5332 | |
15baa734 | 5333 | subfacet_uninstall(subfacet); |
04d08d54 | 5334 | hmap_remove(&subfacet->backer->subfacets, &subfacet->hmap_node); |
b0f7b9b5 BP |
5335 | list_remove(&subfacet->list_node); |
5336 | free(subfacet->key); | |
26cd7e34 BP |
5337 | if (subfacet != &facet->one_subfacet) { |
5338 | free(subfacet); | |
5339 | } | |
b0f7b9b5 BP |
5340 | } |
5341 | ||
5342 | /* Destroys 'subfacet', as with subfacet_destroy__(), and then if this was the | |
5343 | * last remaining subfacet in its facet destroys the facet too. */ | |
5344 | static void | |
15baa734 | 5345 | subfacet_destroy(struct subfacet *subfacet) |
b0f7b9b5 BP |
5346 | { |
5347 | struct facet *facet = subfacet->facet; | |
5348 | ||
551a2f6c BP |
5349 | if (list_is_singleton(&facet->subfacets)) { |
5350 | /* facet_remove() needs at least one subfacet (it will remove it). */ | |
15baa734 | 5351 | facet_remove(facet); |
551a2f6c | 5352 | } else { |
15baa734 | 5353 | subfacet_destroy__(subfacet); |
b0f7b9b5 BP |
5354 | } |
5355 | } | |
5356 | ||
1d85f9e5 | 5357 | static void |
04d08d54 | 5358 | subfacet_destroy_batch(struct dpif_backer *backer, |
1d85f9e5 JP |
5359 | struct subfacet **subfacets, int n) |
5360 | { | |
1d85f9e5 JP |
5361 | struct dpif_op ops[SUBFACET_DESTROY_MAX_BATCH]; |
5362 | struct dpif_op *opsp[SUBFACET_DESTROY_MAX_BATCH]; | |
1d85f9e5 JP |
5363 | struct dpif_flow_stats stats[SUBFACET_DESTROY_MAX_BATCH]; |
5364 | int i; | |
5365 | ||
5366 | for (i = 0; i < n; i++) { | |
5367 | ops[i].type = DPIF_OP_FLOW_DEL; | |
9566abf9 EJ |
5368 | ops[i].u.flow_del.key = subfacets[i]->key; |
5369 | ops[i].u.flow_del.key_len = subfacets[i]->key_len; | |
1d85f9e5 JP |
5370 | ops[i].u.flow_del.stats = &stats[i]; |
5371 | opsp[i] = &ops[i]; | |
5372 | } | |
5373 | ||
04d08d54 | 5374 | dpif_operate(backer->dpif, opsp, n); |
1d85f9e5 JP |
5375 | for (i = 0; i < n; i++) { |
5376 | subfacet_reset_dp_stats(subfacets[i], &stats[i]); | |
5377 | subfacets[i]->path = SF_NOT_INSTALLED; | |
5378 | subfacet_destroy(subfacets[i]); | |
8fa4d1d0 | 5379 | run_fast_rl(); |
1d85f9e5 JP |
5380 | } |
5381 | } | |
5382 | ||
b0f7b9b5 BP |
5383 | /* Updates 'subfacet''s datapath flow, setting its actions to the datapath | 
5384 | * actions in 'odp_actions'. If 'stats' is non-null, statistics counters | 
5385 | * in the datapath will be zeroed and 'stats' will be updated with traffic new | |
5386 | * since 'subfacet' was last updated. | |
5387 | * | |
5388 | * Returns 0 if successful, otherwise a positive errno value. */ | |
5389 | static int | |
4dff9097 EJ |
5390 | subfacet_install(struct subfacet *subfacet, const struct ofpbuf *odp_actions, |
5391 | struct dpif_flow_stats *stats) | |
b0f7b9b5 | 5392 | { |
15baa734 BP |
5393 | struct facet *facet = subfacet->facet; |
5394 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto); | |
bbafd73b | 5395 | enum subfacet_path path = facet->xout.slow ? SF_SLOW_PATH : SF_FAST_PATH; |
4dff9097 EJ |
5396 | const struct nlattr *actions = odp_actions->data; |
5397 | size_t actions_len = odp_actions->size; | |
5398 | ||
6a7e895f | 5399 | uint64_t slow_path_stub[128 / 8]; |
b0f7b9b5 | 5400 | enum dpif_flow_put_flags flags; |
b0f7b9b5 BP |
5401 | int ret; |
5402 | ||
5403 | flags = DPIF_FP_CREATE | DPIF_FP_MODIFY; | |
5404 | if (stats) { | |
5405 | flags |= DPIF_FP_ZERO_STATS; | |
5406 | } | |
5407 | ||
6a7e895f | 5408 | if (path == SF_SLOW_PATH) { |
bbafd73b | 5409 | compose_slow_path(ofproto, &facet->flow, facet->xout.slow, |
6a7e895f BP |
5410 | slow_path_stub, sizeof slow_path_stub, |
5411 | &actions, &actions_len); | |
5412 | } | |
5413 | ||
04d08d54 | 5414 | ret = dpif_flow_put(subfacet->backer->dpif, flags, subfacet->key, |
9566abf9 | 5415 | subfacet->key_len, actions, actions_len, stats); |
b0f7b9b5 BP |
5416 | |
5417 | if (stats) { | |
5418 | subfacet_reset_dp_stats(subfacet, stats); | |
5419 | } | |
5420 | ||
6a7e895f BP |
5421 | if (!ret) { |
5422 | subfacet->path = path; | |
5423 | } | |
b0f7b9b5 BP |
5424 | return ret; |
5425 | } | |
5426 | ||
5427 | /* If 'subfacet' is installed in the datapath, uninstalls it. */ | |
5428 | static void | |
15baa734 | 5429 | subfacet_uninstall(struct subfacet *subfacet) |
b0f7b9b5 | 5430 | { |
6a7e895f | 5431 | if (subfacet->path != SF_NOT_INSTALLED) { |
15baa734 BP |
5432 | struct rule_dpif *rule = subfacet->facet->rule; |
5433 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto); | |
b0f7b9b5 | 5434 | struct dpif_flow_stats stats; |
b0f7b9b5 BP |
5435 | int error; |
5436 | ||
9566abf9 EJ |
5437 | error = dpif_flow_del(ofproto->backer->dpif, subfacet->key, |
5438 | subfacet->key_len, &stats); | |
b0f7b9b5 BP |
5439 | subfacet_reset_dp_stats(subfacet, &stats); |
5440 | if (!error) { | |
15baa734 | 5441 | subfacet_update_stats(subfacet, &stats); |
b0f7b9b5 | 5442 | } |
6a7e895f | 5443 | subfacet->path = SF_NOT_INSTALLED; |
b0f7b9b5 | 5444 | } else { |
cb22974d BP |
5445 | ovs_assert(subfacet->dp_packet_count == 0); |
5446 | ovs_assert(subfacet->dp_byte_count == 0); | |
b0f7b9b5 BP |
5447 | } |
5448 | } | |
5449 | ||
5450 | /* Resets 'subfacet''s datapath statistics counters. This should be called | |
5451 | * when 'subfacet''s statistics are cleared in the datapath. If 'stats' is | |
5452 | * non-null, it should contain the statistics returned by dpif when 'subfacet' | |
5453 | * was reset in the datapath. 'stats' will be modified to include only | |
5454 | * statistics new since 'subfacet' was last updated. */ | |
5455 | static void | |
5456 | subfacet_reset_dp_stats(struct subfacet *subfacet, | |
5457 | struct dpif_flow_stats *stats) | |
5458 | { | |
5459 | if (stats | |
5460 | && subfacet->dp_packet_count <= stats->n_packets | |
5461 | && subfacet->dp_byte_count <= stats->n_bytes) { | |
5462 | stats->n_packets -= subfacet->dp_packet_count; | |
5463 | stats->n_bytes -= subfacet->dp_byte_count; | |
5464 | } | |
5465 | ||
5466 | subfacet->dp_packet_count = 0; | |
5467 | subfacet->dp_byte_count = 0; | |
5468 | } | |
5469 | ||
b0f7b9b5 BP |
5470 | /* Folds the statistics from 'stats' into the counters in 'subfacet'. |
5471 | * | |
5472 | * Because of the meaning of a subfacet's counters, it only makes sense to do | |
5473 | * this if 'stats' are not tracked in the datapath, that is, if 'stats' | |
5474 | * represents a packet that was sent by hand or if it represents statistics | |
5475 | * that have been cleared out of the datapath. */ | |
5476 | static void | |
15baa734 | 5477 | subfacet_update_stats(struct subfacet *subfacet, |
b0f7b9b5 BP |
5478 | const struct dpif_flow_stats *stats) |
5479 | { | |
5480 | if (stats->n_packets || stats->used > subfacet->used) { | |
5481 | struct facet *facet = subfacet->facet; | |
5482 | ||
9dfb1f78 EJ |
5483 | subfacet->used = MAX(subfacet->used, stats->used); |
5484 | facet->used = MAX(facet->used, stats->used); | |
b0f7b9b5 BP |
5485 | facet->packet_count += stats->n_packets; |
5486 | facet->byte_count += stats->n_bytes; | |
0e553d9c | 5487 | facet->tcp_flags |= stats->tcp_flags; |
b0f7b9b5 BP |
5488 | } |
5489 | } | |
5490 | \f | |
abe529af BP |
5491 | /* Rules. */ |
5492 | ||
bcd2633a JP |
5493 | /* Looks up 'flow' in 'ofproto''s classifier. If 'wc' is non-null, sets in | 
5494 | * 'wc' the fields that were relevant to the lookup. */ | 
abe529af | 5495 | static struct rule_dpif * |
bcd2633a JP |
5496 | rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow, |
5497 | struct flow_wildcards *wc) | |
c57b2226 | 5498 | { |
c57b2226 BP |
5499 | struct rule_dpif *rule; |
5500 | ||
bcd2633a | 5501 | rule = rule_dpif_lookup__(ofproto, flow, wc, 0); |
c57b2226 BP |
5502 | if (rule) { |
5503 | return rule; | |
5504 | } | |
5505 | ||
c376f9a3 | 5506 | return rule_dpif_miss_rule(ofproto, flow); |
c57b2226 BP |
5507 | } |
5508 | ||
5509 | static struct rule_dpif * | |
5510 | rule_dpif_lookup__(struct ofproto_dpif *ofproto, const struct flow *flow, | |
bcd2633a | 5511 | struct flow_wildcards *wc, uint8_t table_id) |
abe529af | 5512 | { |
7257b535 BP |
5513 | struct cls_rule *cls_rule; |
5514 | struct classifier *cls; | |
7fd51d39 | 5515 | bool frag; |
7257b535 | 5516 | |
9cdaaebe BP |
5517 | if (table_id >= N_TABLES) { |
5518 | return NULL; | |
5519 | } | |
5520 | ||
d0918789 | 5521 | cls = &ofproto->up.tables[table_id].cls; |
7fd51d39 BP |
5522 | frag = (flow->nw_frag & FLOW_NW_FRAG_ANY) != 0; |
5523 | if (frag && ofproto->up.frag_handling == OFPC_FRAG_NORMAL) { | |
5524 | /* We must pretend that transport ports are unavailable. */ | |
7257b535 BP |
5525 | struct flow ofpc_normal_flow = *flow; |
5526 | ofpc_normal_flow.tp_src = htons(0); | |
5527 | ofpc_normal_flow.tp_dst = htons(0); | |
bcd2633a | 5528 | cls_rule = classifier_lookup(cls, &ofpc_normal_flow, wc); |
7fd51d39 BP |
5529 | } else if (frag && ofproto->up.frag_handling == OFPC_FRAG_DROP) { |
5530 | cls_rule = &ofproto->drop_frags_rule->up.cr; | |
bcd2633a JP |
5531 | if (wc) { |
5532 | flow_wildcards_init_exact(wc); | |
5533 | } | |
7257b535 | 5534 | } else { |
bcd2633a | 5535 | cls_rule = classifier_lookup(cls, flow, wc); |
7257b535 BP |
5536 | } |
5537 | return rule_dpif_cast(rule_from_cls_rule(cls_rule)); | |
abe529af BP |
5538 | } |
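/* Editor's note: an illustrative sketch, not part of the upstream file.  It
 * isolates the OFPC_FRAG_NORMAL branch above: IP fragments are looked up with
 * the transport ports zeroed out, so a fragment can never match on L4 ports
 * that it might not actually carry.  The struct and function names here are
 * hypothetical. */
#include <stdbool.h>
#include <stdint.h>

struct mini_flow {
    bool is_frag;                   /* Any IP fragment, first or later. */
    uint16_t tp_src, tp_dst;        /* Transport ports, network order. */
};

static struct mini_flow
frag_normal_lookup_key(const struct mini_flow *flow)
{
    struct mini_flow key = *flow;
    if (key.is_frag) {
        /* Pretend that transport ports are unavailable. */
        key.tp_src = 0;
        key.tp_dst = 0;
    }
    return key;
}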
5539 | ||
c376f9a3 IY |
5540 | static struct rule_dpif * |
5541 | rule_dpif_miss_rule(struct ofproto_dpif *ofproto, const struct flow *flow) | |
5542 | { | |
5543 | struct ofport_dpif *port; | |
5544 | ||
5545 | port = get_ofp_port(ofproto, flow->in_port); | |
5546 | if (!port) { | |
5547 | VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16, flow->in_port); | |
5548 | return ofproto->miss_rule; | |
5549 | } | |
5550 | ||
5551 | if (port->up.pp.config & OFPUTIL_PC_NO_PACKET_IN) { | |
5552 | return ofproto->no_packet_in_rule; | |
5553 | } | |
5554 | return ofproto->miss_rule; | |
5555 | } | |
5556 | ||
7ee20df1 BP |
5557 | static void |
5558 | complete_operation(struct rule_dpif *rule) | |
5559 | { | |
5560 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto); | |
5561 | ||
54a9cbc9 | 5562 | rule_invalidate(rule); |
7ee20df1 BP |
5563 | if (clogged) { |
5564 | struct dpif_completion *c = xmalloc(sizeof *c); | |
5565 | c->op = rule->up.pending; | |
5566 | list_push_back(&ofproto->completions, &c->list_node); | |
5567 | } else { | |
5568 | ofoperation_complete(rule->up.pending, 0); | |
5569 | } | |
5570 | } | |
5571 | ||
abe529af BP |
5572 | static struct rule * |
5573 | rule_alloc(void) | |
5574 | { | |
5575 | struct rule_dpif *rule = xmalloc(sizeof *rule); | |
5576 | return &rule->up; | |
5577 | } | |
5578 | ||
5579 | static void | |
5580 | rule_dealloc(struct rule *rule_) | |
5581 | { | |
5582 | struct rule_dpif *rule = rule_dpif_cast(rule_); | |
5583 | free(rule); | |
5584 | } | |
5585 | ||
90bf1e07 | 5586 | static enum ofperr |
abe529af BP |
5587 | rule_construct(struct rule *rule_) |
5588 | { | |
5589 | struct rule_dpif *rule = rule_dpif_cast(rule_); | |
5590 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto); | |
7ee20df1 | 5591 | struct rule_dpif *victim; |
54a9cbc9 | 5592 | uint8_t table_id; |
abe529af | 5593 | |
abe529af BP |
5594 | rule->packet_count = 0; |
5595 | rule->byte_count = 0; | |
abe529af | 5596 | |
7ee20df1 BP |
5597 | victim = rule_dpif_cast(ofoperation_get_victim(rule->up.pending)); |
5598 | if (victim && !list_is_empty(&victim->facets)) { | |
5599 | struct facet *facet; | |
5600 | ||
5601 | rule->facets = victim->facets; | |
5602 | list_moved(&rule->facets); | |
5603 | LIST_FOR_EACH (facet, list_node, &rule->facets) { | |
bbb5d219 EJ |
5604 | /* XXX: We're only clearing our local counters here. It's possible |
5605 | * that quite a few packets are unaccounted for in the datapath | |
5606 | * statistics. These will be accounted to the new rule instead of | |
5607 | * cleared as required. This could be fixed by clearing out the | |
5608 | * datapath statistics for this facet, but currently it doesn't | |
5609 | * seem worth it. */ | |
5610 | facet_reset_counters(facet); | |
7ee20df1 BP |
5611 | facet->rule = rule; |
5612 | } | |
5613 | } else { | |
5614 | /* Must avoid list_moved() in this case. */ | |
5615 | list_init(&rule->facets); | |
5616 | } | |
abe529af | 5617 | |
54a9cbc9 | 5618 | table_id = rule->up.table_id; |
5cb7a798 BP |
5619 | if (victim) { |
5620 | rule->tag = victim->tag; | |
5621 | } else if (table_id == 0) { | |
5622 | rule->tag = 0; | |
5623 | } else { | |
5624 | struct flow flow; | |
5625 | ||
5626 | miniflow_expand(&rule->up.cr.match.flow, &flow); | |
5627 | rule->tag = rule_calculate_tag(&flow, &rule->up.cr.match.mask, | |
5628 | ofproto->tables[table_id].basis); | |
5629 | } | |
54a9cbc9 | 5630 | |
7ee20df1 | 5631 | complete_operation(rule); |
abe529af BP |
5632 | return 0; |
5633 | } | |
5634 | ||
5635 | static void | |
5636 | rule_destruct(struct rule *rule_) | |
5637 | { | |
5638 | struct rule_dpif *rule = rule_dpif_cast(rule_); | |
abe529af BP |
5639 | struct facet *facet, *next_facet; |
5640 | ||
abe529af | 5641 | LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) { |
15baa734 | 5642 | facet_revalidate(facet); |
abe529af | 5643 | } |
7ee20df1 BP |
5644 | |
5645 | complete_operation(rule); | |
abe529af BP |
5646 | } |
5647 | ||
5648 | static void | |
5649 | rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes) | |
5650 | { | |
5651 | struct rule_dpif *rule = rule_dpif_cast(rule_); | |
abe529af | 5652 | |
4464589e EJ |
5653 | /* push_all_stats() can handle flow misses which, when using the learn |
5654 | * action, can cause rules to be added and deleted. This can corrupt our | |
5655 | * caller's datastructures which assume that rule_get_stats() doesn't have | |
5656 | * an impact on the flow table. To be safe, we disable miss handling. */ | |
5657 | push_all_stats__(false); | |
bf1e8ff9 | 5658 | |
abe529af BP |
5659 | /* Start from historical data for 'rule' itself that are no longer tracked |
5660 | * in facets. This counts, for example, facets that have expired. */ | |
5661 | *packets = rule->packet_count; | |
5662 | *bytes = rule->byte_count; | |
abe529af BP |
5663 | } |
5664 | ||
0a740f48 EJ |
5665 | static void |
5666 | rule_dpif_execute(struct rule_dpif *rule, const struct flow *flow, | |
5667 | struct ofpbuf *packet) | |
abe529af | 5668 | { |
abe529af | 5669 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto); |
14f94f9a | 5670 | struct initial_vals initial_vals; |
112bc5f4 | 5671 | struct dpif_flow_stats stats; |
bbafd73b EJ |
5672 | struct xlate_out xout; |
5673 | struct xlate_in xin; | |
abe529af | 5674 | |
a7752d4a | 5675 | dpif_flow_stats_extract(flow, packet, time_msec(), &stats); |
112bc5f4 BP |
5676 | rule_credit_stats(rule, &stats); |
5677 | ||
14f94f9a | 5678 | initial_vals.vlan_tci = flow->vlan_tci; |
bbafd73b EJ |
5679 | xlate_in_init(&xin, ofproto, flow, &initial_vals, rule, stats.tcp_flags, |
5680 | packet); | |
5681 | xin.resubmit_stats = &stats; | |
5682 | xlate_actions(&xin, &xout); | |
112bc5f4 | 5683 | |
bbafd73b EJ |
5684 | execute_odp_actions(ofproto, flow, xout.odp_actions.data, |
5685 | xout.odp_actions.size, packet); | |
112bc5f4 | 5686 | |
bbafd73b | 5687 | xlate_out_uninit(&xout); |
0a740f48 | 5688 | } |
5bf0e941 | 5689 | |
0a740f48 EJ |
5690 | static enum ofperr |
5691 | rule_execute(struct rule *rule, const struct flow *flow, | |
5692 | struct ofpbuf *packet) | |
5693 | { | |
5694 | rule_dpif_execute(rule_dpif_cast(rule), flow, packet); | |
5695 | ofpbuf_delete(packet); | |
5bf0e941 | 5696 | return 0; |
abe529af BP |
5697 | } |
5698 | ||
7ee20df1 BP |
5699 | static void |
5700 | rule_modify_actions(struct rule *rule_) | |
abe529af BP |
5701 | { |
5702 | struct rule_dpif *rule = rule_dpif_cast(rule_); | |
7ee20df1 BP |
5703 | |
5704 | complete_operation(rule); | |
abe529af BP |
5705 | } |
5706 | \f | |
97d6520b | 5707 | /* Sends 'packet' out 'ofport'. |
52a90c29 | 5708 | * May modify 'packet'. |
abe529af BP |
5709 | * Returns 0 if successful, otherwise a positive errno value. */ |
5710 | static int | |
52a90c29 | 5711 | send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet) |
abe529af | 5712 | { |
79e15b78 | 5713 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto); |
b9ad7294 | 5714 | uint64_t odp_actions_stub[1024 / 8]; |
80e5eed9 | 5715 | struct ofpbuf key, odp_actions; |
0f49659a | 5716 | struct dpif_flow_stats stats; |
80e5eed9 | 5717 | struct odputil_keybuf keybuf; |
0f49659a | 5718 | struct ofpact_output output; |
bbafd73b EJ |
5719 | struct xlate_out xout; |
5720 | struct xlate_in xin; | |
80e5eed9 | 5721 | struct flow flow; |
abe529af BP |
5722 | int error; |
5723 | ||
b9ad7294 | 5724 | ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub); |
0f49659a | 5725 | ofpbuf_use_stack(&key, &keybuf, sizeof keybuf); |
b9ad7294 | 5726 | |
0f49659a EJ |
5727 | /* Use OFPP_NONE as the in_port to avoid special packet processing. */ |
5728 | flow_extract(packet, 0, 0, NULL, OFPP_NONE, &flow); | |
5729 | odp_flow_key_from_flow(&key, &flow, ofp_port_to_odp_port(ofproto, | |
5730 | OFPP_LOCAL)); | |
5731 | dpif_flow_stats_extract(&flow, packet, time_msec(), &stats); | |
b9ad7294 | 5732 | |
0f49659a EJ |
5733 | ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output); |
5734 | output.port = ofport->up.ofp_port; | |
5735 | output.max_len = 0; | |
b9ad7294 | 5736 | |
bbafd73b EJ |
5737 | xlate_in_init(&xin, ofproto, &flow, NULL, NULL, 0, packet); |
5738 | xin.ofpacts_len = sizeof output; | |
5739 | xin.ofpacts = &output.ofpact; | |
5740 | xin.resubmit_stats = &stats; | |
5741 | xlate_actions(&xin, &xout); | |
6ff686f2 | 5742 | |
acf60855 | 5743 | error = dpif_execute(ofproto->backer->dpif, |
80e5eed9 | 5744 | key.data, key.size, |
bbafd73b | 5745 | xout.odp_actions.data, xout.odp_actions.size, |
abe529af | 5746 | packet); |
bbafd73b | 5747 | xlate_out_uninit(&xout); |
abe529af BP |
5748 | |
5749 | if (error) { | |
0f49659a EJ |
5750 | VLOG_WARN_RL(&rl, "%s: failed to send packet on port %s (%s)", |
5751 | ofproto->up.name, netdev_get_name(ofport->up.netdev), | |
5752 | strerror(error)); | |
abe529af | 5753 | } |
79e15b78 EJ |
5754 | |
5755 | ofproto->stats.tx_packets++; | |
5756 | ofproto->stats.tx_bytes += packet->size; | |
abe529af BP |
5757 | return error; |
5758 | } | |
5759 | \f | |
df2c07f4 | 5760 | /* OpenFlow to datapath action translation. */ |
abe529af | 5761 | |
f03a84b9 | 5762 | static bool may_receive(const struct ofport_dpif *, struct xlate_ctx *); |
f25d0cf3 | 5763 | static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len, |
f03a84b9 EJ |
5764 | struct xlate_ctx *); |
5765 | static void xlate_normal(struct xlate_ctx *); | |
abe529af | 5766 | |
6a7e895f BP |
5767 | /* Composes an ODP action for a "slow path" action for 'flow' within 'ofproto'. |
5768 | * The action will state 'slow' as the reason that the action is in the slow | |
5769 | * path. (This is purely informational: it allows a human viewing "ovs-dpctl | |
5770 | * dump-flows" output to see why a flow is in the slow path.) | |
5771 | * | |
5772 | * The 'stub_size' bytes in 'stub' will be used to store the action. | |
5773 | * 'stub_size' must be large enough for the action. | |
5774 | * | |
5775 | * The action and its size will be stored in '*actionsp' and '*actions_lenp', | |
5776 | * respectively. */ | |
5777 | static void | |
5778 | compose_slow_path(const struct ofproto_dpif *ofproto, const struct flow *flow, | |
5779 | enum slow_path_reason slow, | |
5780 | uint64_t *stub, size_t stub_size, | |
5781 | const struct nlattr **actionsp, size_t *actions_lenp) | |
5782 | { | |
5783 | union user_action_cookie cookie; | |
5784 | struct ofpbuf buf; | |
5785 | ||
5786 | cookie.type = USER_ACTION_COOKIE_SLOW_PATH; | |
5787 | cookie.slow_path.unused = 0; | |
5788 | cookie.slow_path.reason = slow; | |
5789 | ||
5790 | ofpbuf_use_stack(&buf, stub, stub_size); | |
ccc09689 | 5791 | if (slow & (SLOW_CFM | SLOW_BFD | SLOW_LACP | SLOW_STP)) { |
9032f11e | 5792 | uint32_t pid = dpif_port_get_pid(ofproto->backer->dpif, UINT32_MAX); |
29089a54 | 5793 | odp_put_userspace_action(pid, &cookie, sizeof cookie.slow_path, &buf); |
625b0720 | 5794 | } else { |
29089a54 RL |
5795 | put_userspace_action(ofproto, &buf, flow, &cookie, |
5796 | sizeof cookie.slow_path); | |
625b0720 | 5797 | } |
6a7e895f BP |
5798 | *actionsp = buf.data; |
5799 | *actions_lenp = buf.size; | |
5800 | } | |
5801 | ||
98403001 BP |
5802 | static size_t |
5803 | put_userspace_action(const struct ofproto_dpif *ofproto, | |
5804 | struct ofpbuf *odp_actions, | |
5805 | const struct flow *flow, | |
29089a54 RL |
5806 | const union user_action_cookie *cookie, |
5807 | const size_t cookie_size) | |
98403001 | 5808 | { |
98403001 BP |
5809 | uint32_t pid; |
5810 | ||
acf60855 | 5811 | pid = dpif_port_get_pid(ofproto->backer->dpif, |
e1b1d06a | 5812 | ofp_port_to_odp_port(ofproto, flow->in_port)); |
98403001 | 5813 | |
29089a54 RL |
5814 | return odp_put_userspace_action(pid, cookie, cookie_size, odp_actions); |
5815 | } | |
5816 | ||
5817 | /* Compose SAMPLE action for sFlow or IPFIX. The given probability is | |
5818 | * the number of packets out of UINT32_MAX to sample. The given | |
5819 | * cookie is passed back in the callback for each sampled packet. | |
5820 | */ | |
5821 | static size_t | |
5822 | compose_sample_action(const struct ofproto_dpif *ofproto, | |
5823 | struct ofpbuf *odp_actions, | |
5824 | const struct flow *flow, | |
5825 | const uint32_t probability, | |
5826 | const union user_action_cookie *cookie, | |
5827 | const size_t cookie_size) | |
5828 | { | |
5829 | size_t sample_offset, actions_offset; | |
5830 | int cookie_offset; | |
5831 | ||
5832 | sample_offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SAMPLE); | |
5833 | ||
5834 | nl_msg_put_u32(odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability); | |
5835 | ||
5836 | actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS); | |
5837 | cookie_offset = put_userspace_action(ofproto, odp_actions, flow, cookie, | |
5838 | cookie_size); | |
5839 | ||
5840 | nl_msg_end_nested(odp_actions, actions_offset); | |
5841 | nl_msg_end_nested(odp_actions, sample_offset); | |
5842 | return cookie_offset; | |
98403001 BP |
5843 | } |
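/* Editor's note: not part of the upstream file.  The attribute tree emitted by
 * compose_sample_action() above nests as follows (lengths omitted):
 *
 *   OVS_ACTION_ATTR_SAMPLE
 *       OVS_SAMPLE_ATTR_PROBABILITY   <probability out of UINT32_MAX>
 *       OVS_SAMPLE_ATTR_ACTIONS
 *           userspace action          <pid + user-action cookie>
 *
 * The returned cookie_offset points at that inner cookie, which is what lets
 * fix_sflow_action() rewrite it once the real output port is known. */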
5844 | ||
36fc5f18 BP |
5845 | static void |
5846 | compose_sflow_cookie(const struct ofproto_dpif *ofproto, | |
5847 | ovs_be16 vlan_tci, uint32_t odp_port, | |
1673e0e4 | 5848 | unsigned int n_outputs, union user_action_cookie *cookie) |
36fc5f18 BP |
5849 | { |
5850 | int ifindex; | |
5851 | ||
5852 | cookie->type = USER_ACTION_COOKIE_SFLOW; | |
1673e0e4 | 5853 | cookie->sflow.vlan_tci = vlan_tci; |
36fc5f18 BP |
5854 | |
5855 | /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output | |
5856 | * port information") for the interpretation of cookie->output. */ | |
5857 | switch (n_outputs) { | |
5858 | case 0: | |
5859 | /* 0x40000000 | 256 means "packet dropped for unknown reason". */ | |
1673e0e4 | 5860 | cookie->sflow.output = 0x40000000 | 256; |
36fc5f18 BP |
5861 | break; |
5862 | ||
5863 | case 1: | |
5864 | ifindex = dpif_sflow_odp_port_to_ifindex(ofproto->sflow, odp_port); | |
5865 | if (ifindex) { | |
1673e0e4 | 5866 | cookie->sflow.output = ifindex; |
36fc5f18 BP |
5867 | break; |
5868 | } | |
5869 | /* Fall through. */ | |
5870 | default: | |
5871 | /* 0x80000000 means "multiple output ports". */ |
1673e0e4 | 5872 | cookie->sflow.output = 0x80000000 | n_outputs; |
36fc5f18 BP |
5873 | break; |
5874 | } | |
5875 | } | |
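/* Editor's note: an illustrative sketch, not part of the upstream file.  It
 * spells out the cookie->sflow.output encoding chosen above (see the sFlow v5
 * text referenced in the comment): 0x40000000|256 marks a drop for an unknown
 * reason, a bare ifindex marks a single known output, and 0x80000000|n marks
 * n (or unmapped) outputs.  example_sflow_output() is a hypothetical name. */
#include <stdint.h>

static uint32_t
example_sflow_output(unsigned int n_outputs, int ifindex)
{
    if (n_outputs == 0) {
        return 0x40000000 | 256;        /* Dropped for an unknown reason. */
    } else if (n_outputs == 1 && ifindex) {
        return ifindex;                 /* Single output with a known ifindex. */
    } else {
        return 0x80000000 | n_outputs;  /* Multiple or unmapped outputs. */
    }
}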
5876 | ||
29089a54 | 5877 | /* Compose SAMPLE action for sFlow bridge sampling. */ |
6ff686f2 PS |
5878 | static size_t |
5879 | compose_sflow_action(const struct ofproto_dpif *ofproto, | |
5880 | struct ofpbuf *odp_actions, | |
5881 | const struct flow *flow, | |
5882 | uint32_t odp_port) | |
5883 | { | |
6ff686f2 | 5884 | uint32_t probability; |
1673e0e4 | 5885 | union user_action_cookie cookie; |
6ff686f2 PS |
5886 | |
5887 | if (!ofproto->sflow || flow->in_port == OFPP_NONE) { | |
5888 | return 0; | |
5889 | } | |
5890 | ||
6ff686f2 | 5891 | probability = dpif_sflow_get_probability(ofproto->sflow); |
36fc5f18 BP |
5892 | compose_sflow_cookie(ofproto, htons(0), odp_port, |
5893 | odp_port == OVSP_NONE ? 0 : 1, &cookie); | |
6ff686f2 | 5894 | |
29089a54 RL |
5895 | return compose_sample_action(ofproto, odp_actions, flow, probability, |
5896 | &cookie, sizeof cookie.sflow); | |
5897 | } | |
5898 | ||
5899 | static void | |
5900 | compose_flow_sample_cookie(uint16_t probability, uint32_t collector_set_id, | |
5901 | uint32_t obs_domain_id, uint32_t obs_point_id, | |
5902 | union user_action_cookie *cookie) | |
5903 | { | |
5904 | cookie->type = USER_ACTION_COOKIE_FLOW_SAMPLE; | |
5905 | cookie->flow_sample.probability = probability; | |
5906 | cookie->flow_sample.collector_set_id = collector_set_id; | |
5907 | cookie->flow_sample.obs_domain_id = obs_domain_id; | |
5908 | cookie->flow_sample.obs_point_id = obs_point_id; | |
5909 | } | |
5910 | ||
5911 | static void | |
5912 | compose_ipfix_cookie(union user_action_cookie *cookie) | |
5913 | { | |
5914 | cookie->type = USER_ACTION_COOKIE_IPFIX; | |
5915 | } | |
5916 | ||
5917 | /* Compose SAMPLE action for IPFIX bridge sampling. */ | |
5918 | static void | |
5919 | compose_ipfix_action(const struct ofproto_dpif *ofproto, | |
5920 | struct ofpbuf *odp_actions, | |
5921 | const struct flow *flow) | |
5922 | { | |
5923 | uint32_t probability; | |
5924 | union user_action_cookie cookie; | |
5925 | ||
5926 | if (!ofproto->ipfix || flow->in_port == OFPP_NONE) { | |
5927 | return; | |
5928 | } | |
5929 | ||
5930 | probability = dpif_ipfix_get_bridge_exporter_probability(ofproto->ipfix); | |
5931 | compose_ipfix_cookie(&cookie); | |
5932 | ||
5933 | compose_sample_action(ofproto, odp_actions, flow, probability, | |
5934 | &cookie, sizeof cookie.ipfix); | |
6ff686f2 PS |
5935 | } |
5936 | ||
29089a54 RL |
5937 | /* The SAMPLE action for sFlow must be the first action in any given list of |
5938 | * actions. At this point we do not yet have all the information required to |
5939 | * build it, so we build the sample action as completely as possible. */ |
6ff686f2 | 5940 | static void |
f03a84b9 | 5941 | add_sflow_action(struct xlate_ctx *ctx) |
6ff686f2 PS |
5942 | { |
5943 | ctx->user_cookie_offset = compose_sflow_action(ctx->ofproto, | |
bbafd73b EJ |
5944 | &ctx->xout->odp_actions, |
5945 | &ctx->xin->flow, OVSP_NONE); | |
6ff686f2 PS |
5946 | ctx->sflow_odp_port = 0; |
5947 | ctx->sflow_n_outputs = 0; | |
5948 | } | |
5949 | ||
29089a54 RL |
5950 | /* The SAMPLE action for IPFIX must be the first or second action in any given |
5951 | * list of actions, coming after the SAMPLE action for sFlow if one is present. */ |
5952 | static void | |
f03a84b9 | 5953 | add_ipfix_action(struct xlate_ctx *ctx) |
29089a54 | 5954 | { |
bbafd73b EJ |
5955 | compose_ipfix_action(ctx->ofproto, &ctx->xout->odp_actions, |
5956 | &ctx->xin->flow); | |
29089a54 RL |
5957 | } |
5958 | ||
6ff686f2 PS |
5959 | /* Fix SAMPLE action according to data collected while composing ODP actions. |
5960 | * We need to fix the SAMPLE action's OVS_SAMPLE_ATTR_ACTIONS attribute, i.e. the |
5961 | * nested USERSPACE action's user-cookie, which is required for sFlow. */ |
5962 | static void | |
f03a84b9 | 5963 | fix_sflow_action(struct xlate_ctx *ctx) |
6ff686f2 PS |
5964 | { |
5965 | const struct flow *base = &ctx->base_flow; | |
1673e0e4 | 5966 | union user_action_cookie *cookie; |
6ff686f2 PS |
5967 | |
5968 | if (!ctx->user_cookie_offset) { | |
5969 | return; | |
5970 | } | |
5971 | ||
bbafd73b | 5972 | cookie = ofpbuf_at(&ctx->xout->odp_actions, ctx->user_cookie_offset, |
a93f1927 | 5973 | sizeof cookie->sflow); |
cb22974d | 5974 | ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW); |
6ff686f2 | 5975 | |
36fc5f18 BP |
5976 | compose_sflow_cookie(ctx->ofproto, base->vlan_tci, |
5977 | ctx->sflow_odp_port, ctx->sflow_n_outputs, cookie); | |
6ff686f2 PS |
5978 | } |
5979 | ||
6ff686f2 | 5980 | static void |
f03a84b9 | 5981 | compose_output_action__(struct xlate_ctx *ctx, uint16_t ofp_port, |
81b1afb1 | 5982 | bool check_stp) |
6ff686f2 | 5983 | { |
d59906fb | 5984 | const struct ofport_dpif *ofport = get_ofp_port(ctx->ofproto, ofp_port); |
d8558b4a JR |
5985 | ovs_be16 flow_vlan_tci; |
5986 | uint32_t flow_skb_mark; | |
5987 | uint8_t flow_nw_tos; | |
a4454ac6 | 5988 | struct priority_to_dscp *pdscp; |
0a740f48 EJ |
5989 | uint32_t out_port, odp_port; |
5990 | ||
5991 | /* If 'struct flow' gets additional metadata, we'll need to zero it out | |
5992 | * before traversing a patch port. */ | |
cff78c88 | 5993 | BUILD_ASSERT_DECL(FLOW_WC_SEQ == 20); |
d59906fb | 5994 | |
a4454ac6 EJ |
5995 | if (!ofport) { |
5996 | xlate_report(ctx, "Nonexistent output port"); | |
5997 | return; | |
5998 | } else if (ofport->up.pp.config & OFPUTIL_PC_NO_FWD) { | |
5999 | xlate_report(ctx, "OFPPC_NO_FWD set, skipping output"); | |
6000 | return; | |
6001 | } else if (check_stp && !stp_forward_in_state(ofport->stp_state)) { | |
6002 | xlate_report(ctx, "STP not in forwarding state, skipping output"); | |
6003 | return; | |
6004 | } | |
8b36f51e | 6005 | |
0a740f48 EJ |
6006 | if (netdev_vport_is_patch(ofport->up.netdev)) { |
6007 | struct ofport_dpif *peer = ofport_get_peer(ofport); | |
bbafd73b | 6008 | struct flow old_flow = ctx->xin->flow; |
0a740f48 | 6009 | const struct ofproto_dpif *peer_ofproto; |
bb374ef6 | 6010 | enum slow_path_reason special; |
ffaef958 | 6011 | struct ofport_dpif *in_port; |
0a740f48 EJ |
6012 | |
6013 | if (!peer) { | |
6014 | xlate_report(ctx, "Nonexistent patch port peer"); | |
6015 | return; | |
6016 | } | |
6017 | ||
6018 | peer_ofproto = ofproto_dpif_cast(peer->up.ofproto); | |
6019 | if (peer_ofproto->backer != ctx->ofproto->backer) { | |
6020 | xlate_report(ctx, "Patch port peer on a different datapath"); | |
6021 | return; | |
6022 | } | |
6023 | ||
6024 | ctx->ofproto = ofproto_dpif_cast(peer->up.ofproto); | |
bbafd73b EJ |
6025 | ctx->xin->flow.in_port = peer->up.ofp_port; |
6026 | ctx->xin->flow.metadata = htonll(0); | |
6027 | memset(&ctx->xin->flow.tunnel, 0, sizeof ctx->xin->flow.tunnel); | |
6028 | memset(ctx->xin->flow.regs, 0, sizeof ctx->xin->flow.regs); | |
6029 | ||
6030 | in_port = get_ofp_port(ctx->ofproto, ctx->xin->flow.in_port); | |
6031 | special = process_special(ctx->ofproto, &ctx->xin->flow, in_port, | |
6032 | ctx->xin->packet); | |
bb374ef6 | 6033 | if (special) { |
bbafd73b | 6034 | ctx->xout->slow = special; |
bb374ef6 | 6035 | } else if (!in_port || may_receive(in_port, ctx)) { |
ffaef958 | 6036 | if (!in_port || stp_forward_in_state(in_port->stp_state)) { |
bbafd73b | 6037 | xlate_table_action(ctx, ctx->xin->flow.in_port, 0, true); |
ffaef958 BP |
6038 | } else { |
6039 | /* Forwarding is disabled by STP. Let OFPP_NORMAL and the | |
6040 | * learning action look at the packet, then drop it. */ | |
6041 | struct flow old_base_flow = ctx->base_flow; | |
bbafd73b EJ |
6042 | size_t old_size = ctx->xout->odp_actions.size; |
6043 | xlate_table_action(ctx, ctx->xin->flow.in_port, 0, true); | |
ffaef958 | 6044 | ctx->base_flow = old_base_flow; |
bbafd73b | 6045 | ctx->xout->odp_actions.size = old_size; |
ffaef958 BP |
6046 | } |
6047 | } | |
6048 | ||
bbafd73b | 6049 | ctx->xin->flow = old_flow; |
0a740f48 EJ |
6050 | ctx->ofproto = ofproto_dpif_cast(ofport->up.ofproto); |
6051 | ||
bbafd73b EJ |
6052 | if (ctx->xin->resubmit_stats) { |
6053 | netdev_vport_inc_tx(ofport->up.netdev, ctx->xin->resubmit_stats); | |
6054 | netdev_vport_inc_rx(peer->up.netdev, ctx->xin->resubmit_stats); | |
0a740f48 EJ |
6055 | } |
6056 | ||
6057 | return; | |
6058 | } | |
6059 | ||
bbafd73b EJ |
6060 | flow_vlan_tci = ctx->xin->flow.vlan_tci; |
6061 | flow_skb_mark = ctx->xin->flow.skb_mark; | |
6062 | flow_nw_tos = ctx->xin->flow.nw_tos; | |
d8558b4a | 6063 | |
bbafd73b | 6064 | pdscp = get_priority(ofport, ctx->xin->flow.skb_priority); |
a4454ac6 | 6065 | if (pdscp) { |
bbafd73b EJ |
6066 | ctx->xin->flow.nw_tos &= ~IP_DSCP_MASK; |
6067 | ctx->xin->flow.nw_tos |= pdscp->dscp; | |
d59906fb EJ |
6068 | } |
6069 | ||
b9ad7294 | 6070 | if (ofport->tnl_port) { |
d8558b4a JR |
6071 | /* Save tunnel metadata so that changes made due to |
6072 | * the Logical (tunnel) Port are not visible for any further | |
6073 | * matches, while explicit set actions on tunnel metadata are. | |
6074 | */ | |
bbafd73b EJ |
6075 | struct flow_tnl flow_tnl = ctx->xin->flow.tunnel; |
6076 | odp_port = tnl_port_send(ofport->tnl_port, &ctx->xin->flow); | |
b9ad7294 EJ |
6077 | if (odp_port == OVSP_NONE) { |
6078 | xlate_report(ctx, "Tunneling decided against output"); | |
88e90491 | 6079 | goto out; /* restore flow_nw_tos */ |
b9ad7294 | 6080 | } |
bbafd73b | 6081 | if (ctx->xin->flow.tunnel.ip_dst == ctx->orig_tunnel_ip_dst) { |
0ad90c84 JR |
6082 | xlate_report(ctx, "Not tunneling to our own address"); |
6083 | goto out; /* restore flow_nw_tos */ | |
6084 | } | |
bbafd73b EJ |
6085 | if (ctx->xin->resubmit_stats) { |
6086 | netdev_vport_inc_tx(ofport->up.netdev, ctx->xin->resubmit_stats); | |
b9ad7294 EJ |
6087 | } |
6088 | out_port = odp_port; | |
bbafd73b EJ |
6089 | commit_odp_tunnel_action(&ctx->xin->flow, &ctx->base_flow, |
6090 | &ctx->xout->odp_actions); | |
6091 | ctx->xin->flow.tunnel = flow_tnl; /* Restore tunnel metadata */ | |
b9ad7294 | 6092 | } else { |
deea1200 | 6093 | uint16_t vlandev_port; |
cf630ea3 | 6094 | odp_port = ofport->odp_port; |
deea1200 AW |
6095 | vlandev_port = vsp_realdev_to_vlandev(ctx->ofproto, ofp_port, |
6096 | ctx->xin->flow.vlan_tci); | |
6097 | if (vlandev_port == ofp_port) { | |
6098 | out_port = odp_port; | |
6099 | } else { | |
6100 | out_port = ofp_port_to_odp_port(ctx->ofproto, vlandev_port); | |
bbafd73b | 6101 | ctx->xin->flow.vlan_tci = htons(0); |
b9ad7294 | 6102 | } |
bbafd73b | 6103 | ctx->xin->flow.skb_mark &= ~IPSEC_MARK; |
52a90c29 | 6104 | } |
bbafd73b EJ |
6105 | commit_odp_actions(&ctx->xin->flow, &ctx->base_flow, |
6106 | &ctx->xout->odp_actions); | |
6107 | nl_msg_put_u32(&ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT, out_port); | |
52a90c29 | 6108 | |
6ff686f2 PS |
6109 | ctx->sflow_odp_port = odp_port; |
6110 | ctx->sflow_n_outputs++; | |
bbafd73b | 6111 | ctx->xout->nf_output_iface = ofp_port; |
d8558b4a JR |
6112 | |
6113 | /* Restore flow */ | |
bbafd73b EJ |
6114 | ctx->xin->flow.vlan_tci = flow_vlan_tci; |
6115 | ctx->xin->flow.skb_mark = flow_skb_mark; | |
88e90491 | 6116 | out: |
bbafd73b | 6117 | ctx->xin->flow.nw_tos = flow_nw_tos; |
6ff686f2 PS |
6118 | } |
6119 | ||
abe529af | 6120 | static void |
f03a84b9 | 6121 | compose_output_action(struct xlate_ctx *ctx, uint16_t ofp_port) |
abe529af | 6122 | { |
81b1afb1 | 6123 | compose_output_action__(ctx, ofp_port, true); |
abe529af BP |
6124 | } |
6125 | ||
55599423 | 6126 | static void |
f03a84b9 | 6127 | tag_the_flow(struct xlate_ctx *ctx, struct rule_dpif *rule) |
55599423 JR |
6128 | { |
6129 | struct ofproto_dpif *ofproto = ctx->ofproto; | |
6130 | uint8_t table_id = ctx->table_id; | |
6131 | ||
6132 | if (table_id > 0 && table_id < N_TABLES) { | |
6133 | struct table_dpif *table = &ofproto->tables[table_id]; | |
6134 | if (table->other_table) { | |
bbafd73b EJ |
6135 | ctx->xout->tags |= (rule && rule->tag |
6136 | ? rule->tag | |
6137 | : rule_calculate_tag(&ctx->xin->flow, | |
6138 | &table->other_table->mask, | |
6139 | table->basis)); | |
55599423 JR |
6140 | } |
6141 | } | |
6142 | } | |
6143 | ||
6144 | /* Common rule processing in one place to avoid duplicating code. */ | |
6145 | static struct rule_dpif * | |
f03a84b9 | 6146 | ctx_rule_hooks(struct xlate_ctx *ctx, struct rule_dpif *rule, |
55599423 JR |
6147 | bool may_packet_in) |
6148 | { | |
bbafd73b EJ |
6149 | if (ctx->xin->resubmit_hook) { |
6150 | ctx->xin->resubmit_hook(ctx, rule); | |
55599423 JR |
6151 | } |
6152 | if (rule == NULL && may_packet_in) { | |
6153 | /* XXX | |
6154 | * check if table configuration flags | |
6155 | * OFPTC_TABLE_MISS_CONTROLLER, default. | |
6156 | * OFPTC_TABLE_MISS_CONTINUE, | |
6157 | * OFPTC_TABLE_MISS_DROP | |
6158 | * When OF1.0, OFPTC_TABLE_MISS_CONTINUE is used. What to do? | |
6159 | */ | |
bbafd73b | 6160 | rule = rule_dpif_miss_rule(ctx->ofproto, &ctx->xin->flow); |
55599423 | 6161 | } |
bbafd73b EJ |
6162 | if (rule && ctx->xin->resubmit_stats) { |
6163 | rule_credit_stats(rule, ctx->xin->resubmit_stats); | |
55599423 JR |
6164 | } |
6165 | return rule; | |
6166 | } | |
6167 | ||
abe529af | 6168 | static void |
f03a84b9 | 6169 | xlate_table_action(struct xlate_ctx *ctx, |
1688c479 | 6170 | uint16_t in_port, uint8_t table_id, bool may_packet_in) |
abe529af BP |
6171 | { |
6172 | if (ctx->recurse < MAX_RESUBMIT_RECURSION) { | |
6173 | struct rule_dpif *rule; | |
bbafd73b | 6174 | uint16_t old_in_port = ctx->xin->flow.in_port; |
55599423 | 6175 | uint8_t old_table_id = ctx->table_id; |
29901626 | 6176 | |
29901626 | 6177 | ctx->table_id = table_id; |
abe529af | 6178 | |
54a9cbc9 | 6179 | /* Look up a flow with 'in_port' as the input port. */ |
bbafd73b | 6180 | ctx->xin->flow.in_port = in_port; |
bcd2633a JP |
6181 | rule = rule_dpif_lookup__(ctx->ofproto, &ctx->xin->flow, |
6182 | &ctx->xout->wc, table_id); | |
55599423 JR |
6183 | |
6184 | tag_the_flow(ctx, rule); | |
54a9cbc9 BP |
6185 | |
6186 | /* Restore the original input port. Otherwise OFPP_NORMAL and | |
6187 | * OFPP_IN_PORT will have surprising behavior. */ | |
bbafd73b | 6188 | ctx->xin->flow.in_port = old_in_port; |
abe529af | 6189 | |
55599423 | 6190 | rule = ctx_rule_hooks(ctx, rule, may_packet_in); |
1688c479 | 6191 | |
abe529af | 6192 | if (rule) { |
18b2a258 | 6193 | struct rule_dpif *old_rule = ctx->rule; |
54834960 | 6194 | |
abe529af | 6195 | ctx->recurse++; |
18b2a258 | 6196 | ctx->rule = rule; |
f25d0cf3 | 6197 | do_xlate_actions(rule->up.ofpacts, rule->up.ofpacts_len, ctx); |
18b2a258 | 6198 | ctx->rule = old_rule; |
abe529af BP |
6199 | ctx->recurse--; |
6200 | } | |
29901626 BP |
6201 | |
6202 | ctx->table_id = old_table_id; | |
abe529af BP |
6203 | } else { |
6204 | static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1); | |
6205 | ||
29901626 | 6206 | VLOG_ERR_RL(&recurse_rl, "resubmit actions recursed over %d times", |
abe529af | 6207 | MAX_RESUBMIT_RECURSION); |
6a6455e5 | 6208 | ctx->max_resubmit_trigger = true; |
abe529af BP |
6209 | } |
6210 | } | |
6211 | ||
29901626 | 6212 | static void |
f03a84b9 | 6213 | xlate_ofpact_resubmit(struct xlate_ctx *ctx, |
f25d0cf3 | 6214 | const struct ofpact_resubmit *resubmit) |
29901626 BP |
6215 | { |
6216 | uint16_t in_port; | |
6217 | uint8_t table_id; | |
6218 | ||
f25d0cf3 BP |
6219 | in_port = resubmit->in_port; |
6220 | if (in_port == OFPP_IN_PORT) { | |
bbafd73b | 6221 | in_port = ctx->xin->flow.in_port; |
f25d0cf3 BP |
6222 | } |
6223 | ||
6224 | table_id = resubmit->table_id; | |
6225 | if (table_id == 255) { | |
6226 | table_id = ctx->table_id; | |
6227 | } | |
29901626 | 6228 | |
1688c479 | 6229 | xlate_table_action(ctx, in_port, table_id, false); |
29901626 BP |
6230 | } |
6231 | ||
abe529af | 6232 | static void |
f03a84b9 | 6233 | flood_packets(struct xlate_ctx *ctx, bool all) |
abe529af BP |
6234 | { |
6235 | struct ofport_dpif *ofport; | |
6236 | ||
b3e9b2ed | 6237 | HMAP_FOR_EACH (ofport, up.hmap_node, &ctx->ofproto->up.ports) { |
abe529af | 6238 | uint16_t ofp_port = ofport->up.ofp_port; |
d59906fb | 6239 | |
bbafd73b | 6240 | if (ofp_port == ctx->xin->flow.in_port) { |
d59906fb EJ |
6241 | continue; |
6242 | } | |
6243 | ||
5e48dc2b | 6244 | if (all) { |
81b1afb1 | 6245 | compose_output_action__(ctx, ofp_port, false); |
9e1fd49b | 6246 | } else if (!(ofport->up.pp.config & OFPUTIL_PC_NO_FLOOD)) { |
5e48dc2b | 6247 | compose_output_action(ctx, ofp_port); |
abe529af BP |
6248 | } |
6249 | } | |
b3e9b2ed | 6250 | |
bbafd73b | 6251 | ctx->xout->nf_output_iface = NF_OUT_FLOOD; |
abe529af BP |
6252 | } |
6253 | ||
6ff686f2 | 6254 | static void |
f03a84b9 | 6255 | execute_controller_action(struct xlate_ctx *ctx, int len, |
a7349929 BP |
6256 | enum ofp_packet_in_reason reason, |
6257 | uint16_t controller_id) | |
6ff686f2 | 6258 | { |
999fba59 EJ |
6259 | struct ofputil_packet_in pin; |
6260 | struct ofpbuf *packet; | |
1ac7c9bd | 6261 | struct flow key; |
6ff686f2 | 6262 | |
bbafd73b EJ |
6263 | ovs_assert(!ctx->xout->slow || ctx->xout->slow == SLOW_CONTROLLER); |
6264 | ctx->xout->slow = SLOW_CONTROLLER; | |
6265 | if (!ctx->xin->packet) { | |
999fba59 EJ |
6266 | return; |
6267 | } | |
6268 | ||
bbafd73b | 6269 | packet = ofpbuf_clone(ctx->xin->packet); |
999fba59 | 6270 | |
1ac7c9bd SH |
6271 | key.skb_priority = 0; |
6272 | key.skb_mark = 0; | |
6273 | memset(&key.tunnel, 0, sizeof key.tunnel); | |
999fba59 | 6274 | |
1ac7c9bd SH |
6275 | commit_odp_actions(&ctx->xin->flow, &ctx->base_flow, |
6276 | &ctx->xout->odp_actions); | |
999fba59 | 6277 | |
1ac7c9bd SH |
6278 | odp_execute_actions(NULL, packet, &key, ctx->xout->odp_actions.data, |
6279 | ctx->xout->odp_actions.size, NULL, NULL); | |
999fba59 EJ |
6280 | |
6281 | pin.packet = packet->data; | |
6282 | pin.packet_len = packet->size; | |
f0fd1a17 | 6283 | pin.reason = reason; |
a7349929 | 6284 | pin.controller_id = controller_id; |
54834960 | 6285 | pin.table_id = ctx->table_id; |
18b2a258 | 6286 | pin.cookie = ctx->rule ? ctx->rule->up.flow_cookie : 0; |
54834960 | 6287 | |
999fba59 | 6288 | pin.send_len = len; |
bbafd73b | 6289 | flow_get_metadata(&ctx->xin->flow, &pin.fmd); |
999fba59 | 6290 | |
d8653c38 | 6291 | connmgr_send_packet_in(ctx->ofproto->up.connmgr, &pin); |
999fba59 | 6292 | ofpbuf_delete(packet); |
6ff686f2 PS |
6293 | } |
6294 | ||
b02475c5 | 6295 | static void |
f03a84b9 | 6296 | execute_mpls_push_action(struct xlate_ctx *ctx, ovs_be16 eth_type) |
b02475c5 SH |
6297 | { |
6298 | ovs_assert(eth_type_mpls(eth_type)); | |
6299 | ||
bcd2633a JP |
6300 | memset(&ctx->xout->wc.masks.dl_type, 0xff, |
6301 | sizeof ctx->xout->wc.masks.dl_type); | |
6302 | memset(&ctx->xout->wc.masks.mpls_lse, 0xff, | |
6303 | sizeof ctx->xout->wc.masks.mpls_lse); | |
6304 | memset(&ctx->xout->wc.masks.mpls_depth, 0xff, | |
6305 | sizeof ctx->xout->wc.masks.mpls_depth); | |
6306 | ||
b02475c5 | 6307 | if (ctx->base_flow.mpls_depth) { |
bbafd73b EJ |
6308 | ctx->xin->flow.mpls_lse &= ~htonl(MPLS_BOS_MASK); |
6309 | ctx->xin->flow.mpls_depth++; | |
b02475c5 SH |
6310 | } else { |
6311 | ovs_be32 label; | |
6312 | uint8_t tc, ttl; | |
6313 | ||
bbafd73b | 6314 | if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IPV6)) { |
b02475c5 SH |
6315 | label = htonl(0x2); /* IPV6 Explicit Null. */ |
6316 | } else { | |
6317 | label = htonl(0x0); /* IPV4 Explicit Null. */ | |
6318 | } | |
bbafd73b EJ |
6319 | tc = (ctx->xin->flow.nw_tos & IP_DSCP_MASK) >> 2; |
6320 | ttl = ctx->xin->flow.nw_ttl ? ctx->xin->flow.nw_ttl : 0x40; | |
6321 | ctx->xin->flow.mpls_lse = set_mpls_lse_values(ttl, tc, 1, label); | |
6322 | ctx->xin->flow.mpls_depth = 1; | |
b02475c5 | 6323 | } |
bbafd73b | 6324 | ctx->xin->flow.dl_type = eth_type; |
b02475c5 SH |
6325 | } |
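/* Editor's note: an illustrative sketch, not part of the upstream file.  It
 * packs an MPLS label stack entry in host byte order using the standard
 * RFC 3032 layout that the set_mpls_lse_values() call above ultimately
 * produces: label(20) | TC(3) | BOS(1) | TTL(8).  A fresh push above uses
 * label 0 (IPv4 Explicit NULL) or 2 (IPv6 Explicit NULL), TC taken from the
 * IP DSCP bits, BOS set, and a default TTL of 0x40 when the flow has none.
 * example_mpls_lse() is a hypothetical name. */
#include <stdint.h>

static uint32_t
example_mpls_lse(uint32_t label, uint8_t tc, int bos, uint8_t ttl)
{
    return (label & 0xfffff) << 12
           | (uint32_t) (tc & 0x7) << 9
           | (uint32_t) (bos ? 1 : 0) << 8
           | ttl;
}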
6326 | ||
6327 | static void | |
f03a84b9 | 6328 | execute_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type) |
b02475c5 | 6329 | { |
bbafd73b | 6330 | ovs_assert(eth_type_mpls(ctx->xin->flow.dl_type)); |
b02475c5 SH |
6331 | ovs_assert(!eth_type_mpls(eth_type)); |
6332 | ||
bcd2633a JP |
6333 | memset(&ctx->xout->wc.masks.dl_type, 0xff, |
6334 | sizeof ctx->xout->wc.masks.dl_type); | |
6335 | memset(&ctx->xout->wc.masks.mpls_lse, 0xff, | |
6336 | sizeof ctx->xout->wc.masks.mpls_lse); | |
6337 | memset(&ctx->xout->wc.masks.mpls_depth, 0xff, | |
6338 | sizeof ctx->xout->wc.masks.mpls_depth); | |
6339 | ||
bbafd73b EJ |
6340 | if (ctx->xin->flow.mpls_depth) { |
6341 | ctx->xin->flow.mpls_depth--; | |
6342 | ctx->xin->flow.mpls_lse = htonl(0); | |
6343 | if (!ctx->xin->flow.mpls_depth) { | |
6344 | ctx->xin->flow.dl_type = eth_type; | |
b02475c5 SH |
6345 | } |
6346 | } | |
6347 | } | |
6348 | ||
f0fd1a17 | 6349 | static bool |
f03a84b9 | 6350 | compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids) |
f0fd1a17 | 6351 | { |
bbafd73b EJ |
6352 | if (ctx->xin->flow.dl_type != htons(ETH_TYPE_IP) && |
6353 | ctx->xin->flow.dl_type != htons(ETH_TYPE_IPV6)) { | |
f0fd1a17 PS |
6354 | return false; |
6355 | } | |
6356 | ||
bbafd73b EJ |
6357 | if (ctx->xin->flow.nw_ttl > 1) { |
6358 | ctx->xin->flow.nw_ttl--; | |
f0fd1a17 PS |
6359 | return false; |
6360 | } else { | |
c2d967a5 MM |
6361 | size_t i; |
6362 | ||
6363 | for (i = 0; i < ids->n_controllers; i++) { | |
6364 | execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, | |
6365 | ids->cnt_ids[i]); | |
6366 | } | |
f0fd1a17 PS |
6367 | |
6368 | /* Stop processing for current table. */ | |
6369 | return true; | |
6370 | } | |
6371 | } | |
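/* Editor's note: an illustrative sketch, not part of the upstream file.  It
 * restates the dec_ttl contract above on a bare TTL value: a TTL above 1 is
 * decremented and translation continues (false), otherwise the packet is
 * reported to each requested controller id and processing of the current
 * table stops (true).  example_dec_ttl() is a hypothetical name. */
#include <stdbool.h>
#include <stdint.h>

static bool
example_dec_ttl(uint8_t *ttl)
{
    if (*ttl > 1) {
        (*ttl)--;
        return false;       /* Keep translating the remaining actions. */
    }
    return true;            /* Expired: punt to controller(s) and stop. */
}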
6372 | ||
0f3f3c3d | 6373 | static bool |
f03a84b9 | 6374 | execute_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl) |
0f3f3c3d | 6375 | { |
bbafd73b | 6376 | if (!eth_type_mpls(ctx->xin->flow.dl_type)) { |
0f3f3c3d SH |
6377 | return true; |
6378 | } | |
6379 | ||
bbafd73b | 6380 | set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse, ttl); |
0f3f3c3d SH |
6381 | return false; |
6382 | } | |
6383 | ||
b676167a | 6384 | static bool |
f03a84b9 | 6385 | execute_dec_mpls_ttl_action(struct xlate_ctx *ctx) |
b676167a | 6386 | { |
bbafd73b | 6387 | uint8_t ttl = mpls_lse_to_ttl(ctx->xin->flow.mpls_lse); |
b676167a | 6388 | |
bbafd73b | 6389 | if (!eth_type_mpls(ctx->xin->flow.dl_type)) { |
b676167a SH |
6390 | return false; |
6391 | } | |
6392 | ||
be80bc65 | 6393 | if (ttl > 1) { |
b676167a | 6394 | ttl--; |
bbafd73b | 6395 | set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse, ttl); |
b676167a SH |
6396 | return false; |
6397 | } else { | |
6398 | execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0); | |
6399 | ||
6400 | /* Stop processing for current table. */ | |
6401 | return true; | |
6402 | } | |
6403 | } | |
6404 | ||
abe529af | 6405 | static void |
f03a84b9 | 6406 | xlate_output_action(struct xlate_ctx *ctx, |
1688c479 | 6407 | uint16_t port, uint16_t max_len, bool may_packet_in) |
abe529af | 6408 | { |
bbafd73b | 6409 | uint16_t prev_nf_output_iface = ctx->xout->nf_output_iface; |
abe529af | 6410 | |
bbafd73b | 6411 | ctx->xout->nf_output_iface = NF_OUT_DROP; |
abe529af BP |
6412 | |
6413 | switch (port) { | |
6414 | case OFPP_IN_PORT: | |
bbafd73b | 6415 | compose_output_action(ctx, ctx->xin->flow.in_port); |
abe529af BP |
6416 | break; |
6417 | case OFPP_TABLE: | |
bbafd73b | 6418 | xlate_table_action(ctx, ctx->xin->flow.in_port, 0, may_packet_in); |
abe529af BP |
6419 | break; |
6420 | case OFPP_NORMAL: | |
6421 | xlate_normal(ctx); | |
6422 | break; | |
6423 | case OFPP_FLOOD: | |
d59906fb | 6424 | flood_packets(ctx, false); |
abe529af BP |
6425 | break; |
6426 | case OFPP_ALL: | |
d59906fb | 6427 | flood_packets(ctx, true); |
abe529af BP |
6428 | break; |
6429 | case OFPP_CONTROLLER: | |
a7349929 | 6430 | execute_controller_action(ctx, max_len, OFPR_ACTION, 0); |
abe529af | 6431 | break; |
e81d2933 EJ |
6432 | case OFPP_NONE: |
6433 | break; | |
a0fbe94a | 6434 | case OFPP_LOCAL: |
abe529af | 6435 | default: |
bbafd73b | 6436 | if (port != ctx->xin->flow.in_port) { |
81b1afb1 | 6437 | compose_output_action(ctx, port); |
3dd3eace BP |
6438 | } else { |
6439 | xlate_report(ctx, "skipping output to input port"); | |
abe529af BP |
6440 | } |
6441 | break; | |
6442 | } | |
6443 | ||
6444 | if (prev_nf_output_iface == NF_OUT_FLOOD) { | |
bbafd73b EJ |
6445 | ctx->xout->nf_output_iface = NF_OUT_FLOOD; |
6446 | } else if (ctx->xout->nf_output_iface == NF_OUT_DROP) { | |
6447 | ctx->xout->nf_output_iface = prev_nf_output_iface; | |
abe529af | 6448 | } else if (prev_nf_output_iface != NF_OUT_DROP && |
bbafd73b EJ |
6449 | ctx->xout->nf_output_iface != NF_OUT_FLOOD) { |
6450 | ctx->xout->nf_output_iface = NF_OUT_MULTI; | |
abe529af BP |
6451 | } |
6452 | } | |
6453 | ||
f694937d | 6454 | static void |
f03a84b9 | 6455 | xlate_output_reg_action(struct xlate_ctx *ctx, |
f25d0cf3 | 6456 | const struct ofpact_output_reg *or) |
f694937d | 6457 | { |
bbafd73b | 6458 | uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow); |
f25d0cf3 | 6459 | if (port <= UINT16_MAX) { |
bcd2633a JP |
6460 | union mf_subvalue value; |
6461 | ||
6462 | memset(&value, 0xff, sizeof value); | |
6463 | mf_write_subfield_flow(&or->src, &value, &ctx->xout->wc.masks); | |
1688c479 | 6464 | xlate_output_action(ctx, port, or->max_len, false); |
f694937d EJ |
6465 | } |
6466 | } | |
6467 | ||
abe529af | 6468 | static void |
f03a84b9 | 6469 | xlate_enqueue_action(struct xlate_ctx *ctx, |
f25d0cf3 | 6470 | const struct ofpact_enqueue *enqueue) |
abe529af | 6471 | { |
f25d0cf3 BP |
6472 | uint16_t ofp_port = enqueue->port; |
6473 | uint32_t queue_id = enqueue->queue; | |
abff858b | 6474 | uint32_t flow_priority, priority; |
abe529af BP |
6475 | int error; |
6476 | ||
f25d0cf3 | 6477 | /* Translate queue to priority. */ |
acf60855 JP |
6478 | error = dpif_queue_to_priority(ctx->ofproto->backer->dpif, |
6479 | queue_id, &priority); | |
abe529af BP |
6480 | if (error) { |
6481 | /* Fall back to ordinary output action. */ | |
1688c479 | 6482 | xlate_output_action(ctx, enqueue->port, 0, false); |
abe529af BP |
6483 | return; |
6484 | } | |
6485 | ||
f25d0cf3 | 6486 | /* Check output port. */ |
abe529af | 6487 | if (ofp_port == OFPP_IN_PORT) { |
bbafd73b EJ |
6488 | ofp_port = ctx->xin->flow.in_port; |
6489 | } else if (ofp_port == ctx->xin->flow.in_port) { | |
8ba855c1 | 6490 | return; |
abe529af | 6491 | } |
abe529af | 6492 | |
df2c07f4 | 6493 | /* Add datapath actions. */ |
bbafd73b EJ |
6494 | flow_priority = ctx->xin->flow.skb_priority; |
6495 | ctx->xin->flow.skb_priority = priority; | |
81b1afb1 | 6496 | compose_output_action(ctx, ofp_port); |
bbafd73b | 6497 | ctx->xin->flow.skb_priority = flow_priority; |
abe529af BP |
6498 | |
6499 | /* Update NetFlow output port. */ | |
bbafd73b EJ |
6500 | if (ctx->xout->nf_output_iface == NF_OUT_DROP) { |
6501 | ctx->xout->nf_output_iface = ofp_port; | |
6502 | } else if (ctx->xout->nf_output_iface != NF_OUT_FLOOD) { | |
6503 | ctx->xout->nf_output_iface = NF_OUT_MULTI; | |
abe529af BP |
6504 | } |
6505 | } | |
6506 | ||
6507 | static void | |
f03a84b9 | 6508 | xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id) |
abe529af | 6509 | { |
f25d0cf3 | 6510 | uint32_t skb_priority; |
abe529af | 6511 | |
acf60855 JP |
6512 | if (!dpif_queue_to_priority(ctx->ofproto->backer->dpif, |
6513 | queue_id, &skb_priority)) { | |
bbafd73b | 6514 | ctx->xin->flow.skb_priority = skb_priority; |
f25d0cf3 BP |
6515 | } else { |
6516 | /* Couldn't translate queue to a priority. Nothing to do. A warning | |
abe529af | 6517 | * has already been logged. */ |
abe529af | 6518 | } |
abe529af BP |
6519 | } |
6520 | ||
daff3353 EJ |
6521 | static bool |
6522 | slave_enabled_cb(uint16_t ofp_port, void *ofproto_) | |
6523 | { | |
6524 | struct ofproto_dpif *ofproto = ofproto_; | |
6525 | struct ofport_dpif *port; | |
6526 | ||
6527 | switch (ofp_port) { | |
6528 | case OFPP_IN_PORT: | |
6529 | case OFPP_TABLE: | |
6530 | case OFPP_NORMAL: | |
6531 | case OFPP_FLOOD: | |
6532 | case OFPP_ALL: | |
439e4d8c | 6533 | case OFPP_NONE: |
daff3353 EJ |
6534 | return true; |
6535 | case OFPP_CONTROLLER: /* Not supported by the bundle action. */ | |
6536 | return false; | |
6537 | default: | |
6538 | port = get_ofp_port(ofproto, ofp_port); | |
6539 | return port ? port->may_enable : false; | |
6540 | } | |
6541 | } | |
6542 | ||
f25d0cf3 | 6543 | static void |
f03a84b9 | 6544 | xlate_bundle_action(struct xlate_ctx *ctx, |
f25d0cf3 BP |
6545 | const struct ofpact_bundle *bundle) |
6546 | { | |
6547 | uint16_t port; | |
6548 | ||
bcd2633a JP |
6549 | port = bundle_execute(bundle, &ctx->xin->flow, &ctx->xout->wc, |
6550 | slave_enabled_cb, ctx->ofproto); | |
f25d0cf3 | 6551 | if (bundle->dst.field) { |
bbafd73b | 6552 | nxm_reg_load(&bundle->dst, port, &ctx->xin->flow); |
f25d0cf3 | 6553 | } else { |
1688c479 | 6554 | xlate_output_action(ctx, port, 0, false); |
f25d0cf3 BP |
6555 | } |
6556 | } | |
6557 | ||
75a75043 | 6558 | static void |
f03a84b9 | 6559 | xlate_learn_action(struct xlate_ctx *ctx, |
f25d0cf3 | 6560 | const struct ofpact_learn *learn) |
75a75043 BP |
6561 | { |
6562 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1); | |
6563 | struct ofputil_flow_mod fm; | |
f25d0cf3 BP |
6564 | uint64_t ofpacts_stub[1024 / 8]; |
6565 | struct ofpbuf ofpacts; | |
75a75043 BP |
6566 | int error; |
6567 | ||
bcd2633a JP |
6568 | ctx->xout->has_learn = true; |
6569 | ||
6570 | learn_mask(learn, &ctx->xout->wc); | |
6571 | ||
6572 | if (!ctx->xin->may_learn) { | |
6573 | return; | |
6574 | } | |
6575 | ||
f25d0cf3 | 6576 | ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub); |
bbafd73b | 6577 | learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts); |
75a75043 BP |
6578 | |
6579 | error = ofproto_flow_mod(&ctx->ofproto->up, &fm); | |
6580 | if (error && !VLOG_DROP_WARN(&rl)) { | |
90bf1e07 BP |
6581 | VLOG_WARN("learning action failed to modify flow table (%s)", |
6582 | ofperr_get_name(error)); | |
75a75043 BP |
6583 | } |
6584 | ||
f25d0cf3 | 6585 | ofpbuf_uninit(&ofpacts); |
75a75043 BP |
6586 | } |
6587 | ||
0e553d9c BP |
6588 | /* Reduces '*timeout' to no more than 'max'. A value of zero in either case |
6589 | * means "infinite". */ | |
6590 | static void | |
6591 | reduce_timeout(uint16_t max, uint16_t *timeout) | |
6592 | { | |
6593 | if (max && (!*timeout || *timeout > max)) { | |
6594 | *timeout = max; | |
6595 | } | |
6596 | } | |
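/* Editor's note: a usage sketch, not part of the upstream file.  It exercises
 * reduce_timeout() above on the interesting cases, remembering that zero means
 * "infinite" on both sides.  example_reduce_timeout_cases() is a hypothetical
 * name. */
#include <assert.h>
#include <stdint.h>

static void
example_reduce_timeout_cases(void)
{
    uint16_t t;

    t = 0;  reduce_timeout(10, &t); assert(t == 10);    /* Infinite, capped. */
    t = 30; reduce_timeout(10, &t); assert(t == 10);    /* Reduced to max. */
    t = 5;  reduce_timeout(10, &t); assert(t == 5);     /* Already small. */
    t = 30; reduce_timeout(0, &t);  assert(t == 30);    /* max=0: no cap. */
}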
6597 | ||
6598 | static void | |
f03a84b9 | 6599 | xlate_fin_timeout(struct xlate_ctx *ctx, |
f25d0cf3 | 6600 | const struct ofpact_fin_timeout *oft) |
0e553d9c | 6601 | { |
bbafd73b | 6602 | if (ctx->xin->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) { |
0e553d9c BP |
6603 | struct rule_dpif *rule = ctx->rule; |
6604 | ||
f25d0cf3 BP |
6605 | reduce_timeout(oft->fin_idle_timeout, &rule->up.idle_timeout); |
6606 | reduce_timeout(oft->fin_hard_timeout, &rule->up.hard_timeout); | |
0e553d9c BP |
6607 | } |
6608 | } | |
6609 | ||
29089a54 | 6610 | static void |
f03a84b9 | 6611 | xlate_sample_action(struct xlate_ctx *ctx, |
29089a54 RL |
6612 | const struct ofpact_sample *os) |
6613 | { | |
6614 | union user_action_cookie cookie; | |
6615 | /* Scale the probability from 16-bit to 32-bit while representing | |
6616 | * the same percentage. */ | |
6617 | uint32_t probability = (os->probability << 16) | os->probability; | |
6618 | ||
bbafd73b EJ |
6619 | commit_odp_actions(&ctx->xin->flow, &ctx->base_flow, |
6620 | &ctx->xout->odp_actions); | |
29089a54 RL |
6621 | |
6622 | compose_flow_sample_cookie(os->probability, os->collector_set_id, | |
6623 | os->obs_domain_id, os->obs_point_id, &cookie); | |
bbafd73b | 6624 | compose_sample_action(ctx->ofproto, &ctx->xout->odp_actions, &ctx->xin->flow, |
29089a54 RL |
6625 | probability, &cookie, sizeof cookie.flow_sample); |
6626 | } | |
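/* Editor's note: an illustrative sketch, not part of the upstream file.  The
 * 16-to-32 bit probability scaling above is exact: (p << 16) | p equals
 * p * 0x10001 and UINT32_MAX equals UINT16_MAX * 0x10001, so the sampled
 * fraction p/UINT16_MAX is preserved, e.g. 0xffff -> 0xffffffff (sample
 * everything) and 0x8000 -> 0x80008000 (roughly half).
 * example_scale_probability() is a hypothetical name. */
#include <stdint.h>

static uint32_t
example_scale_probability(uint16_t p)
{
    return (uint32_t) p << 16 | p;
}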
6627 | ||
21f7563c | 6628 | static bool |
f03a84b9 | 6629 | may_receive(const struct ofport_dpif *port, struct xlate_ctx *ctx) |
21f7563c | 6630 | { |
bbafd73b EJ |
6631 | if (port->up.pp.config & (eth_addr_equals(ctx->xin->flow.dl_dst, |
6632 | eth_addr_stp) | |
9e1fd49b BP |
6633 | ? OFPUTIL_PC_NO_RECV_STP |
6634 | : OFPUTIL_PC_NO_RECV)) { | |
21f7563c JP |
6635 | return false; |
6636 | } | |
6637 | ||
6638 | /* Only drop packets here if both forwarding and learning are | |
6639 | * disabled. If just learning is enabled, we need to have | |
6640 | * OFPP_NORMAL and the learning action have a look at the packet | |
6641 | * before we can drop it. */ | |
6642 | if (!stp_forward_in_state(port->stp_state) | |
6643 | && !stp_learn_in_state(port->stp_state)) { | |
6644 | return false; | |
6645 | } | |
6646 | ||
6647 | return true; | |
6648 | } | |
6649 | ||
4863c249 | 6650 | static bool |
f03a84b9 | 6651 | tunnel_ecn_ok(struct xlate_ctx *ctx) |
4863c249 JP |
6652 | { |
6653 | if (is_ip_any(&ctx->base_flow) | |
bbafd73b | 6654 | && (ctx->xin->flow.tunnel.ip_tos & IP_ECN_MASK) == IP_ECN_CE) { |
29a5df0a JP |
6655 | if ((ctx->base_flow.nw_tos & IP_ECN_MASK) == IP_ECN_NOT_ECT) { |
6656 | VLOG_WARN_RL(&rl, "dropping tunnel packet marked ECN CE" | |
6657 | " but is not ECN capable"); | |
6658 | return false; | |
6659 | } else { | |
6660 | /* Set the ECN CE value in the tunneled packet. */ | |
bbafd73b | 6661 | ctx->xin->flow.nw_tos |= IP_ECN_CE; |
29a5df0a | 6662 | } |
4863c249 JP |
6663 | } |
6664 | ||
6665 | return true; | |
6666 | } | |
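/* Editor's note: an illustrative sketch, not part of the upstream file.  It
 * restates the tunnel ECN rule above (RFC 6040 style decapsulation) on bare
 * TOS bytes: if the outer header is marked CE, a Not-ECT inner packet must be
 * dropped, otherwise CE is propagated to the inner header.  The EX_* macros
 * and example_tunnel_ecn_ok() are hypothetical names. */
#include <stdbool.h>
#include <stdint.h>

#define EX_ECN_MASK    0x3
#define EX_ECN_NOT_ECT 0x0
#define EX_ECN_CE      0x3

static bool
example_tunnel_ecn_ok(uint8_t outer_tos, uint8_t *inner_tos)
{
    if ((outer_tos & EX_ECN_MASK) == EX_ECN_CE) {
        if ((*inner_tos & EX_ECN_MASK) == EX_ECN_NOT_ECT) {
            return false;               /* Not ECN-capable: drop. */
        }
        *inner_tos |= EX_ECN_CE;        /* Propagate the congestion mark. */
    }
    return true;
}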
6667 | ||
abe529af | 6668 | static void |
f25d0cf3 | 6669 | do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len, |
f03a84b9 | 6670 | struct xlate_ctx *ctx) |
abe529af | 6671 | { |
254750ce | 6672 | bool was_evictable = true; |
f25d0cf3 | 6673 | const struct ofpact *a; |
abe529af | 6674 | |
254750ce BP |
6675 | if (ctx->rule) { |
6676 | /* Don't let the rule we're working on get evicted underneath us. */ | |
6677 | was_evictable = ctx->rule->up.evictable; | |
6678 | ctx->rule->up.evictable = false; | |
6679 | } | |
55599423 JR |
6680 | |
6681 | do_xlate_actions_again: | |
f25d0cf3 BP |
6682 | OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) { |
6683 | struct ofpact_controller *controller; | |
4cceacb9 | 6684 | const struct ofpact_metadata *metadata; |
38f2e360 | 6685 | |
848e8809 EJ |
6686 | if (ctx->exit) { |
6687 | break; | |
6688 | } | |
6689 | ||
f25d0cf3 BP |
6690 | switch (a->type) { |
6691 | case OFPACT_OUTPUT: | |
6692 | xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port, | |
1688c479 | 6693 | ofpact_get_OUTPUT(a)->max_len, true); |
f25d0cf3 BP |
6694 | break; |
6695 | ||
6696 | case OFPACT_CONTROLLER: | |
6697 | controller = ofpact_get_CONTROLLER(a); | |
6698 | execute_controller_action(ctx, controller->max_len, | |
6699 | controller->reason, | |
6700 | controller->controller_id); | |
6701 | break; | |
690a61c5 | 6702 | |
f25d0cf3 BP |
6703 | case OFPACT_ENQUEUE: |
6704 | xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a)); | |
abe529af BP |
6705 | break; |
6706 | ||
f25d0cf3 | 6707 | case OFPACT_SET_VLAN_VID: |
bbafd73b EJ |
6708 | ctx->xin->flow.vlan_tci &= ~htons(VLAN_VID_MASK); |
6709 | ctx->xin->flow.vlan_tci |= | |
6710 | (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid) | |
6711 | | htons(VLAN_CFI)); | |
abe529af BP |
6712 | break; |
6713 | ||
f25d0cf3 | 6714 | case OFPACT_SET_VLAN_PCP: |
bbafd73b EJ |
6715 | ctx->xin->flow.vlan_tci &= ~htons(VLAN_PCP_MASK); |
6716 | ctx->xin->flow.vlan_tci |= | |
6717 | htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp << VLAN_PCP_SHIFT) | |
6718 | | VLAN_CFI); | |
abe529af BP |
6719 | break; |
6720 | ||
f25d0cf3 | 6721 | case OFPACT_STRIP_VLAN: |
bbafd73b | 6722 | ctx->xin->flow.vlan_tci = htons(0); |
abe529af BP |
6723 | break; |
6724 | ||
3e34fbdd | 6725 | case OFPACT_PUSH_VLAN: |
5dca28b5 | 6726 | /* XXX 802.1AD(QinQ) */ |
bbafd73b | 6727 | ctx->xin->flow.vlan_tci = htons(VLAN_CFI); |
3e34fbdd IY |
6728 | break; |
6729 | ||
f25d0cf3 | 6730 | case OFPACT_SET_ETH_SRC: |
bbafd73b | 6731 | memcpy(ctx->xin->flow.dl_src, ofpact_get_SET_ETH_SRC(a)->mac, |
f25d0cf3 | 6732 | ETH_ADDR_LEN); |
abe529af BP |
6733 | break; |
6734 | ||
f25d0cf3 | 6735 | case OFPACT_SET_ETH_DST: |
bbafd73b | 6736 | memcpy(ctx->xin->flow.dl_dst, ofpact_get_SET_ETH_DST(a)->mac, |
f25d0cf3 | 6737 | ETH_ADDR_LEN); |
abe529af BP |
6738 | break; |
6739 | ||
f25d0cf3 | 6740 | case OFPACT_SET_IPV4_SRC: |
bcd2633a JP |
6741 | memset(&ctx->xout->wc.masks.dl_type, 0xff, |
6742 | sizeof ctx->xout->wc.masks.dl_type); | |
bbafd73b EJ |
6743 | if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) { |
6744 | ctx->xin->flow.nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4; | |
1b035ef2 | 6745 | } |
abe529af BP |
6746 | break; |
6747 | ||
f25d0cf3 | 6748 | case OFPACT_SET_IPV4_DST: |
bcd2633a JP |
6749 | memset(&ctx->xout->wc.masks.dl_type, 0xff, |
6750 | sizeof ctx->xout->wc.masks.dl_type); | |
bbafd73b EJ |
6751 | if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) { |
6752 | ctx->xin->flow.nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4; | |
1b035ef2 | 6753 | } |
abe529af BP |
6754 | break; |
6755 | ||
f25d0cf3 | 6756 | case OFPACT_SET_IPV4_DSCP: |
c4f2731d | 6757 | /* OpenFlow 1.0 only supports IPv4. */ |
bcd2633a JP |
6758 | memset(&ctx->xout->wc.masks.dl_type, 0xff, |
6759 | sizeof ctx->xout->wc.masks.dl_type); | |
bbafd73b EJ |
6760 | if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) { |
6761 | ctx->xin->flow.nw_tos &= ~IP_DSCP_MASK; | |
6762 | ctx->xin->flow.nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp; | |
c4f2731d | 6763 | } |
abe529af BP |
6764 | break; |
6765 | ||
f25d0cf3 | 6766 | case OFPACT_SET_L4_SRC_PORT: |
bcd2633a JP |
6767 | memset(&ctx->xout->wc.masks.dl_type, 0xff, |
6768 | sizeof ctx->xout->wc.masks.dl_type); | |
6769 | memset(&ctx->xout->wc.masks.nw_proto, 0xff, | |
6770 | sizeof ctx->xout->wc.masks.nw_proto); | |
bbafd73b EJ |
6771 | if (is_ip_any(&ctx->xin->flow)) { |
6772 | ctx->xin->flow.tp_src = | |
6773 | htons(ofpact_get_SET_L4_SRC_PORT(a)->port); | |
1b035ef2 | 6774 | } |
abe529af BP |
6775 | break; |
6776 | ||
f25d0cf3 | 6777 | case OFPACT_SET_L4_DST_PORT: |
bcd2633a JP |
6778 | memset(&ctx->xout->wc.masks.dl_type, 0xff, |
6779 | sizeof ctx->xout->wc.masks.dl_type); | |
6780 | memset(&ctx->xout->wc.masks.nw_proto, 0xff, | |
6781 | sizeof ctx->xout->wc.masks.nw_proto); | |
bbafd73b EJ |
6782 | if (is_ip_any(&ctx->xin->flow)) { |
6783 | ctx->xin->flow.tp_dst = | |
6784 | htons(ofpact_get_SET_L4_DST_PORT(a)->port); | |
1b035ef2 | 6785 | } |
abe529af BP |
6786 | break; |
6787 | ||
f25d0cf3 BP |
6788 | case OFPACT_RESUBMIT: |
6789 | xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a)); | |
38f2e360 BP |
6790 | break; |
6791 | ||
f25d0cf3 | 6792 | case OFPACT_SET_TUNNEL: |
bbafd73b EJ |
6793 | ctx->xin->flow.tunnel.tun_id = |
6794 | htonll(ofpact_get_SET_TUNNEL(a)->tun_id); | |
29901626 BP |
6795 | break; |
6796 | ||
f25d0cf3 BP |
6797 | case OFPACT_SET_QUEUE: |
6798 | xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id); | |
abe529af BP |
6799 | break; |
6800 | ||
f25d0cf3 | 6801 | case OFPACT_POP_QUEUE: |
bcd2633a JP |
6802 | memset(&ctx->xout->wc.masks.skb_priority, 0xff, |
6803 | sizeof ctx->xout->wc.masks.skb_priority); | |
6804 | ||
bbafd73b | 6805 | ctx->xin->flow.skb_priority = ctx->orig_skb_priority; |
38f2e360 BP |
6806 | break; |
6807 | ||
f25d0cf3 | 6808 | case OFPACT_REG_MOVE: |
bcd2633a JP |
6809 | nxm_execute_reg_move(ofpact_get_REG_MOVE(a), &ctx->xin->flow, |
6810 | &ctx->xout->wc); | |
38f2e360 BP |
6811 | break; |
6812 | ||
f25d0cf3 | 6813 | case OFPACT_REG_LOAD: |
bbafd73b | 6814 | nxm_execute_reg_load(ofpact_get_REG_LOAD(a), &ctx->xin->flow); |
38f2e360 BP |
6815 | break; |
6816 | ||
bd85dac1 | 6817 | case OFPACT_STACK_PUSH: |
bbafd73b | 6818 | nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), &ctx->xin->flow, |
bcd2633a | 6819 | &ctx->xout->wc, &ctx->stack); |
bd85dac1 AZ |
6820 | break; |
6821 | ||
6822 | case OFPACT_STACK_POP: | |
bbafd73b | 6823 | nxm_execute_stack_pop(ofpact_get_STACK_POP(a), &ctx->xin->flow, |
bd85dac1 AZ |
6824 | &ctx->stack); |
6825 | break; | |
6826 | ||
b02475c5 SH |
6827 | case OFPACT_PUSH_MPLS: |
6828 | execute_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a)->ethertype); | |
6829 | break; | |
6830 | ||
6831 | case OFPACT_POP_MPLS: | |
6832 | execute_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype); | |
6833 | break; | |
6834 | ||
0f3f3c3d | 6835 | case OFPACT_SET_MPLS_TTL: |
bbafd73b EJ |
6836 | if (execute_set_mpls_ttl_action(ctx, |
6837 | ofpact_get_SET_MPLS_TTL(a)->ttl)) { | |
0f3f3c3d SH |
6838 | goto out; |
6839 | } | |
6840 | break; | |
6841 | ||
b676167a SH |
6842 | case OFPACT_DEC_MPLS_TTL: |
6843 | if (execute_dec_mpls_ttl_action(ctx)) { | |
6844 | goto out; | |
6845 | } | |
6846 | break; | |
6847 | ||
f25d0cf3 | 6848 | case OFPACT_DEC_TTL: |
bcd2633a JP |
6849 | memset(&ctx->xout->wc.masks.dl_type, 0xff, |
6850 | sizeof ctx->xout->wc.masks.dl_type); | |
c2d967a5 | 6851 | if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) { |
f25d0cf3 BP |
6852 | goto out; |
6853 | } | |
38f2e360 BP |
6854 | break; |
6855 | ||
f25d0cf3 BP |
6856 | case OFPACT_NOTE: |
6857 | /* Nothing to do. */ | |
abe529af BP |
6858 | break; |
6859 | ||
f25d0cf3 | 6860 | case OFPACT_MULTIPATH: |
bcd2633a JP |
6861 | multipath_execute(ofpact_get_MULTIPATH(a), &ctx->xin->flow, |
6862 | &ctx->xout->wc); | |
abe529af | 6863 | break; |
daff3353 | 6864 | |
f25d0cf3 | 6865 | case OFPACT_BUNDLE: |
a368bb53 | 6866 | ctx->ofproto->has_bundle_action = true; |
f25d0cf3 | 6867 | xlate_bundle_action(ctx, ofpact_get_BUNDLE(a)); |
a368bb53 | 6868 | break; |
f694937d | 6869 | |
f25d0cf3 BP |
6870 | case OFPACT_OUTPUT_REG: |
6871 | xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a)); | |
f694937d | 6872 | break; |
75a75043 | 6873 | |
f25d0cf3 | 6874 | case OFPACT_LEARN: |
bcd2633a | 6875 | xlate_learn_action(ctx, ofpact_get_LEARN(a)); |
75a75043 | 6876 | break; |
848e8809 | 6877 | |
f25d0cf3 | 6878 | case OFPACT_EXIT: |
848e8809 EJ |
6879 | ctx->exit = true; |
6880 | break; | |
0e553d9c | 6881 | |
f25d0cf3 | 6882 | case OFPACT_FIN_TIMEOUT: |
bcd2633a JP |
6883 | memset(&ctx->xout->wc.masks.dl_type, 0xff, |
6884 | sizeof ctx->xout->wc.masks.dl_type); | |
6885 | memset(&ctx->xout->wc.masks.nw_proto, 0xff, | |
6886 | sizeof ctx->xout->wc.masks.nw_proto); | |
bbafd73b | 6887 | ctx->xout->has_fin_timeout = true; |
f25d0cf3 | 6888 | xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a)); |
a7349929 | 6889 | break; |
8dd54666 | 6890 | |
b19e8793 | 6891 | case OFPACT_CLEAR_ACTIONS: |
5dca28b5 | 6892 | /* XXX |
b19e8793 IY |
6893 | * Nothing to do because write-actions is not supported for now. | |
6894 | * When write-actions is supported, clear-actions must also be | |
6895 | * supported at the same time. | |
6896 | */ | |
6897 | break; | |
6898 | ||
4cceacb9 JS |
6899 | case OFPACT_WRITE_METADATA: |
6900 | metadata = ofpact_get_WRITE_METADATA(a); | |
bbafd73b EJ |
6901 | ctx->xin->flow.metadata &= ~metadata->mask; |
6902 | ctx->xin->flow.metadata |= metadata->metadata & metadata->mask; | |
4cceacb9 JS |
6903 | break; |
6904 | ||
8dd54666 | 6905 | case OFPACT_GOTO_TABLE: { |
55599423 | 6906 | /* It is assumed that goto-table is the last action. */ |
8dd54666 | 6907 | struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a); |
55599423 JR |
6908 | struct rule_dpif *rule; |
6909 | ||
cb22974d | 6910 | ovs_assert(ctx->table_id < ogt->table_id); |
55599423 JR |
6911 | |
6912 | ctx->table_id = ogt->table_id; | |
6913 | ||
6914 | /* Look up a flow from the new table. */ | |
bcd2633a JP |
6915 | rule = rule_dpif_lookup__(ctx->ofproto, &ctx->xin->flow, |
6916 | &ctx->xout->wc, ctx->table_id); | |
55599423 JR |
6917 | |
6918 | tag_the_flow(ctx, rule); | |
6919 | ||
6920 | rule = ctx_rule_hooks(ctx, rule, true); | |
6921 | ||
6922 | if (rule) { | |
6923 | if (ctx->rule) { | |
6924 | ctx->rule->up.evictable = was_evictable; | |
6925 | } | |
6926 | ctx->rule = rule; | |
6927 | was_evictable = rule->up.evictable; | |
6928 | rule->up.evictable = false; | |
6929 | ||
6930 | /* Tail recursion removal. */ | |
6931 | ofpacts = rule->up.ofpacts; | |
6932 | ofpacts_len = rule->up.ofpacts_len; | |
6933 | goto do_xlate_actions_again; | |
6934 | } | |
8dd54666 IY |
6935 | break; |
6936 | } | |
29089a54 RL |
6937 | |
6938 | case OFPACT_SAMPLE: | |
6939 | xlate_sample_action(ctx, ofpact_get_SAMPLE(a)); | |
6940 | break; | |
abe529af BP |
6941 | } |
6942 | } | |
21f7563c | 6943 | |
f0fd1a17 | 6944 | out: |
254750ce BP |
6945 | if (ctx->rule) { |
6946 | ctx->rule->up.evictable = was_evictable; | |
6947 | } | |
abe529af BP |
6948 | } |
6949 | ||
6950 | static void | |
bbafd73b EJ |
6951 | xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto, |
6952 | const struct flow *flow, | |
6953 | const struct initial_vals *initial_vals, | |
6954 | struct rule_dpif *rule, uint8_t tcp_flags, | |
6955 | const struct ofpbuf *packet) | |
6956 | { | |
6957 | xin->ofproto = ofproto; | |
6958 | xin->flow = *flow; | |
6959 | xin->packet = packet; | |
6960 | xin->may_learn = packet != NULL; | |
6961 | xin->rule = rule; | |
6962 | xin->ofpacts = NULL; | |
6963 | xin->ofpacts_len = 0; | |
6964 | xin->tcp_flags = tcp_flags; | |
6965 | xin->resubmit_hook = NULL; | |
6966 | xin->report_hook = NULL; | |
6967 | xin->resubmit_stats = NULL; | |
6968 | ||
6969 | if (initial_vals) { | |
6970 | xin->initial_vals = *initial_vals; | |
6971 | } else { | |
6972 | xin->initial_vals.vlan_tci = xin->flow.vlan_tci; | |
6973 | } | |
6974 | } | |
6975 | ||
6976 | static void | |
6977 | xlate_out_uninit(struct xlate_out *xout) | |
abe529af | 6978 | { |
bbafd73b EJ |
6979 | if (xout) { |
6980 | ofpbuf_uninit(&xout->odp_actions); | |
6981 | } | |
6982 | } | |
6983 | ||
6984 | /* Translates the flow and actions in 'xin' into datapath actions in | |
6985 | * 'xout->odp_actions', filling in the rest of 'xout' as a side effect. */ | |
6986 | static void | |
6987 | xlate_actions(struct xlate_in *xin, struct xlate_out *xout) | |
6988 | { | |
6989 | /* Normally false. Set to true if we ever hit MAX_RESUBMIT_RECURSION, so | |
6990 | * that in the future we always keep a copy of the original flow for | |
6991 | * tracing purposes. */ | |
6992 | static bool hit_resubmit_limit; | |
6993 | ||
6994 | enum slow_path_reason special; | |
6995 | const struct ofpact *ofpacts; | |
6996 | struct ofport_dpif *in_port; | |
6997 | struct flow orig_flow; | |
6998 | struct xlate_ctx ctx; | |
6999 | size_t ofpacts_len; | |
7000 | ||
7001 | COVERAGE_INC(ofproto_dpif_xlate); | |
7002 | ||
ef506a7c JG |
7003 | /* Flow initialization rules: |
7004 | * - 'base_flow' must match the kernel's view of the packet at the | |
7005 | * time that action processing starts. 'flow' represents any | |
7006 | * transformations we wish to make through actions. | |
7007 | * - By default 'base_flow' and 'flow' are the same since the input | |
7008 | * packet matches the output before any actions are applied. | |
7009 | * - When using VLAN splinters, 'base_flow''s VLAN is set to the value | |
7010 | * of the received packet as seen by the kernel. If we later output | |
7011 | * to another device without any modifications this will cause us to | |
7012 | * insert a new tag since the original one was stripped off by the | |
7013 | * VLAN device. | |
4110a57a JR |
7014 | * - Tunnel metadata as received is retained in 'flow'. This allows |
7015 | * tunnel metadata matching also in later tables. | |
7016 | * Since a kernel action for setting the tunnel metadata will only be | |
7017 | * generated with actual tunnel output, changing the tunnel metadata | |
7018 | * values in 'flow' (such as tun_id) will only have effect with a later | |
7019 | * tunnel output action. | |
ef506a7c JG |
7020 | * - Tunnel 'base_flow' is completely cleared since that is what the |
7021 | * kernel does. If we wish to maintain the original values an action | |
7022 | * needs to be generated. */ | |
7023 | ||
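/* Concrete illustration of the rules above (an example added for this
 * document, not part of the original source): with VLAN splinters, a packet
 * tagged with VLAN 10 reaches the datapath untagged because the splinter
 * VLAN device strips the tag, so 'base_flow' ends up with vlan_tci == 0
 * while 'flow' carries VLAN 10.  Outputting the packet unmodified then makes
 * the action-commit step emit an action that pushes the tag back on. */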
bbafd73b EJ |
7024 | ctx.xin = xin; |
7025 | ctx.xout = xout; | |
7026 | ||
7027 | ctx.ofproto = xin->ofproto; | |
7028 | ctx.rule = xin->rule; | |
7029 | ||
7030 | ctx.base_flow = ctx.xin->flow; | |
7031 | ctx.base_flow.vlan_tci = xin->initial_vals.vlan_tci; | |
7032 | memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel); | |
7033 | ctx.orig_tunnel_ip_dst = ctx.xin->flow.tunnel.ip_dst; | |
7034 | ||
bcd2633a JP |
7035 | flow_wildcards_init_catchall(&ctx.xout->wc); |
7036 | memset(&ctx.xout->wc.masks.in_port, 0xff, | |
7037 | sizeof ctx.xout->wc.masks.in_port); | |
7038 | ||
7039 | if (tnl_port_should_receive(&ctx.xin->flow)) { | |
7040 | memset(&ctx.xout->wc.masks.tunnel, 0xff, | |
7041 | sizeof ctx.xout->wc.masks.tunnel); | |
7042 | } | |
7043 | ||
7044 | /* Disable most wildcarding for NetFlow. */ | |
7045 | if (xin->ofproto->netflow) { | |
7046 | memset(&ctx.xout->wc.masks.dl_src, 0xff, | |
7047 | sizeof ctx.xout->wc.masks.dl_src); | |
7048 | memset(&ctx.xout->wc.masks.dl_dst, 0xff, | |
7049 | sizeof ctx.xout->wc.masks.dl_dst); | |
7050 | memset(&ctx.xout->wc.masks.dl_type, 0xff, | |
7051 | sizeof ctx.xout->wc.masks.dl_type); | |
7052 | memset(&ctx.xout->wc.masks.vlan_tci, 0xff, | |
7053 | sizeof ctx.xout->wc.masks.vlan_tci); | |
7054 | memset(&ctx.xout->wc.masks.nw_proto, 0xff, | |
7055 | sizeof ctx.xout->wc.masks.nw_proto); | |
7056 | memset(&ctx.xout->wc.masks.nw_src, 0xff, | |
7057 | sizeof ctx.xout->wc.masks.nw_src); | |
7058 | memset(&ctx.xout->wc.masks.nw_dst, 0xff, | |
7059 | sizeof ctx.xout->wc.masks.nw_dst); | |
7060 | memset(&ctx.xout->wc.masks.tp_src, 0xff, | |
7061 | sizeof ctx.xout->wc.masks.tp_src); | |
7062 | memset(&ctx.xout->wc.masks.tp_dst, 0xff, | |
7063 | sizeof ctx.xout->wc.masks.tp_dst); | |
7064 | } | |
7065 | ||
bbafd73b EJ |
7066 | ctx.xout->tags = 0; |
7067 | ctx.xout->slow = 0; | |
7068 | ctx.xout->has_learn = false; | |
7069 | ctx.xout->has_normal = false; | |
7070 | ctx.xout->has_fin_timeout = false; | |
7071 | ctx.xout->nf_output_iface = NF_OUT_DROP; | |
7072 | ctx.xout->mirrors = 0; | |
7073 | ||
7074 | ofpbuf_use_stub(&ctx.xout->odp_actions, ctx.xout->odp_actions_stub, | |
7075 | sizeof ctx.xout->odp_actions_stub); | |
7076 | ofpbuf_reserve(&ctx.xout->odp_actions, NL_A_U32_SIZE); | |
7077 | ||
7078 | ctx.recurse = 0; | |
7079 | ctx.max_resubmit_trigger = false; | |
7080 | ctx.orig_skb_priority = ctx.xin->flow.skb_priority; | |
7081 | ctx.table_id = 0; | |
7082 | ctx.exit = false; | |
7083 | ||
7084 | if (xin->ofpacts) { | |
7085 | ofpacts = xin->ofpacts; | |
7086 | ofpacts_len = xin->ofpacts_len; | |
7087 | } else if (xin->rule) { | |
7088 | ofpacts = xin->rule->up.ofpacts; | |
7089 | ofpacts_len = xin->rule->up.ofpacts_len; | |
7090 | } else { | |
7091 | NOT_REACHED(); | |
0f49659a | 7092 | } |
abe529af | 7093 | |
bbafd73b | 7094 | ofpbuf_use_stub(&ctx.stack, ctx.init_stack, sizeof ctx.init_stack); |
43d50bc8 | 7095 | |
bbafd73b | 7096 | if (ctx.ofproto->has_mirrors || hit_resubmit_limit) { |
ccb7c863 | 7097 | /* Do this conditionally because the copy is expensive enough that it |
9ba85077 | 7098 | * shows up in profiles. */ |
bbafd73b | 7099 | orig_flow = ctx.xin->flow; |
ccb7c863 BP |
7100 | } |
7101 | ||
bbafd73b EJ |
7102 | if (ctx.xin->flow.nw_frag & FLOW_NW_FRAG_ANY) { |
7103 | switch (ctx.ofproto->up.frag_handling) { | |
7257b535 BP |
7104 | case OFPC_FRAG_NORMAL: |
7105 | /* We must pretend that transport ports are unavailable. */ | |
bbafd73b EJ |
7106 | ctx.xin->flow.tp_src = ctx.base_flow.tp_src = htons(0); |
7107 | ctx.xin->flow.tp_dst = ctx.base_flow.tp_dst = htons(0); | |
7257b535 BP |
7108 | break; |
7109 | ||
7110 | case OFPC_FRAG_DROP: | |
050ac423 | 7111 | return; |
7257b535 BP |
7112 | |
7113 | case OFPC_FRAG_REASM: | |
7114 | NOT_REACHED(); | |
7115 | ||
7116 | case OFPC_FRAG_NX_MATCH: | |
7117 | /* Nothing to do. */ | |
7118 | break; | |
f0fd1a17 PS |
7119 | |
7120 | case OFPC_INVALID_TTL_TO_CONTROLLER: | |
7121 | NOT_REACHED(); | |
7257b535 BP |
7122 | } |
7123 | } | |
7124 | ||
bbafd73b EJ |
7125 | in_port = get_ofp_port(ctx.ofproto, ctx.xin->flow.in_port); |
7126 | special = process_special(ctx.ofproto, &ctx.xin->flow, in_port, | |
7127 | ctx.xin->packet); | |
6a7e895f | 7128 | if (special) { |
bbafd73b | 7129 | ctx.xout->slow = special; |
abe529af | 7130 | } else { |
6a6455e5 | 7131 | static struct vlog_rate_limit trace_rl = VLOG_RATE_LIMIT_INIT(1, 1); |
14f94f9a | 7132 | struct initial_vals initial_vals; |
94aa0d19 | 7133 | size_t sample_actions_len; |
ee382d89 | 7134 | uint32_t local_odp_port; |
6a6455e5 | 7135 | |
bbafd73b | 7136 | initial_vals.vlan_tci = ctx.base_flow.vlan_tci; |
14f94f9a | 7137 | |
bbafd73b EJ |
7138 | add_sflow_action(&ctx); |
7139 | add_ipfix_action(&ctx); | |
7140 | sample_actions_len = ctx.xout->odp_actions.size; | |
ffaef958 | 7141 | |
bbafd73b EJ |
7142 | if (tunnel_ecn_ok(&ctx) && (!in_port || may_receive(in_port, &ctx))) { |
7143 | do_xlate_actions(ofpacts, ofpacts_len, &ctx); | |
ffaef958 BP |
7144 | |
7145 | /* We've let OFPP_NORMAL and the learning action look at the | |
7146 | * packet, so drop it now if forwarding is disabled. */ | |
7147 | if (in_port && !stp_forward_in_state(in_port->stp_state)) { | |
bbafd73b | 7148 | ctx.xout->odp_actions.size = sample_actions_len; |
ffaef958 BP |
7149 | } |
7150 | } | |
abe529af | 7151 | |
bbafd73b | 7152 | if (ctx.max_resubmit_trigger && !ctx.xin->resubmit_hook) { |
43d50bc8 BP |
7153 | if (!hit_resubmit_limit) { |
7154 | /* We didn't record the original flow. Make sure we do from | |
7155 | * now on. */ | |
7156 | hit_resubmit_limit = true; | |
7157 | } else if (!VLOG_DROP_ERR(&trace_rl)) { | |
7158 | struct ds ds = DS_EMPTY_INITIALIZER; | |
7159 | ||
bbafd73b | 7160 | ofproto_trace(ctx.ofproto, &orig_flow, ctx.xin->packet, |
14f94f9a | 7161 | &initial_vals, &ds); |
43d50bc8 BP |
7162 | VLOG_ERR("Trace triggered by excessive resubmit " |
7163 | "recursion:\n%s", ds_cstr(&ds)); | |
7164 | ds_destroy(&ds); | |
7165 | } | |
6a6455e5 EJ |
7166 | } |
7167 | ||
bbafd73b EJ |
7168 | local_odp_port = ofp_port_to_odp_port(ctx.ofproto, OFPP_LOCAL); |
7169 | if (!connmgr_must_output_local(ctx.ofproto->up.connmgr, &ctx.xin->flow, | |
454a77e5 | 7170 | local_odp_port, |
bbafd73b EJ |
7171 | ctx.xout->odp_actions.data, |
7172 | ctx.xout->odp_actions.size)) { | |
7173 | compose_output_action(&ctx, OFPP_LOCAL); | |
b6848f13 | 7174 | } |
bbafd73b EJ |
7175 | if (ctx.ofproto->has_mirrors) { |
7176 | add_mirror_actions(&ctx, &orig_flow); | |
ccb7c863 | 7177 | } |
bbafd73b | 7178 | fix_sflow_action(&ctx); |
abe529af | 7179 | } |
bd85dac1 | 7180 | |
bbafd73b | 7181 | ofpbuf_uninit(&ctx.stack); |
bcd2633a JP |
7182 | |
7183 | /* Clear the metadata and register wildcard masks, because we won't | |
7184 | * use non-header fields as part of the cache. */ | |
7185 | memset(&ctx.xout->wc.masks.metadata, 0, | |
7186 | sizeof ctx.xout->wc.masks.metadata); | |
7187 | memset(&ctx.xout->wc.masks.regs, 0, sizeof ctx.xout->wc.masks.regs); | |
050ac423 BP |
7188 | } |
7189 | ||
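/* Minimal usage sketch (an illustration added for this document, not part of
 * the original source): the typical calling sequence for the translation API
 * above, mirroring what packet_out() does later in this file.  Statistics,
 * error handling, and execution of the resulting actions are omitted. */
static void
example_translate(struct ofproto_dpif *ofproto, const struct flow *flow,
                  struct rule_dpif *rule)
{
    struct initial_vals initial_vals;
    struct xlate_out xout;
    struct xlate_in xin;

    initial_vals.vlan_tci = flow->vlan_tci;
    xlate_in_init(&xin, ofproto, flow, &initial_vals, rule, 0, NULL);
    xlate_actions(&xin, &xout);

    /* xout.odp_actions now holds the datapath actions and xout.wc the
     * wildcards that the translation depended on. */

    xlate_out_uninit(&xout);
}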
f25d0cf3 BP |
7190 | /* Translates the flow and actions in 'xin' into datapath actions, as | |
7191 | * xlate_actions() does, and then discards the datapath actions. */ | |
050ac423 | 7192 | static void |
bbafd73b | 7193 | xlate_actions_for_side_effects(struct xlate_in *xin) |
050ac423 | 7194 | { |
bbafd73b | 7195 | struct xlate_out xout; |
abe529af | 7196 | |
bbafd73b EJ |
7197 | xlate_actions(xin, &xout); |
7198 | xlate_out_uninit(&xout); | |
abe529af | 7199 | } |
479df176 BP |
7200 | |
7201 | static void | |
f03a84b9 | 7202 | xlate_report(struct xlate_ctx *ctx, const char *s) |
479df176 | 7203 | { |
bbafd73b EJ |
7204 | if (ctx->xin->report_hook) { |
7205 | ctx->xin->report_hook(ctx, s); | |
479df176 BP |
7206 | } |
7207 | } | |
bcd2633a JP |
7208 | |
7209 | static void | |
7210 | xlate_out_copy(struct xlate_out *dst, const struct xlate_out *src) | |
7211 | { | |
7212 | dst->wc = src->wc; | |
7213 | dst->tags = src->tags; | |
7214 | dst->slow = src->slow; | |
7215 | dst->has_learn = src->has_learn; | |
7216 | dst->has_normal = src->has_normal; | |
7217 | dst->has_fin_timeout = src->has_fin_timeout; | |
7218 | dst->nf_output_iface = src->nf_output_iface; | |
7219 | dst->mirrors = src->mirrors; | |
7220 | ||
7221 | ofpbuf_use_stub(&dst->odp_actions, dst->odp_actions_stub, | |
7222 | sizeof dst->odp_actions_stub); | |
7223 | ofpbuf_put(&dst->odp_actions, src->odp_actions.data, | |
7224 | src->odp_actions.size); | |
7225 | } | |
abe529af BP |
7226 | \f |
7227 | /* OFPP_NORMAL implementation. */ | |
7228 | ||
abe529af BP |
7229 | static struct ofport_dpif *ofbundle_get_a_port(const struct ofbundle *); |
7230 | ||
ecac4ebf BP |
7231 | /* Given 'vid', the VID obtained from the 802.1Q header that was received as |
7232 | * part of a packet (specify 0 if there was no 802.1Q header), and 'in_bundle', | |
7233 | * the bundle on which the packet was received, returns the VLAN to which the | |
7234 | * packet belongs. | |
7235 | * | |
7236 | * Both 'vid' and the return value are in the range 0...4095. */ | |
7237 | static uint16_t | |
7238 | input_vid_to_vlan(const struct ofbundle *in_bundle, uint16_t vid) | |
7239 | { | |
7240 | switch (in_bundle->vlan_mode) { | |
7241 | case PORT_VLAN_ACCESS: | |
7242 | return in_bundle->vlan; | |
7243 | break; | |
7244 | ||
7245 | case PORT_VLAN_TRUNK: | |
7246 | return vid; | |
7247 | ||
7248 | case PORT_VLAN_NATIVE_UNTAGGED: | |
7249 | case PORT_VLAN_NATIVE_TAGGED: | |
7250 | return vid ? vid : in_bundle->vlan; | |
7251 | ||
7252 | default: | |
7253 | NOT_REACHED(); | |
7254 | } | |
7255 | } | |
7256 | ||
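/* Worked example (added for this document, not part of the original source):
 * consider a packet that arrives tagged with VID 20.
 *
 *   - PORT_VLAN_ACCESS with vlan=30: input_vid_is_valid() rejects tagged
 *     packets on access ports, so input_vid_to_vlan() is never reached.
 *   - PORT_VLAN_TRUNK trunking VLAN 20: the packet belongs to VLAN 20.
 *   - PORT_VLAN_NATIVE_UNTAGGED or _TAGGED with vlan=7: a tagged packet keeps
 *     VLAN 20; an untagged packet (vid == 0) falls back to native VLAN 7. */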
5da5ec37 BP |
7257 | /* Checks whether a packet with the given 'vid' may ingress on 'in_bundle'. |
7258 | * If so, returns true. Otherwise, returns false and, if 'warn' is true, logs | |
7259 | * a warning. | |
7260 | * | |
7261 | * 'vid' should be the VID obtained from the 802.1Q header that was received as | |
7262 | * part of a packet (specify 0 if there was no 802.1Q header), in the range | |
7263 | * 0...4095. */ | |
7264 | static bool | |
7265 | input_vid_is_valid(uint16_t vid, struct ofbundle *in_bundle, bool warn) | |
7266 | { | |
33158a18 JP |
7267 | /* Allow any VID on the OFPP_NONE port. */ |
7268 | if (in_bundle == &ofpp_none_bundle) { | |
7269 | return true; | |
7270 | } | |
7271 | ||
5da5ec37 BP |
7272 | switch (in_bundle->vlan_mode) { |
7273 | case PORT_VLAN_ACCESS: | |
7274 | if (vid) { | |
7275 | if (warn) { | |
7276 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); | |
7277 | VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" tagged " | |
7278 | "packet received on port %s configured as VLAN " | |
7279 | "%"PRIu16" access port", | |
7280 | in_bundle->ofproto->up.name, vid, | |
7281 | in_bundle->name, in_bundle->vlan); | |
7282 | } | |
7283 | return false; | |
7284 | } | |
7285 | return true; | |
7286 | ||
7287 | case PORT_VLAN_NATIVE_UNTAGGED: | |
7288 | case PORT_VLAN_NATIVE_TAGGED: | |
7289 | if (!vid) { | |
7290 | /* Port must always carry its native VLAN. */ | |
7291 | return true; | |
7292 | } | |
7293 | /* Fall through. */ | |
7294 | case PORT_VLAN_TRUNK: | |
7295 | if (!ofbundle_includes_vlan(in_bundle, vid)) { | |
7296 | if (warn) { | |
7297 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); | |
7298 | VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" packet " | |
7299 | "received on port %s not configured for trunking " | |
7300 | "VLAN %"PRIu16, | |
7301 | in_bundle->ofproto->up.name, vid, | |
7302 | in_bundle->name, vid); | |
7303 | } | |
7304 | return false; | |
7305 | } | |
7306 | return true; | |
7307 | ||
7308 | default: | |
7309 | NOT_REACHED(); | |
7310 | } | |
7311 | ||
7312 | } | |
7313 | ||
ecac4ebf BP |
7314 | /* Given 'vlan', the VLAN that a packet belongs to, and |
7315 | * 'out_bundle', a bundle on which the packet is to be output, returns the VID | |
7316 | * that should be included in the 802.1Q header. (If the return value is 0, | |
7317 | * then the 802.1Q header should only be included in the packet if there is a | |
7318 | * nonzero PCP.) | |
7319 | * | |
7320 | * Both 'vlan' and the return value are in the range 0...4095. */ | |
7321 | static uint16_t | |
7322 | output_vlan_to_vid(const struct ofbundle *out_bundle, uint16_t vlan) | |
7323 | { | |
7324 | switch (out_bundle->vlan_mode) { | |
7325 | case PORT_VLAN_ACCESS: | |
7326 | return 0; | |
7327 | ||
7328 | case PORT_VLAN_TRUNK: | |
7329 | case PORT_VLAN_NATIVE_TAGGED: | |
7330 | return vlan; | |
7331 | ||
7332 | case PORT_VLAN_NATIVE_UNTAGGED: | |
7333 | return vlan == out_bundle->vlan ? 0 : vlan; | |
7334 | ||
7335 | default: | |
7336 | NOT_REACHED(); | |
7337 | } | |
7338 | } | |
7339 | ||
395e68ce | 7340 | static void |
f03a84b9 | 7341 | output_normal(struct xlate_ctx *ctx, const struct ofbundle *out_bundle, |
395e68ce | 7342 | uint16_t vlan) |
abe529af | 7343 | { |
395e68ce BP |
7344 | struct ofport_dpif *port; |
7345 | uint16_t vid; | |
81b1afb1 | 7346 | ovs_be16 tci, old_tci; |
ecac4ebf | 7347 | |
395e68ce BP |
7348 | vid = output_vlan_to_vid(out_bundle, vlan); |
7349 | if (!out_bundle->bond) { | |
7350 | port = ofbundle_get_a_port(out_bundle); | |
7351 | } else { | |
bbafd73b | 7352 | port = bond_choose_output_slave(out_bundle->bond, &ctx->xin->flow, |
bcd2633a | 7353 | &ctx->xout->wc, vid, &ctx->xout->tags); |
395e68ce BP |
7354 | if (!port) { |
7355 | /* No slaves enabled, so drop packet. */ | |
7356 | return; | |
7357 | } | |
7358 | } | |
abe529af | 7359 | |
bbafd73b | 7360 | old_tci = ctx->xin->flow.vlan_tci; |
5e9ceccd BP |
7361 | tci = htons(vid); |
7362 | if (tci || out_bundle->use_priority_tags) { | |
bbafd73b | 7363 | tci |= ctx->xin->flow.vlan_tci & htons(VLAN_PCP_MASK); |
5e9ceccd BP |
7364 | if (tci) { |
7365 | tci |= htons(VLAN_CFI); | |
7366 | } | |
395e68ce | 7367 | } |
bbafd73b | 7368 | ctx->xin->flow.vlan_tci = tci; |
395e68ce | 7369 | |
5e48dc2b | 7370 | compose_output_action(ctx, port->up.ofp_port); |
bbafd73b | 7371 | ctx->xin->flow.vlan_tci = old_tci; |
abe529af BP |
7372 | } |
7373 | ||
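/* Worked example for output_normal() above (added for this document, not part
 * of the original source): sending a packet that belongs to VLAN 10 out of a
 * PORT_VLAN_NATIVE_UNTAGGED bundle whose native VLAN is 7, with
 * use_priority_tags set and an incoming PCP of 5:
 *
 *   output_vlan_to_vid()                  -> vid = 10  (not the native VLAN)
 *   tci  = htons(10)                         VID bits            0x000a
 *   tci |= vlan_tci & htons(VLAN_PCP_MASK)   keeps PCP 5         0xa000
 *   tci |= htons(VLAN_CFI)                   tag is present      0x1000
 *
 * so the flow's vlan_tci is htons(0xb00a) while the output action is composed,
 * and the original vlan_tci is restored afterwards. */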
7374 | static int | |
7375 | mirror_mask_ffs(mirror_mask_t mask) | |
7376 | { | |
7377 | BUILD_ASSERT_DECL(sizeof(unsigned int) >= sizeof(mask)); | |
7378 | return ffs(mask); | |
7379 | } | |
7380 | ||
abe529af BP |
7381 | static bool |
7382 | ofbundle_trunks_vlan(const struct ofbundle *bundle, uint16_t vlan) | |
7383 | { | |
ecac4ebf | 7384 | return (bundle->vlan_mode != PORT_VLAN_ACCESS |
fc3d7408 | 7385 | && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan))); |
abe529af BP |
7386 | } |
7387 | ||
7388 | static bool | |
7389 | ofbundle_includes_vlan(const struct ofbundle *bundle, uint16_t vlan) | |
7390 | { | |
7391 | return vlan == bundle->vlan || ofbundle_trunks_vlan(bundle, vlan); | |
7392 | } | |
7393 | ||
7394 | /* Returns an arbitrary interface within 'bundle'. */ | |
7395 | static struct ofport_dpif * | |
7396 | ofbundle_get_a_port(const struct ofbundle *bundle) | |
7397 | { | |
7398 | return CONTAINER_OF(list_front(&bundle->ports), | |
7399 | struct ofport_dpif, bundle_node); | |
7400 | } | |
7401 | ||
abe529af BP |
7402 | static bool |
7403 | vlan_is_mirrored(const struct ofmirror *m, int vlan) | |
7404 | { | |
fc3d7408 | 7405 | return !m->vlans || bitmap_is_set(m->vlans, vlan); |
abe529af BP |
7406 | } |
7407 | ||
7408 | static void | |
f03a84b9 | 7409 | add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow) |
abe529af BP |
7410 | { |
7411 | struct ofproto_dpif *ofproto = ctx->ofproto; | |
7412 | mirror_mask_t mirrors; | |
c06bba01 JP |
7413 | struct ofbundle *in_bundle; |
7414 | uint16_t vlan; | |
7415 | uint16_t vid; | |
7416 | const struct nlattr *a; | |
7417 | size_t left; | |
7418 | ||
3581c12c | 7419 | in_bundle = lookup_input_bundle(ctx->ofproto, orig_flow->in_port, |
bbafd73b | 7420 | ctx->xin->packet != NULL, NULL); |
3581c12c | 7421 | if (!in_bundle) { |
c06bba01 JP |
7422 | return; |
7423 | } | |
c06bba01 JP |
7424 | mirrors = in_bundle->src_mirrors; |
7425 | ||
7426 | /* Drop frames on bundles reserved for mirroring. */ | |
7427 | if (in_bundle->mirror_out) { | |
bbafd73b | 7428 | if (ctx->xin->packet != NULL) { |
c06bba01 JP |
7429 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); |
7430 | VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port " | |
7431 | "%s, which is reserved exclusively for mirroring", | |
7432 | ctx->ofproto->up.name, in_bundle->name); | |
7433 | } | |
7434 | return; | |
7435 | } | |
7436 | ||
7437 | /* Check VLAN. */ | |
7438 | vid = vlan_tci_to_vid(orig_flow->vlan_tci); | |
bbafd73b | 7439 | if (!input_vid_is_valid(vid, in_bundle, ctx->xin->packet != NULL)) { |
c06bba01 JP |
7440 | return; |
7441 | } | |
7442 | vlan = input_vid_to_vlan(in_bundle, vid); | |
7443 | ||
7444 | /* Look at the output ports to check for destination selections. */ | |
7445 | ||
bbafd73b EJ |
7446 | NL_ATTR_FOR_EACH (a, left, ctx->xout->odp_actions.data, |
7447 | ctx->xout->odp_actions.size) { | |
c06bba01 JP |
7448 | enum ovs_action_attr type = nl_attr_type(a); |
7449 | struct ofport_dpif *ofport; | |
7450 | ||
7451 | if (type != OVS_ACTION_ATTR_OUTPUT) { | |
7452 | continue; | |
7453 | } | |
7454 | ||
7455 | ofport = get_odp_port(ofproto, nl_attr_get_u32(a)); | |
521472bc BP |
7456 | if (ofport && ofport->bundle) { |
7457 | mirrors |= ofport->bundle->dst_mirrors; | |
7458 | } | |
c06bba01 | 7459 | } |
abe529af BP |
7460 | |
7461 | if (!mirrors) { | |
7462 | return; | |
7463 | } | |
7464 | ||
c06bba01 | 7465 | /* Restore the original packet before adding the mirror actions. */ |
bbafd73b | 7466 | ctx->xin->flow = *orig_flow; |
c06bba01 | 7467 | |
9ba15e2a BP |
7468 | while (mirrors) { |
7469 | struct ofmirror *m; | |
9ba15e2a BP |
7470 | |
7471 | m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1]; | |
7472 | ||
bcd2633a JP |
7473 | if (m->vlans) { |
7474 | ctx->xout->wc.masks.vlan_tci |= htons(VLAN_CFI | VLAN_VID_MASK); | |
7475 | } | |
7476 | ||
9ba15e2a | 7477 | if (!vlan_is_mirrored(m, vlan)) { |
8472a3ce | 7478 | mirrors = zero_rightmost_1bit(mirrors); |
9ba15e2a BP |
7479 | continue; |
7480 | } | |
7481 | ||
7482 | mirrors &= ~m->dup_mirrors; | |
bbafd73b | 7483 | ctx->xout->mirrors |= m->dup_mirrors; |
9ba15e2a | 7484 | if (m->out) { |
395e68ce | 7485 | output_normal(ctx, m->out, vlan); |
614ec445 EJ |
7486 | } else if (vlan != m->out_vlan |
7487 | && !eth_addr_is_reserved(orig_flow->dl_dst)) { | |
9ba15e2a BP |
7488 | struct ofbundle *bundle; |
7489 | ||
7490 | HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) { | |
7491 | if (ofbundle_includes_vlan(bundle, m->out_vlan) | |
395e68ce BP |
7492 | && !bundle->mirror_out) { |
7493 | output_normal(ctx, bundle, m->out_vlan); | |
abe529af BP |
7494 | } |
7495 | } | |
7496 | } | |
abe529af BP |
7497 | } |
7498 | } | |
7499 | ||
9d24de3b JP |
7500 | static void |
7501 | update_mirror_stats(struct ofproto_dpif *ofproto, mirror_mask_t mirrors, | |
7502 | uint64_t packets, uint64_t bytes) | |
7503 | { | |
7504 | if (!mirrors) { | |
7505 | return; | |
7506 | } | |
7507 | ||
8472a3ce | 7508 | for (; mirrors; mirrors = zero_rightmost_1bit(mirrors)) { |
9d24de3b JP |
7509 | struct ofmirror *m; |
7510 | ||
7511 | m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1]; | |
7512 | ||
7513 | if (!m) { | |
7514 | /* In normal circumstances 'm' will not be NULL. However, | |
7515 | * if mirrors are reconfigured, we can temporarily get out | |
7516 | * of sync in facet_revalidate(). We could "correct" the | |
7517 | * mirror list before reaching here, but doing that would | |
7518 | * not properly account the traffic stats we've currently | |
7519 | * accumulated for previous mirror configuration. */ | |
7520 | continue; | |
7521 | } | |
7522 | ||
7523 | m->packet_count += packets; | |
7524 | m->byte_count += bytes; | |
7525 | } | |
7526 | } | |
7527 | ||
abe529af BP |
7528 | /* A VM broadcasts a gratuitous ARP to indicate that it has resumed after |
7529 | * migration. Older Citrix-patched Linux DomU used gratuitous ARP replies to | |
7530 | * indicate this; newer upstream kernels use gratuitous ARP requests. */ | |
7531 | static bool | |
bcd2633a | 7532 | is_gratuitous_arp(const struct flow *flow, struct flow_wildcards *wc) |
abe529af | 7533 | { |
bcd2633a JP |
7534 | memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type); |
7535 | if (flow->dl_type != htons(ETH_TYPE_ARP)) { | |
7536 | return false; | |
7537 | } | |
7538 | ||
7539 | memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst); | |
7540 | if (!eth_addr_is_broadcast(flow->dl_dst)) { | |
7541 | return false; | |
7542 | } | |
7543 | ||
7544 | memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto); | |
7545 | if (flow->nw_proto == ARP_OP_REPLY) { | |
7546 | return true; | |
7547 | } else if (flow->nw_proto == ARP_OP_REQUEST) { | |
7548 | memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src); | |
7549 | memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst); | |
7550 | ||
7551 | return flow->nw_src == flow->nw_dst; | |
7552 | } else { | |
7553 | return false; | |
7554 | } | |
abe529af BP |
7555 | } |
7556 | ||
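/* Illustration (added for this document, not part of the original source): a
 * flow that is_gratuitous_arp() accepts.  The helper below is hypothetical and
 * only shows which fields matter; eth_addr_broadcast, ETH_TYPE_ARP and
 * ARP_OP_REQUEST are assumed to be the usual constants from packets.h. */
static bool
example_grat_arp(struct flow_wildcards *wc)
{
    struct flow flow;

    memset(&flow, 0, sizeof flow);
    flow.dl_type = htons(ETH_TYPE_ARP);
    memcpy(flow.dl_dst, eth_addr_broadcast, ETH_ADDR_LEN);
    flow.nw_proto = ARP_OP_REQUEST;         /* Newer kernels send requests. */
    flow.nw_src = flow.nw_dst = htonl(0xc0a80001);  /* Sender IP == target IP. */

    return is_gratuitous_arp(&flow, wc);    /* True: a gratuitous ARP. */
}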
7557 | static void | |
7558 | update_learning_table(struct ofproto_dpif *ofproto, | |
bcd2633a JP |
7559 | const struct flow *flow, struct flow_wildcards *wc, |
7560 | int vlan, struct ofbundle *in_bundle) | |
abe529af BP |
7561 | { |
7562 | struct mac_entry *mac; | |
7563 | ||
33158a18 JP |
7564 | /* Don't learn the OFPP_NONE port. */ |
7565 | if (in_bundle == &ofpp_none_bundle) { | |
7566 | return; | |
7567 | } | |
7568 | ||
abe529af BP |
7569 | if (!mac_learning_may_learn(ofproto->ml, flow->dl_src, vlan)) { |
7570 | return; | |
7571 | } | |
7572 | ||
7573 | mac = mac_learning_insert(ofproto->ml, flow->dl_src, vlan); | |
bcd2633a | 7574 | if (is_gratuitous_arp(flow, wc)) { |
abe529af BP |
7575 | /* We don't want to learn from gratuitous ARP packets that are |
7576 | * reflected back over bond slaves so we lock the learning table. */ | |
7577 | if (!in_bundle->bond) { | |
7578 | mac_entry_set_grat_arp_lock(mac); | |
7579 | } else if (mac_entry_is_grat_arp_locked(mac)) { | |
7580 | return; | |
7581 | } | |
7582 | } | |
7583 | ||
7584 | if (mac_entry_is_new(mac) || mac->port.p != in_bundle) { | |
7585 | /* The log messages here could actually be useful in debugging, | |
7586 | * so keep the rate limit relatively high. */ | |
7587 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300); | |
7588 | VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is " | |
7589 | "on port %s in VLAN %d", | |
7590 | ofproto->up.name, ETH_ADDR_ARGS(flow->dl_src), | |
7591 | in_bundle->name, vlan); | |
7592 | ||
7593 | mac->port.p = in_bundle; | |
2cc3c58e | 7594 | tag_set_add(&ofproto->backer->revalidate_set, |
abe529af BP |
7595 | mac_learning_changed(ofproto->ml, mac)); |
7596 | } | |
7597 | } | |
7598 | ||
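/* Scenario illustration (added for this document, not part of the original
 * source): with an SLB bond, a gratuitous ARP learned on a non-bond bundle
 * sets the grat-ARP lock on the MAC entry.  When the same ARP is reflected
 * back to us over a bond slave, the entry is still locked, so the early
 * return above keeps the MAC from being re-learned onto the bond and
 * flapping between ports. */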
3581c12c | 7599 | static struct ofbundle * |
4acbc98d SH |
7600 | lookup_input_bundle(const struct ofproto_dpif *ofproto, uint16_t in_port, |
7601 | bool warn, struct ofport_dpif **in_ofportp) | |
395e68ce BP |
7602 | { |
7603 | struct ofport_dpif *ofport; | |
7604 | ||
7605 | /* Find the port and bundle for the received packet. */ | |
7606 | ofport = get_ofp_port(ofproto, in_port); | |
70c2fd56 BP |
7607 | if (in_ofportp) { |
7608 | *in_ofportp = ofport; | |
7609 | } | |
395e68ce | 7610 | if (ofport && ofport->bundle) { |
3581c12c | 7611 | return ofport->bundle; |
395e68ce BP |
7612 | } |
7613 | ||
70c2fd56 BP |
7614 | /* Special-case OFPP_NONE, which a controller may use as the ingress |
7615 | * port for traffic that it is sourcing. */ | |
7616 | if (in_port == OFPP_NONE) { | |
7617 | return &ofpp_none_bundle; | |
7618 | } | |
7619 | ||
395e68ce BP |
7620 | /* Odd. A few possible reasons here: |
7621 | * | |
7622 | * - We deleted a port but there are still a few packets queued up | |
7623 | * from it. | |
7624 | * | |
7625 | * - Someone externally added a port (e.g. "ovs-dpctl add-if") that | |
7626 | * we don't know about. | |
7627 | * | |
7628 | * - The ofproto client didn't configure the port as part of a bundle. | |
6b803ddc EJ |
7629 | * This is particularly likely to happen if a packet was received on the |
7630 | * port after it was created, but before the client had a chance to | |
7631 | * configure its bundle. | |
395e68ce BP |
7632 | */ |
7633 | if (warn) { | |
7634 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); | |
7635 | ||
7636 | VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown " | |
7637 | "port %"PRIu16, ofproto->up.name, in_port); | |
7638 | } | |
7639 | return NULL; | |
7640 | } | |
7641 | ||
5da5ec37 | 7642 | /* Determines whether packets in the flow in 'ctx' should be forwarded or | |
abe529af BP |
7643 | * dropped. Returns true if they may be forwarded, false if they should be |
7644 | * dropped. | |
7645 | * | |
395e68ce BP |
7646 | * 'in_port' must be the ofport_dpif that corresponds to flow->in_port. |
7647 | * 'in_port' must be part of a bundle (e.g. in_port->bundle must be nonnull). | |
abe529af | 7648 | * |
395e68ce BP |
7649 | * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as |
7650 | * returned by input_vid_to_vlan(). It must be a valid VLAN for 'in_port', as | |
7651 | * checked by input_vid_is_valid(). | |
abe529af BP |
7652 | * |
7653 | * May also add tags to 'ctx->xout->tags', although the current | |
7654 | * implementation only does so in one special case. | |
7655 | */ | |
7656 | static bool | |
f03a84b9 | 7657 | is_admissible(struct xlate_ctx *ctx, struct ofport_dpif *in_port, |
479df176 | 7658 | uint16_t vlan) |
abe529af | 7659 | { |
479df176 | 7660 | struct ofproto_dpif *ofproto = ctx->ofproto; |
bbafd73b | 7661 | struct flow *flow = &ctx->xin->flow; |
395e68ce | 7662 | struct ofbundle *in_bundle = in_port->bundle; |
abe529af | 7663 | |
395e68ce BP |
7664 | /* Drop frames for reserved multicast addresses |
7665 | * only if forward_bpdu option is absent. */ | |
614ec445 | 7666 | if (!ofproto->up.forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) { |
479df176 | 7667 | xlate_report(ctx, "packet has reserved destination MAC, dropping"); |
abe529af BP |
7668 | return false; |
7669 | } | |
7670 | ||
abe529af BP |
7671 | if (in_bundle->bond) { |
7672 | struct mac_entry *mac; | |
7673 | ||
7674 | switch (bond_check_admissibility(in_bundle->bond, in_port, | |
bbafd73b | 7675 | flow->dl_dst, &ctx->xout->tags)) { |
abe529af BP |
7676 | case BV_ACCEPT: |
7677 | break; | |
7678 | ||
7679 | case BV_DROP: | |
479df176 | 7680 | xlate_report(ctx, "bonding refused admissibility, dropping"); |
abe529af BP |
7681 | return false; |
7682 | ||
7683 | case BV_DROP_IF_MOVED: | |
7684 | mac = mac_learning_lookup(ofproto->ml, flow->dl_src, vlan, NULL); | |
7685 | if (mac && mac->port.p != in_bundle && | |
bcd2633a | 7686 | (!is_gratuitous_arp(flow, &ctx->xout->wc) |
abe529af | 7687 | || mac_entry_is_grat_arp_locked(mac))) { |
479df176 BP |
7688 | xlate_report(ctx, "SLB bond thinks this packet looped back, " |
7689 | "dropping"); | |
abe529af BP |
7690 | return false; |
7691 | } | |
7692 | break; | |
7693 | } | |
7694 | } | |
7695 | ||
7696 | return true; | |
7697 | } | |
7698 | ||
4cd78906 | 7699 | static void |
f03a84b9 | 7700 | xlate_normal(struct xlate_ctx *ctx) |
abe529af | 7701 | { |
395e68ce | 7702 | struct ofport_dpif *in_port; |
abe529af | 7703 | struct ofbundle *in_bundle; |
abe529af | 7704 | struct mac_entry *mac; |
395e68ce BP |
7705 | uint16_t vlan; |
7706 | uint16_t vid; | |
abe529af | 7707 | |
bbafd73b | 7708 | ctx->xout->has_normal = true; |
75a75043 | 7709 | |
bcd2633a JP |
7710 | memset(&ctx->xout->wc.masks.dl_src, 0xff, |
7711 | sizeof ctx->xout->wc.masks.dl_src); | |
7712 | memset(&ctx->xout->wc.masks.dl_dst, 0xff, | |
7713 | sizeof ctx->xout->wc.masks.dl_dst); | |
7714 | memset(&ctx->xout->wc.masks.vlan_tci, 0xff, | |
7715 | sizeof ctx->xout->wc.masks.vlan_tci); | |
7716 | ||
bbafd73b EJ |
7717 | in_bundle = lookup_input_bundle(ctx->ofproto, ctx->xin->flow.in_port, |
7718 | ctx->xin->packet != NULL, &in_port); | |
3581c12c | 7719 | if (!in_bundle) { |
479df176 | 7720 | xlate_report(ctx, "no input bundle, dropping"); |
395e68ce BP |
7721 | return; |
7722 | } | |
3581c12c | 7723 | |
395e68ce | 7724 | /* Drop malformed frames. */ |
bbafd73b EJ |
7725 | if (ctx->xin->flow.dl_type == htons(ETH_TYPE_VLAN) && |
7726 | !(ctx->xin->flow.vlan_tci & htons(VLAN_CFI))) { | |
7727 | if (ctx->xin->packet != NULL) { | |
395e68ce BP |
7728 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); |
7729 | VLOG_WARN_RL(&rl, "bridge %s: dropping packet with partial " | |
7730 | "VLAN tag received on port %s", | |
7731 | ctx->ofproto->up.name, in_bundle->name); | |
7732 | } | |
479df176 | 7733 | xlate_report(ctx, "partial VLAN tag, dropping"); |
395e68ce BP |
7734 | return; |
7735 | } | |
7736 | ||
7737 | /* Drop frames on bundles reserved for mirroring. */ | |
7738 | if (in_bundle->mirror_out) { | |
bbafd73b | 7739 | if (ctx->xin->packet != NULL) { |
395e68ce BP |
7740 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); |
7741 | VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port " | |
7742 | "%s, which is reserved exclusively for mirroring", | |
7743 | ctx->ofproto->up.name, in_bundle->name); | |
7744 | } | |
479df176 | 7745 | xlate_report(ctx, "input port is mirror output port, dropping"); |
395e68ce BP |
7746 | return; |
7747 | } | |
7748 | ||
7749 | /* Check VLAN. */ | |
bbafd73b EJ |
7750 | vid = vlan_tci_to_vid(ctx->xin->flow.vlan_tci); |
7751 | if (!input_vid_is_valid(vid, in_bundle, ctx->xin->packet != NULL)) { | |
479df176 | 7752 | xlate_report(ctx, "disallowed VLAN VID for this input port, dropping"); |
395e68ce BP |
7753 | return; |
7754 | } | |
7755 | vlan = input_vid_to_vlan(in_bundle, vid); | |
7756 | ||
7757 | /* Check other admissibility requirements. */ | |
479df176 | 7758 | if (in_port && !is_admissible(ctx, in_port, vlan)) { |
395e68ce | 7759 | return; |
abe529af BP |
7760 | } |
7761 | ||
75a75043 | 7762 | /* Learn source MAC. */ |
bbafd73b | 7763 | if (ctx->xin->may_learn) { |
bcd2633a JP |
7764 | update_learning_table(ctx->ofproto, &ctx->xin->flow, &ctx->xout->wc, |
7765 | vlan, in_bundle); | |
abe529af BP |
7766 | } |
7767 | ||
7768 | /* Determine output bundle. */ | |
bbafd73b EJ |
7769 | mac = mac_learning_lookup(ctx->ofproto->ml, ctx->xin->flow.dl_dst, vlan, |
7770 | &ctx->xout->tags); | |
abe529af | 7771 | if (mac) { |
c06bba01 | 7772 | if (mac->port.p != in_bundle) { |
479df176 | 7773 | xlate_report(ctx, "forwarding to learned port"); |
c06bba01 | 7774 | output_normal(ctx, mac->port.p, vlan); |
479df176 BP |
7775 | } else { |
7776 | xlate_report(ctx, "learned port is input port, dropping"); | |
c06bba01 | 7777 | } |
abe529af | 7778 | } else { |
c06bba01 | 7779 | struct ofbundle *bundle; |
abe529af | 7780 | |
479df176 | 7781 | xlate_report(ctx, "no learned MAC for destination, flooding"); |
c06bba01 JP |
7782 | HMAP_FOR_EACH (bundle, hmap_node, &ctx->ofproto->bundles) { |
7783 | if (bundle != in_bundle | |
7784 | && ofbundle_includes_vlan(bundle, vlan) | |
7785 | && bundle->floodable | |
7786 | && !bundle->mirror_out) { | |
7787 | output_normal(ctx, bundle, vlan); | |
7788 | } | |
7789 | } | |
bbafd73b | 7790 | ctx->xout->nf_output_iface = NF_OUT_FLOOD; |
abe529af | 7791 | } |
abe529af BP |
7792 | } |
7793 | \f | |
54a9cbc9 BP |
7794 | /* Optimized flow revalidation. |
7795 | * | |
7796 | * It's a difficult problem, in general, to tell which facets need to have | |
7797 | * their actions recalculated whenever the OpenFlow flow table changes. We | |
7798 | * don't try to solve that general problem: for most kinds of OpenFlow flow | |
7799 | * table changes, we recalculate the actions for every facet. This is | |
7800 | * relatively expensive, but it's good enough if the OpenFlow flow table | |
7801 | * doesn't change very often. | |
7802 | * | |
7803 | * However, we can expect one particular kind of OpenFlow flow table change to | |
7804 | * happen frequently: changes caused by MAC learning. To avoid wasting a lot | |
7805 | * of CPU on revalidating every facet whenever MAC learning modifies the flow | |
7806 | * table, we add a special case that applies to flow tables in which every rule | |
7807 | * has the same form (that is, the same wildcards), except that the table is | |
7808 | * also allowed to have a single "catch-all" flow that matches all packets. We | |
7809 | * optimize this case by tagging all of the facets that resubmit into the table | |
7810 | * and invalidating the same tag whenever a flow changes in that table. The | |
7811 | * end result is that we revalidate just the facets that need it (and sometimes | |
7812 | * a few more, but not all of the facets or even all of the facets that | |
7813 | * resubmit to the table modified by MAC learning). */ | |
7814 | ||
5cb7a798 | 7815 | /* Calculates the tag to use for 'flow' and mask 'mask' when it is inserted |
54a9cbc9 | 7816 | * into an OpenFlow table with the given 'basis'. */ |
822d9414 | 7817 | static tag_type |
5cb7a798 | 7818 | rule_calculate_tag(const struct flow *flow, const struct minimask *mask, |
54a9cbc9 BP |
7819 | uint32_t secret) |
7820 | { | |
5cb7a798 | 7821 | if (minimask_is_catchall(mask)) { |
54a9cbc9 BP |
7822 | return 0; |
7823 | } else { | |
5cb7a798 BP |
7824 | uint32_t hash = flow_hash_in_minimask(flow, mask, secret); |
7825 | return tag_create_deterministic(hash); | |
54a9cbc9 BP |
7826 | } |
7827 | } | |
7828 | ||
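/* Illustrative sketch (added for this document, not part of the original
 * source): how a facet's tag for a resubmit into a "taggable" table could be
 * computed.  It assumes the 'other_table' and 'basis' members of struct
 * table_dpif and the 'mask' member of struct cls_table used elsewhere in this
 * file. */
static tag_type
example_resubmit_tag(const struct ofproto_dpif *ofproto, uint8_t table_id,
                     const struct flow *flow)
{
    const struct table_dpif *table = &ofproto->tables[table_id];

    /* A NULL 'other_table' means the table is not taggable, so changes to it
     * fall back to full revalidation (see rule_invalidate() below). */
    return (table->other_table
            ? rule_calculate_tag(flow, &table->other_table->mask, table->basis)
            : 0);
}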
7829 | /* Following a change to OpenFlow table 'table_id' in 'ofproto', update the | |
7830 | * taggability of that table. | |
7831 | * | |
7832 | * This function must be called after *each* change to a flow table. If you | |
7833 | * skip calling it on some changes then the pointer comparisons at the end can | |
7834 | * be invalid if you get unlucky. For example, if a flow removal causes a | |
7835 | * cls_table to be destroyed and then a flow insertion causes a cls_table with | |
7836 | * different wildcards to be created with the same address, then this function | |
7837 | * will incorrectly skip revalidation. */ | |
7838 | static void | |
7839 | table_update_taggable(struct ofproto_dpif *ofproto, uint8_t table_id) | |
7840 | { | |
7841 | struct table_dpif *table = &ofproto->tables[table_id]; | |
d0918789 | 7842 | const struct oftable *oftable = &ofproto->up.tables[table_id]; |
54a9cbc9 BP |
7843 | struct cls_table *catchall, *other; |
7844 | struct cls_table *t; | |
7845 | ||
7846 | catchall = other = NULL; | |
7847 | ||
d0918789 | 7848 | switch (hmap_count(&oftable->cls.tables)) { |
54a9cbc9 BP |
7849 | case 0: |
7850 | /* We could tag this OpenFlow table but it would make the logic a | |
7851 | * little harder and it's a corner case that doesn't seem worth it | |
7852 | * yet. */ | |
7853 | break; | |
7854 | ||
7855 | case 1: | |
7856 | case 2: | |
d0918789 | 7857 | HMAP_FOR_EACH (t, hmap_node, &oftable->cls.tables) { |
54a9cbc9 BP |
7858 | if (cls_table_is_catchall(t)) { |
7859 | catchall = t; | |
7860 | } else if (!other) { | |
7861 | other = t; | |
7862 | } else { | |
7863 | /* Indicate that we can't tag this by setting both tables to | |
7864 | * NULL. (We know that 'catchall' is already NULL.) */ | |
7865 | other = NULL; | |
7866 | } | |
7867 | } | |
7868 | break; | |
7869 | ||
7870 | default: | |
7871 | /* Can't tag this table. */ | |
7872 | break; | |
7873 | } | |
7874 | ||
7875 | if (table->catchall_table != catchall || table->other_table != other) { | |
7876 | table->catchall_table = catchall; | |
7877 | table->other_table = other; | |
2cc3c58e | 7878 | ofproto->backer->need_revalidate = REV_FLOW_TABLE; |
54a9cbc9 BP |
7879 | } |
7880 | } | |
7881 | ||
7882 | /* Given 'rule' that has changed in some way (either it is a rule being | |
7883 | * inserted, a rule being deleted, or a rule whose actions are being | |
7884 | * modified), marks facets for revalidation to ensure that packets will be | |
7885 | * forwarded correctly according to the new state of the flow table. | |
7886 | * | |
7887 | * This function must be called after *each* change to a flow table. See | |
7888 | * the comment on table_update_taggable() for more information. */ | |
7889 | static void | |
7890 | rule_invalidate(const struct rule_dpif *rule) | |
7891 | { | |
7892 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto); | |
7893 | ||
7894 | table_update_taggable(ofproto, rule->up.table_id); | |
7895 | ||
2cc3c58e | 7896 | if (!ofproto->backer->need_revalidate) { |
54a9cbc9 BP |
7897 | struct table_dpif *table = &ofproto->tables[rule->up.table_id]; |
7898 | ||
7899 | if (table->other_table && rule->tag) { | |
2cc3c58e | 7900 | tag_set_add(&ofproto->backer->revalidate_set, rule->tag); |
54a9cbc9 | 7901 | } else { |
2cc3c58e | 7902 | ofproto->backer->need_revalidate = REV_FLOW_TABLE; |
54a9cbc9 BP |
7903 | } |
7904 | } | |
7905 | } | |
7906 | \f | |
abe529af | 7907 | static bool |
7257b535 BP |
7908 | set_frag_handling(struct ofproto *ofproto_, |
7909 | enum ofp_config_flags frag_handling) | |
abe529af BP |
7910 | { |
7911 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
7257b535 | 7912 | if (frag_handling != OFPC_FRAG_REASM) { |
2cc3c58e | 7913 | ofproto->backer->need_revalidate = REV_RECONFIGURE; |
7257b535 BP |
7914 | return true; |
7915 | } else { | |
7916 | return false; | |
7917 | } | |
abe529af BP |
7918 | } |
7919 | ||
90bf1e07 | 7920 | static enum ofperr |
abe529af BP |
7921 | packet_out(struct ofproto *ofproto_, struct ofpbuf *packet, |
7922 | const struct flow *flow, | |
f25d0cf3 | 7923 | const struct ofpact *ofpacts, size_t ofpacts_len) |
abe529af BP |
7924 | { |
7925 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
14f94f9a | 7926 | struct initial_vals initial_vals; |
548de4dd BP |
7927 | struct odputil_keybuf keybuf; |
7928 | struct dpif_flow_stats stats; | |
bbafd73b EJ |
7929 | struct xlate_out xout; |
7930 | struct xlate_in xin; | |
548de4dd | 7931 | struct ofpbuf key; |
112bc5f4 | 7932 | |
80e5eed9 | 7933 | |
548de4dd | 7934 | ofpbuf_use_stack(&key, &keybuf, sizeof keybuf); |
e1b1d06a JP |
7935 | odp_flow_key_from_flow(&key, flow, |
7936 | ofp_port_to_odp_port(ofproto, flow->in_port)); | |
050ac423 | 7937 | |
548de4dd | 7938 | dpif_flow_stats_extract(flow, packet, time_msec(), &stats); |
abe529af | 7939 | |
14f94f9a | 7940 | initial_vals.vlan_tci = flow->vlan_tci; |
bbafd73b EJ |
7941 | xlate_in_init(&xin, ofproto, flow, &initial_vals, NULL, stats.tcp_flags, |
7942 | packet); | |
7943 | xin.resubmit_stats = &stats; | |
7944 | xin.ofpacts_len = ofpacts_len; | |
7945 | xin.ofpacts = ofpacts; | |
2284188b | 7946 | |
bbafd73b | 7947 | xlate_actions(&xin, &xout); |
acf60855 | 7948 | dpif_execute(ofproto->backer->dpif, key.data, key.size, |
bbafd73b EJ |
7949 | xout.odp_actions.data, xout.odp_actions.size, packet); |
7950 | xlate_out_uninit(&xout); | |
2284188b | 7951 | |
548de4dd | 7952 | return 0; |
abe529af | 7953 | } |
6fca1ffb BP |
7954 | \f |
7955 | /* NetFlow. */ | |
7956 | ||
7957 | static int | |
7958 | set_netflow(struct ofproto *ofproto_, | |
7959 | const struct netflow_options *netflow_options) | |
7960 | { | |
7961 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
7962 | ||
7963 | if (netflow_options) { | |
7964 | if (!ofproto->netflow) { | |
7965 | ofproto->netflow = netflow_create(); | |
7966 | } | |
7967 | return netflow_set_options(ofproto->netflow, netflow_options); | |
7968 | } else { | |
7969 | netflow_destroy(ofproto->netflow); | |
7970 | ofproto->netflow = NULL; | |
7971 | return 0; | |
7972 | } | |
7973 | } | |
abe529af BP |
7974 | |
7975 | static void | |
7976 | get_netflow_ids(const struct ofproto *ofproto_, | |
7977 | uint8_t *engine_type, uint8_t *engine_id) | |
7978 | { | |
7979 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); | |
7980 | ||
acf60855 | 7981 | dpif_get_netflow_ids(ofproto->backer->dpif, engine_type, engine_id); |
abe529af | 7982 | } |
6fca1ffb BP |
7983 | |
7984 | static void | |
7985 | send_active_timeout(struct ofproto_dpif *ofproto, struct facet *facet) | |
7986 | { | |
7987 | if (!facet_is_controller_flow(facet) && | |
7988 | netflow_active_timeout_expired(ofproto->netflow, &facet->nf_flow)) { | |
b0f7b9b5 | 7989 | struct subfacet *subfacet; |
6fca1ffb BP |
7990 | struct ofexpired expired; |
7991 | ||
b0f7b9b5 | 7992 | LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) { |
6a7e895f | 7993 | if (subfacet->path == SF_FAST_PATH) { |
b0f7b9b5 | 7994 | struct dpif_flow_stats stats; |
6fca1ffb | 7995 | |
bcd2633a JP |
7996 | subfacet_install(subfacet, &facet->xout.odp_actions, |
7997 | &stats); | |
15baa734 | 7998 | subfacet_update_stats(subfacet, &stats); |
b0f7b9b5 | 7999 | } |
6fca1ffb BP |
8000 | } |
8001 | ||
8002 | expired.flow = facet->flow; | |
8003 | expired.packet_count = facet->packet_count; | |
8004 | expired.byte_count = facet->byte_count; | |
8005 | expired.used = facet->used; | |
8006 | netflow_expire(ofproto->netflow, &facet->nf_flow, &expired); | |
8007 | } | |
8008 | } | |
8009 | ||
8010 | static void | |
8011 | send_netflow_active_timeouts(struct ofproto_dpif *ofproto) | |
8012 | { | |
bcd2633a | 8013 | struct cls_cursor cursor; |
6fca1ffb BP |
8014 | struct facet *facet; |
8015 | ||
bcd2633a JP |
8016 | cls_cursor_init(&cursor, &ofproto->facets, NULL); |
8017 | CLS_CURSOR_FOR_EACH (facet, cr, &cursor) { | |
6fca1ffb BP |
8018 | send_active_timeout(ofproto, facet); |
8019 | } | |
8020 | } | |
abe529af BP |
8021 | \f |
8022 | static struct ofproto_dpif * | |
8023 | ofproto_dpif_lookup(const char *name) | |
8024 | { | |
b44a10b7 BP |
8025 | struct ofproto_dpif *ofproto; |
8026 | ||
8027 | HMAP_FOR_EACH_WITH_HASH (ofproto, all_ofproto_dpifs_node, | |
8028 | hash_string(name, 0), &all_ofproto_dpifs) { | |
8029 | if (!strcmp(ofproto->up.name, name)) { | |
8030 | return ofproto; | |
8031 | } | |
8032 | } | |
8033 | return NULL; | |
abe529af BP |
8034 | } |
8035 | ||
f0a3aa2e | 8036 | static void |
96e466a3 | 8037 | ofproto_unixctl_fdb_flush(struct unixctl_conn *conn, int argc, |
0e15264f | 8038 | const char *argv[], void *aux OVS_UNUSED) |
f0a3aa2e | 8039 | { |
490df1ef | 8040 | struct ofproto_dpif *ofproto; |
f0a3aa2e | 8041 | |
96e466a3 EJ |
8042 | if (argc > 1) { |
8043 | ofproto = ofproto_dpif_lookup(argv[1]); | |
8044 | if (!ofproto) { | |
bde9f75d | 8045 | unixctl_command_reply_error(conn, "no such bridge"); |
96e466a3 EJ |
8046 | return; |
8047 | } | |
2cc3c58e | 8048 | mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set); |
96e466a3 EJ |
8049 | } else { |
8050 | HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) { | |
2cc3c58e | 8051 | mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set); |
96e466a3 | 8052 | } |
f0a3aa2e | 8053 | } |
f0a3aa2e | 8054 | |
bde9f75d | 8055 | unixctl_command_reply(conn, "table successfully flushed"); |
f0a3aa2e AA |
8056 | } |
8057 | ||
abe529af | 8058 | static void |
0e15264f BP |
8059 | ofproto_unixctl_fdb_show(struct unixctl_conn *conn, int argc OVS_UNUSED, |
8060 | const char *argv[], void *aux OVS_UNUSED) | |
abe529af BP |
8061 | { |
8062 | struct ds ds = DS_EMPTY_INITIALIZER; | |
8063 | const struct ofproto_dpif *ofproto; | |
8064 | const struct mac_entry *e; | |
8065 | ||
0e15264f | 8066 | ofproto = ofproto_dpif_lookup(argv[1]); |
abe529af | 8067 | if (!ofproto) { |
bde9f75d | 8068 | unixctl_command_reply_error(conn, "no such bridge"); |
abe529af BP |
8069 | return; |
8070 | } | |
8071 | ||
8072 | ds_put_cstr(&ds, " port VLAN MAC Age\n"); | |
8073 | LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) { | |
8074 | struct ofbundle *bundle = e->port.p; | |
8075 | ds_put_format(&ds, "%5d %4d "ETH_ADDR_FMT" %3d\n", | |
8076 | ofbundle_get_a_port(bundle)->odp_port, | |
e764773c BP |
8077 | e->vlan, ETH_ADDR_ARGS(e->mac), |
8078 | mac_entry_age(ofproto->ml, e)); | |
abe529af | 8079 | } |
bde9f75d | 8080 | unixctl_command_reply(conn, ds_cstr(&ds)); |
abe529af BP |
8081 | ds_destroy(&ds); |
8082 | } | |
8083 | ||
6a6455e5 | 8084 | struct trace_ctx { |
bbafd73b EJ |
8085 | struct xlate_out xout; |
8086 | struct xlate_in xin; | |
abe529af BP |
8087 | struct flow flow; |
8088 | struct ds *result; | |
8089 | }; | |
8090 | ||
8091 | static void | |
29901626 BP |
8092 | trace_format_rule(struct ds *result, uint8_t table_id, int level, |
8093 | const struct rule_dpif *rule) | |
abe529af BP |
8094 | { |
8095 | ds_put_char_multiple(result, '\t', level); | |
8096 | if (!rule) { | |
8097 | ds_put_cstr(result, "No match\n"); | |
8098 | return; | |
8099 | } | |
8100 | ||
29901626 BP |
8101 | ds_put_format(result, "Rule: table=%"PRIu8" cookie=%#"PRIx64" ", |
8102 | table_id, ntohll(rule->up.flow_cookie)); | |
79feb7df | 8103 | cls_rule_format(&rule->up.cr, result); |
abe529af BP |
8104 | ds_put_char(result, '\n'); |
8105 | ||
8106 | ds_put_char_multiple(result, '\t', level); | |
8107 | ds_put_cstr(result, "OpenFlow "); | |
f25d0cf3 | 8108 | ofpacts_format(rule->up.ofpacts, rule->up.ofpacts_len, result); |
abe529af BP |
8109 | ds_put_char(result, '\n'); |
8110 | } | |
8111 | ||
8112 | static void | |
8113 | trace_format_flow(struct ds *result, int level, const char *title, | |
bbafd73b | 8114 | struct trace_ctx *trace) |
abe529af BP |
8115 | { |
8116 | ds_put_char_multiple(result, '\t', level); | |
8117 | ds_put_format(result, "%s: ", title); | |
bbafd73b | 8118 | if (flow_equal(&trace->xin.flow, &trace->flow)) { |
abe529af BP |
8119 | ds_put_cstr(result, "unchanged"); |
8120 | } else { | |
bbafd73b EJ |
8121 | flow_format(result, &trace->xin.flow); |
8122 | trace->flow = trace->xin.flow; | |
abe529af BP |
8123 | } |
8124 | ds_put_char(result, '\n'); | |
8125 | } | |
8126 | ||
eb9e1c26 EJ |
8127 | static void |
8128 | trace_format_regs(struct ds *result, int level, const char *title, | |
6a6455e5 | 8129 | struct trace_ctx *trace) |
eb9e1c26 EJ |
8130 | { |
8131 | size_t i; | |
8132 | ||
8133 | ds_put_char_multiple(result, '\t', level); | |
8134 | ds_put_format(result, "%s:", title); | |
8135 | for (i = 0; i < FLOW_N_REGS; i++) { | |
8136 | ds_put_format(result, " reg%zu=0x%"PRIx32, i, trace->flow.regs[i]); | |
8137 | } | |
8138 | ds_put_char(result, '\n'); | |
8139 | } | |
8140 | ||
1ed8d352 EJ |
8141 | static void |
8142 | trace_format_odp(struct ds *result, int level, const char *title, | |
6a6455e5 | 8143 | struct trace_ctx *trace) |
1ed8d352 | 8144 | { |
bbafd73b | 8145 | struct ofpbuf *odp_actions = &trace->xout.odp_actions; |
1ed8d352 EJ |
8146 | |
8147 | ds_put_char_multiple(result, '\t', level); | |
8148 | ds_put_format(result, "%s: ", title); | |
8149 | format_odp_actions(result, odp_actions->data, odp_actions->size); | |
8150 | ds_put_char(result, '\n'); | |
8151 | } | |
8152 | ||
abe529af | 8153 | static void |
f03a84b9 | 8154 | trace_resubmit(struct xlate_ctx *ctx, struct rule_dpif *rule) |
abe529af | 8155 | { |
bbafd73b | 8156 | struct trace_ctx *trace = CONTAINER_OF(ctx->xin, struct trace_ctx, xin); |
abe529af BP |
8157 | struct ds *result = trace->result; |
8158 | ||
8159 | ds_put_char(result, '\n'); | |
8160 | trace_format_flow(result, ctx->recurse + 1, "Resubmitted flow", trace); | |
eb9e1c26 | 8161 | trace_format_regs(result, ctx->recurse + 1, "Resubmitted regs", trace); |
1ed8d352 | 8162 | trace_format_odp(result, ctx->recurse + 1, "Resubmitted odp", trace); |
29901626 | 8163 | trace_format_rule(result, ctx->table_id, ctx->recurse + 1, rule); |
abe529af BP |
8164 | } |
8165 | ||
479df176 | 8166 | static void |
f03a84b9 | 8167 | trace_report(struct xlate_ctx *ctx, const char *s) |
479df176 | 8168 | { |
bbafd73b | 8169 | struct trace_ctx *trace = CONTAINER_OF(ctx->xin, struct trace_ctx, xin); |
479df176 BP |
8170 | struct ds *result = trace->result; |
8171 | ||
8172 | ds_put_char_multiple(result, '\t', ctx->recurse); | |
8173 | ds_put_cstr(result, s); | |
8174 | ds_put_char(result, '\n'); | |
8175 | } | |
8176 | ||
abe529af | 8177 | static void |
0e15264f | 8178 | ofproto_unixctl_trace(struct unixctl_conn *conn, int argc, const char *argv[], |
abe529af BP |
8179 | void *aux OVS_UNUSED) |
8180 | { | |
50aa28fd | 8181 | const struct dpif_backer *backer; |
abe529af | 8182 | struct ofproto_dpif *ofproto; |
876b0e1c BP |
8183 | struct ofpbuf odp_key; |
8184 | struct ofpbuf *packet; | |
14f94f9a | 8185 | struct initial_vals initial_vals; |
abe529af BP |
8186 | struct ds result; |
8187 | struct flow flow; | |
abe529af BP |
8188 | char *s; |
8189 | ||
876b0e1c | 8190 | packet = NULL; |
50aa28fd | 8191 | backer = NULL; |
abe529af | 8192 | ds_init(&result); |
50aa28fd | 8193 | ofpbuf_init(&odp_key, 0); |
abe529af | 8194 | |
50aa28fd AW |
8195 | /* Handle "-generate" or a hex string as the last argument. */ |
8196 | if (!strcmp(argv[argc - 1], "-generate")) { | |
8197 | packet = ofpbuf_new(0); | |
8198 | argc--; | |
8199 | } else { | |
8200 | const char *error = eth_from_hex(argv[argc - 1], &packet); | |
8201 | if (!error) { | |
8202 | argc--; | |
8203 | } else if (argc == 4) { | |
8204 | /* The 3-argument form must end in "-generate" or a hex string. */ | |
8205 | unixctl_command_reply_error(conn, error); | |
8206 | goto exit; | |
8207 | } | |
e84173dc | 8208 | } |
876b0e1c | 8209 | |
50aa28fd AW |
8210 | /* Parse the flow and determine whether a datapath or a bridge is | |
8211 | * specified.  If odp_flow_key_from_string() returns 0, the flow is an | |
8212 | * odp_flow; if parse_ofp_exact_flow() returns 0, the flow is a | |
8213 | * br_flow. */ | |
8214 | if (!odp_flow_key_from_string(argv[argc - 1], NULL, &odp_key)) { | |
8215 | /* If the odp_flow is the second argument, | |
8216 | * the datapath name is the first argument. */ | |
8217 | if (argc == 3) { | |
8218 | const char *dp_type; | |
8219 | if (!strncmp(argv[1], "ovs-", 4)) { | |
8220 | dp_type = argv[1] + 4; | |
8221 | } else { | |
8222 | dp_type = argv[1]; | |
31a19d69 | 8223 | } |
50aa28fd AW |
8224 | backer = shash_find_data(&all_dpif_backers, dp_type); |
8225 | if (!backer) { | |
8226 | unixctl_command_reply_error(conn, "Cannot find datapath " | |
8227 | "of this name"); | |
31a19d69 BP |
8228 | goto exit; |
8229 | } | |
8230 | } else { | |
50aa28fd AW |
8231 | /* No datapath name specified, so there should be only one |
8232 | * datapath. */ | |
8233 | struct shash_node *node; | |
8234 | if (shash_count(&all_dpif_backers) != 1) { | |
8235 | unixctl_command_reply_error(conn, "Must specify datapath " | |
8236 | "name, there is more than one type of datapath"); | |
31a19d69 BP |
8237 | goto exit; |
8238 | } | |
50aa28fd AW |
8239 | node = shash_first(&all_dpif_backers); |
8240 | backer = node->data; | |
876b0e1c | 8241 | } |
8b3b8dd1 | 8242 | |
50aa28fd AW |
8243 | /* Use ofproto_receive() to obtain the ofproto_dpif
8244 | * object. */ | |
8245 | if (ofproto_receive(backer, NULL, odp_key.data, | |
8246 | odp_key.size, &flow, NULL, &ofproto, NULL, | |
8247 | &initial_vals)) { | |
8248 | unixctl_command_reply_error(conn, "Invalid datapath flow"); | |
8249 | goto exit; | |
8b3b8dd1 | 8250 | } |
50aa28fd AW |
8251 | ds_put_format(&result, "Bridge: %s\n", ofproto->up.name); |
8252 | } else if (!parse_ofp_exact_flow(&flow, argv[argc - 1])) { | |
8253 | if (argc != 3) { | |
8254 | unixctl_command_reply_error(conn, "Must specify bridge name"); | |
876b0e1c BP |
8255 | goto exit; |
8256 | } | |
8257 | ||
50aa28fd AW |
8258 | ofproto = ofproto_dpif_lookup(argv[1]); |
8259 | if (!ofproto) { | |
8260 | unixctl_command_reply_error(conn, "Unknown bridge name"); | |
8261 | goto exit; | |
8262 | } | |
14f94f9a | 8263 | initial_vals.vlan_tci = flow.vlan_tci; |
876b0e1c | 8264 | } else { |
50aa28fd | 8265 | unixctl_command_reply_error(conn, "Bad flow syntax"); |
abe529af BP |
8266 | goto exit; |
8267 | } | |
8268 | ||
50aa28fd AW |
8269 | /* Generate a packet, if requested. */ |
8270 | if (packet) { | |
8271 | if (!packet->size) { | |
8272 | flow_compose(packet, &flow); | |
8273 | } else { | |
8274 | ds_put_cstr(&result, "Packet: "); | |
8275 | s = ofp_packet_to_string(packet->data, packet->size); | |
8276 | ds_put_cstr(&result, s); | |
8277 | free(s); | |
8278 | ||
8279 | /* Use the metadata from the flow and the packet argument | |
8280 | * to reconstruct the flow. */ | |
8281 | flow_extract(packet, flow.skb_priority, flow.skb_mark, NULL, | |
8282 | flow.in_port, &flow); | |
8283 | initial_vals.vlan_tci = flow.vlan_tci; | |
8284 | } | |
8285 | } | |
8286 | ||
14f94f9a | 8287 | ofproto_trace(ofproto, &flow, packet, &initial_vals, &result); |
6a6455e5 EJ |
8288 | unixctl_command_reply(conn, ds_cstr(&result)); |
8289 | ||
8290 | exit: | |
8291 | ds_destroy(&result); | |
8292 | ofpbuf_delete(packet); | |
8293 | ofpbuf_uninit(&odp_key); | |
8294 | } | |
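A usage sketch for the handler above; the bridge name and flow are illustrative assumptions, and the argument forms follow the "ofproto/trace" registration later in this file:

    /* Example invocation (assumed bridge "br0"): trace an OpenFlow-style flow
     * and let the command synthesize a matching packet with "-generate":
     *
     *     ovs-appctl ofproto/trace br0 "in_port=1,dl_type=0x0800" -generate
     *
     * Alternatively, pass a datapath (odp) flow key, optionally preceded by
     * the datapath name when more than one backer exists. */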
8295 | ||
8296 | static void | |
8297 | ofproto_trace(struct ofproto_dpif *ofproto, const struct flow *flow, | |
14f94f9a JP |
8298 | const struct ofpbuf *packet, |
8299 | const struct initial_vals *initial_vals, struct ds *ds) | |
6a6455e5 EJ |
8300 | { |
8301 | struct rule_dpif *rule; | |
8302 | ||
8303 | ds_put_cstr(ds, "Flow: "); | |
8304 | flow_format(ds, flow); | |
8305 | ds_put_char(ds, '\n'); | |
abe529af | 8306 | |
bcd2633a | 8307 | rule = rule_dpif_lookup(ofproto, flow, NULL); |
c57b2226 | 8308 | |
6a6455e5 | 8309 | trace_format_rule(ds, 0, 0, rule); |
c57b2226 BP |
8310 | if (rule == ofproto->miss_rule) { |
8311 | ds_put_cstr(ds, "\nNo match, flow generates \"packet in\"s.\n"); | |
8312 | } else if (rule == ofproto->no_packet_in_rule) { | |
8313 | ds_put_cstr(ds, "\nNo match, packets dropped because " | |
8314 | "OFPPC_NO_PACKET_IN is set on in_port.\n"); | |
7fd51d39 BP |
8315 | } else if (rule == ofproto->drop_frags_rule) { |
8316 | ds_put_cstr(ds, "\nPackets dropped because they are IP fragments " | |
8317 | "and the fragment handling mode is \"drop\".\n"); | |
c57b2226 BP |
8318 | } |
8319 | ||
abe529af | 8320 | if (rule) { |
050ac423 BP |
8321 | uint64_t odp_actions_stub[1024 / 8]; |
8322 | struct ofpbuf odp_actions; | |
6a6455e5 | 8323 | struct trace_ctx trace; |
bcd2633a | 8324 | struct match match; |
0e553d9c | 8325 | uint8_t tcp_flags; |
abe529af | 8326 | |
6a6455e5 EJ |
8327 | tcp_flags = packet ? packet_get_tcp_flags(packet, flow) : 0; |
8328 | trace.result = ds; | |
8329 | trace.flow = *flow; | |
050ac423 BP |
8330 | ofpbuf_use_stub(&odp_actions, |
8331 | odp_actions_stub, sizeof odp_actions_stub); | |
bbafd73b EJ |
8332 | xlate_in_init(&trace.xin, ofproto, flow, initial_vals, rule, tcp_flags, |
8333 | packet); | |
8334 | trace.xin.resubmit_hook = trace_resubmit; | |
8335 | trace.xin.report_hook = trace_report; | |
bcd2633a | 8336 | |
bbafd73b | 8337 | xlate_actions(&trace.xin, &trace.xout); |
abe529af | 8338 | |
6a6455e5 EJ |
8339 | ds_put_char(ds, '\n'); |
8340 | trace_format_flow(ds, 0, "Final flow", &trace); | |
bcd2633a JP |
8341 | |
8342 | match_init(&match, flow, &trace.xout.wc); | |
8343 | ds_put_cstr(ds, "Relevant fields: "); | |
8344 | match_format(&match, ds, OFP_DEFAULT_PRIORITY); | |
8345 | ds_put_char(ds, '\n'); | |
8346 | ||
6a6455e5 | 8347 | ds_put_cstr(ds, "Datapath actions: "); |
bbafd73b EJ |
8348 | format_odp_actions(ds, trace.xout.odp_actions.data, |
8349 | trace.xout.odp_actions.size); | |
876b0e1c | 8350 | |
bbafd73b | 8351 | if (trace.xout.slow) { |
6a7e895f BP |
8352 | ds_put_cstr(ds, "\nThis flow is handled by the userspace " |
8353 | "slow path because it:"); | |
bbafd73b | 8354 | switch (trace.xout.slow) { |
98f0520f EJ |
8355 | case SLOW_CFM: |
8356 | ds_put_cstr(ds, "\n\t- Consists of CFM packets."); | |
8357 | break; | |
8358 | case SLOW_LACP: | |
8359 | ds_put_cstr(ds, "\n\t- Consists of LACP packets."); | |
8360 | break; | |
8361 | case SLOW_STP: | |
8362 | ds_put_cstr(ds, "\n\t- Consists of STP packets."); | |
8363 | break; | |
8364 | case SLOW_BFD: | |
8365 | ds_put_cstr(ds, "\n\t- Consists of BFD packets."); | |
8366 | break; | |
8367 | case SLOW_CONTROLLER: | |
8368 | ds_put_cstr(ds, "\n\t- Sends \"packet-in\" messages " | |
8369 | "to the OpenFlow controller."); | |
8370 | break; | |
8371 | case __SLOW_MAX: | |
8372 | NOT_REACHED(); | |
6a7e895f | 8373 | } |
876b0e1c | 8374 | } |
bbafd73b EJ |
8375 | |
8376 | xlate_out_uninit(&trace.xout); | |
abe529af | 8377 | } |
abe529af BP |
8378 | } |
8379 | ||
7ee20df1 | 8380 | static void |
0e15264f BP |
8381 | ofproto_dpif_clog(struct unixctl_conn *conn OVS_UNUSED, int argc OVS_UNUSED, |
8382 | const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED) | |
7ee20df1 BP |
8383 | { |
8384 | clogged = true; | |
bde9f75d | 8385 | unixctl_command_reply(conn, NULL); |
7ee20df1 BP |
8386 | } |
8387 | ||
8388 | static void | |
0e15264f BP |
8389 | ofproto_dpif_unclog(struct unixctl_conn *conn OVS_UNUSED, int argc OVS_UNUSED, |
8390 | const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED) | |
7ee20df1 BP |
8391 | { |
8392 | clogged = false; | |
bde9f75d | 8393 | unixctl_command_reply(conn, NULL); |
7ee20df1 BP |
8394 | } |
8395 | ||
6814e51f BP |
8396 | /* Runs a self-check of flow translations in 'ofproto'. Appends a message to |
8397 | * 'reply' describing the results. */ | |
8398 | static void | |
8399 | ofproto_dpif_self_check__(struct ofproto_dpif *ofproto, struct ds *reply) | |
8400 | { | |
bcd2633a | 8401 | struct cls_cursor cursor; |
6814e51f BP |
8402 | struct facet *facet; |
8403 | int errors; | |
8404 | ||
8405 | errors = 0; | |
bcd2633a JP |
8406 | cls_cursor_init(&cursor, &ofproto->facets, NULL); |
8407 | CLS_CURSOR_FOR_EACH (facet, cr, &cursor) { | |
6814e51f BP |
8408 | if (!facet_check_consistency(facet)) { |
8409 | errors++; | |
8410 | } | |
8411 | } | |
8412 | if (errors) { | |
2cc3c58e | 8413 | ofproto->backer->need_revalidate = REV_INCONSISTENCY; |
6814e51f BP |
8414 | } |
8415 | ||
8416 | if (errors) { | |
8417 | ds_put_format(reply, "%s: self-check failed (%d errors)\n", | |
8418 | ofproto->up.name, errors); | |
8419 | } else { | |
8420 | ds_put_format(reply, "%s: self-check passed\n", ofproto->up.name); | |
8421 | } | |
8422 | } | |
8423 | ||
8424 | static void | |
8425 | ofproto_dpif_self_check(struct unixctl_conn *conn, | |
8426 | int argc, const char *argv[], void *aux OVS_UNUSED) | |
8427 | { | |
8428 | struct ds reply = DS_EMPTY_INITIALIZER; | |
8429 | struct ofproto_dpif *ofproto; | |
8430 | ||
8431 | if (argc > 1) { | |
8432 | ofproto = ofproto_dpif_lookup(argv[1]); | |
8433 | if (!ofproto) { | |
bde9f75d EJ |
8434 | unixctl_command_reply_error(conn, "Unknown ofproto (use " |
8435 | "ofproto/list for help)"); | |
6814e51f BP |
8436 | return; |
8437 | } | |
8438 | ofproto_dpif_self_check__(ofproto, &reply); | |
8439 | } else { | |
8440 | HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) { | |
8441 | ofproto_dpif_self_check__(ofproto, &reply); | |
8442 | } | |
8443 | } | |
8444 | ||
bde9f75d | 8445 | unixctl_command_reply(conn, ds_cstr(&reply)); |
6814e51f BP |
8446 | ds_destroy(&reply); |
8447 | } | |
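A short usage note for the two functions above; the bridge name is an assumed example, and per the registration later in this file the bridge argument is optional (omitting it checks every bridge):

    /* Example invocation (assumed bridge "br0"):
     *
     *     ovs-appctl ofproto/self-check br0
     *
     * The reply reports either "self-check passed" or the number of
     * inconsistent facets, and any inconsistency schedules a revalidation. */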
8448 | ||
27022416 JP |
8449 | /* Stores the current ofprotos in 'ofproto_shash'. Returns a sorted list |
8450 | * of the 'ofproto_shash' nodes. It is the responsibility of the caller | |
8451 | * to destroy 'ofproto_shash' and free the returned value. */ | |
8452 | static const struct shash_node ** | |
8453 | get_ofprotos(struct shash *ofproto_shash) | |
8454 | { | |
8455 | const struct ofproto_dpif *ofproto; | |
8456 | ||
8457 | HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) { | |
8458 | char *name = xasprintf("%s@%s", ofproto->up.type, ofproto->up.name); | |
8459 | shash_add_nocopy(ofproto_shash, name, ofproto); | |
8460 | } | |
8461 | ||
8462 | return shash_sort(ofproto_shash); | |
8463 | } | |
8464 | ||
8465 | static void | |
8466 | ofproto_unixctl_dpif_dump_dps(struct unixctl_conn *conn, int argc OVS_UNUSED, | |
8467 | const char *argv[] OVS_UNUSED, | |
8468 | void *aux OVS_UNUSED) | |
8469 | { | |
8470 | struct ds ds = DS_EMPTY_INITIALIZER; | |
8471 | struct shash ofproto_shash; | |
8472 | const struct shash_node **sorted_ofprotos; | |
8473 | int i; | |
8474 | ||
8475 | shash_init(&ofproto_shash); | |
8476 | sorted_ofprotos = get_ofprotos(&ofproto_shash); | |
8477 | for (i = 0; i < shash_count(&ofproto_shash); i++) { | |
8478 | const struct shash_node *node = sorted_ofprotos[i]; | |
8479 | ds_put_format(&ds, "%s\n", node->name); | |
8480 | } | |
8481 | ||
8482 | shash_destroy(&ofproto_shash); | |
8483 | free(sorted_ofprotos); | |
8484 | ||
8485 | unixctl_command_reply(conn, ds_cstr(&ds)); | |
8486 | ds_destroy(&ds); | |
8487 | } | |
8488 | ||
8489 | static void | |
dc54ef36 EJ |
8490 | show_dp_rates(struct ds *ds, const char *heading, |
8491 | const struct avg_subfacet_rates *rates) | |
8492 | { | |
8493 | ds_put_format(ds, "%s add rate: %5.3f/min, del rate: %5.3f/min\n", | |
8494 | heading, rates->add_rate, rates->del_rate); | |
8495 | } | |
8496 | ||
8497 | static void | |
8498 | dpif_show_backer(const struct dpif_backer *backer, struct ds *ds) | |
27022416 | 8499 | { |
dc54ef36 EJ |
8500 | const struct shash_node **ofprotos; |
8501 | struct ofproto_dpif *ofproto; | |
8502 | struct shash ofproto_shash; | |
09672174 | 8503 | uint64_t n_hit, n_missed; |
dc54ef36 | 8504 | long long int minutes; |
09672174 | 8505 | size_t i; |
655ab909 | 8506 | |
04d08d54 | 8507 | n_hit = n_missed = 0; |
dc54ef36 EJ |
8508 | HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) { |
8509 | if (ofproto->backer == backer) { | |
dc54ef36 EJ |
8510 | n_missed += ofproto->n_missed; |
8511 | n_hit += ofproto->n_hit; | |
8512 | } | |
8513 | } | |
655ab909 | 8514 | |
dc54ef36 EJ |
8515 | ds_put_format(ds, "%s: hit:%"PRIu64" missed:%"PRIu64"\n", |
8516 | dpif_name(backer->dpif), n_hit, n_missed); | |
8517 | ds_put_format(ds, "\tflows: cur: %zu, avg: %u, max: %u," | |
04d08d54 | 8518 | " life span: %lldms\n", hmap_count(&backer->subfacets), |
dc54ef36 EJ |
8519 | backer->avg_n_subfacet, backer->max_n_subfacet, |
8520 | backer->avg_subfacet_life); | |
8521 | ||
8522 | minutes = (time_msec() - backer->created) / (1000 * 60); | |
655ab909 | 8523 | if (minutes >= 60) { |
dc54ef36 | 8524 | show_dp_rates(ds, "\thourly avg:", &backer->hourly); |
655ab909 AZ |
8525 | } |
8526 | if (minutes >= 60 * 24) { | |
dc54ef36 | 8527 | show_dp_rates(ds, "\tdaily avg:", &backer->daily); |
655ab909 | 8528 | } |
dc54ef36 | 8529 | show_dp_rates(ds, "\toverall avg:", &backer->lifetime); |
27022416 | 8530 | |
dc54ef36 EJ |
8531 | shash_init(&ofproto_shash); |
8532 | ofprotos = get_ofprotos(&ofproto_shash); | |
8533 | for (i = 0; i < shash_count(&ofproto_shash); i++) { | |
8534 | struct ofproto_dpif *ofproto = ofprotos[i]->data; | |
8535 | const struct shash_node **ports; | |
8536 | size_t j; | |
0a740f48 | 8537 | |
dc54ef36 EJ |
8538 | if (ofproto->backer != backer) { |
8539 | continue; | |
0a740f48 | 8540 | } |
27022416 | 8541 | |
dc54ef36 EJ |
8542 | ds_put_format(ds, "\t%s: hit:%"PRIu64" missed:%"PRIu64"\n", |
8543 | ofproto->up.name, ofproto->n_hit, ofproto->n_missed); | |
8544 | ||
8545 | ports = shash_sort(&ofproto->up.port_by_name); | |
8546 | for (j = 0; j < shash_count(&ofproto->up.port_by_name); j++) { | |
8547 | const struct shash_node *node = ports[j]; | |
8548 | struct ofport *ofport = node->data; | |
8549 | struct smap config; | |
8550 | uint32_t odp_port; | |
27022416 | 8551 | |
dc54ef36 EJ |
8552 | ds_put_format(ds, "\t\t%s %u/", netdev_get_name(ofport->netdev), |
8553 | ofport->ofp_port); | |
27022416 | 8554 | |
dc54ef36 EJ |
8555 | odp_port = ofp_port_to_odp_port(ofproto, ofport->ofp_port); |
8556 | if (odp_port != OVSP_NONE) { | |
8557 | ds_put_format(ds, "%"PRIu32":", odp_port); | |
8558 | } else { | |
8559 | ds_put_cstr(ds, "none:"); | |
8560 | } | |
27022416 | 8561 | |
dc54ef36 | 8562 | ds_put_format(ds, " (%s", netdev_get_type(ofport->netdev)); |
27022416 | 8563 | |
dc54ef36 EJ |
8564 | smap_init(&config); |
8565 | if (!netdev_get_config(ofport->netdev, &config)) { | |
8566 | const struct smap_node **nodes; | |
8567 | size_t i; | |
27022416 | 8568 | |
dc54ef36 EJ |
8569 | nodes = smap_sort(&config); |
8570 | for (i = 0; i < smap_count(&config); i++) { | |
8571 | const struct smap_node *node = nodes[i]; | |
8572 | ds_put_format(ds, "%c %s=%s", i ? ',' : ':', | |
8573 | node->key, node->value); | |
8574 | } | |
8575 | free(nodes); | |
27022416 | 8576 | } |
dc54ef36 EJ |
8577 | smap_destroy(&config); |
8578 | ||
27022416 | 8579 | ds_put_char(ds, ')'); |
dc54ef36 | 8580 | ds_put_char(ds, '\n'); |
27022416 | 8581 | } |
dc54ef36 | 8582 | free(ports); |
27022416 | 8583 | } |
dc54ef36 EJ |
8584 | shash_destroy(&ofproto_shash); |
8585 | free(ofprotos); | |
27022416 JP |
8586 | } |
8587 | ||
8588 | static void | |
dc54ef36 EJ |
8589 | ofproto_unixctl_dpif_show(struct unixctl_conn *conn, int argc OVS_UNUSED, |
8590 | const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED) | |
27022416 JP |
8591 | { |
8592 | struct ds ds = DS_EMPTY_INITIALIZER; | |
dc54ef36 EJ |
8593 | const struct shash_node **backers; |
8594 | int i; | |
27022416 | 8595 | |
dc54ef36 EJ |
8596 | backers = shash_sort(&all_dpif_backers); |
8597 | for (i = 0; i < shash_count(&all_dpif_backers); i++) { | |
8598 | dpif_show_backer(backers[i]->data, &ds); | |
27022416 | 8599 | } |
dc54ef36 | 8600 | free(backers); |
27022416 JP |
8601 | |
8602 | unixctl_command_reply(conn, ds_cstr(&ds)); | |
8603 | ds_destroy(&ds); | |
8604 | } | |
8605 | ||
bcd2633a JP |
8606 | /* Dump the megaflow (facet) cache. This is useful to check the |
8607 | * correctness of flow wildcarding, since the same mechanism is used for | |
8608 | * both xlate caching and kernel wildcarding. | |
8609 | * | |
8610 | * It's important to note that in the output the flow description uses | |
8611 | * OpenFlow (OFP) ports, but the actions use datapath (ODP) ports. | |
8612 | * | |
8613 | * This command is only needed for advanced debugging, so it's not | |
8614 | * documented in the man page. */ | |
8615 | static void | |
8616 | ofproto_unixctl_dpif_dump_megaflows(struct unixctl_conn *conn, | |
8617 | int argc OVS_UNUSED, const char *argv[], | |
8618 | void *aux OVS_UNUSED) | |
8619 | { | |
8620 | struct ds ds = DS_EMPTY_INITIALIZER; | |
8621 | const struct ofproto_dpif *ofproto; | |
8622 | long long int now = time_msec(); | |
8623 | struct cls_cursor cursor; | |
8624 | struct facet *facet; | |
8625 | ||
8626 | ofproto = ofproto_dpif_lookup(argv[1]); | |
8627 | if (!ofproto) { | |
8628 | unixctl_command_reply_error(conn, "no such bridge"); | |
8629 | return; | |
8630 | } | |
8631 | ||
8632 | cls_cursor_init(&cursor, &ofproto->facets, NULL); | |
8633 | CLS_CURSOR_FOR_EACH (facet, cr, &cursor) { | |
8634 | cls_rule_format(&facet->cr, &ds); | |
8635 | ds_put_cstr(&ds, ", "); | |
8636 | ds_put_format(&ds, "n_subfacets:%"PRIu64", ", | |
8637 | list_size(&facet->subfacets)); | |
8638 | ds_put_format(&ds, "used:%.3fs, ", (now - facet->used) / 1000.0); | |
8639 | ds_put_cstr(&ds, "Datapath actions: "); | |
8640 | format_odp_actions(&ds, facet->xout.odp_actions.data, | |
8641 | facet->xout.odp_actions.size); | |
8642 | ds_put_cstr(&ds, "\n"); | |
8643 | } | |
8644 | ||
8645 | ds_chomp(&ds, '\n'); | |
8646 | unixctl_command_reply(conn, ds_cstr(&ds)); | |
8647 | ds_destroy(&ds); | |
8648 | } | |
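A brief usage note for the command above; the bridge name is an assumed example, and the registration later in this file gives it exactly one "bridge" argument:

    /* Example invocation (assumed bridge "br0"):
     *
     *     ovs-appctl dpif/dump-megaflows br0
     *
     * Each output line shows the wildcarded flow, its subfacet count, the
     * time since last use, and the datapath actions formatted above. */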
8649 | ||
27022416 JP |
8650 | static void |
8651 | ofproto_unixctl_dpif_dump_flows(struct unixctl_conn *conn, | |
8652 | int argc OVS_UNUSED, const char *argv[], | |
8653 | void *aux OVS_UNUSED) | |
8654 | { | |
8655 | struct ds ds = DS_EMPTY_INITIALIZER; | |
8656 | const struct ofproto_dpif *ofproto; | |
8657 | struct subfacet *subfacet; | |
8658 | ||
8659 | ofproto = ofproto_dpif_lookup(argv[1]); | |
8660 | if (!ofproto) { | |
8661 | unixctl_command_reply_error(conn, "no such bridge"); | |
8662 | return; | |
8663 | } | |
8664 | ||
af37354d EJ |
8665 | update_stats(ofproto->backer); |
8666 | ||
04d08d54 | 8667 | HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->backer->subfacets) { |
4dff9097 EJ |
8668 | struct facet *facet = subfacet->facet; |
8669 | ||
04d08d54 EJ |
8670 | if (ofproto_dpif_cast(facet->rule->up.ofproto) != ofproto) { |
8671 | continue; | |
8672 | } | |
8673 | ||
9566abf9 | 8674 | odp_flow_key_format(subfacet->key, subfacet->key_len, &ds); |
27022416 JP |
8675 | |
8676 | ds_put_format(&ds, ", packets:%"PRIu64", bytes:%"PRIu64", used:", | |
8677 | subfacet->dp_packet_count, subfacet->dp_byte_count); | |
8678 | if (subfacet->used) { | |
8679 | ds_put_format(&ds, "%.3fs", | |
8680 | (time_msec() - subfacet->used) / 1000.0); | |
8681 | } else { | |
8682 | ds_put_format(&ds, "never"); | |
8683 | } | |
8684 | if (subfacet->facet->tcp_flags) { | |
8685 | ds_put_cstr(&ds, ", flags:"); | |
8686 | packet_format_tcp_flags(&ds, subfacet->facet->tcp_flags); | |
8687 | } | |
8688 | ||
8689 | ds_put_cstr(&ds, ", actions:"); | |
bbafd73b | 8690 | if (facet->xout.slow) { |
f2245da3 JP |
8691 | uint64_t slow_path_stub[128 / 8]; |
8692 | const struct nlattr *actions; | |
8693 | size_t actions_len; | |
8694 | ||
bbafd73b | 8695 | compose_slow_path(ofproto, &facet->flow, facet->xout.slow, |
f2245da3 JP |
8696 | slow_path_stub, sizeof slow_path_stub, |
8697 | &actions, &actions_len); | |
8698 | format_odp_actions(&ds, actions, actions_len); | |
8699 | } else { | |
bbafd73b EJ |
8700 | format_odp_actions(&ds, facet->xout.odp_actions.data, |
8701 | facet->xout.odp_actions.size); | |
f2245da3 | 8702 | } |
27022416 JP |
8703 | ds_put_char(&ds, '\n'); |
8704 | } | |
8705 | ||
8706 | unixctl_command_reply(conn, ds_cstr(&ds)); | |
8707 | ds_destroy(&ds); | |
8708 | } | |
8709 | ||
8710 | static void | |
8711 | ofproto_unixctl_dpif_del_flows(struct unixctl_conn *conn, | |
8712 | int argc OVS_UNUSED, const char *argv[], | |
8713 | void *aux OVS_UNUSED) | |
8714 | { | |
8715 | struct ds ds = DS_EMPTY_INITIALIZER; | |
8716 | struct ofproto_dpif *ofproto; | |
8717 | ||
8718 | ofproto = ofproto_dpif_lookup(argv[1]); | |
8719 | if (!ofproto) { | |
8720 | unixctl_command_reply_error(conn, "no such bridge"); | |
8721 | return; | |
8722 | } | |
8723 | ||
8724 | flush(&ofproto->up); | |
8725 | ||
8726 | unixctl_command_reply(conn, ds_cstr(&ds)); | |
8727 | ds_destroy(&ds); | |
8728 | } | |
8729 | ||
abe529af BP |
8730 | static void |
8731 | ofproto_dpif_unixctl_init(void) | |
8732 | { | |
8733 | static bool registered; | |
8734 | if (registered) { | |
8735 | return; | |
8736 | } | |
8737 | registered = true; | |
8738 | ||
0e15264f BP |
8739 | unixctl_command_register( |
8740 | "ofproto/trace", | |
50aa28fd AW |
8741 | "[dp_name]|bridge odp_flow|br_flow [-generate|packet]", |
8742 | 1, 3, ofproto_unixctl_trace, NULL); | |
96e466a3 | 8743 | unixctl_command_register("fdb/flush", "[bridge]", 0, 1, |
0e15264f BP |
8744 | ofproto_unixctl_fdb_flush, NULL); |
8745 | unixctl_command_register("fdb/show", "bridge", 1, 1, | |
8746 | ofproto_unixctl_fdb_show, NULL); | |
8747 | unixctl_command_register("ofproto/clog", "", 0, 0, | |
8748 | ofproto_dpif_clog, NULL); | |
8749 | unixctl_command_register("ofproto/unclog", "", 0, 0, | |
8750 | ofproto_dpif_unclog, NULL); | |
6814e51f BP |
8751 | unixctl_command_register("ofproto/self-check", "[bridge]", 0, 1, |
8752 | ofproto_dpif_self_check, NULL); | |
27022416 JP |
8753 | unixctl_command_register("dpif/dump-dps", "", 0, 0, |
8754 | ofproto_unixctl_dpif_dump_dps, NULL); | |
dc54ef36 EJ |
8755 | unixctl_command_register("dpif/show", "", 0, 0, ofproto_unixctl_dpif_show, |
8756 | NULL); | |
27022416 JP |
8757 | unixctl_command_register("dpif/dump-flows", "bridge", 1, 1, |
8758 | ofproto_unixctl_dpif_dump_flows, NULL); | |
8759 | unixctl_command_register("dpif/del-flows", "bridge", 1, 1, | |
8760 | ofproto_unixctl_dpif_del_flows, NULL); | |
bcd2633a JP |
8761 | unixctl_command_register("dpif/dump-megaflows", "bridge", 1, 1, |
8762 | ofproto_unixctl_dpif_dump_megaflows, NULL); | |
abe529af BP |
8763 | } |
8764 | \f | |
52a90c29 BP |
8765 | /* Linux VLAN device support (e.g. "eth0.10" for VLAN 10).
8766 | * | |
8767 | * This is deprecated. It is only for compatibility with broken device drivers | |
8768 | * in old versions of Linux that do not properly support VLANs when VLAN | |
8769 | * devices are not used. When broken device drivers are no longer in | |
8770 | * widespread use, we will delete these interfaces. */ | |
8771 | ||
8772 | static int | |
8773 | set_realdev(struct ofport *ofport_, uint16_t realdev_ofp_port, int vid) | |
8774 | { | |
8775 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport_->ofproto); | |
8776 | struct ofport_dpif *ofport = ofport_dpif_cast(ofport_); | |
8777 | ||
8778 | if (realdev_ofp_port == ofport->realdev_ofp_port | |
8779 | && vid == ofport->vlandev_vid) { | |
8780 | return 0; | |
8781 | } | |
8782 | ||
2cc3c58e | 8783 | ofproto->backer->need_revalidate = REV_RECONFIGURE; |
52a90c29 BP |
8784 | |
8785 | if (ofport->realdev_ofp_port) { | |
8786 | vsp_remove(ofport); | |
8787 | } | |
8788 | if (realdev_ofp_port && ofport->bundle) { | |
8789 | /* vlandevs are enslaved to their realdevs, so they are not allowed to | |
8790 | * be part of a bundle themselves. */ | |
8791 | bundle_set(ofport->up.ofproto, ofport->bundle, NULL); | |
8792 | } | |
8793 | ||
8794 | ofport->realdev_ofp_port = realdev_ofp_port; | |
8795 | ofport->vlandev_vid = vid; | |
8796 | ||
8797 | if (realdev_ofp_port) { | |
8798 | vsp_add(ofport, realdev_ofp_port, vid); | |
8799 | } | |
8800 | ||
8801 | return 0; | |
8802 | } | |
8803 | ||
8804 | static uint32_t | |
8805 | hash_realdev_vid(uint16_t realdev_ofp_port, int vid) | |
8806 | { | |
8807 | return hash_2words(realdev_ofp_port, vid); | |
8808 | } | |
8809 | ||
deea1200 AW |
8810 | /* Returns the OFP port number of the Linux VLAN device that corresponds to |
8811 | * 'vlan_tci' on the network device with port number 'realdev_ofp_port' in | |
8812 | * 'struct ofport_dpif'. For example, given 'realdev_ofp_port' of eth0 and | |
8813 | * 'vlan_tci' 9, it would return the port number of eth0.9. | |
40e05935 | 8814 | * |
deea1200 AW |
8815 | * Unless VLAN splinters are enabled for port 'realdev_ofp_port', this |
8816 | * function just returns its 'realdev_ofp_port' argument. */ | |
8817 | static uint16_t | |
52a90c29 | 8818 | vsp_realdev_to_vlandev(const struct ofproto_dpif *ofproto, |
deea1200 | 8819 | uint16_t realdev_ofp_port, ovs_be16 vlan_tci) |
52a90c29 BP |
8820 | { |
8821 | if (!hmap_is_empty(&ofproto->realdev_vid_map)) { | |
52a90c29 BP |
8822 | int vid = vlan_tci_to_vid(vlan_tci); |
8823 | const struct vlan_splinter *vsp; | |
8824 | ||
8825 | HMAP_FOR_EACH_WITH_HASH (vsp, realdev_vid_node, | |
8826 | hash_realdev_vid(realdev_ofp_port, vid), | |
8827 | &ofproto->realdev_vid_map) { | |
8828 | if (vsp->realdev_ofp_port == realdev_ofp_port | |
8829 | && vsp->vid == vid) { | |
deea1200 | 8830 | return vsp->vlandev_ofp_port; |
52a90c29 BP |
8831 | } |
8832 | } | |
8833 | } | |
deea1200 | 8834 | return realdev_ofp_port; |
52a90c29 BP |
8835 | } |
8836 | ||
8837 | static struct vlan_splinter * | |
8838 | vlandev_find(const struct ofproto_dpif *ofproto, uint16_t vlandev_ofp_port) | |
8839 | { | |
8840 | struct vlan_splinter *vsp; | |
8841 | ||
8842 | HMAP_FOR_EACH_WITH_HASH (vsp, vlandev_node, hash_int(vlandev_ofp_port, 0), | |
8843 | &ofproto->vlandev_map) { | |
8844 | if (vsp->vlandev_ofp_port == vlandev_ofp_port) { | |
8845 | return vsp; | |
8846 | } | |
8847 | } | |
8848 | ||
8849 | return NULL; | |
8850 | } | |
8851 | ||
40e05935 BP |
8852 | /* Returns the OpenFlow port number of the "real" device underlying the Linux |
8853 | * VLAN device with OpenFlow port number 'vlandev_ofp_port' and stores the | |
8854 | * VLAN VID of the Linux VLAN device in '*vid'. For example, given | |
8855 | * 'vlandev_ofp_port' of eth0.9, it would return the OpenFlow port number of | |
8856 | * eth0 and store 9 in '*vid'. | |
8857 | * | |
8858 | * Returns 0 and does not modify '*vid' if 'vlandev_ofp_port' is not a Linux | |
8859 | * VLAN device. Unless VLAN splinters are enabled, this is what this function | |
8860 | * always does. */ | |
52a90c29 BP |
8861 | static uint16_t |
8862 | vsp_vlandev_to_realdev(const struct ofproto_dpif *ofproto, | |
40e05935 | 8863 | uint16_t vlandev_ofp_port, int *vid) |
52a90c29 BP |
8864 | { |
8865 | if (!hmap_is_empty(&ofproto->vlandev_map)) { | |
8866 | const struct vlan_splinter *vsp; | |
8867 | ||
8868 | vsp = vlandev_find(ofproto, vlandev_ofp_port); | |
8869 | if (vsp) { | |
8870 | if (vid) { | |
8871 | *vid = vsp->vid; | |
8872 | } | |
8873 | return vsp->realdev_ofp_port; | |
8874 | } | |
8875 | } | |
8876 | return 0; | |
8877 | } | |
8878 | ||
b98d8985 BP |
8879 | /* Given 'flow', a flow representing a packet received on 'ofproto', checks |
8880 | * whether 'flow->in_port' represents a Linux VLAN device. If so, changes | |
8881 | * 'flow->in_port' to the "real" device backing the VLAN device, sets | |
8882 | * 'flow->vlan_tci' to the VLAN VID, and returns true. Otherwise (which is | |
8883 | * always the case unless VLAN splinters are enabled), returns false without | |
8884 | * making any changes. */ | |
8885 | static bool | |
8886 | vsp_adjust_flow(const struct ofproto_dpif *ofproto, struct flow *flow) | |
8887 | { | |
8888 | uint16_t realdev; | |
8889 | int vid; | |
8890 | ||
8891 | realdev = vsp_vlandev_to_realdev(ofproto, flow->in_port, &vid); | |
8892 | if (!realdev) { | |
8893 | return false; | |
8894 | } | |
8895 | ||
8896 | /* Cause the flow to be processed as if it came in on the real device with | |
8897 | * the VLAN device's VLAN ID. */ | |
8898 | flow->in_port = realdev; | |
8899 | flow->vlan_tci = htons((vid & VLAN_VID_MASK) | VLAN_CFI); | |
8900 | return true; | |
8901 | } | |
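A minimal, self-contained sketch of the vlan_tci value built just above, under the assumption that VLAN_VID_MASK is 0x0fff and VLAN_CFI is 0x1000 (the usual 802.1Q layout); the VID 9 and the eth0.9 device mirror the examples in the comments earlier in this section:

    #include <arpa/inet.h>   /* htons(), ntohs() */
    #include <stdint.h>
    #include <stdio.h>

    #define EX_VLAN_VID_MASK 0x0fff   /* Assumed to match OVS's VLAN_VID_MASK. */
    #define EX_VLAN_CFI      0x1000   /* Assumed to match OVS's VLAN_CFI. */

    int
    main(void)
    {
        int vid = 9;                  /* VID of a hypothetical eth0.9 device. */
        uint16_t tci = htons((vid & EX_VLAN_VID_MASK) | EX_VLAN_CFI);

        /* Prints "vlan_tci = 0x1009": VID 9 with the CFI bit set, the same
         * value vsp_adjust_flow() stores into flow->vlan_tci for a
         * splintered flow. */
        printf("vlan_tci = 0x%04x\n", (unsigned int) ntohs(tci));
        return 0;
    }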
8902 | ||
52a90c29 BP |
8903 | static void |
8904 | vsp_remove(struct ofport_dpif *port) | |
8905 | { | |
8906 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto); | |
8907 | struct vlan_splinter *vsp; | |
8908 | ||
8909 | vsp = vlandev_find(ofproto, port->up.ofp_port); | |
8910 | if (vsp) { | |
8911 | hmap_remove(&ofproto->vlandev_map, &vsp->vlandev_node); | |
8912 | hmap_remove(&ofproto->realdev_vid_map, &vsp->realdev_vid_node); | |
8913 | free(vsp); | |
8914 | ||
8915 | port->realdev_ofp_port = 0; | |
8916 | } else { | |
8917 | VLOG_ERR("missing vlan device record"); | |
8918 | } | |
8919 | } | |
8920 | ||
8921 | static void | |
8922 | vsp_add(struct ofport_dpif *port, uint16_t realdev_ofp_port, int vid) | |
8923 | { | |
8924 | struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto); | |
8925 | ||
8926 | if (!vsp_vlandev_to_realdev(ofproto, port->up.ofp_port, NULL) | |
8927 | && (vsp_realdev_to_vlandev(ofproto, realdev_ofp_port, htons(vid)) | |
8928 | == realdev_ofp_port)) { | |
8929 | struct vlan_splinter *vsp; | |
8930 | ||
8931 | vsp = xmalloc(sizeof *vsp); | |
8932 | hmap_insert(&ofproto->vlandev_map, &vsp->vlandev_node, | |
8933 | hash_int(port->up.ofp_port, 0)); | |
8934 | hmap_insert(&ofproto->realdev_vid_map, &vsp->realdev_vid_node, | |
8935 | hash_realdev_vid(realdev_ofp_port, vid)); | |
8936 | vsp->realdev_ofp_port = realdev_ofp_port; | |
8937 | vsp->vlandev_ofp_port = port->up.ofp_port; | |
8938 | vsp->vid = vid; | |
8939 | ||
8940 | port->realdev_ofp_port = realdev_ofp_port; | |
8941 | } else { | |
8942 | VLOG_ERR("duplicate vlan device record"); | |
8943 | } | |
8944 | } | |
e1b1d06a JP |
8945 | |
8946 | static uint32_t | |
8947 | ofp_port_to_odp_port(const struct ofproto_dpif *ofproto, uint16_t ofp_port) | |
8948 | { | |
8949 | const struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port); | |
8950 | return ofport ? ofport->odp_port : OVSP_NONE; | |
8951 | } | |
8952 | ||
acf60855 JP |
8953 | static struct ofport_dpif * |
8954 | odp_port_to_ofport(const struct dpif_backer *backer, uint32_t odp_port) | |
e1b1d06a JP |
8955 | { |
8956 | struct ofport_dpif *port; | |
8957 | ||
8958 | HMAP_FOR_EACH_IN_BUCKET (port, odp_port_node, | |
8959 | hash_int(odp_port, 0), | |
acf60855 | 8960 | &backer->odp_to_ofport_map) { |
e1b1d06a | 8961 | if (port->odp_port == odp_port) { |
acf60855 | 8962 | return port; |
e1b1d06a JP |
8963 | } |
8964 | } | |
8965 | ||
acf60855 JP |
8966 | return NULL; |
8967 | } | |
8968 | ||
8969 | static uint16_t | |
8970 | odp_port_to_ofp_port(const struct ofproto_dpif *ofproto, uint32_t odp_port) | |
8971 | { | |
8972 | struct ofport_dpif *port; | |
8973 | ||
8974 | port = odp_port_to_ofport(ofproto->backer, odp_port); | |
6472ba11 | 8975 | if (port && &ofproto->up == port->up.ofproto) { |
acf60855 JP |
8976 | return port->up.ofp_port; |
8977 | } else { | |
8978 | return OFPP_NONE; | |
8979 | } | |
e1b1d06a | 8980 | } |
655ab909 | 8981 | |
655ab909 AZ |
8982 | /* Computes an exponentially weighted moving average, adding 'new' as the |
8983 | * newest, most heavily weighted element. 'base' designates the rate of decay: |
8984 | * after 'base' further updates, 'new''s weight in the EWMA decays to about |
8985 | * 1/e (about .37) of its initial weight. */ |
8986 | static void | |
8987 | exp_mavg(double *avg, int base, double new) | |
8988 | { | |
8989 | *avg = (*avg * (base - 1) + new) / base; | |
8990 | } | |
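A standalone worked example of the decay described in the comment above; the helper repeats exp_mavg()'s formula so the sketch compiles on its own, and the numbers are illustrative:

    #include <stdio.h>

    /* Same formula as exp_mavg() above, duplicated so this sketch is
     * self-contained. */
    static void
    ewma_update(double *avg, int base, double new)
    {
        *avg = (*avg * (base - 1) + new) / base;
    }

    int
    main(void)
    {
        double avg = 1.0;
        int i;

        /* 60 zero-valued updates with base 60: the initial 1.0 decays to
         * (59/60)^60, about 0.365, close to 1/e (about 0.368). */
        for (i = 0; i < 60; i++) {
            ewma_update(&avg, 60, 0.0);
        }
        printf("%.3f\n", avg);
        return 0;
    }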
8991 | ||
8992 | static void | |
dc54ef36 | 8993 | update_moving_averages(struct dpif_backer *backer) |
655ab909 AZ |
8994 | { |
8995 | const int min_ms = 60 * 1000; /* milliseconds in one minute. */ | |
dc54ef36 EJ |
8996 | long long int minutes = (time_msec() - backer->created) / min_ms; |
8997 | ||
8998 | if (minutes > 0) { | |
8999 | backer->lifetime.add_rate = (double) backer->total_subfacet_add_count | |
9000 | / minutes; | |
9001 | backer->lifetime.del_rate = (double) backer->total_subfacet_del_count | |
9002 | / minutes; | |
9003 | } else { | |
9004 | backer->lifetime.add_rate = 0.0; | |
9005 | backer->lifetime.del_rate = 0.0; | |
9006 | } | |
655ab909 AZ |
9007 | |
9008 | /* Update hourly averages on the minute boundaries. */ | |
dc54ef36 EJ |
9009 | if (time_msec() - backer->last_minute >= min_ms) { |
9010 | exp_mavg(&backer->hourly.add_rate, 60, backer->subfacet_add_count); | |
9011 | exp_mavg(&backer->hourly.del_rate, 60, backer->subfacet_del_count); | |
655ab909 AZ |
9012 | |
9013 | /* Update daily averages on the hour boundaries. */ | |
dc54ef36 EJ |
9014 | if ((backer->last_minute - backer->created) / min_ms % 60 == 59) { |
9015 | exp_mavg(&backer->daily.add_rate, 24, backer->hourly.add_rate); | |
9016 | exp_mavg(&backer->daily.del_rate, 24, backer->hourly.del_rate); | |
655ab909 AZ |
9017 | } |
9018 | ||
dc54ef36 EJ |
9019 | backer->total_subfacet_add_count += backer->subfacet_add_count; |
9020 | backer->total_subfacet_del_count += backer->subfacet_del_count; | |
9021 | backer->subfacet_add_count = 0; | |
9022 | backer->subfacet_del_count = 0; | |
9023 | backer->last_minute += min_ms; | |
655ab909 AZ |
9024 | } |
9025 | } | |
e1b1d06a | 9026 | |
abe529af | 9027 | const struct ofproto_class ofproto_dpif_class = { |
b0408fca | 9028 | init, |
abe529af BP |
9029 | enumerate_types, |
9030 | enumerate_names, | |
9031 | del, | |
0aeaabc8 | 9032 | port_open_type, |
acf60855 JP |
9033 | type_run, |
9034 | type_run_fast, | |
9035 | type_wait, | |
abe529af BP |
9036 | alloc, |
9037 | construct, | |
9038 | destruct, | |
9039 | dealloc, | |
9040 | run, | |
5fcc0d00 | 9041 | run_fast, |
abe529af | 9042 | wait, |
0d085684 | 9043 | get_memory_usage, |
abe529af | 9044 | flush, |
6c1491fb BP |
9045 | get_features, |
9046 | get_tables, | |
abe529af BP |
9047 | port_alloc, |
9048 | port_construct, | |
9049 | port_destruct, | |
9050 | port_dealloc, | |
9051 | port_modified, | |
9052 | port_reconfigured, | |
9053 | port_query_by_name, | |
9054 | port_add, | |
9055 | port_del, | |
6527c598 | 9056 | port_get_stats, |
abe529af BP |
9057 | port_dump_start, |
9058 | port_dump_next, | |
9059 | port_dump_done, | |
9060 | port_poll, | |
9061 | port_poll_wait, | |
9062 | port_is_lacp_current, | |
0ab6decf | 9063 | NULL, /* rule_choose_table */ |
abe529af BP |
9064 | rule_alloc, |
9065 | rule_construct, | |
9066 | rule_destruct, | |
9067 | rule_dealloc, | |
abe529af BP |
9068 | rule_get_stats, |
9069 | rule_execute, | |
9070 | rule_modify_actions, | |
7257b535 | 9071 | set_frag_handling, |
abe529af BP |
9072 | packet_out, |
9073 | set_netflow, | |
9074 | get_netflow_ids, | |
9075 | set_sflow, | |
29089a54 | 9076 | set_ipfix, |
abe529af | 9077 | set_cfm, |
9a9e3786 | 9078 | get_cfm_status, |
ccc09689 EJ |
9079 | set_bfd, |
9080 | get_bfd_status, | |
21f7563c JP |
9081 | set_stp, |
9082 | get_stp_status, | |
9083 | set_stp_port, | |
9084 | get_stp_port_status, | |
8b36f51e | 9085 | set_queues, |
abe529af BP |
9086 | bundle_set, |
9087 | bundle_remove, | |
9088 | mirror_set, | |
9d24de3b | 9089 | mirror_get_stats, |
abe529af BP |
9090 | set_flood_vlans, |
9091 | is_mirror_output_bundle, | |
8402c74b | 9092 | forward_bpdu_changed, |
c4069512 | 9093 | set_mac_table_config, |
52a90c29 | 9094 | set_realdev, |
abe529af | 9095 | }; |