/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */

#include <config.h>
#include "ofproto-dpif-upcall.h"

#include <errno.h>
#include <stdbool.h>
#include <inttypes.h>

#include "connmgr.h"
#include "coverage.h"
#include "cmap.h"
#include "dpif.h"
#include "openvswitch/dynamic-string.h"
#include "fail-open.h"
#include "guarded-list.h"
#include "latch.h"
#include "openvswitch/list.h"
#include "netlink.h"
#include "openvswitch/ofpbuf.h"
#include "ofproto-dpif-ipfix.h"
#include "ofproto-dpif-sflow.h"
#include "ofproto-dpif-xlate.h"
#include "ovs-rcu.h"
#include "packets.h"
#include "poll-loop.h"
#include "seq.h"
#include "unixctl.h"
#include "openvswitch/vlog.h"

#define MAX_QUEUE_LENGTH 512
#define UPCALL_MAX_BATCH 64
#define REVALIDATE_MAX_BATCH 50

VLOG_DEFINE_THIS_MODULE(ofproto_dpif_upcall);

COVERAGE_DEFINE(dumped_duplicate_flow);
COVERAGE_DEFINE(dumped_new_flow);
COVERAGE_DEFINE(handler_duplicate_upcall);
COVERAGE_DEFINE(upcall_ukey_contention);
COVERAGE_DEFINE(revalidate_missed_dp_flow);

/* A thread that reads upcalls from dpif, forwards each upcall's packet,
 * and possibly sets up a kernel flow as a cache. */
struct handler {
    struct udpif *udpif;               /* Parent udpif. */
    pthread_t thread;                  /* Thread ID. */
    uint32_t handler_id;               /* Handler id. */
};

/* In the absence of a multiple-writer multiple-reader data structure for
 * storing udpif_keys ("ukeys"), we use a large number of cmaps, each with its
 * own lock for writing. */
#define N_UMAPS 512 /* per udpif. */
struct umap {
    struct ovs_mutex mutex;            /* Take for writing to the following. */
    struct cmap cmap;                  /* Datapath flow keys. */
};
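
/* For example, the umap for a ukey is selected by the ukey's precomputed
 * 'hash', and only insertion and removal take that umap's mutex; lookups
 * walk the cmap locklessly under RCU. A sketch, mirroring
 * ukey_install_start() and ukey_delete() below:
 *
 *     struct umap *umap = &udpif->ukeys[ukey->hash % N_UMAPS];
 *
 *     ovs_mutex_lock(&umap->mutex);
 *     cmap_insert(&umap->cmap, &ukey->cmap_node, ukey->hash);
 *     ovs_mutex_unlock(&umap->mutex);
 */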

/* A thread that processes datapath flows, updates OpenFlow statistics, and
 * updates or removes the flows if necessary.
 *
 * Revalidator threads operate in two phases: "dump" and "sweep". In between
 * each phase, all revalidators sync up so that all revalidator threads are
 * either in one phase or the other, but not a combination.
 *
 * During the dump phase, revalidators fetch flows from the datapath and
 * attribute the statistics to OpenFlow rules. Each datapath flow has a
 * corresponding ukey which caches the most recently seen statistics. If
 * a flow needs to be deleted (for example, because it is unused over a
 * period of time), revalidator threads may delete the flow during the
 * dump phase. The datapath is not guaranteed to reliably dump all flows,
 * and there is no mapping between datapath flows and revalidators, so a
 * particular flow may be handled by zero or more revalidators during a
 * single dump phase. To avoid duplicate attribution of statistics, ukeys
 * are never deleted during this phase.
 *
 * During the sweep phase, each revalidator takes ownership of a different
 * slice of umaps and sweeps through all ukeys in those umaps to figure out
 * whether they need to be deleted. During this phase, revalidators may
 * fetch individual flows which were not dumped during the dump phase to
 * validate them and attribute statistics.
 */
struct revalidator {
    struct udpif *udpif;               /* Parent udpif. */
    pthread_t thread;                  /* Thread ID. */
    unsigned int id;                   /* ovsthread_id_self(). */
};
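
/* A minimal sketch of how the phases line up, following the barrier
 * protocol that udpif_revalidator() below actually implements (leader-only
 * work such as creating and destroying the flow dump is omitted):
 *
 *     for (;;) {
 *         ovs_barrier_block(&udpif->reval_barrier);  (wait for dump setup)
 *         revalidate(revalidator);                   (dump phase)
 *         ovs_barrier_block(&udpif->reval_barrier);  (all dumping done)
 *         revalidator_sweep(revalidator);            (sweep phase)
 *         ovs_barrier_block(&udpif->reval_barrier);  (all sweeping done)
 *     }
 */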

/* An upcall handler for ofproto_dpif.
 *
 * udpif keeps records of two kinds of logically separate units:
 *
 * upcall handling
 * ---------------
 *
 *    - An array of 'struct handler's for upcall handling and flow
 *      installation.
 *
 * flow revalidation
 * -----------------
 *
 *    - Revalidation threads which read the datapath flow table and maintain
 *      it.
 */
struct udpif {
    struct ovs_list list_node;         /* In all_udpifs list. */

    struct dpif *dpif;                 /* Datapath handle. */
    struct dpif_backer *backer;        /* Opaque dpif_backer pointer. */

    struct handler *handlers;          /* Upcall handlers. */
    size_t n_handlers;

    struct revalidator *revalidators;  /* Flow revalidators. */
    size_t n_revalidators;

    struct latch exit_latch;           /* Tells child threads to exit. */

    /* Revalidation. */
    struct seq *reval_seq;             /* Incremented to force revalidation. */
    bool reval_exit;                   /* Set by leader on 'exit_latch'. */
    struct ovs_barrier reval_barrier;  /* Barrier used by revalidators. */
    struct dpif_flow_dump *dump;       /* DPIF flow dump state. */
    long long int dump_duration;       /* Duration of the last flow dump. */
    struct seq *dump_seq;              /* Increments each dump iteration. */
    atomic_bool enable_ufid;           /* If true, skip dumping flow attrs. */

    /* These variables provide a mechanism for the main thread to pause
     * all revalidation without having to completely shut the threads down.
     * 'pause_latch' is shared between the main thread and the lead
     * revalidator thread, so when it is desirable to halt revalidation, the
     * main thread will set the latch. 'pause' and 'pause_barrier' are shared
     * by revalidator threads. The lead revalidator will set 'pause' when it
     * observes the latch has been set, and this will cause all revalidator
     * threads to wait on 'pause_barrier' at the beginning of the next
     * revalidation round. */
    bool pause;                        /* Set by leader on 'pause_latch'. */
    struct latch pause_latch;          /* Set to force revalidators pause. */
    struct ovs_barrier pause_barrier;  /* Barrier used by the main thread to
                                        * pause all revalidators. */

    /* There are 'N_UMAPS' maps containing 'struct udpif_key' elements.
     *
     * During the flow dump phase, revalidators insert into these with a random
     * distribution. During the garbage collection phase, each revalidator
     * takes care of garbage collecting a slice of these maps. */
    struct umap *ukeys;

    /* Datapath flow statistics. */
    unsigned int max_n_flows;
    unsigned int avg_n_flows;

    /* Following fields are accessed and modified by different threads. */
    atomic_uint flow_limit;            /* Datapath flow hard limit. */

    /* n_flows_mutex prevents multiple threads updating these concurrently. */
    atomic_uint n_flows;               /* Number of flows in the datapath. */
    atomic_llong n_flows_timestamp;    /* Last time n_flows was updated. */
    struct ovs_mutex n_flows_mutex;

    /* Following fields are accessed and modified only from the main thread. */
    struct unixctl_conn **conns;       /* Connections waiting on dump_seq. */
    uint64_t conn_seq;                 /* Corresponds to 'dump_seq' when
                                          conns[n_conns-1] was stored. */
    size_t n_conns;                    /* Number of connections waiting. */
};

enum upcall_type {
    BAD_UPCALL,                 /* Some kind of bug somewhere. */
    MISS_UPCALL,                /* A flow miss. */
    SFLOW_UPCALL,               /* sFlow sample. */
    FLOW_SAMPLE_UPCALL,         /* Per-flow sampling. */
    IPFIX_UPCALL                /* Per-bridge sampling. */
};

enum reval_result {
    UKEY_KEEP,
    UKEY_DELETE,
    UKEY_MODIFY
};

struct upcall {
    struct ofproto_dpif *ofproto;  /* Parent ofproto. */
    const struct recirc_id_node *recirc; /* Recirculation context. */
    bool have_recirc_ref;          /* Reference held on recirc ctx? */

    /* The flow and packet are only required to be constant when using
     * dpif-netdev. If a modification is absolutely necessary, a const cast
     * may be used with other datapaths. */
    const struct flow *flow;       /* Parsed representation of the packet. */
    const ovs_u128 *ufid;          /* Unique identifier for 'flow'. */
    unsigned pmd_id;               /* Datapath poll mode driver id. */
    const struct dp_packet *packet; /* Packet associated with this upcall. */
    ofp_port_t in_port;            /* OpenFlow in port, or OFPP_NONE. */
    uint16_t mru;                  /* If !0, the maximum receive unit of a
                                      fragmented IP packet. */

    enum dpif_upcall_type type;    /* Datapath type of the upcall. */
    const struct nlattr *userdata; /* Userdata for DPIF_UC_ACTION Upcalls. */
    const struct nlattr *actions;  /* Flow actions in DPIF_UC_ACTION Upcalls. */

    bool xout_initialized;         /* True if 'xout' must be uninitialized. */
    struct xlate_out xout;         /* Result of xlate_actions(). */
    struct ofpbuf odp_actions;     /* Datapath actions from xlate_actions(). */
    struct flow_wildcards wc;      /* Dependencies that megaflow must match. */
    struct ofpbuf put_actions;     /* Actions 'put' in the fastpath. */

    struct dpif_ipfix *ipfix;      /* IPFIX pointer or NULL. */
    struct dpif_sflow *sflow;      /* SFlow pointer or NULL. */

    struct udpif_key *ukey;        /* Revalidator flow cache. */
    bool ukey_persists;            /* Set true to keep 'ukey' beyond the
                                      lifetime of this upcall. */

    uint64_t dump_seq;             /* udpif->dump_seq at translation time. */
    uint64_t reval_seq;            /* udpif->reval_seq at translation time. */

    /* Not used by the upcall callback interface. */
    const struct nlattr *key;      /* Datapath flow key. */
    size_t key_len;                /* Datapath flow key length. */
    const struct nlattr *out_tun_key; /* Datapath output tunnel key. */

    uint64_t odp_actions_stub[1024 / 8]; /* Stub for odp_actions. */
};

/* 'udpif_key's are responsible for tracking the little bit of state udpif
 * needs to do flow expiration which can't be pulled directly from the
 * datapath. They may be created by any handler or revalidator thread at any
 * time, and read by any revalidator during the dump phase. They are however
 * each owned by a single revalidator which takes care of destroying them
 * during the garbage-collection phase.
 *
 * The mutex within the ukey protects some members of the ukey. The ukey
 * itself is protected by RCU and is held within a umap in the parent udpif.
 * Adding or removing a ukey from a umap is only safe when holding the
 * corresponding umap lock. */
struct udpif_key {
    struct cmap_node cmap_node;     /* In parent revalidator 'ukeys' map. */

    /* These elements are read only once created, and therefore aren't
     * protected by a mutex. */
    const struct nlattr *key;      /* Datapath flow key. */
    size_t key_len;                /* Length of 'key'. */
    const struct nlattr *mask;     /* Datapath flow mask. */
    size_t mask_len;               /* Length of 'mask'. */
    ovs_u128 ufid;                 /* Unique flow identifier. */
    bool ufid_present;             /* True if 'ufid' is in datapath. */
    uint32_t hash;                 /* Pre-computed hash for 'key'. */
    unsigned pmd_id;               /* Datapath poll mode driver id. */

    struct ovs_mutex mutex;                   /* Guards the following. */
    struct dpif_flow_stats stats OVS_GUARDED; /* Last known stats. */
    long long int created OVS_GUARDED;        /* Estimate of creation time. */
    uint64_t dump_seq OVS_GUARDED;            /* Tracks udpif->dump_seq. */
    uint64_t reval_seq OVS_GUARDED;           /* Tracks udpif->reval_seq. */
    bool flow_exists OVS_GUARDED;             /* Ensures flows are only deleted
                                                 once. */
    /* Datapath flow actions as nlattrs. Protected by RCU. Read with
     * ukey_get_actions(), and write with ukey_set_actions(). */
    OVSRCU_TYPE(struct ofpbuf *) actions;

    struct xlate_cache *xcache OVS_GUARDED;   /* Cache for xlate entries that
                                               * are affected by this ukey.
                                               * Used for stats and learning. */
    union {
        struct odputil_keybuf buf;
        struct nlattr nla;
    } keybuf, maskbuf;

    uint32_t key_recirc_id;   /* Non-zero if reference is held by the ukey. */
    struct recirc_refs recircs;  /* Action recirc IDs with references held. */
};
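
/* For example, a thread that wants a ukey's cached statistics must take the
 * ukey's mutex around the read; the lookup itself is RCU-safe. A sketch,
 * using the same trylock pattern as ukey_acquire() below:
 *
 *     struct udpif_key *ukey = ukey_lookup(udpif, &ufid, pmd_id);
 *     if (ukey && !ovs_mutex_trylock(&ukey->mutex)) {
 *         struct dpif_flow_stats stats = ukey->stats;
 *         ovs_mutex_unlock(&ukey->mutex);
 *     }
 */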

/* Datapath operation with optional ukey attached. */
struct ukey_op {
    struct udpif_key *ukey;
    struct dpif_flow_stats stats;  /* Stats for 'op'. */
    struct dpif_op dop;            /* Flow operation. */
};

static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
static struct ovs_list all_udpifs = OVS_LIST_INITIALIZER(&all_udpifs);

static size_t recv_upcalls(struct handler *);
static int process_upcall(struct udpif *, struct upcall *,
                          struct ofpbuf *odp_actions, struct flow_wildcards *);
static void handle_upcalls(struct udpif *, struct upcall *, size_t n_upcalls);
static void udpif_stop_threads(struct udpif *);
static void udpif_start_threads(struct udpif *, size_t n_handlers,
                                size_t n_revalidators);
static void udpif_pause_revalidators(struct udpif *);
static void udpif_resume_revalidators(struct udpif *);
static void *udpif_upcall_handler(void *);
static void *udpif_revalidator(void *);
static unsigned long udpif_get_n_flows(struct udpif *);
static void revalidate(struct revalidator *);
static void revalidator_pause(struct revalidator *);
static void revalidator_sweep(struct revalidator *);
static void revalidator_purge(struct revalidator *);
static void upcall_unixctl_show(struct unixctl_conn *conn, int argc,
                                const char *argv[], void *aux);
static void upcall_unixctl_disable_megaflows(struct unixctl_conn *, int argc,
                                             const char *argv[], void *aux);
static void upcall_unixctl_enable_megaflows(struct unixctl_conn *, int argc,
                                            const char *argv[], void *aux);
static void upcall_unixctl_disable_ufid(struct unixctl_conn *, int argc,
                                        const char *argv[], void *aux);
static void upcall_unixctl_enable_ufid(struct unixctl_conn *, int argc,
                                       const char *argv[], void *aux);
static void upcall_unixctl_set_flow_limit(struct unixctl_conn *conn, int argc,
                                          const char *argv[], void *aux);
static void upcall_unixctl_dump_wait(struct unixctl_conn *conn, int argc,
                                     const char *argv[], void *aux);
static void upcall_unixctl_purge(struct unixctl_conn *conn, int argc,
                                 const char *argv[], void *aux);

static struct udpif_key *ukey_create_from_upcall(struct upcall *,
                                                 struct flow_wildcards *);
static int ukey_create_from_dpif_flow(const struct udpif *,
                                      const struct dpif_flow *,
                                      struct udpif_key **);
static void ukey_get_actions(struct udpif_key *, const struct nlattr **actions,
                             size_t *size);
static bool ukey_install_start(struct udpif *, struct udpif_key *ukey);
static bool ukey_install_finish(struct udpif_key *ukey, int error);
static bool ukey_install(struct udpif *udpif, struct udpif_key *ukey);
static struct udpif_key *ukey_lookup(struct udpif *udpif,
                                     const ovs_u128 *ufid,
                                     const unsigned pmd_id);
static int ukey_acquire(struct udpif *, const struct dpif_flow *,
                        struct udpif_key **result, int *error);
static void ukey_delete__(struct udpif_key *);
static void ukey_delete(struct umap *, struct udpif_key *);
static enum upcall_type classify_upcall(enum dpif_upcall_type type,
                                        const struct nlattr *userdata);

static int upcall_receive(struct upcall *, const struct dpif_backer *,
                          const struct dp_packet *packet, enum dpif_upcall_type,
                          const struct nlattr *userdata, const struct flow *,
                          const unsigned int mru,
                          const ovs_u128 *ufid, const unsigned pmd_id);
static void upcall_uninit(struct upcall *);

static upcall_callback upcall_cb;
static dp_purge_callback dp_purge_cb;

static atomic_bool enable_megaflows = ATOMIC_VAR_INIT(true);
static atomic_bool enable_ufid = ATOMIC_VAR_INIT(true);

void
udpif_init(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
    if (ovsthread_once_start(&once)) {
        unixctl_command_register("upcall/show", "", 0, 0, upcall_unixctl_show,
                                 NULL);
        unixctl_command_register("upcall/disable-megaflows", "", 0, 0,
                                 upcall_unixctl_disable_megaflows, NULL);
        unixctl_command_register("upcall/enable-megaflows", "", 0, 0,
                                 upcall_unixctl_enable_megaflows, NULL);
        unixctl_command_register("upcall/disable-ufid", "", 0, 0,
                                 upcall_unixctl_disable_ufid, NULL);
        unixctl_command_register("upcall/enable-ufid", "", 0, 0,
                                 upcall_unixctl_enable_ufid, NULL);
        unixctl_command_register("upcall/set-flow-limit", "", 1, 1,
                                 upcall_unixctl_set_flow_limit, NULL);
        unixctl_command_register("revalidator/wait", "", 0, 0,
                                 upcall_unixctl_dump_wait, NULL);
        unixctl_command_register("revalidator/purge", "", 0, 0,
                                 upcall_unixctl_purge, NULL);
        ovsthread_once_done(&once);
    }
}

struct udpif *
udpif_create(struct dpif_backer *backer, struct dpif *dpif)
{
    struct udpif *udpif = xzalloc(sizeof *udpif);

    udpif->dpif = dpif;
    udpif->backer = backer;
    atomic_init(&udpif->flow_limit, MIN(ofproto_flow_limit, 10000));
    udpif->reval_seq = seq_create();
    udpif->dump_seq = seq_create();
    latch_init(&udpif->exit_latch);
    latch_init(&udpif->pause_latch);
    ovs_list_push_back(&all_udpifs, &udpif->list_node);
    atomic_init(&udpif->enable_ufid, false);
    atomic_init(&udpif->n_flows, 0);
    atomic_init(&udpif->n_flows_timestamp, LLONG_MIN);
    ovs_mutex_init(&udpif->n_flows_mutex);
    udpif->ukeys = xmalloc(N_UMAPS * sizeof *udpif->ukeys);
    for (int i = 0; i < N_UMAPS; i++) {
        cmap_init(&udpif->ukeys[i].cmap);
        ovs_mutex_init(&udpif->ukeys[i].mutex);
    }

    dpif_register_upcall_cb(dpif, upcall_cb, udpif);
    dpif_register_dp_purge_cb(dpif, dp_purge_cb, udpif);

    return udpif;
}

void
udpif_run(struct udpif *udpif)
{
    if (udpif->conns && udpif->conn_seq != seq_read(udpif->dump_seq)) {
        int i;

        for (i = 0; i < udpif->n_conns; i++) {
            unixctl_command_reply(udpif->conns[i], NULL);
        }
        free(udpif->conns);
        udpif->conns = NULL;
        udpif->n_conns = 0;
    }
}

void
udpif_destroy(struct udpif *udpif)
{
    udpif_stop_threads(udpif);

    dpif_register_dp_purge_cb(udpif->dpif, NULL, udpif);
    dpif_register_upcall_cb(udpif->dpif, NULL, udpif);

    for (int i = 0; i < N_UMAPS; i++) {
        cmap_destroy(&udpif->ukeys[i].cmap);
        ovs_mutex_destroy(&udpif->ukeys[i].mutex);
    }
    free(udpif->ukeys);
    udpif->ukeys = NULL;

    ovs_list_remove(&udpif->list_node);
    latch_destroy(&udpif->exit_latch);
    latch_destroy(&udpif->pause_latch);
    seq_destroy(udpif->reval_seq);
    seq_destroy(udpif->dump_seq);
    ovs_mutex_destroy(&udpif->n_flows_mutex);
    free(udpif);
}

/* Stops the handler and revalidator threads. Must be enclosed in an ovsrcu
 * quiescent state, except when called while destroying the udpif. */
static void
udpif_stop_threads(struct udpif *udpif)
{
    if (udpif && (udpif->n_handlers != 0 || udpif->n_revalidators != 0)) {
        size_t i;

        latch_set(&udpif->exit_latch);

        for (i = 0; i < udpif->n_handlers; i++) {
            struct handler *handler = &udpif->handlers[i];

            xpthread_join(handler->thread, NULL);
        }

        for (i = 0; i < udpif->n_revalidators; i++) {
            xpthread_join(udpif->revalidators[i].thread, NULL);
        }

        dpif_disable_upcall(udpif->dpif);

        for (i = 0; i < udpif->n_revalidators; i++) {
            struct revalidator *revalidator = &udpif->revalidators[i];

            /* Delete ukeys, and delete all flows from the datapath to prevent
             * double-counting stats. */
            revalidator_purge(revalidator);
        }

        latch_poll(&udpif->exit_latch);

        ovs_barrier_destroy(&udpif->reval_barrier);
        ovs_barrier_destroy(&udpif->pause_barrier);

        free(udpif->revalidators);
        udpif->revalidators = NULL;
        udpif->n_revalidators = 0;

        free(udpif->handlers);
        udpif->handlers = NULL;
        udpif->n_handlers = 0;
    }
}

/* Starts the handler and revalidator threads. Must be enclosed in an ovsrcu
 * quiescent state. */
static void
udpif_start_threads(struct udpif *udpif, size_t n_handlers,
                    size_t n_revalidators)
{
    if (udpif && n_handlers && n_revalidators) {
        size_t i;
        bool enable_ufid;

        udpif->n_handlers = n_handlers;
        udpif->n_revalidators = n_revalidators;

        udpif->handlers = xzalloc(udpif->n_handlers * sizeof *udpif->handlers);
        for (i = 0; i < udpif->n_handlers; i++) {
            struct handler *handler = &udpif->handlers[i];

            handler->udpif = udpif;
            handler->handler_id = i;
            handler->thread = ovs_thread_create(
                "handler", udpif_upcall_handler, handler);
        }

        enable_ufid = ofproto_dpif_get_enable_ufid(udpif->backer);
        atomic_init(&udpif->enable_ufid, enable_ufid);
        dpif_enable_upcall(udpif->dpif);

        ovs_barrier_init(&udpif->reval_barrier, udpif->n_revalidators);
        ovs_barrier_init(&udpif->pause_barrier, udpif->n_revalidators + 1);
        udpif->reval_exit = false;
        udpif->pause = false;
        udpif->revalidators = xzalloc(udpif->n_revalidators
                                      * sizeof *udpif->revalidators);
        for (i = 0; i < udpif->n_revalidators; i++) {
            struct revalidator *revalidator = &udpif->revalidators[i];

            revalidator->udpif = udpif;
            revalidator->thread = ovs_thread_create(
                "revalidator", udpif_revalidator, revalidator);
        }
    }
}

/* Pauses all revalidators. Should only be called by the main thread.
 * When this function returns, all revalidators are paused and will proceed
 * only after udpif_resume_revalidators() is called. */
static void
udpif_pause_revalidators(struct udpif *udpif)
{
    if (ofproto_dpif_backer_enabled(udpif->backer)) {
        latch_set(&udpif->pause_latch);
        ovs_barrier_block(&udpif->pause_barrier);
    }
}

/* Resumes the paused revalidators. Should only be called by the
 * main thread. */
static void
udpif_resume_revalidators(struct udpif *udpif)
{
    if (ofproto_dpif_backer_enabled(udpif->backer)) {
        latch_poll(&udpif->pause_latch);
        ovs_barrier_block(&udpif->pause_barrier);
    }
}

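/* A sketch of the intended calling pattern, from the main thread only
 * (hypothetical caller; the pattern follows the latch/barrier handshake
 * described in struct udpif above):
 *
 *     udpif_pause_revalidators(udpif);
 *     ...modify state that revalidator threads read...
 *     udpif_resume_revalidators(udpif);
 */
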
/* Tells 'udpif' how many threads it should use to handle upcalls.
 * 'n_handlers' and 'n_revalidators' can never be zero. 'udpif''s
 * datapath handle must have packet reception enabled before starting
 * threads. */
void
udpif_set_threads(struct udpif *udpif, size_t n_handlers,
                  size_t n_revalidators)
{
    ovs_assert(udpif);
    ovs_assert(n_handlers && n_revalidators);

    ovsrcu_quiesce_start();
    if (udpif->n_handlers != n_handlers
        || udpif->n_revalidators != n_revalidators) {
        udpif_stop_threads(udpif);
    }

    if (!udpif->handlers && !udpif->revalidators) {
        int error;

        error = dpif_handlers_set(udpif->dpif, n_handlers);
        if (error) {
            VLOG_ERR("failed to configure handlers in dpif %s: %s",
                     dpif_name(udpif->dpif), ovs_strerror(error));
            return;
        }

        udpif_start_threads(udpif, n_handlers, n_revalidators);
    }
    ovsrcu_quiesce_end();
}

/* Waits for all ongoing upcall translations to complete. This ensures that
 * there are no transient references to any removed ofprotos (or other
 * objects). In particular, this should be called after an ofproto is removed
 * (e.g. via xlate_remove_ofproto()) but before it is destroyed. */
void
udpif_synchronize(struct udpif *udpif)
{
    /* This is stronger than necessary. It would be sufficient to ensure
     * (somehow) that each handler and revalidator thread had passed through
     * its main loop once. */
    size_t n_handlers = udpif->n_handlers;
    size_t n_revalidators = udpif->n_revalidators;

    ovsrcu_quiesce_start();
    udpif_stop_threads(udpif);
    udpif_start_threads(udpif, n_handlers, n_revalidators);
    ovsrcu_quiesce_end();
}

/* Notifies 'udpif' that something changed which may render previous
 * xlate_actions() results invalid. */
void
udpif_revalidate(struct udpif *udpif)
{
    seq_change(udpif->reval_seq);
}

/* Returns a seq which increments every time 'udpif' pulls stats from the
 * datapath. Callers can use this to get a sense of when might be a good time
 * to do periodic work which relies on relatively up-to-date statistics. */
struct seq *
udpif_dump_seq(struct udpif *udpif)
{
    return udpif->dump_seq;
}
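
/* E.g., a hypothetical caller could run periodic work once per completed
 * dump round by polling the seq in its main loop:
 *
 *     uint64_t last = seq_read(udpif_dump_seq(udpif));
 *     ...do work that relies on the freshly pulled statistics...
 *     seq_wait(udpif_dump_seq(udpif), last);
 */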

void
udpif_get_memory_usage(struct udpif *udpif, struct simap *usage)
{
    size_t i;

    simap_increase(usage, "handlers", udpif->n_handlers);

    simap_increase(usage, "revalidators", udpif->n_revalidators);
    for (i = 0; i < N_UMAPS; i++) {
        simap_increase(usage, "udpif keys", cmap_count(&udpif->ukeys[i].cmap));
    }
}

/* Removes all flows from a single datapath. */
void
udpif_flush(struct udpif *udpif)
{
    size_t n_handlers, n_revalidators;

    n_handlers = udpif->n_handlers;
    n_revalidators = udpif->n_revalidators;

    ovsrcu_quiesce_start();

    udpif_stop_threads(udpif);
    dpif_flow_flush(udpif->dpif);
    udpif_start_threads(udpif, n_handlers, n_revalidators);

    ovsrcu_quiesce_end();
}

/* Removes all flows from all datapaths. */
static void
udpif_flush_all_datapaths(void)
{
    struct udpif *udpif;

    LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
        udpif_flush(udpif);
    }
}

static bool
udpif_use_ufid(struct udpif *udpif)
{
    bool enable;

    atomic_read_relaxed(&enable_ufid, &enable);
    return enable && ofproto_dpif_get_enable_ufid(udpif->backer);
}

\f
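
/* Returns the number of flows in the datapath, refreshed from the dpif
 * statistics at most once every 100 ms; the trylock ensures that at most one
 * thread pays for dpif_get_dp_stats() per interval while all other callers
 * read the cached 'n_flows'. */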
static unsigned long
udpif_get_n_flows(struct udpif *udpif)
{
    long long int time, now;
    unsigned long flow_count;

    now = time_msec();
    atomic_read_relaxed(&udpif->n_flows_timestamp, &time);
    if (time < now - 100 && !ovs_mutex_trylock(&udpif->n_flows_mutex)) {
        struct dpif_dp_stats stats;

        atomic_store_relaxed(&udpif->n_flows_timestamp, now);
        dpif_get_dp_stats(udpif->dpif, &stats);
        flow_count = stats.n_flows;
        atomic_store_relaxed(&udpif->n_flows, flow_count);
        ovs_mutex_unlock(&udpif->n_flows_mutex);
    } else {
        atomic_read_relaxed(&udpif->n_flows, &flow_count);
    }
    return flow_count;
}

/* The upcall handler thread tries to read a batch of UPCALL_MAX_BATCH
 * upcalls from dpif, processes the batch and installs corresponding flows
 * in dpif. */
static void *
udpif_upcall_handler(void *arg)
{
    struct handler *handler = arg;
    struct udpif *udpif = handler->udpif;

    while (!latch_is_set(&handler->udpif->exit_latch)) {
        if (recv_upcalls(handler)) {
            poll_immediate_wake();
        } else {
            dpif_recv_wait(udpif->dpif, handler->handler_id);
            latch_wait(&udpif->exit_latch);
        }
        poll_block();
    }

    return NULL;
}

static size_t
recv_upcalls(struct handler *handler)
{
    struct udpif *udpif = handler->udpif;
    uint64_t recv_stubs[UPCALL_MAX_BATCH][512 / 8];
    struct ofpbuf recv_bufs[UPCALL_MAX_BATCH];
    struct dpif_upcall dupcalls[UPCALL_MAX_BATCH];
    struct upcall upcalls[UPCALL_MAX_BATCH];
    struct flow flows[UPCALL_MAX_BATCH];
    size_t n_upcalls, i;

    n_upcalls = 0;
    while (n_upcalls < UPCALL_MAX_BATCH) {
        struct ofpbuf *recv_buf = &recv_bufs[n_upcalls];
        struct dpif_upcall *dupcall = &dupcalls[n_upcalls];
        struct upcall *upcall = &upcalls[n_upcalls];
        struct flow *flow = &flows[n_upcalls];
        unsigned int mru;
        int error;

        ofpbuf_use_stub(recv_buf, recv_stubs[n_upcalls],
                        sizeof recv_stubs[n_upcalls]);
        if (dpif_recv(udpif->dpif, handler->handler_id, dupcall, recv_buf)) {
            ofpbuf_uninit(recv_buf);
            break;
        }

        if (odp_flow_key_to_flow(dupcall->key, dupcall->key_len, flow)
            == ODP_FIT_ERROR) {
            goto free_dupcall;
        }

        if (dupcall->mru) {
            mru = nl_attr_get_u16(dupcall->mru);
        } else {
            mru = 0;
        }

        error = upcall_receive(upcall, udpif->backer, &dupcall->packet,
                               dupcall->type, dupcall->userdata, flow, mru,
                               &dupcall->ufid, PMD_ID_NULL);
        if (error) {
            if (error == ENODEV) {
                /* Received packet on datapath port for which we couldn't
                 * associate an ofproto. This can happen if a port is removed
                 * while traffic is being received. Print a rate-limited
                 * message in case it happens frequently. */
                dpif_flow_put(udpif->dpif, DPIF_FP_CREATE, dupcall->key,
                              dupcall->key_len, NULL, 0, NULL, 0,
                              &dupcall->ufid, PMD_ID_NULL, NULL);
                VLOG_INFO_RL(&rl, "received packet on unassociated datapath "
                             "port %"PRIu32, flow->in_port.odp_port);
            }
            goto free_dupcall;
        }

        upcall->key = dupcall->key;
        upcall->key_len = dupcall->key_len;
        upcall->ufid = &dupcall->ufid;

        upcall->out_tun_key = dupcall->out_tun_key;
        upcall->actions = dupcall->actions;

        pkt_metadata_from_flow(&dupcall->packet.md, flow);
        flow_extract(&dupcall->packet, flow);

        error = process_upcall(udpif, upcall,
                               &upcall->odp_actions, &upcall->wc);
        if (error) {
            goto cleanup;
        }

        n_upcalls++;
        continue;

    cleanup:
        upcall_uninit(upcall);
    free_dupcall:
        dp_packet_uninit(&dupcall->packet);
        ofpbuf_uninit(recv_buf);
    }

    if (n_upcalls) {
        handle_upcalls(handler->udpif, upcalls, n_upcalls);
        for (i = 0; i < n_upcalls; i++) {
            dp_packet_uninit(&dupcalls[i].packet);
            ofpbuf_uninit(&recv_bufs[i]);
            upcall_uninit(&upcalls[i]);
        }
    }

    return n_upcalls;
}

static void *
udpif_revalidator(void *arg)
{
    /* Used by all revalidators. */
    struct revalidator *revalidator = arg;
    struct udpif *udpif = revalidator->udpif;
    bool leader = revalidator == &udpif->revalidators[0];

    /* Used only by the leader. */
    long long int start_time = 0;
    uint64_t last_reval_seq = 0;
    size_t n_flows = 0;

    revalidator->id = ovsthread_id_self();
    for (;;) {
        if (leader) {
            uint64_t reval_seq;

            recirc_run(); /* Recirculation cleanup. */

            reval_seq = seq_read(udpif->reval_seq);
            last_reval_seq = reval_seq;

            n_flows = udpif_get_n_flows(udpif);
            udpif->max_n_flows = MAX(n_flows, udpif->max_n_flows);
            udpif->avg_n_flows = (udpif->avg_n_flows + n_flows) / 2;

            /* Only the leader checks the pause latch to prevent a race where
             * some threads think it's false and proceed to block on
             * reval_barrier and others think it's true and block indefinitely
             * on the pause_barrier. */
            udpif->pause = latch_is_set(&udpif->pause_latch);

            /* Only the leader checks the exit latch to prevent a race where
             * some threads think it's true and exit and others think it's
             * false and block indefinitely on the reval_barrier. */
            udpif->reval_exit = latch_is_set(&udpif->exit_latch);

            start_time = time_msec();
            if (!udpif->reval_exit) {
                bool terse_dump;

                terse_dump = udpif_use_ufid(udpif);
                udpif->dump = dpif_flow_dump_create(udpif->dpif, terse_dump);
            }
        }

        /* Wait for the leader to start the flow dump. */
        ovs_barrier_block(&udpif->reval_barrier);
        if (udpif->pause) {
            revalidator_pause(revalidator);
        }

        if (udpif->reval_exit) {
            break;
        }
        revalidate(revalidator);

        /* Wait for all flows to have been dumped before we garbage collect. */
        ovs_barrier_block(&udpif->reval_barrier);
        revalidator_sweep(revalidator);

        /* Wait for all revalidators to finish garbage collection. */
        ovs_barrier_block(&udpif->reval_barrier);

        if (leader) {
            unsigned int flow_limit;
            long long int duration;

            atomic_read_relaxed(&udpif->flow_limit, &flow_limit);

            dpif_flow_dump_destroy(udpif->dump);
            seq_change(udpif->dump_seq);

            duration = MAX(time_msec() - start_time, 1);
            udpif->dump_duration = duration;
            if (duration > 2000) {
                flow_limit /= duration / 1000;
            } else if (duration > 1300) {
                flow_limit = flow_limit * 3 / 4;
            } else if (duration < 1000 && n_flows > 2000
                       && flow_limit < n_flows * 1000 / duration) {
                flow_limit += 1000;
            }
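            /* E.g., a 3000 ms dump cuts 'flow_limit' to a third, a 1500 ms
             * dump trims it to three quarters, and a dump under 1000 ms with
             * more than 2000 flows grows it by 1000 per round; the clamp
             * below keeps the result between 1000 and 'ofproto_flow_limit'. */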
            flow_limit = MIN(ofproto_flow_limit, MAX(flow_limit, 1000));
            atomic_store_relaxed(&udpif->flow_limit, flow_limit);

            if (duration > 2000) {
                VLOG_INFO("Spent an unreasonably long %lldms dumping flows",
                          duration);
            }

            poll_timer_wait_until(start_time + MIN(ofproto_max_idle, 500));
            seq_wait(udpif->reval_seq, last_reval_seq);
            latch_wait(&udpif->exit_latch);
            latch_wait(&udpif->pause_latch);
            poll_block();
        }
    }

    return NULL;
}
\f
static enum upcall_type
classify_upcall(enum dpif_upcall_type type, const struct nlattr *userdata)
{
    union user_action_cookie cookie;
    size_t userdata_len;

    /* First look at the upcall type. */
    switch (type) {
    case DPIF_UC_ACTION:
        break;

    case DPIF_UC_MISS:
        return MISS_UPCALL;

    case DPIF_N_UC_TYPES:
    default:
        VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, type);
        return BAD_UPCALL;
    }

    /* "action" upcalls need a closer look. */
    if (!userdata) {
        VLOG_WARN_RL(&rl, "action upcall missing cookie");
        return BAD_UPCALL;
    }
    userdata_len = nl_attr_get_size(userdata);
    if (userdata_len < sizeof cookie.type
        || userdata_len > sizeof cookie) {
        VLOG_WARN_RL(&rl, "action upcall cookie has unexpected size %"PRIuSIZE,
                     userdata_len);
        return BAD_UPCALL;
    }
    memset(&cookie, 0, sizeof cookie);
    memcpy(&cookie, nl_attr_get(userdata), userdata_len);
    if (userdata_len == MAX(8, sizeof cookie.sflow)
        && cookie.type == USER_ACTION_COOKIE_SFLOW) {
        return SFLOW_UPCALL;
    } else if (userdata_len == MAX(8, sizeof cookie.slow_path)
               && cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
        return MISS_UPCALL;
    } else if (userdata_len == MAX(8, sizeof cookie.flow_sample)
               && cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
        return FLOW_SAMPLE_UPCALL;
    } else if (userdata_len == MAX(8, sizeof cookie.ipfix)
               && cookie.type == USER_ACTION_COOKIE_IPFIX) {
        return IPFIX_UPCALL;
    } else {
        VLOG_WARN_RL(&rl, "invalid user cookie of type %"PRIu16
                     " and size %"PRIuSIZE, cookie.type, userdata_len);
        return BAD_UPCALL;
    }
}

/* Calculates slow path actions for 'xout'. 'buf' must be statically
 * initialized with at least 128 bytes of space. */
static void
compose_slow_path(struct udpif *udpif, struct xlate_out *xout,
                  const struct flow *flow, odp_port_t odp_in_port,
                  struct ofpbuf *buf)
{
    union user_action_cookie cookie;
    odp_port_t port;
    uint32_t pid;

    cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
    cookie.slow_path.unused = 0;
    cookie.slow_path.reason = xout->slow;

    port = xout->slow & (SLOW_CFM | SLOW_BFD | SLOW_LACP | SLOW_STP)
           ? ODPP_NONE
           : odp_in_port;
    pid = dpif_port_get_pid(udpif->dpif, port, flow_hash_5tuple(flow, 0));
    odp_put_userspace_action(pid, &cookie, sizeof cookie.slow_path,
                             ODPP_NONE, false, buf);
}

/* If there is no error, the upcall must be destroyed with upcall_uninit()
 * before quiescing, as the referred objects are guaranteed to exist only
 * until the calling thread quiesces. Otherwise, do not call upcall_uninit()
 * since the 'upcall->put_actions' remains uninitialized. */
static int
upcall_receive(struct upcall *upcall, const struct dpif_backer *backer,
               const struct dp_packet *packet, enum dpif_upcall_type type,
               const struct nlattr *userdata, const struct flow *flow,
               const unsigned int mru,
               const ovs_u128 *ufid, const unsigned pmd_id)
{
    int error;

    error = xlate_lookup(backer, flow, &upcall->ofproto, &upcall->ipfix,
                         &upcall->sflow, NULL, &upcall->in_port);
    if (error) {
        return error;
    }

    upcall->recirc = NULL;
    upcall->have_recirc_ref = false;
    upcall->flow = flow;
    upcall->packet = packet;
    upcall->ufid = ufid;
    upcall->pmd_id = pmd_id;
    upcall->type = type;
    upcall->userdata = userdata;
    ofpbuf_use_stub(&upcall->odp_actions, upcall->odp_actions_stub,
                    sizeof upcall->odp_actions_stub);
    ofpbuf_init(&upcall->put_actions, 0);

    upcall->xout_initialized = false;
    upcall->ukey_persists = false;

    upcall->ukey = NULL;
    upcall->key = NULL;
    upcall->key_len = 0;
    upcall->mru = mru;

    upcall->out_tun_key = NULL;
    upcall->actions = NULL;

    return 0;
}

static void
upcall_xlate(struct udpif *udpif, struct upcall *upcall,
             struct ofpbuf *odp_actions, struct flow_wildcards *wc)
{
    struct dpif_flow_stats stats;
    struct xlate_in xin;

    stats.n_packets = 1;
    stats.n_bytes = dp_packet_size(upcall->packet);
    stats.used = time_msec();
    stats.tcp_flags = ntohs(upcall->flow->tcp_flags);

    xlate_in_init(&xin, upcall->ofproto, upcall->flow, upcall->in_port, NULL,
                  stats.tcp_flags, upcall->packet, wc, odp_actions);

    if (upcall->type == DPIF_UC_MISS) {
        xin.resubmit_stats = &stats;

        if (xin.frozen_state) {
            /* We may install a datapath flow only if we get a reference to the
             * recirculation context (otherwise we could have recirculation
             * upcalls using recirculation ID for which no context can be
             * found). We may still execute the flow's actions even if we
             * don't install the flow. */
            upcall->recirc = recirc_id_node_from_state(xin.frozen_state);
            upcall->have_recirc_ref = recirc_id_node_try_ref_rcu(upcall->recirc);
        }
    } else {
        /* For non-miss upcalls, we are either executing actions (one of which
         * is a userspace action) for an upcall, in which case the stats have
         * already been taken care of, or there's a flow in the datapath which
         * this packet was accounted to. Presumably the revalidators will deal
         * with pushing its stats eventually. */
    }

    upcall->dump_seq = seq_read(udpif->dump_seq);
    upcall->reval_seq = seq_read(udpif->reval_seq);

    xlate_actions(&xin, &upcall->xout);
    if (wc) {
        /* Convert the input port wildcard from OFP to ODP format. There's no
         * real way to do this for arbitrary bitmasks since the numbering spaces
         * aren't the same. However, flow translation always exact-matches the
         * whole field, so we can do the same here. */
        WC_MASK_FIELD(wc, in_port.odp_port);
    }

    upcall->xout_initialized = true;

    if (!upcall->xout.slow) {
        ofpbuf_use_const(&upcall->put_actions,
                         odp_actions->data, odp_actions->size);
    } else {
        /* upcall->put_actions already initialized by upcall_receive(). */
        compose_slow_path(udpif, &upcall->xout, upcall->flow,
                          upcall->flow->in_port.odp_port,
                          &upcall->put_actions);
    }

    /* This function is also called for slow-pathed flows. As we are only
     * going to create new datapath flows for actual datapath misses, there is
     * no point in creating a ukey otherwise. */
    if (upcall->type == DPIF_UC_MISS) {
        upcall->ukey = ukey_create_from_upcall(upcall, wc);
    }
}

static void
upcall_uninit(struct upcall *upcall)
{
    if (upcall) {
        if (upcall->xout_initialized) {
            xlate_out_uninit(&upcall->xout);
        }
        ofpbuf_uninit(&upcall->odp_actions);
        ofpbuf_uninit(&upcall->put_actions);
        if (upcall->ukey) {
            if (!upcall->ukey_persists) {
                ukey_delete__(upcall->ukey);
            }
        } else if (upcall->have_recirc_ref) {
            /* The reference was transferred to the ukey if one was created. */
            recirc_id_node_unref(upcall->recirc);
        }
    }
}

/* If there are fewer flows than the limit, and this is a miss upcall which
 *
 *      - Has no recirc_id, OR
 *      - Has a recirc_id and we can get a reference on the recirc ctx,
 *
 * then we should install the flow (true). Otherwise, return false. */
static bool
should_install_flow(struct udpif *udpif, struct upcall *upcall)
{
    unsigned int flow_limit;

    if (upcall->type != DPIF_UC_MISS) {
        return false;
    } else if (upcall->recirc && !upcall->have_recirc_ref) {
        VLOG_WARN_RL(&rl, "upcall: no reference for recirc flow");
        return false;
    }

    atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
    if (udpif_get_n_flows(udpif) >= flow_limit) {
        VLOG_WARN_RL(&rl, "upcall: datapath flow limit reached");
        return false;
    }

    return true;
}

static int
upcall_cb(const struct dp_packet *packet, const struct flow *flow,
          ovs_u128 *ufid, unsigned pmd_id, enum dpif_upcall_type type,
          const struct nlattr *userdata, struct ofpbuf *actions,
          struct flow_wildcards *wc, struct ofpbuf *put_actions, void *aux)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
    struct udpif *udpif = aux;
    struct upcall upcall;
    bool megaflow;
    int error;

    atomic_read_relaxed(&enable_megaflows, &megaflow);

    error = upcall_receive(&upcall, udpif->backer, packet, type, userdata,
                           flow, 0, ufid, pmd_id);
    if (error) {
        return error;
    }

    error = process_upcall(udpif, &upcall, actions, wc);
    if (error) {
        goto out;
    }

    if (upcall.xout.slow && put_actions) {
        ofpbuf_put(put_actions, upcall.put_actions.data,
                   upcall.put_actions.size);
    }

    if (OVS_UNLIKELY(!megaflow)) {
        flow_wildcards_init_for_packet(wc, flow);
    }

    if (!should_install_flow(udpif, &upcall)) {
        error = ENOSPC;
        goto out;
    }

    if (upcall.ukey && !ukey_install(udpif, upcall.ukey)) {
        VLOG_WARN_RL(&rl, "upcall_cb failure: ukey installation fails");
        error = ENOSPC;
    }
out:
    if (!error) {
        upcall.ukey_persists = true;
    }
    upcall_uninit(&upcall);
    return error;
}

static int
process_upcall(struct udpif *udpif, struct upcall *upcall,
               struct ofpbuf *odp_actions, struct flow_wildcards *wc)
{
    const struct nlattr *userdata = upcall->userdata;
    const struct dp_packet *packet = upcall->packet;
    const struct flow *flow = upcall->flow;

    switch (classify_upcall(upcall->type, userdata)) {
    case MISS_UPCALL:
        upcall_xlate(udpif, upcall, odp_actions, wc);
        return 0;

    case SFLOW_UPCALL:
        if (upcall->sflow) {
            union user_action_cookie cookie;
            const struct nlattr *actions;
            size_t actions_len = 0;
            struct dpif_sflow_actions sflow_actions;
            memset(&sflow_actions, 0, sizeof sflow_actions);
            memset(&cookie, 0, sizeof cookie);
            memcpy(&cookie, nl_attr_get(userdata), sizeof cookie.sflow);
            if (upcall->actions) {
                /* Actions were passed up from datapath. */
                actions = nl_attr_get(upcall->actions);
                actions_len = nl_attr_get_size(upcall->actions);
                if (actions && actions_len) {
                    dpif_sflow_read_actions(flow, actions, actions_len,
                                            &sflow_actions);
                }
            }
            if (actions_len == 0) {
                /* Lookup actions in userspace cache. */
                struct udpif_key *ukey = ukey_lookup(udpif, upcall->ufid,
                                                     upcall->pmd_id);
                if (ukey) {
                    ukey_get_actions(ukey, &actions, &actions_len);
                    dpif_sflow_read_actions(flow, actions, actions_len,
                                            &sflow_actions);
                }
            }
            dpif_sflow_received(upcall->sflow, packet, flow,
                                flow->in_port.odp_port, &cookie,
                                actions_len > 0 ? &sflow_actions : NULL);
        }
        break;

    case IPFIX_UPCALL:
        if (upcall->ipfix) {
            union user_action_cookie cookie;
            struct flow_tnl output_tunnel_key;

            memset(&cookie, 0, sizeof cookie);
            memcpy(&cookie, nl_attr_get(userdata), sizeof cookie.ipfix);

            if (upcall->out_tun_key) {
                odp_tun_key_from_attr(upcall->out_tun_key, false,
                                      &output_tunnel_key);
            }
            dpif_ipfix_bridge_sample(upcall->ipfix, packet, flow,
                                     flow->in_port.odp_port,
                                     cookie.ipfix.output_odp_port,
                                     upcall->out_tun_key ?
                                         &output_tunnel_key : NULL);
        }
        break;

    case FLOW_SAMPLE_UPCALL:
        if (upcall->ipfix) {
            union user_action_cookie cookie;
            struct flow_tnl output_tunnel_key;

            memset(&cookie, 0, sizeof cookie);
            memcpy(&cookie, nl_attr_get(userdata), sizeof cookie.flow_sample);

            if (upcall->out_tun_key) {
                odp_tun_key_from_attr(upcall->out_tun_key, false,
                                      &output_tunnel_key);
            }

            /* The flow reflects exactly the contents of the packet.
             * Sample the packet using it. */
            dpif_ipfix_flow_sample(upcall->ipfix, packet, flow,
                                   &cookie, flow->in_port.odp_port,
                                   upcall->out_tun_key ?
                                       &output_tunnel_key : NULL);
        }
        break;

    case BAD_UPCALL:
        break;
    }

    return EAGAIN;
}

static void
handle_upcalls(struct udpif *udpif, struct upcall *upcalls,
               size_t n_upcalls)
{
    struct dpif_op *opsp[UPCALL_MAX_BATCH * 2];
    struct ukey_op ops[UPCALL_MAX_BATCH * 2];
    size_t n_ops, n_opsp, i;

    /* Handle the packets individually in order of arrival.
     *
     *   - For SLOW_CFM, SLOW_LACP, SLOW_STP, and SLOW_BFD, translation is what
     *     processes received packets for these protocols.
     *
     *   - For SLOW_CONTROLLER, translation sends the packet to the OpenFlow
     *     controller.
     *
     * The loop fills 'ops' with an array of operations to execute in the
     * datapath. */
    n_ops = 0;
    for (i = 0; i < n_upcalls; i++) {
        struct upcall *upcall = &upcalls[i];
        const struct dp_packet *packet = upcall->packet;
        struct ukey_op *op;

        if (should_install_flow(udpif, upcall)) {
            struct udpif_key *ukey = upcall->ukey;

            upcall->ukey_persists = true;
            op = &ops[n_ops++];

            op->ukey = ukey;
            op->dop.type = DPIF_OP_FLOW_PUT;
            op->dop.u.flow_put.flags = DPIF_FP_CREATE;
            op->dop.u.flow_put.key = ukey->key;
            op->dop.u.flow_put.key_len = ukey->key_len;
            op->dop.u.flow_put.mask = ukey->mask;
            op->dop.u.flow_put.mask_len = ukey->mask_len;
            op->dop.u.flow_put.ufid = upcall->ufid;
            op->dop.u.flow_put.stats = NULL;
            ukey_get_actions(ukey, &op->dop.u.flow_put.actions,
                             &op->dop.u.flow_put.actions_len);
        }

        if (upcall->odp_actions.size) {
            op = &ops[n_ops++];
            op->ukey = NULL;
            op->dop.type = DPIF_OP_EXECUTE;
            op->dop.u.execute.packet = CONST_CAST(struct dp_packet *, packet);
            op->dop.u.execute.flow = upcall->flow;
            odp_key_to_pkt_metadata(upcall->key, upcall->key_len,
                                    &op->dop.u.execute.packet->md);
            op->dop.u.execute.actions = upcall->odp_actions.data;
            op->dop.u.execute.actions_len = upcall->odp_actions.size;
            op->dop.u.execute.needs_help = (upcall->xout.slow & SLOW_ACTION) != 0;
            op->dop.u.execute.probe = false;
            op->dop.u.execute.mtu = upcall->mru;
        }
    }

    /* Execute batch.
     *
     * We install ukeys before installing the flows, locking them for exclusive
     * access by this thread for the period of installation. This ensures that
     * other threads won't attempt to delete the flows as we are creating them.
     */
    n_opsp = 0;
    for (i = 0; i < n_ops; i++) {
        struct udpif_key *ukey = ops[i].ukey;

        if (ukey) {
            /* If we can't install the ukey, don't install the flow. */
            if (!ukey_install_start(udpif, ukey)) {
                ukey_delete__(ukey);
                ops[i].ukey = NULL;
                continue;
            }
        }
        opsp[n_opsp++] = &ops[i].dop;
    }
    dpif_operate(udpif->dpif, opsp, n_opsp);
    for (i = 0; i < n_ops; i++) {
        if (ops[i].ukey) {
            ukey_install_finish(ops[i].ukey, ops[i].dop.error);
        }
    }
}

static uint32_t
get_ukey_hash(const ovs_u128 *ufid, const unsigned pmd_id)
{
    return hash_2words(ufid->u32[0], pmd_id);
}

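/* Only the first 32 bits of the UFID feed the hash above, so distinct UFIDs
 * may share a bucket; ukey_lookup() therefore still compares the full
 * 128-bit UFID of every candidate. */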
static struct udpif_key *
ukey_lookup(struct udpif *udpif, const ovs_u128 *ufid, const unsigned pmd_id)
{
    struct udpif_key *ukey;
    int idx = get_ukey_hash(ufid, pmd_id) % N_UMAPS;
    struct cmap *cmap = &udpif->ukeys[idx].cmap;

    CMAP_FOR_EACH_WITH_HASH (ukey, cmap_node,
                             get_ukey_hash(ufid, pmd_id), cmap) {
        if (ovs_u128_equals(ukey->ufid, *ufid)) {
            return ukey;
        }
    }
    return NULL;
}

/* Provides safe lockless access to RCU-protected 'ukey->actions'. Callers may
 * alternatively access the field directly if they take 'ukey->mutex'. */
static void
ukey_get_actions(struct udpif_key *ukey, const struct nlattr **actions,
                 size_t *size)
{
    const struct ofpbuf *buf = ovsrcu_get(struct ofpbuf *, &ukey->actions);
    *actions = buf->data;
    *size = buf->size;
}

static void
ukey_set_actions(struct udpif_key *ukey, const struct ofpbuf *actions)
{
    ovsrcu_postpone(ofpbuf_delete,
                    ovsrcu_get_protected(struct ofpbuf *, &ukey->actions));
    ovsrcu_set(&ukey->actions, ofpbuf_clone(actions));
}

static struct udpif_key *
ukey_create__(const struct nlattr *key, size_t key_len,
              const struct nlattr *mask, size_t mask_len,
              bool ufid_present, const ovs_u128 *ufid,
              const unsigned pmd_id, const struct ofpbuf *actions,
              uint64_t dump_seq, uint64_t reval_seq, long long int used,
              uint32_t key_recirc_id, struct xlate_out *xout)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct udpif_key *ukey = xmalloc(sizeof *ukey);

    memcpy(&ukey->keybuf, key, key_len);
    ukey->key = &ukey->keybuf.nla;
    ukey->key_len = key_len;
    memcpy(&ukey->maskbuf, mask, mask_len);
    ukey->mask = &ukey->maskbuf.nla;
    ukey->mask_len = mask_len;
    ukey->ufid_present = ufid_present;
    ukey->ufid = *ufid;
    ukey->pmd_id = pmd_id;
    ukey->hash = get_ukey_hash(&ukey->ufid, pmd_id);

    ovsrcu_init(&ukey->actions, NULL);
    ukey_set_actions(ukey, actions);

    ovs_mutex_init(&ukey->mutex);
    ukey->dump_seq = dump_seq;
    ukey->reval_seq = reval_seq;
    ukey->flow_exists = false;
    ukey->created = time_msec();
    memset(&ukey->stats, 0, sizeof ukey->stats);
    ukey->stats.used = used;
    ukey->xcache = NULL;

    ukey->key_recirc_id = key_recirc_id;
    recirc_refs_init(&ukey->recircs);
    if (xout) {
        /* Take ownership of the action recirc id references. */
        recirc_refs_swap(&ukey->recircs, &xout->recircs);
    }

    return ukey;
}

static struct udpif_key *
ukey_create_from_upcall(struct upcall *upcall, struct flow_wildcards *wc)
{
    struct odputil_keybuf keystub, maskstub;
    struct ofpbuf keybuf, maskbuf;
    bool megaflow;
    struct odp_flow_key_parms odp_parms = {
        .flow = upcall->flow,
        .mask = &wc->masks,
    };

    odp_parms.support = ofproto_dpif_get_support(upcall->ofproto)->odp;
    if (upcall->key_len) {
        ofpbuf_use_const(&keybuf, upcall->key, upcall->key_len);
    } else {
        /* dpif-netdev doesn't provide a netlink-formatted flow key in the
         * upcall, so convert the upcall's flow here. */
        ofpbuf_use_stack(&keybuf, &keystub, sizeof keystub);
        odp_flow_key_from_flow(&odp_parms, &keybuf);
    }

    atomic_read_relaxed(&enable_megaflows, &megaflow);
    ofpbuf_use_stack(&maskbuf, &maskstub, sizeof maskstub);
    if (megaflow) {
        odp_parms.key_buf = &keybuf;
        odp_flow_key_from_mask(&odp_parms, &maskbuf);
    }

    return ukey_create__(keybuf.data, keybuf.size, maskbuf.data, maskbuf.size,
                         true, upcall->ufid, upcall->pmd_id,
                         &upcall->put_actions, upcall->dump_seq,
                         upcall->reval_seq, 0,
                         upcall->have_recirc_ref ? upcall->recirc->id : 0,
                         &upcall->xout);
}

static int
ukey_create_from_dpif_flow(const struct udpif *udpif,
                           const struct dpif_flow *flow,
                           struct udpif_key **ukey)
{
    struct dpif_flow full_flow;
    struct ofpbuf actions;
    uint64_t dump_seq, reval_seq;
    uint64_t stub[DPIF_FLOW_BUFSIZE / 8];
    const struct nlattr *a;
    unsigned int left;

    if (!flow->key_len || !flow->actions_len) {
        struct ofpbuf buf;
        int err;

        /* If the key or actions were not provided by the datapath, fetch the
         * full flow. */
        ofpbuf_use_stack(&buf, &stub, sizeof stub);
        err = dpif_flow_get(udpif->dpif, flow->key, flow->key_len,
                            flow->ufid_present ? &flow->ufid : NULL,
                            flow->pmd_id, &buf, &full_flow);
        if (err) {
            return err;
        }
        flow = &full_flow;
    }

    /* Check the flow actions for a recirculation action. As recirculation
     * relies on OVS userspace internal state, we need to delete all old
     * datapath flows with either a non-zero recirc_id in the key, or any
     * recirculation actions upon OVS restart. */
    NL_ATTR_FOR_EACH_UNSAFE (a, left, flow->key, flow->key_len) {
        if (nl_attr_type(a) == OVS_KEY_ATTR_RECIRC_ID
            && nl_attr_get_u32(a) != 0) {
            return EINVAL;
        }
    }
    NL_ATTR_FOR_EACH_UNSAFE (a, left, flow->actions, flow->actions_len) {
        if (nl_attr_type(a) == OVS_ACTION_ATTR_RECIRC) {
            return EINVAL;
        }
    }

    dump_seq = seq_read(udpif->dump_seq);
    reval_seq = seq_read(udpif->reval_seq);
    ofpbuf_use_const(&actions, flow->actions, flow->actions_len);
    *ukey = ukey_create__(flow->key, flow->key_len,
                          flow->mask, flow->mask_len, flow->ufid_present,
                          &flow->ufid, flow->pmd_id, &actions, dump_seq,
                          reval_seq, flow->stats.used, 0, NULL);

    return 0;
}

/* Attempts to insert a ukey into the shared ukey maps.
 *
 * On success, returns true, installs 'new_ukey', and leaves it in a locked
 * state. Otherwise, returns false. */
static bool
ukey_install_start(struct udpif *udpif, struct udpif_key *new_ukey)
    OVS_TRY_LOCK(true, new_ukey->mutex)
{
    struct umap *umap;
    struct udpif_key *old_ukey;
    uint32_t idx;
    bool locked = false;

    idx = new_ukey->hash % N_UMAPS;
    umap = &udpif->ukeys[idx];
    ovs_mutex_lock(&umap->mutex);
    old_ukey = ukey_lookup(udpif, &new_ukey->ufid, new_ukey->pmd_id);
    if (old_ukey) {
        /* Uncommon case: A ukey is already installed with the same UFID. */
        if (old_ukey->key_len == new_ukey->key_len
            && !memcmp(old_ukey->key, new_ukey->key, new_ukey->key_len)) {
            COVERAGE_INC(handler_duplicate_upcall);
        } else {
            struct ds ds = DS_EMPTY_INITIALIZER;

            odp_format_ufid(&old_ukey->ufid, &ds);
            ds_put_cstr(&ds, " ");
            odp_flow_key_format(old_ukey->key, old_ukey->key_len, &ds);
            ds_put_cstr(&ds, "\n");
            odp_format_ufid(&new_ukey->ufid, &ds);
            ds_put_cstr(&ds, " ");
            odp_flow_key_format(new_ukey->key, new_ukey->key_len, &ds);

            VLOG_WARN_RL(&rl, "Conflicting ukey for flows:\n%s", ds_cstr(&ds));
            ds_destroy(&ds);
        }
    } else {
        ovs_mutex_lock(&new_ukey->mutex);
        cmap_insert(&umap->cmap, &new_ukey->cmap_node, new_ukey->hash);
        locked = true;
    }
    ovs_mutex_unlock(&umap->mutex);

    return locked;
}

static void
ukey_install_finish__(struct udpif_key *ukey) OVS_REQUIRES(ukey->mutex)
{
    ukey->flow_exists = true;
}

static bool
ukey_install_finish(struct udpif_key *ukey, int error)
    OVS_RELEASES(ukey->mutex)
{
    if (!error) {
        ukey_install_finish__(ukey);
    }
    ovs_mutex_unlock(&ukey->mutex);

    return !error;
}

static bool
ukey_install(struct udpif *udpif, struct udpif_key *ukey)
{
    /* The usual way to keep 'ukey->flow_exists' in sync with the datapath is
     * to call ukey_install_start(), install the corresponding datapath flow,
     * then call ukey_install_finish(). The netdev interface using upcall_cb()
     * doesn't provide a function to separately finish the flow installation,
     * so we perform the operations together here.
     *
     * This is fine currently, as revalidator threads will only delete this
     * ukey during revalidator_sweep() and only if the dump_seq is mismatched.
     * It is unlikely for a revalidator thread to advance dump_seq and reach
     * the next GC phase between ukey creation and flow installation. */
    return ukey_install_start(udpif, ukey) && ukey_install_finish(ukey, 0);
}

1652 /* Searches for a ukey in 'udpif->ukeys' that matches 'flow' and attempts to
1653 * lock the ukey. If the ukey does not exist, creates it.
1654 *
1655 * Returns 0 on success, setting *result to the matching ukey, which is
1656 * returned in a locked state. Otherwise, returns an errno and clears
1657 * *result. EBUSY indicates that another thread is handling this flow;
1658 * other errors indicate an unexpected condition while creating a new ukey.
1659 *
1660 * *error is an output parameter provided to appease the threadsafety analyser,
1661 * and its value matches the return value. */
1662 static int
1663 ukey_acquire(struct udpif *udpif, const struct dpif_flow *flow,
1664 struct udpif_key **result, int *error)
1665 OVS_TRY_LOCK(0, (*result)->mutex)
1666 {
1667 struct udpif_key *ukey;
1668 int retval;
1669
1670 ukey = ukey_lookup(udpif, &flow->ufid, flow->pmd_id);
1671 if (ukey) {
1672 retval = ovs_mutex_trylock(&ukey->mutex);
1673 } else {
1674 /* Usually we try to avoid installing flows from revalidator threads,
1675 * because locking on a umap may cause handler threads to block.
1676 * However there are certain cases, like when ovs-vswitchd is
1677 * restarted, where it is desirable to handle flows that exist in the
1678 * datapath gracefully (i.e., don't just clear the datapath). */
1679 bool install;
1680
1681 retval = ukey_create_from_dpif_flow(udpif, flow, &ukey);
1682 if (retval) {
1683 goto done;
1684 }
1685 install = ukey_install_start(udpif, ukey);
1686 if (install) {
1687 ukey_install_finish__(ukey);
1688 retval = 0;
1689 } else {
1690 ukey_delete__(ukey);
1691 retval = EBUSY;
1692 }
1693 }
1694
1695 done:
1696 *error = retval;
1697 if (retval) {
1698 *result = NULL;
1699 } else {
1700 *result = ukey;
1701 }
1702 return retval;
1703 }
1704
1705 static void
1706 ukey_delete__(struct udpif_key *ukey)
1707 OVS_NO_THREAD_SAFETY_ANALYSIS
1708 {
1709 if (ukey) {
1710 if (ukey->key_recirc_id) {
1711 recirc_free_id(ukey->key_recirc_id);
1712 }
1713 recirc_refs_unref(&ukey->recircs);
1714 xlate_cache_delete(ukey->xcache);
1715 ofpbuf_delete(ovsrcu_get(struct ofpbuf *, &ukey->actions));
1716 ovs_mutex_destroy(&ukey->mutex);
1717 free(ukey);
1718 }
1719 }
1720
1721 static void
1722 ukey_delete(struct umap *umap, struct udpif_key *ukey)
1723 OVS_REQUIRES(umap->mutex)
1724 {
1725 cmap_remove(&umap->cmap, &ukey->cmap_node, ukey->hash);
1726 ovsrcu_postpone(ukey_delete__, ukey);
1727 }
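/* The two steps above are deliberate: cmap_remove() makes the ukey invisible
 * to new lockless lookups immediately, while ovsrcu_postpone() defers
 * ukey_delete__() (and thus the free()) until every thread has quiesced, so
 * any reader that already obtained the pointer from the cmap stays safe. */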
1728
1729 static bool
1730 should_revalidate(const struct udpif *udpif, uint64_t packets,
1731 long long int used)
1732 {
1733 long long int metric, now, duration;
1734
1735 if (udpif->dump_duration < 200) {
1736 /* The dump was quick, so we can likely afford full revalidation of all flows. */
1737 return true;
1738 }
1739
1740 /* Calculate the mean time between seeing these packets. If this
1741 * exceeds the threshold, then delete the flow rather than performing
1742 * costly revalidation for flows that aren't being hit frequently.
1743 *
1744 * This is targeted at situations where the dump_duration is high (~1s),
1745 * and revalidation is triggered by a call to udpif_revalidate(). In
1746 * these situations, revalidation of all flows causes fluctuations in the
1747 * flow_limit due to the interaction with the dump_duration and max_idle.
1748 * This tends to result in deletion of low-throughput flows anyway, so
1749 * skip the revalidation and just delete those flows. */
1750 packets = MAX(packets, 1);
1751 now = MAX(used, time_msec());
1752 duration = now - used;
1753 metric = duration / packets;
1754
1755 if (metric < 200) {
1756 /* The flow is receiving more than ~5pps, so keep it. */
1757 return true;
1758 }
1759 return false;
1760 }
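/* Worked example for the metric above (assumed numbers): a flow last used
 * 2000 ms ago that pushed 5 packets gives metric = 2000 / 5 = 400 ms per
 * packet, i.e. under ~5 pps, so it is deleted rather than revalidated. A flow
 * with 100 packets over the same interval gives 2000 / 100 = 20 ms per packet
 * (~50 pps) and is kept for full revalidation. */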
1761
1762 /* Verifies that the datapath actions of 'ukey' are still correct, and pushes
1763 * 'stats' for it.
1764 *
1765 * Returns a recommended action for 'ukey', options include:
1766 * UKEY_DELETE The ukey should be deleted.
1767 * UKEY_KEEP The ukey is fine as is.
1768 * UKEY_MODIFY The ukey's actions should be changed but is otherwise
1769 * fine. Callers should change the actions to those found
1770 * in the caller supplied 'odp_actions' buffer. The
1771 * recirculation references can be found in 'recircs' and
1772 * must be handled by the caller.
1773 *
1774 * If the result is UKEY_MODIFY, then references to all recirc_ids used by the
1775 * new flow will be held within 'recircs' (which may be none).
1776 *
1777 * The caller is responsible for both initializing 'recircs' prior this call,
1778 * and ensuring any references are eventually freed.
1779 */
1780 static enum reval_result
1781 revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey,
1782 const struct dpif_flow_stats *stats,
1783 struct ofpbuf *odp_actions, uint64_t reval_seq,
1784 struct recirc_refs *recircs)
1785 OVS_REQUIRES(ukey->mutex)
1786 {
1787 struct xlate_out xout, *xoutp;
1788 struct netflow *netflow;
1789 struct ofproto_dpif *ofproto;
1790 struct dpif_flow_stats push;
1791 struct flow flow;
1792 struct flow_wildcards dp_mask, wc;
1793 enum reval_result result;
1794 ofp_port_t ofp_in_port;
1795 struct xlate_in xin;
1796 long long int last_used;
1797 int error;
1798 bool need_revalidate;
1799
1800 result = UKEY_DELETE;
1801 xoutp = NULL;
1802 netflow = NULL;
1803
1804 ofpbuf_clear(odp_actions);
1805 need_revalidate = (ukey->reval_seq != reval_seq);
1806 last_used = ukey->stats.used;
1807 push.used = stats->used;
1808 push.tcp_flags = stats->tcp_flags;
1809 push.n_packets = (stats->n_packets > ukey->stats.n_packets
1810 ? stats->n_packets - ukey->stats.n_packets
1811 : 0);
1812 push.n_bytes = (stats->n_bytes > ukey->stats.n_bytes
1813 ? stats->n_bytes - ukey->stats.n_bytes
1814 : 0);
1815
1816 if (need_revalidate && last_used
1817 && !should_revalidate(udpif, push.n_packets, last_used)) {
1818 goto exit;
1819 }
1820
1821 /* We will push the stats, so update the ukey stats cache. */
1822 ukey->stats = *stats;
1823 if (!push.n_packets && !need_revalidate) {
1824 result = UKEY_KEEP;
1825 goto exit;
1826 }
1827
1828 if (ukey->xcache && !need_revalidate) {
1829 xlate_push_stats(ukey->xcache, &push);
1830 result = UKEY_KEEP;
1831 goto exit;
1832 }
1833
1834 if (odp_flow_key_to_flow(ukey->key, ukey->key_len, &flow)
1835 == ODP_FIT_ERROR) {
1836 goto exit;
1837 }
1838
1839 error = xlate_lookup(udpif->backer, &flow, &ofproto, NULL, NULL, &netflow,
1840 &ofp_in_port);
1841 if (error) {
1842 goto exit;
1843 }
1844
1845 if (need_revalidate) {
1846 xlate_cache_clear(ukey->xcache);
1847 }
1848 if (!ukey->xcache) {
1849 ukey->xcache = xlate_cache_new();
1850 }
1851
1852 xlate_in_init(&xin, ofproto, &flow, ofp_in_port, NULL, push.tcp_flags,
1853 NULL, need_revalidate ? &wc : NULL, odp_actions);
1854 if (push.n_packets) {
1855 xin.resubmit_stats = &push;
1856 xin.may_learn = true;
1857 }
1858 xin.xcache = ukey->xcache;
1859 xlate_actions(&xin, &xout);
1860 xoutp = &xout;
1861
1862 if (!need_revalidate) {
1863 result = UKEY_KEEP;
1864 goto exit;
1865 }
1866
1867 if (xout.slow) {
1868 ofpbuf_clear(odp_actions);
1869 compose_slow_path(udpif, &xout, &flow, flow.in_port.odp_port,
1870 odp_actions);
1871 }
1872
1873 if (odp_flow_key_to_mask(ukey->mask, ukey->mask_len, ukey->key,
1874 ukey->key_len, &dp_mask, &flow)
1875 == ODP_FIT_ERROR) {
1876 goto exit;
1877 }
1878
1879 /* Do not modify if any bit is wildcarded by the installed datapath flow,
1880 * but not by the newly revalidated wildcard mask (wc), i.e., if revalidation
1881 * indicates that the datapath flow is now too generic and must be narrowed
1882 * down. Note that we do not know whether the datapath has ignored any of the
1883 * wildcarded bits, so we may be overly conservative here. */
1884 if (flow_wildcards_has_extra(&dp_mask, &wc)) {
1885 goto exit;
1886 }
1887
1888 if (!ofpbuf_equal(odp_actions,
1889 ovsrcu_get(struct ofpbuf *, &ukey->actions))) {
1890 /* The datapath mask was OK, but the actions seem to have changed.
1891 * Let's modify the flow in place. */
1892 result = UKEY_MODIFY;
1893 /* Transfer recirc action ID references to the caller. */
1894 recirc_refs_swap(recircs, &xoutp->recircs);
1895 goto exit;
1896 }
1897
1898 result = UKEY_KEEP;
1899
1900 exit:
1901 if (result != UKEY_DELETE) {
1902 ukey->reval_seq = reval_seq;
1903 }
1904 if (netflow && result == UKEY_DELETE) {
1905 netflow_flow_clear(netflow, &flow);
1906 }
1907 xlate_out_uninit(xoutp);
1908 return result;
1909 }
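/* How a caller consumes the result above (sketch; see reval_op_init() and
 * revalidate() below for the real dispatch):
 *
 *     struct recirc_refs recircs = RECIRC_REFS_EMPTY_INITIALIZER;
 *
 *     result = revalidate_ukey(udpif, ukey, &stats, &odp_actions,
 *                              reval_seq, &recircs);
 *     if (result == UKEY_DELETE) {
 *         // queue a DPIF_OP_FLOW_DEL for the flow
 *     } else if (result == UKEY_MODIFY) {
 *         // store 'recircs' and 'odp_actions' in the ukey, then queue a
 *         // DPIF_OP_FLOW_PUT with DPIF_FP_MODIFY
 *     }
 */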
1910
1911 static void
1912 delete_op_init__(struct udpif *udpif, struct ukey_op *op,
1913 const struct dpif_flow *flow)
1914 {
1915 op->ukey = NULL;
1916 op->dop.type = DPIF_OP_FLOW_DEL;
1917 op->dop.u.flow_del.key = flow->key;
1918 op->dop.u.flow_del.key_len = flow->key_len;
1919 op->dop.u.flow_del.ufid = flow->ufid_present ? &flow->ufid : NULL;
1920 op->dop.u.flow_del.pmd_id = flow->pmd_id;
1921 op->dop.u.flow_del.stats = &op->stats;
1922 op->dop.u.flow_del.terse = udpif_use_ufid(udpif);
1923 }
1924
1925 static void
1926 delete_op_init(struct udpif *udpif, struct ukey_op *op, struct udpif_key *ukey)
1927 {
1928 op->ukey = ukey;
1929 op->dop.type = DPIF_OP_FLOW_DEL;
1930 op->dop.u.flow_del.key = ukey->key;
1931 op->dop.u.flow_del.key_len = ukey->key_len;
1932 op->dop.u.flow_del.ufid = ukey->ufid_present ? &ukey->ufid : NULL;
1933 op->dop.u.flow_del.pmd_id = ukey->pmd_id;
1934 op->dop.u.flow_del.stats = &op->stats;
1935 op->dop.u.flow_del.terse = udpif_use_ufid(udpif);
1936 }
1937
1938 static void
1939 modify_op_init(struct ukey_op *op, struct udpif_key *ukey)
1940 {
1941 op->ukey = ukey;
1942 op->dop.type = DPIF_OP_FLOW_PUT;
1943 op->dop.u.flow_put.flags = DPIF_FP_MODIFY;
1944 op->dop.u.flow_put.key = ukey->key;
1945 op->dop.u.flow_put.key_len = ukey->key_len;
1946 op->dop.u.flow_put.mask = ukey->mask;
1947 op->dop.u.flow_put.mask_len = ukey->mask_len;
1948 op->dop.u.flow_put.ufid = ukey->ufid_present ? &ukey->ufid : NULL;
1949 op->dop.u.flow_put.pmd_id = ukey->pmd_id;
1950 op->dop.u.flow_put.stats = NULL;
1951 ukey_get_actions(ukey, &op->dop.u.flow_put.actions,
1952 &op->dop.u.flow_put.actions_len);
1953 }
1954
1955 /* Executes datapath operations 'ops' and attributes stats retrieved from the
1956 * datapath as part of those operations. */
1957 static void
1958 push_dp_ops(struct udpif *udpif, struct ukey_op *ops, size_t n_ops)
1959 {
1960 struct dpif_op *opsp[REVALIDATE_MAX_BATCH];
1961 size_t i;
1962
1963 ovs_assert(n_ops <= REVALIDATE_MAX_BATCH);
1964 for (i = 0; i < n_ops; i++) {
1965 opsp[i] = &ops[i].dop;
1966 }
1967 dpif_operate(udpif->dpif, opsp, n_ops);
1968
1969 for (i = 0; i < n_ops; i++) {
1970 struct ukey_op *op = &ops[i];
1971 struct dpif_flow_stats *push, *stats, push_buf;
1972
1973 stats = op->dop.u.flow_del.stats;
1974 push = &push_buf;
1975
1976 if (op->dop.type != DPIF_OP_FLOW_DEL) {
1977 /* Only deleted flows need their stats pushed. */
1978 continue;
1979 }
1980
1981 if (op->dop.error) {
1982 /* flow_del error, 'stats' is unusable. */
1983 continue;
1984 }
1985
1986 if (op->ukey) {
1987 ovs_mutex_lock(&op->ukey->mutex);
1988 push->used = MAX(stats->used, op->ukey->stats.used);
1989 push->tcp_flags = stats->tcp_flags | op->ukey->stats.tcp_flags;
1990 push->n_packets = stats->n_packets - op->ukey->stats.n_packets;
1991 push->n_bytes = stats->n_bytes - op->ukey->stats.n_bytes;
1992 ovs_mutex_unlock(&op->ukey->mutex);
1993 } else {
1994 push = stats;
1995 }
1996
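/* Datapath flow stats are cumulative, so the branch above pushes only the
 * delta since this ukey's stats were last read. E.g. (assumed numbers): if
 * the ukey cached n_packets == 100 and the deletion reports 120, only the 20
 * new packets are attributed to OpenFlow rules below. */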
1997 if (push->n_packets || netflow_exists()) {
1998 const struct nlattr *key = op->dop.u.flow_del.key;
1999 size_t key_len = op->dop.u.flow_del.key_len;
2000 struct ofproto_dpif *ofproto;
2001 struct netflow *netflow;
2002 ofp_port_t ofp_in_port;
2003 struct flow flow;
2004 int error;
2005
2006 if (op->ukey) {
2007 ovs_mutex_lock(&op->ukey->mutex);
2008 if (op->ukey->xcache) {
2009 xlate_push_stats(op->ukey->xcache, push);
2010 ovs_mutex_unlock(&op->ukey->mutex);
2011 continue;
2012 }
2013 ovs_mutex_unlock(&op->ukey->mutex);
2014 key = op->ukey->key;
2015 key_len = op->ukey->key_len;
2016 }
2017
2018 if (odp_flow_key_to_flow(key, key_len, &flow)
2019 == ODP_FIT_ERROR) {
2020 continue;
2021 }
2022
2023 error = xlate_lookup(udpif->backer, &flow, &ofproto, NULL, NULL,
2024 &netflow, &ofp_in_port);
2025 if (!error) {
2026 struct xlate_in xin;
2027
2028 xlate_in_init(&xin, ofproto, &flow, ofp_in_port, NULL,
2029 push->tcp_flags, NULL, NULL, NULL);
2030 xin.resubmit_stats = push->n_packets ? push : NULL;
2031 xin.may_learn = push->n_packets > 0;
2032 xlate_actions_for_side_effects(&xin);
2033
2034 if (netflow) {
2035 netflow_flow_clear(netflow, &flow);
2036 }
2037 }
2038 }
2039 }
2040 }
2041
2042 /* Executes datapath operations 'ops', attributes stats retrieved from the
2043 * datapath, and deletes ukeys corresponding to deleted flows. */
2044 static void
2045 push_ukey_ops(struct udpif *udpif, struct umap *umap,
2046 struct ukey_op *ops, size_t n_ops)
2047 {
2048 int i;
2049
2050 push_dp_ops(udpif, ops, n_ops);
2051 ovs_mutex_lock(&umap->mutex);
2052 for (i = 0; i < n_ops; i++) {
2053 if (ops[i].dop.type == DPIF_OP_FLOW_DEL) {
2054 ukey_delete(umap, ops[i].ukey);
2055 }
2056 }
2057 ovs_mutex_unlock(&umap->mutex);
2058 }
2059
2060 static void
2061 log_unexpected_flow(const struct dpif_flow *flow, int error)
2062 {
2063 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 60);
2064 struct ds ds = DS_EMPTY_INITIALIZER;
2065
2066 ds_put_format(&ds, "Failed to acquire udpif_key corresponding to "
2067 "unexpected flow (%s): ", ovs_strerror(error));
2068 odp_format_ufid(&flow->ufid, &ds);
2069 VLOG_WARN_RL(&rl, "%s", ds_cstr(&ds));
2070 ds_destroy(&ds);
2071 }
2072
2073 static void
2074 reval_op_init(struct ukey_op *op, enum reval_result result,
2075 struct udpif *udpif, struct udpif_key *ukey,
2076 struct recirc_refs *recircs, struct ofpbuf *odp_actions)
2077 {
2078 if (result == UKEY_DELETE) {
2079 delete_op_init(udpif, op, ukey);
2080 } else if (result == UKEY_MODIFY) {
2081 /* Store the new recircs. */
2082 recirc_refs_swap(&ukey->recircs, recircs);
2083 /* Release old recircs. */
2084 recirc_refs_unref(recircs);
2085 /* ukey->key_recirc_id remains, as the key is the same as before. */
2086
2087 ukey_set_actions(ukey, odp_actions);
2088 modify_op_init(op, ukey);
2089 }
2090 }
2091
2092 static void
2093 revalidate(struct revalidator *revalidator)
2094 {
2095 uint64_t odp_actions_stub[1024 / 8];
2096 struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(odp_actions_stub);
2097
2098 struct udpif *udpif = revalidator->udpif;
2099 struct dpif_flow_dump_thread *dump_thread;
2100 uint64_t dump_seq, reval_seq;
2101 unsigned int flow_limit;
2102
2103 dump_seq = seq_read(udpif->dump_seq);
2104 reval_seq = seq_read(udpif->reval_seq);
2105 atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
2106 dump_thread = dpif_flow_dump_thread_create(udpif->dump);
2107 for (;;) {
2108 struct ukey_op ops[REVALIDATE_MAX_BATCH];
2109 int n_ops = 0;
2110
2111 struct dpif_flow flows[REVALIDATE_MAX_BATCH];
2112 const struct dpif_flow *f;
2113 int n_dumped;
2114
2115 long long int max_idle;
2116 long long int now;
2117 size_t n_dp_flows;
2118 bool kill_them_all;
2119
2120 n_dumped = dpif_flow_dump_next(dump_thread, flows, ARRAY_SIZE(flows));
2121 if (!n_dumped) {
2122 break;
2123 }
2124
2125 now = time_msec();
2126
2127 /* In normal operation we want to keep flows around until they have
2128 * been idle for 'ofproto_max_idle' milliseconds. However:
2129 *
2130 * - If the number of datapath flows climbs above 'flow_limit',
2131 * drop that down to 100 ms to try to bring the flows down to
2132 * the limit.
2133 *
2134 * - If the number of datapath flows climbs above twice
2135 * 'flow_limit', delete all the datapath flows as an emergency
2136 * measure. (We reassess this condition for the next batch of
2137 * datapath flows, so we will recover before all the flows are
2138 * gone.) */
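/* For example (assumed numbers): with flow_limit == 10000, a datapath holding
 * 12000 flows drops max_idle to 100 ms, while 25000 flows (more than twice
 * the limit) sets kill_them_all, deleting every flow in this batch. */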
2139 n_dp_flows = udpif_get_n_flows(udpif);
2140 kill_them_all = n_dp_flows > flow_limit * 2;
2141 max_idle = n_dp_flows > flow_limit ? 100 : ofproto_max_idle;
2142
2143 for (f = flows; f < &flows[n_dumped]; f++) {
2144 long long int used = f->stats.used;
2145 struct recirc_refs recircs = RECIRC_REFS_EMPTY_INITIALIZER;
2146 enum reval_result result;
2147 struct udpif_key *ukey;
2148 bool already_dumped;
2149 int error;
2150
2151 if (ukey_acquire(udpif, f, &ukey, &error)) {
2152 if (error == EBUSY) {
2153 /* Another thread is processing this flow, so don't bother
2154 * processing it. */
2155 COVERAGE_INC(upcall_ukey_contention);
2156 } else {
2157 log_unexpected_flow(f, error);
2158 if (error != ENOENT) {
2159 delete_op_init__(udpif, &ops[n_ops++], f);
2160 }
2161 }
2162 continue;
2163 }
2164
2165 already_dumped = ukey->dump_seq == dump_seq;
2166 if (already_dumped) {
2167 /* The flow has already been handled during this flow dump
2168 * operation. Skip it. */
2169 if (ukey->xcache) {
2170 COVERAGE_INC(dumped_duplicate_flow);
2171 } else {
2172 COVERAGE_INC(dumped_new_flow);
2173 }
2174 ovs_mutex_unlock(&ukey->mutex);
2175 continue;
2176 }
2177
2178 if (!used) {
2179 used = ukey->created;
2180 }
2181 if (kill_them_all || (used && used < now - max_idle)) {
2182 result = UKEY_DELETE;
2183 } else {
2184 result = revalidate_ukey(udpif, ukey, &f->stats, &odp_actions,
2185 reval_seq, &recircs);
2186 }
2187 ukey->dump_seq = dump_seq;
2188 ukey->flow_exists = result != UKEY_DELETE;
2189
2190 if (result != UKEY_KEEP) {
2191 /* Takes ownership of 'recircs'. */
2192 reval_op_init(&ops[n_ops++], result, udpif, ukey, &recircs,
2193 &odp_actions);
2194 }
2195 ovs_mutex_unlock(&ukey->mutex);
2196 }
2197
2198 if (n_ops) {
2199 /* Push datapath ops but defer ukey deletion to the 'sweep' phase. */
2200 push_dp_ops(udpif, ops, n_ops);
2201 }
2202 ovsrcu_quiesce();
2203 }
2204 dpif_flow_dump_thread_destroy(dump_thread);
2205 ofpbuf_uninit(&odp_actions);
2206 }
2207
2208 /* Pauses the 'revalidator'; it can only proceed after the main thread
2209 * calls udpif_resume_revalidators(). */
2210 static void
2211 revalidator_pause(struct revalidator *revalidator)
2212 {
2213 /* The first block is for sync'ing the pause with main thread. */
2214 ovs_barrier_block(&revalidator->udpif->pause_barrier);
2215 /* The second block is for pausing until main thread resumes. */
2216 ovs_barrier_block(&revalidator->udpif->pause_barrier);
2217 }
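/* The main thread pairs with the two blocks above (sketch, assuming the
 * pause/resume helpers elsewhere in this file block on the same barrier):
 *
 *     udpif_pause_revalidators(udpif);    // returns once every revalidator
 *                                         // has entered the first block
 *     ... reconfigure shared state ...
 *     udpif_resume_revalidators(udpif);   // releases the second block
 */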
2218
2219 static void
2220 revalidator_sweep__(struct revalidator *revalidator, bool purge)
2221 {
2222 struct udpif *udpif;
2223 uint64_t dump_seq, reval_seq;
2224 int slice;
2225
2226 udpif = revalidator->udpif;
2227 dump_seq = seq_read(udpif->dump_seq);
2228 reval_seq = seq_read(udpif->reval_seq);
2229 slice = revalidator - udpif->revalidators;
2230 ovs_assert(slice < udpif->n_revalidators);
2231
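/* Each revalidator sweeps a disjoint stripe of the umaps: thread 'slice'
 * visits umaps slice, slice + n_revalidators, slice + 2 * n_revalidators, and
 * so on. E.g. (assumed numbers) with 4 revalidators and N_UMAPS == 512, each
 * thread owns 128 umaps, so no ukey is swept twice. */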
2232 for (int i = slice; i < N_UMAPS; i += udpif->n_revalidators) {
2233 uint64_t odp_actions_stub[1024 / 8];
2234 struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(odp_actions_stub);
2235
2236 struct ukey_op ops[REVALIDATE_MAX_BATCH];
2237 struct udpif_key *ukey;
2238 struct umap *umap = &udpif->ukeys[i];
2239 size_t n_ops = 0;
2240
2241 CMAP_FOR_EACH(ukey, cmap_node, &umap->cmap) {
2242 bool flow_exists;
2243
2244 /* A handler thread could be holding a ukey lock while it installs a
2245 * new flow, so don't hang around waiting for access to it. */
2246 if (ovs_mutex_trylock(&ukey->mutex)) {
2247 continue;
2248 }
2249 flow_exists = ukey->flow_exists;
2250 if (flow_exists) {
2251 struct recirc_refs recircs = RECIRC_REFS_EMPTY_INITIALIZER;
2252 bool seq_mismatch = (ukey->dump_seq != dump_seq
2253 && ukey->reval_seq != reval_seq);
2254 enum reval_result result;
2255
2256 if (purge) {
2257 result = UKEY_DELETE;
2258 } else if (!seq_mismatch) {
2259 result = UKEY_KEEP;
2260 } else {
2261 struct dpif_flow_stats stats;
2262 COVERAGE_INC(revalidate_missed_dp_flow);
2263 memset(&stats, 0, sizeof stats);
2264 result = revalidate_ukey(udpif, ukey, &stats, &odp_actions,
2265 reval_seq, &recircs);
2266 }
2267 if (result != UKEY_KEEP) {
2268 /* Clears 'recircs' if filled by revalidate_ukey(). */
2269 reval_op_init(&ops[n_ops++], result, udpif, ukey, &recircs,
2270 &odp_actions);
2271 }
2272 }
2273 ovs_mutex_unlock(&ukey->mutex);
2274
2275 if (!flow_exists) {
2276 /* The common flow deletion case involves deletion of the flow
2277 * during the dump phase and ukey deletion here. */
2278 ovs_mutex_lock(&umap->mutex);
2279 ukey_delete(umap, ukey);
2280 ovs_mutex_unlock(&umap->mutex);
2281 }
2282
2283 if (n_ops == REVALIDATE_MAX_BATCH) {
2284 /* Update/delete missed flows and clean up corresponding ukeys
2285 * if necessary. */
2286 push_ukey_ops(udpif, umap, ops, n_ops);
2287 n_ops = 0;
2288 }
2289 }
2290
2291 if (n_ops) {
2292 push_ukey_ops(udpif, umap, ops, n_ops);
2293 }
2294
2295 ofpbuf_uninit(&odp_actions);
2296 ovsrcu_quiesce();
2297 }
2298 }
2299
2300 static void
2301 revalidator_sweep(struct revalidator *revalidator)
2302 {
2303 revalidator_sweep__(revalidator, false);
2304 }
2305
2306 static void
2307 revalidator_purge(struct revalidator *revalidator)
2308 {
2309 revalidator_sweep__(revalidator, true);
2310 }
2311
2312 /* In reaction to a dpif purge, purges all 'ukey's with the same 'pmd_id'. */
2313 static void
2314 dp_purge_cb(void *aux, unsigned pmd_id)
2315 {
2316 struct udpif *udpif = aux;
2317 size_t i;
2318
2319 udpif_pause_revalidators(udpif);
2320 for (i = 0; i < N_UMAPS; i++) {
2321 struct ukey_op ops[REVALIDATE_MAX_BATCH];
2322 struct udpif_key *ukey;
2323 struct umap *umap = &udpif->ukeys[i];
2324 size_t n_ops = 0;
2325
2326 CMAP_FOR_EACH(ukey, cmap_node, &umap->cmap) {
2327 if (ukey->pmd_id == pmd_id) {
2328 delete_op_init(udpif, &ops[n_ops++], ukey);
2329 if (n_ops == REVALIDATE_MAX_BATCH) {
2330 push_ukey_ops(udpif, umap, ops, n_ops);
2331 n_ops = 0;
2332 }
2333 }
2334 }
2335
2336 if (n_ops) {
2337 push_ukey_ops(udpif, umap, ops, n_ops);
2338 }
2339
2340 ovsrcu_quiesce();
2341 }
2342 udpif_resume_revalidators(udpif);
2343 }
2344 \f
2345 static void
2346 upcall_unixctl_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
2347 const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
2348 {
2349 struct ds ds = DS_EMPTY_INITIALIZER;
2350 struct udpif *udpif;
2351
2352 LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
2353 unsigned int flow_limit;
2354 bool ufid_enabled;
2355 size_t i;
2356
2357 atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
2358 ufid_enabled = udpif_use_ufid(udpif);
2359
2360 ds_put_format(&ds, "%s:\n", dpif_name(udpif->dpif));
2361 ds_put_format(&ds, "\tflows : (current %lu)"
2362 " (avg %u) (max %u) (limit %u)\n", udpif_get_n_flows(udpif),
2363 udpif->avg_n_flows, udpif->max_n_flows, flow_limit);
2364 ds_put_format(&ds, "\tdump duration : %lldms\n", udpif->dump_duration);
2365 ds_put_format(&ds, "\tufid enabled : ");
2366 if (ufid_enabled) {
2367 ds_put_format(&ds, "true\n");
2368 } else {
2369 ds_put_format(&ds, "false\n");
2370 }
2371 ds_put_char(&ds, '\n');
2372
2373 for (i = 0; i < n_revalidators; i++) {
2374 struct revalidator *revalidator = &udpif->revalidators[i];
2375 int j, elements = 0;
2376
2377 for (j = i; j < N_UMAPS; j += n_revalidators) {
2378 elements += cmap_count(&udpif->ukeys[j].cmap);
2379 }
2380 ds_put_format(&ds, "\t%u: (keys %d)\n", revalidator->id, elements);
2381 }
2382 }
2383
2384 unixctl_command_reply(conn, ds_cstr(&ds));
2385 ds_destroy(&ds);
2386 }
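/* Sample reply (hypothetical values), as rendered by the format strings
 * above:
 *
 *     system@ovs-system:
 *         flows : (current 13) (avg 7) (max 112) (limit 10000)
 *         dump duration : 12ms
 *         ufid enabled : true
 *
 *         25: (keys 42)
 *         26: (keys 39)
 */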
2387
2388 /* Disable using megaflows.
2389 *
2390 * This command is only needed for advanced debugging, so it's not
2391 * documented in the man page. */
2392 static void
2393 upcall_unixctl_disable_megaflows(struct unixctl_conn *conn,
2394 int argc OVS_UNUSED,
2395 const char *argv[] OVS_UNUSED,
2396 void *aux OVS_UNUSED)
2397 {
2398 atomic_store_relaxed(&enable_megaflows, false);
2399 udpif_flush_all_datapaths();
2400 unixctl_command_reply(conn, "megaflows disabled");
2401 }
2402
2403 /* Re-enable using megaflows.
2404 *
2405 * This command is only needed for advanced debugging, so it's not
2406 * documented in the man page. */
2407 static void
2408 upcall_unixctl_enable_megaflows(struct unixctl_conn *conn,
2409 int argc OVS_UNUSED,
2410 const char *argv[] OVS_UNUSED,
2411 void *aux OVS_UNUSED)
2412 {
2413 atomic_store_relaxed(&enable_megaflows, true);
2414 udpif_flush_all_datapaths();
2415 unixctl_command_reply(conn, "megaflows enabled");
2416 }
2417
2418 /* Disable skipping flow attributes during flow dump.
2419 *
2420 * This command is only needed for advanced debugging, so it's not
2421 * documented in the man page. */
2422 static void
2423 upcall_unixctl_disable_ufid(struct unixctl_conn *conn, int argc OVS_UNUSED,
2424 const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
2425 {
2426 atomic_store_relaxed(&enable_ufid, false);
2427 unixctl_command_reply(conn, "Datapath dumping tersely using UFID disabled");
2428 }
2429
2430 /* Re-enable skipping flow attributes during flow dump.
2431 *
2432 * This command is only needed for advanced debugging, so it's not documented
2433 * in the man page. */
2434 static void
2435 upcall_unixctl_enable_ufid(struct unixctl_conn *conn, int argc OVS_UNUSED,
2436 const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
2437 {
2438 atomic_store_relaxed(&enable_ufid, true);
2439 unixctl_command_reply(conn, "Datapath dumping tersely using UFID enabled "
2440 "for supported datapaths");
2441 }
2442
2443 /* Set the flow limit.
2444 *
2445 * This command is only needed for advanced debugging, so it's not
2446 * documented in the man page. */
2447 static void
2448 upcall_unixctl_set_flow_limit(struct unixctl_conn *conn,
2449 int argc OVS_UNUSED,
2450 const char *argv[],
2451 void *aux OVS_UNUSED)
2452 {
2453 struct ds ds = DS_EMPTY_INITIALIZER;
2454 struct udpif *udpif;
2455 unsigned int flow_limit = atoi(argv[1]);
2456
2457 LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
2458 atomic_store_relaxed(&udpif->flow_limit, flow_limit);
2459 }
2460 ds_put_format(&ds, "set flow_limit to %u\n", flow_limit);
2461 unixctl_command_reply(conn, ds_cstr(&ds));
2462 ds_destroy(&ds);
2463 }
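/* Example invocation (assuming this handler is registered as
 * "upcall/set-flow-limit", which is done elsewhere in this file):
 *
 *     $ ovs-appctl upcall/set-flow-limit 150000
 *     set flow_limit to 150000
 */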
2464
2465 static void
2466 upcall_unixctl_dump_wait(struct unixctl_conn *conn,
2467 int argc OVS_UNUSED,
2468 const char *argv[] OVS_UNUSED,
2469 void *aux OVS_UNUSED)
2470 {
2471 if (ovs_list_is_singleton(&all_udpifs)) {
2472 struct udpif *udpif = NULL;
2473 size_t len;
2474
2475 udpif = OBJECT_CONTAINING(ovs_list_front(&all_udpifs), udpif, list_node);
2476 len = (udpif->n_conns + 1) * sizeof *udpif->conns;
2477 udpif->conn_seq = seq_read(udpif->dump_seq);
2478 udpif->conns = xrealloc(udpif->conns, len);
2479 udpif->conns[udpif->n_conns++] = conn;
2480 } else {
2481 unixctl_command_reply_error(conn, "can't wait on multiple udpifs.");
2482 }
2483 }
2484
2485 static void
2486 upcall_unixctl_purge(struct unixctl_conn *conn, int argc OVS_UNUSED,
2487 const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
2488 {
2489 struct udpif *udpif;
2490
2491 LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
2492 int n;
2493
2494 for (n = 0; n < udpif->n_revalidators; n++) {
2495 revalidator_purge(&udpif->revalidators[n]);
2496 }
2497 }
2498 unixctl_command_reply(conn, "");
2499 }