/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */
#include <config.h>

#include "ofproto-dpif-upcall.h"

#include "lib/dpif-provider.h"
#include "openvswitch/dynamic-string.h"
#include "fail-open.h"
#include "guarded-list.h"
#include "openvswitch/list.h"
#include "openvswitch/ofpbuf.h"
#include "ofproto-dpif-ipfix.h"
#include "ofproto-dpif-sflow.h"
#include "ofproto-dpif-xlate.h"
#include "ofproto-dpif-xlate-cache.h"
#include "ofproto-dpif-trace.h"
#include "openvswitch/poll-loop.h"
#include "openvswitch/vlog.h"
#include "lib/netdev-provider.h"
#define UPCALL_MAX_BATCH 64
#define REVALIDATE_MAX_BATCH 50
VLOG_DEFINE_THIS_MODULE(ofproto_dpif_upcall);

COVERAGE_DEFINE(dumped_duplicate_flow);
COVERAGE_DEFINE(dumped_new_flow);
COVERAGE_DEFINE(handler_duplicate_upcall);
COVERAGE_DEFINE(upcall_ukey_contention);
COVERAGE_DEFINE(upcall_ukey_replace);
COVERAGE_DEFINE(revalidate_missed_dp_flow);
COVERAGE_DEFINE(upcall_flow_limit_hit);
COVERAGE_DEFINE(upcall_flow_limit_kill);
/* A thread that reads upcalls from dpif, forwards each upcall's packet,
 * and possibly sets up a kernel flow as a cache. */
struct handler {
    struct udpif *udpif;               /* Parent udpif. */
    pthread_t thread;                  /* Thread ID. */
    uint32_t handler_id;               /* Handler id. */
};
/* In the absence of a multiple-writer multiple-reader datastructure for
 * storing udpif_keys ("ukeys"), we use a large number of cmaps, each with its
 * own lock for writing. */
#define N_UMAPS 512 /* per udpif. */
struct umap {
    struct ovs_mutex mutex;            /* Take for writing to the following. */
    struct cmap cmap;                  /* Datapath flow keys. */
};
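/* A ukey with hash H lives in udpif->ukeys[H % N_UMAPS]: writers take that
 * umap's mutex, while readers may traverse the cmap locklessly under RCU
 * (see ukey_lookup() and ukey_install__() below). */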
/* A thread that processes datapath flows, updates OpenFlow statistics, and
 * updates or removes them if necessary.
 *
 * Revalidator threads operate in two phases: "dump" and "sweep". In between
 * each phase, all revalidators sync up so that all revalidator threads are
 * either in one phase or the other, but not a combination.
 *
 * During the dump phase, revalidators fetch flows from the datapath and
 * attribute the statistics to OpenFlow rules. Each datapath flow has a
 * corresponding ukey which caches the most recently seen statistics. If
 * a flow needs to be deleted (for example, because it is unused over a
 * period of time), revalidator threads may delete the flow during the
 * dump phase. The datapath is not guaranteed to reliably dump all flows
 * from the datapath, and there is no mapping between datapath flows and
 * revalidators, so a particular flow may be handled by zero or more
 * revalidators during a single dump phase. To avoid duplicate attribution
 * of statistics, ukeys are never deleted during this phase.
 *
 * During the sweep phase, each revalidator takes ownership of a different
 * slice of umaps and sweeps through all ukeys in those umaps to figure out
 * whether they need to be deleted. During this phase, revalidators may
 * fetch individual flows which were not dumped during the dump phase to
 * validate them and attribute statistics.
 */
struct revalidator {
    struct udpif *udpif;               /* Parent udpif. */
    pthread_t thread;                  /* Thread ID. */
    unsigned int id;                   /* ovsthread_id_self(). */
};
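/* The dump and sweep phases above are synchronized with 'reval_barrier': the
 * leader revalidator creates the flow dump, every thread blocks on the
 * barrier before dumping, again before sweeping, and once more before the
 * leader destroys the dump state (see udpif_revalidator() below). */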
/* An upcall handler for ofproto_dpif.
 *
 * udpif keeps records of two kinds of logically separate units:
 *
 * upcall handling
 * ---------------
 *
 *    - An array of 'struct handler's for upcall handling and flow
 *      installation.
 *
 * flow revalidation
 * -----------------
 *
 *    - Revalidation threads which read the datapath flow table and maintain
 *      them.
 */
struct udpif {
    struct ovs_list list_node;         /* In all_udpifs list. */

    struct dpif *dpif;                 /* Datapath handle. */
    struct dpif_backer *backer;        /* Opaque dpif_backer pointer. */

    struct handler *handlers;          /* Upcall handlers. */
    size_t n_handlers;

    struct revalidator *revalidators;  /* Flow revalidators. */
    size_t n_revalidators;

    struct latch exit_latch;           /* Tells child threads to exit. */

    /* Revalidation. */
    struct seq *reval_seq;             /* Incremented to force revalidation. */
    bool reval_exit;                   /* Set by leader on 'exit_latch'. */
    struct ovs_barrier reval_barrier;  /* Barrier used by revalidators. */
    struct dpif_flow_dump *dump;       /* DPIF flow dump state. */
    long long int dump_duration;       /* Duration of the last flow dump. */
    struct seq *dump_seq;              /* Increments each dump iteration. */
    atomic_bool enable_ufid;           /* If true, skip dumping flow attrs. */

    /* These variables provide a mechanism for the main thread to pause
     * all revalidation without having to completely shut the threads down.
     * 'pause_latch' is shared between the main thread and the lead
     * revalidator thread, so when it is desirable to halt revalidation, the
     * main thread will set the latch. 'pause' and 'pause_barrier' are shared
     * by revalidator threads. The lead revalidator will set 'pause' when it
     * observes the latch has been set, and this will cause all revalidator
     * threads to wait on 'pause_barrier' at the beginning of the next
     * revalidation round. */
    bool pause;                        /* Set by leader on 'pause_latch'. */
    struct latch pause_latch;          /* Set to force revalidators pause. */
    struct ovs_barrier pause_barrier;  /* Barrier used to pause all */
                                       /* revalidators by main thread. */

    /* There are 'N_UMAPS' maps containing 'struct udpif_key' elements.
     *
     * During the flow dump phase, revalidators insert into these with a random
     * distribution. During the garbage collection phase, each revalidator
     * takes care of garbage collecting a slice of these maps. */
    struct umap *ukeys;

    /* Datapath flow statistics. */
    unsigned int max_n_flows;
    unsigned int avg_n_flows;

    /* Following fields are accessed and modified by different threads. */
    atomic_uint flow_limit;            /* Datapath flow hard limit. */

    /* n_flows_mutex prevents multiple threads updating these concurrently. */
    atomic_uint n_flows;               /* Number of flows in the datapath. */
    atomic_llong n_flows_timestamp;    /* Last time n_flows was updated. */
    struct ovs_mutex n_flows_mutex;

    /* Following fields are accessed and modified only from the main thread. */
    struct unixctl_conn **conns;       /* Connections waiting on dump_seq. */
    uint64_t conn_seq;                 /* Corresponds to 'dump_seq' when
                                          conns[n_conns-1] was stored. */
    size_t n_conns;                    /* Number of connections waiting. */

    long long int offload_rebalance_time;  /* Time of last offload rebalance */
};
enum upcall_type {
    BAD_UPCALL,                 /* Some kind of bug somewhere. */
    MISS_UPCALL,                /* A flow miss. */
    SLOW_PATH_UPCALL,           /* Slow path upcall. */
    SFLOW_UPCALL,               /* sFlow sample. */
    FLOW_SAMPLE_UPCALL,         /* Per-flow sampling. */
    IPFIX_UPCALL,               /* Per-bridge sampling. */
    CONTROLLER_UPCALL           /* Destined for the controller. */
};
struct upcall {
    struct ofproto_dpif *ofproto;  /* Parent ofproto. */
    const struct recirc_id_node *recirc; /* Recirculation context. */
    bool have_recirc_ref;          /* Reference held on recirc ctx? */

    /* The flow and packet are only required to be constant when using
     * dpif-netdev. If a modification is absolutely necessary, a const cast
     * may be used with other datapaths. */
    const struct flow *flow;       /* Parsed representation of the packet. */
    enum odp_key_fitness fitness;  /* Fitness of 'flow' relative to ODP key. */
    const ovs_u128 *ufid;          /* Unique identifier for 'flow'. */
    unsigned pmd_id;               /* Datapath poll mode driver id. */
    const struct dp_packet *packet; /* Packet associated with this upcall. */
    ofp_port_t ofp_in_port;        /* OpenFlow in port, or OFPP_NONE. */
    uint16_t mru;                  /* If !0, Maximum receive unit of
                                      fragmented IP packet */
    uint64_t hash;

    enum upcall_type type;         /* Type of the upcall. */
    const struct nlattr *actions;  /* Flow actions in DPIF_UC_ACTION Upcalls. */

    bool xout_initialized;         /* True if 'xout' must be uninitialized. */
    struct xlate_out xout;         /* Result of xlate_actions(). */
    struct ofpbuf odp_actions;     /* Datapath actions from xlate_actions(). */
    struct flow_wildcards wc;      /* Dependencies that megaflow must match. */
    struct ofpbuf put_actions;     /* Actions 'put' in the fastpath. */

    struct dpif_ipfix *ipfix;      /* IPFIX pointer or NULL. */
    struct dpif_sflow *sflow;      /* SFlow pointer or NULL. */

    struct udpif_key *ukey;        /* Revalidator flow cache. */
    bool ukey_persists;            /* Set true to keep 'ukey' beyond the
                                      lifetime of this upcall. */

    uint64_t reval_seq;            /* udpif->reval_seq at translation time. */

    /* Not used by the upcall callback interface. */
    const struct nlattr *key;      /* Datapath flow key. */
    size_t key_len;                /* Datapath flow key length. */
    const struct nlattr *out_tun_key;  /* Datapath output tunnel key. */

    struct user_action_cookie cookie;

    uint64_t odp_actions_stub[1024 / 8]; /* Stub for odp_actions. */
};
/* Ukeys must transition through these states using transition_ukey(). */
enum ukey_state {
    UKEY_CREATED = 0,
    UKEY_VISIBLE,       /* Ukey is in umap, datapath flow install is queued. */
    UKEY_OPERATIONAL,   /* Ukey is in umap, datapath flow is installed. */
    UKEY_EVICTING,      /* Ukey is in umap, datapath flow delete is queued. */
    UKEY_EVICTED,       /* Ukey is in umap, datapath flow is deleted. */
    UKEY_DELETED,       /* Ukey removed from umap, ukey free is deferred. */
};
#define N_UKEY_STATES (UKEY_DELETED + 1)
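/* The normal lifetime of a ukey therefore reads left to right:
 *
 *   UKEY_CREATED -> UKEY_VISIBLE -> UKEY_OPERATIONAL -> UKEY_EVICTING
 *                -> UKEY_EVICTED -> UKEY_DELETED
 *
 * transition_ukey() enforces that a ukey never moves to a lower-numbered
 * state; the full set of legal transitions is documented at
 * transition_ukey_at(). */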
/* 'udpif_key's are responsible for tracking the little bit of state udpif
 * needs to do flow expiration which can't be pulled directly from the
 * datapath. They may be created by any handler or revalidator thread at any
 * time, and read by any revalidator during the dump phase. They are however
 * each owned by a single revalidator which takes care of destroying them
 * during the garbage-collection phase.
 *
 * The mutex within the ukey protects some members of the ukey. The ukey
 * itself is protected by RCU and is held within a umap in the parent udpif.
 * Adding or removing a ukey from a umap is only safe when holding the
 * corresponding umap lock. */
struct udpif_key {
    struct cmap_node cmap_node;     /* In parent revalidator 'ukeys' map. */

    /* These elements are read only once created, and therefore aren't
     * protected by a mutex. */
    const struct nlattr *key;      /* Datapath flow key. */
    size_t key_len;                /* Length of 'key'. */
    const struct nlattr *mask;     /* Datapath flow mask. */
    size_t mask_len;               /* Length of 'mask'. */
    ovs_u128 ufid;                 /* Unique flow identifier. */
    bool ufid_present;             /* True if 'ufid' is in datapath. */
    uint32_t hash;                 /* Pre-computed hash for 'key'. */
    unsigned pmd_id;               /* Datapath poll mode driver id. */

    struct ovs_mutex mutex;                   /* Guards the following. */
    struct dpif_flow_stats stats OVS_GUARDED; /* Last known stats.*/
    long long int created OVS_GUARDED;        /* Estimate of creation time. */
    uint64_t dump_seq OVS_GUARDED;            /* Tracks udpif->dump_seq. */
    uint64_t reval_seq OVS_GUARDED;           /* Tracks udpif->reval_seq. */
    enum ukey_state state OVS_GUARDED;        /* Tracks ukey lifetime. */

    /* 'state' debug information. */
    unsigned int state_thread OVS_GUARDED;    /* Thread that transitions. */
    const char *state_where OVS_GUARDED;      /* transition_ukey() locator. */

    /* Datapath flow actions as nlattrs. Protected by RCU. Read with
     * ukey_get_actions(), and write with ukey_set_actions(). */
    OVSRCU_TYPE(struct ofpbuf *) actions;

    struct xlate_cache *xcache OVS_GUARDED;   /* Cache for xlate entries that
                                               * are affected by this ukey.
                                               * Used for stats and learning.*/
    union {
        struct odputil_keybuf buf;
        struct nlattr nla;
    } keybuf, maskbuf;

    uint32_t key_recirc_id;   /* Non-zero if reference is held by the ukey. */
    struct recirc_refs recircs;  /* Action recirc IDs with references held. */

#define OFFL_REBAL_INTVL_MSEC  3000     /* dynamic offload rebalance freq */
    struct netdev *in_netdev;           /* in_odp_port's netdev */
    bool offloaded;                     /* True if flow is offloaded */
    uint64_t flow_pps_rate;             /* Packets-Per-Second rate */
    long long int flow_time;            /* last pps update time */
    uint64_t flow_packets;              /* #pkts seen in interval */
    uint64_t flow_backlog_packets;      /* prev-mode #pkts (offl or kernel) */
};
/* Datapath operation with optional ukey attached. */
struct ukey_op {
    struct udpif_key *ukey;
    struct dpif_flow_stats stats;  /* Stats for 'op'. */
    struct dpif_op dop;            /* Flow operation. */
};
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
static struct ovs_list all_udpifs = OVS_LIST_INITIALIZER(&all_udpifs);
static size_t recv_upcalls(struct handler *);
static int process_upcall(struct udpif *, struct upcall *,
                          struct ofpbuf *odp_actions, struct flow_wildcards *);
static void handle_upcalls(struct udpif *, struct upcall *, size_t n_upcalls);
static void udpif_stop_threads(struct udpif *, bool delete_flows);
static void udpif_start_threads(struct udpif *, size_t n_handlers,
                                size_t n_revalidators);
static void udpif_pause_revalidators(struct udpif *);
static void udpif_resume_revalidators(struct udpif *);
static void *udpif_upcall_handler(void *);
static void *udpif_revalidator(void *);
static unsigned long udpif_get_n_flows(struct udpif *);
static void revalidate(struct revalidator *);
static void revalidator_pause(struct revalidator *);
static void revalidator_sweep(struct revalidator *);
static void revalidator_purge(struct revalidator *);
static void upcall_unixctl_show(struct unixctl_conn *conn, int argc,
                                const char *argv[], void *aux);
static void upcall_unixctl_disable_megaflows(struct unixctl_conn *, int argc,
                                             const char *argv[], void *aux);
static void upcall_unixctl_enable_megaflows(struct unixctl_conn *, int argc,
                                            const char *argv[], void *aux);
static void upcall_unixctl_disable_ufid(struct unixctl_conn *, int argc,
                                        const char *argv[], void *aux);
static void upcall_unixctl_enable_ufid(struct unixctl_conn *, int argc,
                                       const char *argv[], void *aux);
static void upcall_unixctl_set_flow_limit(struct unixctl_conn *conn, int argc,
                                          const char *argv[], void *aux);
static void upcall_unixctl_dump_wait(struct unixctl_conn *conn, int argc,
                                     const char *argv[], void *aux);
static void upcall_unixctl_purge(struct unixctl_conn *conn, int argc,
                                 const char *argv[], void *aux);

static struct udpif_key *ukey_create_from_upcall(struct upcall *,
                                                 struct flow_wildcards *);
static int ukey_create_from_dpif_flow(const struct udpif *,
                                      const struct dpif_flow *,
                                      struct udpif_key **);
static void ukey_get_actions(struct udpif_key *, const struct nlattr **actions,
                             size_t *size);
static bool ukey_install__(struct udpif *, struct udpif_key *ukey)
    OVS_TRY_LOCK(true, ukey->mutex);
static bool ukey_install(struct udpif *udpif, struct udpif_key *ukey);
static void transition_ukey_at(struct udpif_key *ukey, enum ukey_state dst,
                               const char *where)
    OVS_REQUIRES(ukey->mutex);
#define transition_ukey(UKEY, DST) \
    transition_ukey_at(UKEY, DST, OVS_SOURCE_LOCATOR)
static struct udpif_key *ukey_lookup(struct udpif *udpif,
                                     const ovs_u128 *ufid,
                                     const unsigned pmd_id);
static int ukey_acquire(struct udpif *, const struct dpif_flow *,
                        struct udpif_key **result, int *error);
static void ukey_delete__(struct udpif_key *);
static void ukey_delete(struct umap *, struct udpif_key *);
static enum upcall_type classify_upcall(enum dpif_upcall_type type,
                                        const struct nlattr *userdata,
                                        struct user_action_cookie *cookie);

static void put_op_init(struct ukey_op *op, struct udpif_key *ukey,
                        enum dpif_flow_put_flags flags);
static void delete_op_init(struct udpif *udpif, struct ukey_op *op,
                           struct udpif_key *ukey);

static int upcall_receive(struct upcall *, const struct dpif_backer *,
                          const struct dp_packet *packet, enum dpif_upcall_type,
                          const struct nlattr *userdata, const struct flow *,
                          const unsigned int mru,
                          const ovs_u128 *ufid, const unsigned pmd_id);
static void upcall_uninit(struct upcall *);

static void udpif_flow_rebalance(struct udpif *udpif);
static int udpif_flow_program(struct udpif *udpif, struct udpif_key *ukey,
                              enum dpif_offload_type offload_type);
static int udpif_flow_unprogram(struct udpif *udpif, struct udpif_key *ukey,
                                enum dpif_offload_type offload_type);

static upcall_callback upcall_cb;
static dp_purge_callback dp_purge_cb;

static atomic_bool enable_megaflows = ATOMIC_VAR_INIT(true);
static atomic_bool enable_ufid = ATOMIC_VAR_INIT(true);
void
udpif_init(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
    if (ovsthread_once_start(&once)) {
        unixctl_command_register("upcall/show", "", 0, 0, upcall_unixctl_show,
                                 NULL);
        unixctl_command_register("upcall/disable-megaflows", "", 0, 0,
                                 upcall_unixctl_disable_megaflows, NULL);
        unixctl_command_register("upcall/enable-megaflows", "", 0, 0,
                                 upcall_unixctl_enable_megaflows, NULL);
        unixctl_command_register("upcall/disable-ufid", "", 0, 0,
                                 upcall_unixctl_disable_ufid, NULL);
        unixctl_command_register("upcall/enable-ufid", "", 0, 0,
                                 upcall_unixctl_enable_ufid, NULL);
        unixctl_command_register("upcall/set-flow-limit", "flow-limit-number",
                                 1, 1, upcall_unixctl_set_flow_limit, NULL);
        unixctl_command_register("revalidator/wait", "", 0, 0,
                                 upcall_unixctl_dump_wait, NULL);
        unixctl_command_register("revalidator/purge", "", 0, 0,
                                 upcall_unixctl_purge, NULL);
        ovsthread_once_done(&once);
    }
}
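/* Each command registered above is reachable at runtime through ovs-appctl,
 * e.g. "ovs-appctl upcall/show" or "ovs-appctl upcall/set-flow-limit 150000"
 * (the numeric argument here is just an example value). */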
struct udpif *
udpif_create(struct dpif_backer *backer, struct dpif *dpif)
{
    struct udpif *udpif = xzalloc(sizeof *udpif);

    udpif->dpif = dpif;
    udpif->backer = backer;
    atomic_init(&udpif->flow_limit, MIN(ofproto_flow_limit, 10000));
    udpif->reval_seq = seq_create();
    udpif->dump_seq = seq_create();
    latch_init(&udpif->exit_latch);
    latch_init(&udpif->pause_latch);
    ovs_list_push_back(&all_udpifs, &udpif->list_node);
    atomic_init(&udpif->enable_ufid, false);
    atomic_init(&udpif->n_flows, 0);
    atomic_init(&udpif->n_flows_timestamp, LLONG_MIN);
    ovs_mutex_init(&udpif->n_flows_mutex);
    udpif->ukeys = xmalloc(N_UMAPS * sizeof *udpif->ukeys);
    for (int i = 0; i < N_UMAPS; i++) {
        cmap_init(&udpif->ukeys[i].cmap);
        ovs_mutex_init(&udpif->ukeys[i].mutex);
    }

    dpif_register_upcall_cb(dpif, upcall_cb, udpif);
    dpif_register_dp_purge_cb(dpif, dp_purge_cb, udpif);

    return udpif;
}
void
udpif_run(struct udpif *udpif)
{
    if (udpif->conns && udpif->conn_seq != seq_read(udpif->dump_seq)) {
        int i;

        for (i = 0; i < udpif->n_conns; i++) {
            unixctl_command_reply(udpif->conns[i], NULL);
        }
        free(udpif->conns);
        udpif->conns = NULL;
        udpif->n_conns = 0;
    }
}
void
udpif_destroy(struct udpif *udpif)
{
    udpif_stop_threads(udpif, false);

    dpif_register_dp_purge_cb(udpif->dpif, NULL, udpif);
    dpif_register_upcall_cb(udpif->dpif, NULL, udpif);

    for (int i = 0; i < N_UMAPS; i++) {
        cmap_destroy(&udpif->ukeys[i].cmap);
        ovs_mutex_destroy(&udpif->ukeys[i].mutex);
    }
    free(udpif->ukeys);
    udpif->ukeys = NULL;

    ovs_list_remove(&udpif->list_node);
    latch_destroy(&udpif->exit_latch);
    latch_destroy(&udpif->pause_latch);
    seq_destroy(udpif->reval_seq);
    seq_destroy(udpif->dump_seq);
    ovs_mutex_destroy(&udpif->n_flows_mutex);
    free(udpif);
}
/* Stops the handler and revalidator threads.
 *
 * If 'delete_flows' is true, we delete ukeys and delete all flows from the
 * datapath. Otherwise, we end up double-counting stats for flows that remain
 * in the datapath. If 'delete_flows' is false, we skip this step. This is
 * appropriate if OVS is about to exit anyway and it is desirable to let
 * existing network connections continue being forwarded afterward. */
static void
udpif_stop_threads(struct udpif *udpif, bool delete_flows)
{
    if (udpif && (udpif->n_handlers != 0 || udpif->n_revalidators != 0)) {
        size_t i;

        /* Tell the threads to exit. */
        latch_set(&udpif->exit_latch);

        /* Wait for the threads to exit. Quiesce because this can take a long
         * time. */
        ovsrcu_quiesce_start();
        for (i = 0; i < udpif->n_handlers; i++) {
            xpthread_join(udpif->handlers[i].thread, NULL);
        }
        for (i = 0; i < udpif->n_revalidators; i++) {
            xpthread_join(udpif->revalidators[i].thread, NULL);
        }
        dpif_disable_upcall(udpif->dpif);
        ovsrcu_quiesce_end();

        if (delete_flows) {
            for (i = 0; i < udpif->n_revalidators; i++) {
                revalidator_purge(&udpif->revalidators[i]);
            }
        }

        latch_poll(&udpif->exit_latch);

        ovs_barrier_destroy(&udpif->reval_barrier);
        ovs_barrier_destroy(&udpif->pause_barrier);

        free(udpif->revalidators);
        udpif->revalidators = NULL;
        udpif->n_revalidators = 0;

        free(udpif->handlers);
        udpif->handlers = NULL;
        udpif->n_handlers = 0;
    }
}
/* Starts the handler and revalidator threads. */
static void
udpif_start_threads(struct udpif *udpif, size_t n_handlers_,
                    size_t n_revalidators_)
{
    if (udpif && n_handlers_ && n_revalidators_) {
        /* Creating a thread can take a significant amount of time on some
         * systems, even hundreds of milliseconds, so quiesce around it. */
        ovsrcu_quiesce_start();

        udpif->n_handlers = n_handlers_;
        udpif->n_revalidators = n_revalidators_;

        udpif->handlers = xzalloc(udpif->n_handlers * sizeof *udpif->handlers);
        for (size_t i = 0; i < udpif->n_handlers; i++) {
            struct handler *handler = &udpif->handlers[i];

            handler->udpif = udpif;
            handler->handler_id = i;
            handler->thread = ovs_thread_create(
                "handler", udpif_upcall_handler, handler);
        }

        atomic_init(&udpif->enable_ufid, udpif->backer->rt_support.ufid);
        dpif_enable_upcall(udpif->dpif);

        ovs_barrier_init(&udpif->reval_barrier, udpif->n_revalidators);
        ovs_barrier_init(&udpif->pause_barrier, udpif->n_revalidators + 1);
        udpif->reval_exit = false;
        udpif->pause = false;
        udpif->offload_rebalance_time = time_msec();
        udpif->revalidators = xzalloc(udpif->n_revalidators
                                      * sizeof *udpif->revalidators);
        for (size_t i = 0; i < udpif->n_revalidators; i++) {
            struct revalidator *revalidator = &udpif->revalidators[i];

            revalidator->udpif = udpif;
            revalidator->thread = ovs_thread_create(
                "revalidator", udpif_revalidator, revalidator);
        }
        ovsrcu_quiesce_end();
    }
}
/* Pauses all revalidators. Should only be called by the main thread.
 * When function returns, all revalidators are paused and will proceed
 * only after udpif_resume_revalidators() is called. */
static void
udpif_pause_revalidators(struct udpif *udpif)
{
    if (udpif->backer->recv_set_enable) {
        latch_set(&udpif->pause_latch);
        ovs_barrier_block(&udpif->pause_barrier);
    }
}
/* Resumes the pausing of revalidators. Should only be called by the
 * main thread. */
static void
udpif_resume_revalidators(struct udpif *udpif)
{
    if (udpif->backer->recv_set_enable) {
        latch_poll(&udpif->pause_latch);
        ovs_barrier_block(&udpif->pause_barrier);
    }
}
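/* Note that 'pause_barrier' is initialized for n_revalidators + 1 threads in
 * udpif_start_threads(): the extra slot is the main thread, which blocks in
 * the two functions above until every revalidator has reached the same
 * barrier, making pause and resume fully synchronized handshakes. */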
/* Tells 'udpif' how many threads it should use to handle upcalls.
 * 'n_handlers_' and 'n_revalidators_' can never be zero. 'udpif''s
 * datapath handle must have packet reception enabled before starting
 * threads. */
void
udpif_set_threads(struct udpif *udpif, size_t n_handlers_,
                  size_t n_revalidators_)
{
    ovs_assert(udpif);
    ovs_assert(n_handlers_ && n_revalidators_);

    if (udpif->n_handlers != n_handlers_
        || udpif->n_revalidators != n_revalidators_) {
        udpif_stop_threads(udpif, true);
    }

    if (!udpif->handlers && !udpif->revalidators) {
        int error;

        error = dpif_handlers_set(udpif->dpif, n_handlers_);
        if (error) {
            VLOG_ERR("failed to configure handlers in dpif %s: %s",
                     dpif_name(udpif->dpif), ovs_strerror(error));
            return;
        }

        udpif_start_threads(udpif, n_handlers_, n_revalidators_);
    }
}
/* Notifies 'udpif' that something changed which may render previous
 * xlate_actions() results invalid. */
void
udpif_revalidate(struct udpif *udpif)
{
    seq_change(udpif->reval_seq);
}
/* Returns a seq which increments every time 'udpif' pulls stats from the
 * datapath. Callers can use this to get a sense of when might be a good time
 * to do periodic work which relies on relatively up to date statistics. */
struct seq *
udpif_dump_seq(struct udpif *udpif)
{
    return udpif->dump_seq;
}
void
udpif_get_memory_usage(struct udpif *udpif, struct simap *usage)
{
    size_t i;

    simap_increase(usage, "handlers", udpif->n_handlers);
    simap_increase(usage, "revalidators", udpif->n_revalidators);
    for (i = 0; i < N_UMAPS; i++) {
        simap_increase(usage, "udpif keys", cmap_count(&udpif->ukeys[i].cmap));
    }
}
/* Remove flows from a single datapath. */
void
udpif_flush(struct udpif *udpif)
{
    size_t n_handlers_ = udpif->n_handlers;
    size_t n_revalidators_ = udpif->n_revalidators;

    udpif_stop_threads(udpif, true);
    dpif_flow_flush(udpif->dpif);
    udpif_start_threads(udpif, n_handlers_, n_revalidators_);
}
/* Removes all flows from all datapaths. */
static void
udpif_flush_all_datapaths(void)
{
    struct udpif *udpif;

    LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
        udpif_flush(udpif);
    }
}
static bool
udpif_use_ufid(struct udpif *udpif)
{
    bool enable;

    atomic_read_relaxed(&enable_ufid, &enable);
    return enable && udpif->backer->rt_support.ufid;
}
static unsigned long
udpif_get_n_flows(struct udpif *udpif)
{
    long long int time, now;
    unsigned long flow_count;

    now = time_msec();
    atomic_read_relaxed(&udpif->n_flows_timestamp, &time);
    if (time < now - 100 && !ovs_mutex_trylock(&udpif->n_flows_mutex)) {
        struct dpif_dp_stats stats;

        atomic_store_relaxed(&udpif->n_flows_timestamp, now);
        dpif_get_dp_stats(udpif->dpif, &stats);
        flow_count = stats.n_flows;
        atomic_store_relaxed(&udpif->n_flows, flow_count);
        ovs_mutex_unlock(&udpif->n_flows_mutex);
    } else {
        atomic_read_relaxed(&udpif->n_flows, &flow_count);
    }
    return flow_count;
}
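/* The flow count is thus refreshed from the datapath at most once every
 * 100 ms, and only by whichever thread wins the trylock; every other caller
 * reads the cached 'n_flows' atomic, which keeps this cheap enough to call
 * from the per-upcall path in should_install_flow(). */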
/* The upcall handler thread tries to read a batch of UPCALL_MAX_BATCH
 * upcalls from dpif, processes the batch and installs corresponding flows
 * in dpif. */
static void *
udpif_upcall_handler(void *arg)
{
    struct handler *handler = arg;
    struct udpif *udpif = handler->udpif;

    while (!latch_is_set(&handler->udpif->exit_latch)) {
        if (recv_upcalls(handler)) {
            poll_immediate_wake();
        } else {
            dpif_recv_wait(udpif->dpif, handler->handler_id);
            latch_wait(&udpif->exit_latch);
        }
        poll_block();
    }

    return NULL;
}
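/* The poll-loop pattern above: if recv_upcalls() returned a non-empty batch,
 * wake immediately to try for another batch; otherwise sleep until either the
 * dpif has more upcalls for this handler or the exit latch is set. */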
static size_t
recv_upcalls(struct handler *handler)
{
    struct udpif *udpif = handler->udpif;
    uint64_t recv_stubs[UPCALL_MAX_BATCH][512 / 8];
    struct ofpbuf recv_bufs[UPCALL_MAX_BATCH];
    struct dpif_upcall dupcalls[UPCALL_MAX_BATCH];
    struct upcall upcalls[UPCALL_MAX_BATCH];
    struct flow flows[UPCALL_MAX_BATCH];
    size_t n_upcalls, i;

    n_upcalls = 0;
    while (n_upcalls < UPCALL_MAX_BATCH) {
        struct ofpbuf *recv_buf = &recv_bufs[n_upcalls];
        struct dpif_upcall *dupcall = &dupcalls[n_upcalls];
        struct upcall *upcall = &upcalls[n_upcalls];
        struct flow *flow = &flows[n_upcalls];
        unsigned int mru = 0;
        uint64_t hash = 0;
        int error;

        ofpbuf_use_stub(recv_buf, recv_stubs[n_upcalls],
                        sizeof recv_stubs[n_upcalls]);
        if (dpif_recv(udpif->dpif, handler->handler_id, dupcall, recv_buf)) {
            ofpbuf_uninit(recv_buf);
            break;
        }

        upcall->fitness = odp_flow_key_to_flow(dupcall->key, dupcall->key_len,
                                               flow);
        if (upcall->fitness == ODP_FIT_ERROR) {
            goto free_dupcall;
        }

        if (dupcall->mru) {
            mru = nl_attr_get_u16(dupcall->mru);
        }

        if (dupcall->hash) {
            hash = nl_attr_get_u64(dupcall->hash);
        }

        error = upcall_receive(upcall, udpif->backer, &dupcall->packet,
                               dupcall->type, dupcall->userdata, flow, mru,
                               &dupcall->ufid, PMD_ID_NULL);
        if (error) {
            if (error == ENODEV) {
                /* Received packet on datapath port for which we couldn't
                 * associate an ofproto. This can happen if a port is removed
                 * while traffic is being received. Print a rate-limited
                 * message in case it happens frequently. */
                dpif_flow_put(udpif->dpif, DPIF_FP_CREATE, dupcall->key,
                              dupcall->key_len, NULL, 0, NULL, 0,
                              &dupcall->ufid, PMD_ID_NULL, NULL);
                VLOG_INFO_RL(&rl, "received packet on unassociated datapath "
                             "port %"PRIu32, flow->in_port.odp_port);
            }
            goto free_dupcall;
        }

        upcall->key = dupcall->key;
        upcall->key_len = dupcall->key_len;
        upcall->ufid = &dupcall->ufid;
        upcall->hash = hash;

        upcall->out_tun_key = dupcall->out_tun_key;
        upcall->actions = dupcall->actions;

        pkt_metadata_from_flow(&dupcall->packet.md, flow);
        flow_extract(&dupcall->packet, flow);

        error = process_upcall(udpif, upcall,
                               &upcall->odp_actions, &upcall->wc);
        if (error) {
            goto cleanup;
        }

        n_upcalls++;
        continue;

cleanup:
        upcall_uninit(upcall);
free_dupcall:
        dp_packet_uninit(&dupcall->packet);
        ofpbuf_uninit(recv_buf);
    }

    if (n_upcalls) {
        handle_upcalls(handler->udpif, upcalls, n_upcalls);
        for (i = 0; i < n_upcalls; i++) {
            dp_packet_uninit(&dupcalls[i].packet);
            ofpbuf_uninit(&recv_bufs[i]);
            upcall_uninit(&upcalls[i]);
        }
    }

    return n_upcalls;
}
static void
udpif_run_flow_rebalance(struct udpif *udpif)
{
    long long int now = 0;

    /* Don't rebalance if OFFL_REBAL_INTVL_MSEC have not elapsed */
    now = time_msec();
    if (now < udpif->offload_rebalance_time + OFFL_REBAL_INTVL_MSEC) {
        return;
    }

    /* Don't rebalance if no netdev is out of resources (OOR). */
    if (!netdev_any_oor()) {
        return;
    }

    VLOG_DBG("Offload rebalance: Found OOR netdevs");
    udpif->offload_rebalance_time = now;
    udpif_flow_rebalance(udpif);
}
static void *
udpif_revalidator(void *arg)
{
    /* Used by all revalidators. */
    struct revalidator *revalidator = arg;
    struct udpif *udpif = revalidator->udpif;
    bool leader = revalidator == &udpif->revalidators[0];

    /* Used only by the leader. */
    long long int start_time = 0;
    uint64_t last_reval_seq = 0;
    size_t n_flows = 0;

    revalidator->id = ovsthread_id_self();
    for (;;) {
        if (leader) {
            uint64_t reval_seq;

            recirc_run(); /* Recirculation cleanup. */

            reval_seq = seq_read(udpif->reval_seq);
            last_reval_seq = reval_seq;

            n_flows = udpif_get_n_flows(udpif);
            udpif->max_n_flows = MAX(n_flows, udpif->max_n_flows);
            udpif->avg_n_flows = (udpif->avg_n_flows + n_flows) / 2;

            /* Only the leader checks the pause latch to prevent a race where
             * some threads think it's false and proceed to block on
             * reval_barrier and others think it's true and block indefinitely
             * on the pause_barrier */
            udpif->pause = latch_is_set(&udpif->pause_latch);

            /* Only the leader checks the exit latch to prevent a race where
             * some threads think it's true and exit and others think it's
             * false and block indefinitely on the reval_barrier */
            udpif->reval_exit = latch_is_set(&udpif->exit_latch);

            start_time = time_msec();
            if (!udpif->reval_exit) {
                bool terse_dump;

                terse_dump = udpif_use_ufid(udpif);
                udpif->dump = dpif_flow_dump_create(udpif->dpif, terse_dump,
                                                    NULL);
            }
        }

        /* Wait for the leader to start the flow dump. */
        ovs_barrier_block(&udpif->reval_barrier);
        if (udpif->pause) {
            revalidator_pause(revalidator);
        }

        if (udpif->reval_exit) {
            break;
        }
        revalidate(revalidator);

        /* Wait for all flows to have been dumped before we garbage collect. */
        ovs_barrier_block(&udpif->reval_barrier);
        revalidator_sweep(revalidator);

        /* Wait for all revalidators to finish garbage collection. */
        ovs_barrier_block(&udpif->reval_barrier);

        if (leader) {
            unsigned int flow_limit;
            long long int duration;

            atomic_read_relaxed(&udpif->flow_limit, &flow_limit);

            dpif_flow_dump_destroy(udpif->dump);
            seq_change(udpif->dump_seq);
            if (netdev_is_offload_rebalance_policy_enabled()) {
                udpif_run_flow_rebalance(udpif);
            }

            duration = MAX(time_msec() - start_time, 1);
            udpif->dump_duration = duration;
            if (duration > 2000) {
                flow_limit /= duration / 1000;
            } else if (duration > 1300) {
                flow_limit = flow_limit * 3 / 4;
            } else if (duration < 1000 &&
                       flow_limit < n_flows * 1000 / duration) {
                flow_limit += 1000;
            }
            flow_limit = MIN(ofproto_flow_limit, MAX(flow_limit, 1000));
            atomic_store_relaxed(&udpif->flow_limit, flow_limit);

            if (duration > 2000) {
                VLOG_INFO("Spent an unreasonably long %lldms dumping flows",
                          duration);
            }

            poll_timer_wait_until(start_time + MIN(ofproto_max_idle,
                                                   ofproto_max_revalidator));
            seq_wait(udpif->reval_seq, last_reval_seq);
            latch_wait(&udpif->exit_latch);
            latch_wait(&udpif->pause_latch);
            poll_block();

            if (!latch_is_set(&udpif->pause_latch) &&
                !latch_is_set(&udpif->exit_latch)) {
                long long int now = time_msec();
                /* Block again if we are woken up within 5ms of the last start
                 * time. */
                start_time += 5;

                if (now < start_time) {
                    poll_timer_wait_until(start_time);
                    latch_wait(&udpif->exit_latch);
                    latch_wait(&udpif->pause_latch);
                    poll_block();
                }
            }
        }
    }

    return NULL;
}
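/* Worked example of the flow_limit adaptation above: if a dump takes 4000 ms,
 * flow_limit is divided by 4; at 1500 ms it is scaled to 3/4; and while dumps
 * finish in under a second the limit grows by 1000 per round (when the
 * datapath holds enough flows to justify it), always clamped between 1000
 * and ofproto_flow_limit. */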
static enum upcall_type
classify_upcall(enum dpif_upcall_type type, const struct nlattr *userdata,
                struct user_action_cookie *cookie)
{
    /* First look at the upcall type. */
    switch (type) {
    case DPIF_UC_ACTION:
        break;

    case DPIF_UC_MISS:
        return MISS_UPCALL;

    case DPIF_N_UC_TYPES:
    default:
        VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, type);
        return BAD_UPCALL;
    }

    /* "action" upcalls need a closer look. */
    if (!userdata) {
        VLOG_WARN_RL(&rl, "action upcall missing cookie");
        return BAD_UPCALL;
    }

    size_t userdata_len = nl_attr_get_size(userdata);
    if (userdata_len != sizeof *cookie) {
        VLOG_WARN_RL(&rl, "action upcall cookie has unexpected size %"PRIuSIZE,
                     userdata_len);
        return BAD_UPCALL;
    }
    memcpy(cookie, nl_attr_get(userdata), sizeof *cookie);
    if (cookie->type == USER_ACTION_COOKIE_SFLOW) {
        return SFLOW_UPCALL;
    } else if (cookie->type == USER_ACTION_COOKIE_SLOW_PATH) {
        return SLOW_PATH_UPCALL;
    } else if (cookie->type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
        return FLOW_SAMPLE_UPCALL;
    } else if (cookie->type == USER_ACTION_COOKIE_IPFIX) {
        return IPFIX_UPCALL;
    } else if (cookie->type == USER_ACTION_COOKIE_CONTROLLER) {
        return CONTROLLER_UPCALL;
    } else {
        VLOG_WARN_RL(&rl, "invalid user cookie of type %"PRIu16
                     " and size %"PRIuSIZE, cookie->type, userdata_len);
        return BAD_UPCALL;
    }
}
/* Calculates slow path actions for 'xout'. 'buf' must statically be
 * initialized with at least 128 bytes of space. */
static void
compose_slow_path(struct udpif *udpif, struct xlate_out *xout,
                  odp_port_t odp_in_port, ofp_port_t ofp_in_port,
                  struct ofpbuf *buf, uint32_t meter_id,
                  struct uuid *ofproto_uuid)
{
    struct user_action_cookie cookie;
    odp_port_t port;
    uint32_t pid;

    memset(&cookie, 0, sizeof cookie);
    cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
    cookie.ofp_in_port = ofp_in_port;
    cookie.ofproto_uuid = *ofproto_uuid;
    cookie.slow_path.reason = xout->slow;

    port = xout->slow & (SLOW_CFM | SLOW_BFD | SLOW_LACP | SLOW_STP)
           ? ODPP_NONE
           : odp_in_port;
    pid = dpif_port_get_pid(udpif->dpif, port);

    size_t offset;
    size_t ac_offset;
    if (meter_id != UINT32_MAX) {
        /* If slowpath meter is configured, generate clone(meter, userspace)
         * action. */
        offset = nl_msg_start_nested(buf, OVS_ACTION_ATTR_SAMPLE);
        nl_msg_put_u32(buf, OVS_SAMPLE_ATTR_PROBABILITY, UINT32_MAX);
        ac_offset = nl_msg_start_nested(buf, OVS_SAMPLE_ATTR_ACTIONS);
        nl_msg_put_u32(buf, OVS_ACTION_ATTR_METER, meter_id);
    }

    odp_put_userspace_action(pid, &cookie, sizeof cookie,
                             ODPP_NONE, false, buf, NULL);

    if (meter_id != UINT32_MAX) {
        nl_msg_end_nested(buf, ac_offset);
        nl_msg_end_nested(buf, offset);
    }
}
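/* The resulting netlink actions are, schematically:
 *
 *   with a slowpath meter:  sample(probability=UINT32_MAX,
 *                                  actions(meter(meter_id), userspace(...)))
 *   without a meter:        userspace(...)
 *
 * i.e. the sample() wrapper with 100% probability plays the role of the
 * "clone" mentioned above, so the packet is metered before it is sent to
 * userspace. */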
/* If there is no error, the upcall must be destroyed with upcall_uninit()
 * before quiescing, as the referred objects are guaranteed to exist only
 * until the calling thread quiesces. Otherwise, do not call upcall_uninit()
 * since the 'upcall->put_actions' remains uninitialized. */
static int
upcall_receive(struct upcall *upcall, const struct dpif_backer *backer,
               const struct dp_packet *packet, enum dpif_upcall_type type,
               const struct nlattr *userdata, const struct flow *flow,
               const unsigned int mru,
               const ovs_u128 *ufid, const unsigned pmd_id)
{
    int error;

    upcall->type = classify_upcall(type, userdata, &upcall->cookie);
    if (upcall->type == BAD_UPCALL) {
        return EAGAIN;
    } else if (upcall->type == MISS_UPCALL) {
        error = xlate_lookup(backer, flow, &upcall->ofproto, &upcall->ipfix,
                             &upcall->sflow, NULL, &upcall->ofp_in_port);
        if (error) {
            return error;
        }
    } else {
        struct ofproto_dpif *ofproto
            = ofproto_dpif_lookup_by_uuid(&upcall->cookie.ofproto_uuid);
        if (!ofproto) {
            VLOG_INFO_RL(&rl, "upcall could not find ofproto");
            return ENODEV;
        }
        upcall->ofproto = ofproto;
        upcall->ipfix = ofproto->ipfix;
        upcall->sflow = ofproto->sflow;
        upcall->ofp_in_port = upcall->cookie.ofp_in_port;
    }

    upcall->recirc = NULL;
    upcall->have_recirc_ref = false;
    upcall->flow = flow;
    upcall->packet = packet;
    upcall->ufid = ufid;
    upcall->pmd_id = pmd_id;
    ofpbuf_use_stub(&upcall->odp_actions, upcall->odp_actions_stub,
                    sizeof upcall->odp_actions_stub);
    ofpbuf_init(&upcall->put_actions, 0);

    upcall->xout_initialized = false;
    upcall->ukey_persists = false;

    upcall->ukey = NULL;
    upcall->key = NULL;
    upcall->key_len = 0;
    upcall->mru = mru;

    upcall->out_tun_key = NULL;
    upcall->actions = NULL;

    return 0;
}
static void
upcall_xlate(struct udpif *udpif, struct upcall *upcall,
             struct ofpbuf *odp_actions, struct flow_wildcards *wc)
{
    struct dpif_flow_stats stats;
    enum xlate_error xerr;
    struct xlate_in xin;

    stats.n_packets = 1;
    stats.n_bytes = dp_packet_size(upcall->packet);
    stats.used = time_msec();
    stats.tcp_flags = ntohs(upcall->flow->tcp_flags);

    xlate_in_init(&xin, upcall->ofproto,
                  ofproto_dpif_get_tables_version(upcall->ofproto),
                  upcall->flow, upcall->ofp_in_port, NULL,
                  stats.tcp_flags, upcall->packet, wc, odp_actions);

    if (upcall->type == MISS_UPCALL) {
        xin.resubmit_stats = &stats;

        if (xin.frozen_state) {
            /* We may install a datapath flow only if we get a reference to the
             * recirculation context (otherwise we could have recirculation
             * upcalls using recirculation ID for which no context can be
             * found). We may still execute the flow's actions even if we
             * don't install the flow. */
            upcall->recirc = recirc_id_node_from_state(xin.frozen_state);
            upcall->have_recirc_ref = recirc_id_node_try_ref_rcu(upcall->recirc);
        }
    } else {
        /* For non-miss upcalls, we are either executing actions (one of which
         * is an userspace action) for an upcall, in which case the stats have
         * already been taken care of, or there's a flow in the datapath which
         * this packet was accounted to. Presumably the revalidators will deal
         * with pushing its stats eventually. */
    }

    upcall->reval_seq = seq_read(udpif->reval_seq);

    xerr = xlate_actions(&xin, &upcall->xout);

    /* Translate again and log the ofproto trace for
     * these two error types. */
    if (xerr == XLATE_RECURSION_TOO_DEEP ||
        xerr == XLATE_TOO_MANY_RESUBMITS) {
        static struct vlog_rate_limit rll = VLOG_RATE_LIMIT_INIT(1, 1);

        /* This is a huge log, so be conservative. */
        if (!VLOG_DROP_WARN(&rll)) {
            struct ds output = DS_EMPTY_INITIALIZER;

            ofproto_trace(upcall->ofproto, upcall->flow,
                          upcall->packet, NULL, 0, NULL, &output);
            VLOG_WARN("%s", ds_cstr(&output));
            ds_destroy(&output);
        }
    }

    if (wc) {
        /* Convert the input port wildcard from OFP to ODP format. There's no
         * real way to do this for arbitrary bitmasks since the numbering spaces
         * aren't the same. However, flow translation always exact matches the
         * whole thing, so we can do the same here. */
        WC_MASK_FIELD(wc, in_port.odp_port);
    }

    upcall->xout_initialized = true;

    if (upcall->fitness == ODP_FIT_TOO_LITTLE) {
        upcall->xout.slow |= SLOW_MATCH;
    }
    if (!upcall->xout.slow) {
        ofpbuf_use_const(&upcall->put_actions,
                         odp_actions->data, odp_actions->size);
    } else {
        /* upcall->put_actions already initialized by upcall_receive(). */
        compose_slow_path(udpif, &upcall->xout,
                          upcall->flow->in_port.odp_port, upcall->ofp_in_port,
                          &upcall->put_actions,
                          upcall->ofproto->up.slowpath_meter_id,
                          &upcall->ofproto->uuid);
    }

    /* This function is also called for slow-pathed flows. As we are only
     * going to create new datapath flows for actual datapath misses, there is
     * no point in creating a ukey otherwise. */
    if (upcall->type == MISS_UPCALL) {
        upcall->ukey = ukey_create_from_upcall(upcall, wc);
    }
}
static void
upcall_uninit(struct upcall *upcall)
{
    if (upcall) {
        if (upcall->xout_initialized) {
            xlate_out_uninit(&upcall->xout);
        }
        ofpbuf_uninit(&upcall->odp_actions);
        ofpbuf_uninit(&upcall->put_actions);
        if (upcall->ukey) {
            if (!upcall->ukey_persists) {
                ukey_delete__(upcall->ukey);
            }
        } else if (upcall->have_recirc_ref) {
            /* The reference was transferred to the ukey if one was created. */
            recirc_id_node_unref(upcall->recirc);
        }
    }
}
/* If there are fewer flows than the limit, and this is a miss upcall which
 *
 *      - Has no recirc_id, OR
 *      - Has a recirc_id and we can get a reference on the recirc ctx,
 *
 * Then we should install the flow (true). Otherwise, return false. */
static bool
should_install_flow(struct udpif *udpif, struct upcall *upcall)
{
    unsigned int flow_limit;

    if (upcall->type != MISS_UPCALL) {
        return false;
    } else if (upcall->recirc && !upcall->have_recirc_ref) {
        VLOG_DBG_RL(&rl, "upcall: no reference for recirc flow");
        return false;
    }

    atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
    if (udpif_get_n_flows(udpif) >= flow_limit) {
        COVERAGE_INC(upcall_flow_limit_hit);
        VLOG_WARN_RL(&rl,
                     "upcall: datapath reached the dynamic limit of %u flows.",
                     flow_limit);
        return false;
    }

    return true;
}
static int
upcall_cb(const struct dp_packet *packet, const struct flow *flow, ovs_u128 *ufid,
          unsigned pmd_id, enum dpif_upcall_type type,
          const struct nlattr *userdata, struct ofpbuf *actions,
          struct flow_wildcards *wc, struct ofpbuf *put_actions, void *aux)
{
    struct udpif *udpif = aux;
    struct upcall upcall;
    bool megaflow;
    int error;

    atomic_read_relaxed(&enable_megaflows, &megaflow);

    error = upcall_receive(&upcall, udpif->backer, packet, type, userdata,
                           flow, 0, ufid, pmd_id);
    if (error) {
        return error;
    }

    upcall.fitness = ODP_FIT_PERFECT;
    error = process_upcall(udpif, &upcall, actions, wc);
    if (error) {
        goto out;
    }

    if (upcall.xout.slow && put_actions) {
        ofpbuf_put(put_actions, upcall.put_actions.data,
                   upcall.put_actions.size);
    }

    if (OVS_UNLIKELY(!megaflow && wc)) {
        flow_wildcards_init_for_packet(wc, flow);
    }

    if (!should_install_flow(udpif, &upcall)) {
        error = ENOSPC;
        goto out;
    }

    if (upcall.ukey && !ukey_install(udpif, upcall.ukey)) {
        static struct vlog_rate_limit rll = VLOG_RATE_LIMIT_INIT(1, 1);
        VLOG_WARN_RL(&rll, "upcall_cb failure: ukey installation fails");
        error = ENOSPC;
    }
out:
    if (!error) {
        upcall.ukey_persists = true;
    }
    upcall_uninit(&upcall);
    return error;
}
static size_t
dpif_get_actions(struct udpif *udpif, struct upcall *upcall,
                 const struct nlattr **actions)
{
    size_t actions_len = 0;

    if (upcall->actions) {
        /* Actions were passed up from datapath. */
        *actions = nl_attr_get(upcall->actions);
        actions_len = nl_attr_get_size(upcall->actions);
    }

    if (actions_len == 0) {
        /* Lookup actions in userspace cache. */
        struct udpif_key *ukey = ukey_lookup(udpif, upcall->ufid,
                                             upcall->pmd_id);
        if (ukey) {
            ukey_get_actions(ukey, actions, &actions_len);
        }
    }

    return actions_len;
}
static size_t
dpif_read_actions(struct udpif *udpif, struct upcall *upcall,
                  const struct flow *flow, enum upcall_type type,
                  void *upcall_data)
{
    const struct nlattr *actions = NULL;
    size_t actions_len = dpif_get_actions(udpif, upcall, &actions);

    if (!actions || !actions_len) {
        return 0;
    }

    switch (type) {
    case SFLOW_UPCALL:
        dpif_sflow_read_actions(flow, actions, actions_len, upcall_data, true);
        break;
    case FLOW_SAMPLE_UPCALL:
    case IPFIX_UPCALL:
        dpif_ipfix_read_actions(flow, actions, actions_len, upcall_data);
        break;
    case BAD_UPCALL:
    case MISS_UPCALL:
    case SLOW_PATH_UPCALL:
    case CONTROLLER_UPCALL:
    default:
        break;
    }

    return actions_len;
}
static int
process_upcall(struct udpif *udpif, struct upcall *upcall,
               struct ofpbuf *odp_actions, struct flow_wildcards *wc)
{
    const struct dp_packet *packet = upcall->packet;
    const struct flow *flow = upcall->flow;
    size_t actions_len = 0;

    switch (upcall->type) {
    case MISS_UPCALL:
    case SLOW_PATH_UPCALL:
        upcall_xlate(udpif, upcall, odp_actions, wc);
        return 0;

    case SFLOW_UPCALL:
        if (upcall->sflow) {
            struct dpif_sflow_actions sflow_actions;

            memset(&sflow_actions, 0, sizeof sflow_actions);

            actions_len = dpif_read_actions(udpif, upcall, flow,
                                            upcall->type, &sflow_actions);
            dpif_sflow_received(upcall->sflow, packet, flow,
                                flow->in_port.odp_port, &upcall->cookie,
                                actions_len > 0 ? &sflow_actions : NULL);
        }
        break;

    case IPFIX_UPCALL:
    case FLOW_SAMPLE_UPCALL:
        if (upcall->ipfix) {
            struct flow_tnl output_tunnel_key;
            struct dpif_ipfix_actions ipfix_actions;

            memset(&ipfix_actions, 0, sizeof ipfix_actions);

            if (upcall->out_tun_key) {
                odp_tun_key_from_attr(upcall->out_tun_key, &output_tunnel_key,
                                      NULL);
            }

            actions_len = dpif_read_actions(udpif, upcall, flow,
                                            upcall->type, &ipfix_actions);
            if (upcall->type == IPFIX_UPCALL) {
                dpif_ipfix_bridge_sample(upcall->ipfix, packet, flow,
                                         flow->in_port.odp_port,
                                         upcall->cookie.ipfix.output_odp_port,
                                         upcall->out_tun_key ?
                                             &output_tunnel_key : NULL,
                                         actions_len > 0 ?
                                             &ipfix_actions : NULL);
            } else {
                /* The flow reflects exactly the contents of the packet.
                 * Sample the packet using it. */
                dpif_ipfix_flow_sample(upcall->ipfix, packet, flow,
                                       &upcall->cookie, flow->in_port.odp_port,
                                       upcall->out_tun_key ?
                                           &output_tunnel_key : NULL,
                                       actions_len > 0 ? &ipfix_actions : NULL);
            }
        }
        break;

    case CONTROLLER_UPCALL:
        {
            struct user_action_cookie *cookie = &upcall->cookie;

            if (cookie->controller.dont_send) {
                return 0;
            }

            uint32_t recirc_id = cookie->controller.recirc_id;
            if (!recirc_id) {
                break;
            }

            const struct recirc_id_node *recirc_node
                = recirc_id_node_find(recirc_id);
            if (!recirc_node) {
                break;
            }

            const struct frozen_state *state = &recirc_node->state;

            struct ofproto_async_msg *am = xmalloc(sizeof *am);
            *am = (struct ofproto_async_msg) {
                .controller_id = cookie->controller.controller_id,
                .oam = OAM_PACKET_IN,
                .pin = {
                    .up = {
                        .base = {
                            .packet = xmemdup(dp_packet_data(packet),
                                              dp_packet_size(packet)),
                            .packet_len = dp_packet_size(packet),
                            .reason = cookie->controller.reason,
                            .table_id = state->table_id,
                            .cookie = get_32aligned_be64(
                                &cookie->controller.rule_cookie),
                            .userdata = (recirc_node->state.userdata_len
                                     ? xmemdup(recirc_node->state.userdata,
                                               recirc_node->state.userdata_len)
                                     : NULL),
                            .userdata_len = recirc_node->state.userdata_len,
                        },
                    },
                    .max_len = cookie->controller.max_len,
                },
            };

            if (cookie->controller.continuation) {
                am->pin.up.stack = (state->stack_size
                                    ? xmemdup(state->stack, state->stack_size)
                                    : NULL);
                am->pin.up.stack_size = state->stack_size;
                am->pin.up.mirrors = state->mirrors;
                am->pin.up.conntracked = state->conntracked;
                am->pin.up.actions = (state->ofpacts_len
                                      ? xmemdup(state->ofpacts,
                                                state->ofpacts_len) : NULL);
                am->pin.up.actions_len = state->ofpacts_len;
                am->pin.up.action_set = (state->action_set_len
                                         ? xmemdup(state->action_set,
                                                   state->action_set_len)
                                         : NULL);
                am->pin.up.action_set_len = state->action_set_len;
                am->pin.up.bridge = upcall->ofproto->uuid;
                am->pin.up.odp_port = upcall->packet->md.in_port.odp_port;
            }

            /* We don't want to use the upcall 'flow', since it may be
             * more specific than the point at which the "controller"
             * action was specified. */
            struct flow frozen_flow;

            frozen_flow = *flow;
            if (!state->conntracked) {
                flow_clear_conntrack(&frozen_flow);
            }

            frozen_metadata_to_flow(&upcall->ofproto->up, &state->metadata,
                                    &frozen_flow);
            flow_get_metadata(&frozen_flow, &am->pin.up.base.flow_metadata);

            ofproto_dpif_send_async_msg(upcall->ofproto, am);
        }
        break;

    case BAD_UPCALL:
        break;
    }

    return EAGAIN;
}
static void
handle_upcalls(struct udpif *udpif, struct upcall *upcalls,
               size_t n_upcalls)
{
    struct dpif_op *opsp[UPCALL_MAX_BATCH * 2];
    struct ukey_op ops[UPCALL_MAX_BATCH * 2];
    size_t n_ops, n_opsp, i;

    /* Handle the packets individually in order of arrival.
     *
     *   - For SLOW_CFM, SLOW_LACP, SLOW_STP, SLOW_BFD, and SLOW_LLDP,
     *     translation is what processes received packets for these
     *     protocols.
     *
     *   - For SLOW_ACTION, translation executes the actions directly.
     *
     * The loop fills 'ops' with an array of operations to execute in the
     * datapath. */
    n_ops = 0;
    for (i = 0; i < n_upcalls; i++) {
        struct upcall *upcall = &upcalls[i];
        const struct dp_packet *packet = upcall->packet;
        struct ukey_op *op;

        if (should_install_flow(udpif, upcall)) {
            struct udpif_key *ukey = upcall->ukey;

            if (ukey_install(udpif, ukey)) {
                upcall->ukey_persists = true;
                put_op_init(&ops[n_ops++], ukey, DPIF_FP_CREATE);
            }
        }

        if (upcall->odp_actions.size) {
            op = &ops[n_ops++];
            op->ukey = NULL;
            op->dop.type = DPIF_OP_EXECUTE;
            op->dop.execute.packet = CONST_CAST(struct dp_packet *, packet);
            op->dop.execute.flow = upcall->flow;
            odp_key_to_dp_packet(upcall->key, upcall->key_len,
                                 op->dop.execute.packet);
            op->dop.execute.actions = upcall->odp_actions.data;
            op->dop.execute.actions_len = upcall->odp_actions.size;
            op->dop.execute.needs_help = (upcall->xout.slow & SLOW_ACTION) != 0;
            op->dop.execute.probe = false;
            op->dop.execute.mtu = upcall->mru;
            op->dop.execute.hash = upcall->hash;
        }
    }

    /* Execute batch. */
    n_opsp = 0;
    for (i = 0; i < n_ops; i++) {
        opsp[n_opsp++] = &ops[i].dop;
    }
    dpif_operate(udpif->dpif, opsp, n_opsp, DPIF_OFFLOAD_AUTO);
    for (i = 0; i < n_ops; i++) {
        struct udpif_key *ukey = ops[i].ukey;

        if (ukey) {
            ovs_mutex_lock(&ukey->mutex);
            if (ops[i].dop.error) {
                transition_ukey(ukey, UKEY_EVICTED);
            } else if (ukey->state < UKEY_OPERATIONAL) {
                transition_ukey(ukey, UKEY_OPERATIONAL);
            }
            ovs_mutex_unlock(&ukey->mutex);
        }
    }
}
static uint32_t
get_ukey_hash(const ovs_u128 *ufid, const unsigned pmd_id)
{
    return hash_2words(ufid->u32[0], pmd_id);
}

static struct udpif_key *
ukey_lookup(struct udpif *udpif, const ovs_u128 *ufid, const unsigned pmd_id)
{
    struct udpif_key *ukey;
    int idx = get_ukey_hash(ufid, pmd_id) % N_UMAPS;
    struct cmap *cmap = &udpif->ukeys[idx].cmap;

    CMAP_FOR_EACH_WITH_HASH (ukey, cmap_node,
                             get_ukey_hash(ufid, pmd_id), cmap) {
        if (ovs_u128_equals(ukey->ufid, *ufid)) {
            return ukey;
        }
    }
    return NULL;
}
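/* Only the first 32 bits of the UFID feed the hash above, so distinct UFIDs
 * can share a bucket and a hash value; ukey_lookup() therefore confirms each
 * candidate with a full ovs_u128_equals() comparison. */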
/* Provides safe lockless access of RCU protected 'ukey->actions'. Callers may
 * alternatively access the field directly if they take 'ukey->mutex'. */
static void
ukey_get_actions(struct udpif_key *ukey, const struct nlattr **actions,
                 size_t *size)
{
    const struct ofpbuf *buf = ovsrcu_get(struct ofpbuf *, &ukey->actions);
    *actions = buf->data;
    *size = buf->size;
}

static void
ukey_set_actions(struct udpif_key *ukey, const struct ofpbuf *actions)
{
    struct ofpbuf *old_actions = ovsrcu_get_protected(struct ofpbuf *,
                                                      &ukey->actions);
    if (old_actions) {
        ovsrcu_postpone(ofpbuf_delete, old_actions);
    }

    ovsrcu_set(&ukey->actions, ofpbuf_clone(actions));
}
static struct udpif_key *
ukey_create__(const struct nlattr *key, size_t key_len,
              const struct nlattr *mask, size_t mask_len,
              bool ufid_present, const ovs_u128 *ufid,
              const unsigned pmd_id, const struct ofpbuf *actions,
              uint64_t reval_seq, long long int used,
              uint32_t key_recirc_id, struct xlate_out *xout)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct udpif_key *ukey = xmalloc(sizeof *ukey);

    memcpy(&ukey->keybuf, key, key_len);
    ukey->key = &ukey->keybuf.nla;
    ukey->key_len = key_len;
    memcpy(&ukey->maskbuf, mask, mask_len);
    ukey->mask = &ukey->maskbuf.nla;
    ukey->mask_len = mask_len;
    ukey->ufid_present = ufid_present;
    ukey->ufid = *ufid;
    ukey->pmd_id = pmd_id;
    ukey->hash = get_ukey_hash(&ukey->ufid, pmd_id);

    ovsrcu_init(&ukey->actions, NULL);
    ukey_set_actions(ukey, actions);

    ovs_mutex_init(&ukey->mutex);
    ukey->dump_seq = 0;     /* Not yet dumped */
    ukey->reval_seq = reval_seq;
    ukey->state = UKEY_CREATED;
    ukey->state_thread = ovsthread_id_self();
    ukey->state_where = OVS_SOURCE_LOCATOR;
    ukey->created = ukey->flow_time = time_msec();
    memset(&ukey->stats, 0, sizeof ukey->stats);
    ukey->stats.used = used;
    ukey->xcache = NULL;

    ukey->offloaded = false;
    ukey->in_netdev = NULL;
    ukey->flow_packets = ukey->flow_backlog_packets = 0;

    ukey->key_recirc_id = key_recirc_id;
    recirc_refs_init(&ukey->recircs);
    if (xout) {
        /* Take ownership of the action recirc id references. */
        recirc_refs_swap(&ukey->recircs, &xout->recircs);
    }

    return ukey;
}
*
1724 ukey_create_from_upcall(struct upcall
*upcall
, struct flow_wildcards
*wc
)
1726 struct odputil_keybuf keystub
, maskstub
;
1727 struct ofpbuf keybuf
, maskbuf
;
1729 struct odp_flow_key_parms odp_parms
= {
1730 .flow
= upcall
->flow
,
1731 .mask
= wc
? &wc
->masks
: NULL
,
1734 odp_parms
.support
= upcall
->ofproto
->backer
->rt_support
.odp
;
1735 if (upcall
->key_len
) {
1736 ofpbuf_use_const(&keybuf
, upcall
->key
, upcall
->key_len
);
1738 /* dpif-netdev doesn't provide a netlink-formatted flow key in the
1739 * upcall, so convert the upcall's flow here. */
1740 ofpbuf_use_stack(&keybuf
, &keystub
, sizeof keystub
);
1741 odp_flow_key_from_flow(&odp_parms
, &keybuf
);
1744 atomic_read_relaxed(&enable_megaflows
, &megaflow
);
1745 ofpbuf_use_stack(&maskbuf
, &maskstub
, sizeof maskstub
);
1746 if (megaflow
&& wc
) {
1747 odp_parms
.key_buf
= &keybuf
;
1748 odp_flow_key_from_mask(&odp_parms
, &maskbuf
);
1751 return ukey_create__(keybuf
.data
, keybuf
.size
, maskbuf
.data
, maskbuf
.size
,
1752 true, upcall
->ufid
, upcall
->pmd_id
,
1753 &upcall
->put_actions
, upcall
->reval_seq
, 0,
1754 upcall
->have_recirc_ref
? upcall
->recirc
->id
: 0,
static int
ukey_create_from_dpif_flow(const struct udpif *udpif,
                           const struct dpif_flow *flow,
                           struct udpif_key **ukey)
{
    struct dpif_flow full_flow;
    struct ofpbuf actions;
    uint64_t reval_seq;
    uint64_t stub[DPIF_FLOW_BUFSIZE / 8];
    const struct nlattr *a;
    unsigned int left;

    if (!flow->key_len || !flow->actions_len) {
        struct ofpbuf buf;
        int err;

        /* If the key or actions were not provided by the datapath, fetch the
         * full flow. */
        ofpbuf_use_stack(&buf, &stub, sizeof stub);
        err = dpif_flow_get(udpif->dpif, flow->key, flow->key_len,
                            flow->ufid_present ? &flow->ufid : NULL,
                            flow->pmd_id, &buf, &full_flow);
        if (err) {
            return err;
        }
        flow = &full_flow;
    }

    /* Check the flow actions for recirculation action. As recirculation
     * relies on OVS userspace internal state, we need to delete all old
     * datapath flows with either a non-zero recirc_id in the key, or any
     * recirculation actions upon OVS restart. */
    NL_ATTR_FOR_EACH (a, left, flow->key, flow->key_len) {
        if (nl_attr_type(a) == OVS_KEY_ATTR_RECIRC_ID
            && nl_attr_get_u32(a) != 0) {
            return EINVAL;
        }
    }
    NL_ATTR_FOR_EACH (a, left, flow->actions, flow->actions_len) {
        if (nl_attr_type(a) == OVS_ACTION_ATTR_RECIRC) {
            return EINVAL;
        }
    }

    reval_seq = seq_read(udpif->reval_seq) - 1; /* Ensure revalidation. */
    ofpbuf_use_const(&actions, flow->actions, flow->actions_len);
    *ukey = ukey_create__(flow->key, flow->key_len,
                          flow->mask, flow->mask_len, flow->ufid_present,
                          &flow->ufid, flow->pmd_id, &actions,
                          reval_seq, flow->stats.used, 0, NULL);

    return 0;
}
static bool
try_ukey_replace(struct umap *umap, struct udpif_key *old_ukey,
                 struct udpif_key *new_ukey)
    OVS_REQUIRES(umap->mutex)
    OVS_TRY_LOCK(true, new_ukey->mutex)
{
    bool replaced = false;

    if (!ovs_mutex_trylock(&old_ukey->mutex)) {
        if (old_ukey->state == UKEY_EVICTED) {
            /* The flow was deleted during the current revalidator dump,
             * but its ukey won't be fully cleaned up until the sweep phase.
             * In the mean time, we are receiving upcalls for this traffic.
             * Expedite the (new) flow install by replacing the ukey. */
            ovs_mutex_lock(&new_ukey->mutex);
            cmap_replace(&umap->cmap, &old_ukey->cmap_node,
                         &new_ukey->cmap_node, new_ukey->hash);
            ovsrcu_postpone(ukey_delete__, old_ukey);
            transition_ukey(old_ukey, UKEY_DELETED);
            transition_ukey(new_ukey, UKEY_VISIBLE);
            replaced = true;
        }
        ovs_mutex_unlock(&old_ukey->mutex);
    }

    if (replaced) {
        COVERAGE_INC(upcall_ukey_replace);
    } else {
        COVERAGE_INC(handler_duplicate_upcall);
    }
    return replaced;
}
/* Attempts to insert a ukey into the shared ukey maps.
 *
 * On success, returns true, installs the ukey and returns it in a locked
 * state. Otherwise, returns false. */
static bool
ukey_install__(struct udpif *udpif, struct udpif_key *new_ukey)
    OVS_TRY_LOCK(true, new_ukey->mutex)
{
    struct umap *umap;
    struct udpif_key *old_ukey;
    uint32_t idx;
    bool locked = false;

    idx = new_ukey->hash % N_UMAPS;
    umap = &udpif->ukeys[idx];
    ovs_mutex_lock(&umap->mutex);
    old_ukey = ukey_lookup(udpif, &new_ukey->ufid, new_ukey->pmd_id);
    if (old_ukey) {
        /* Uncommon case: A ukey is already installed with the same UFID. */
        if (old_ukey->key_len == new_ukey->key_len
            && !memcmp(old_ukey->key, new_ukey->key, new_ukey->key_len)) {
            locked = try_ukey_replace(umap, old_ukey, new_ukey);
        } else {
            struct ds ds = DS_EMPTY_INITIALIZER;

            odp_format_ufid(&old_ukey->ufid, &ds);
            ds_put_cstr(&ds, " ");
            odp_flow_key_format(old_ukey->key, old_ukey->key_len, &ds);
            ds_put_cstr(&ds, "\n");
            odp_format_ufid(&new_ukey->ufid, &ds);
            ds_put_cstr(&ds, " ");
            odp_flow_key_format(new_ukey->key, new_ukey->key_len, &ds);

            VLOG_WARN_RL(&rl, "Conflicting ukey for flows:\n%s", ds_cstr(&ds));
            ds_destroy(&ds);
        }
    } else {
        ovs_mutex_lock(&new_ukey->mutex);
        cmap_insert(&umap->cmap, &new_ukey->cmap_node, new_ukey->hash);
        transition_ukey(new_ukey, UKEY_VISIBLE);
        locked = true;
    }
    ovs_mutex_unlock(&umap->mutex);

    return locked;
}
static void
transition_ukey_at(struct udpif_key *ukey, enum ukey_state dst,
                   const char *where)
    OVS_REQUIRES(ukey->mutex)
{
    if (dst < ukey->state) {
        VLOG_ABORT("Invalid ukey transition %d->%d (last transitioned from "
                   "thread %u at %s)", ukey->state, dst, ukey->state_thread,
                   ukey->state_where);
    }
    if (ukey->state == dst && dst == UKEY_OPERATIONAL) {
        return;
    }

    /* Valid state transitions:
     * UKEY_CREATED -> UKEY_VISIBLE
     *  Ukey is now visible in the umap.
     * UKEY_VISIBLE -> UKEY_OPERATIONAL
     *  A handler has installed the flow, and the flow is in the datapath.
     * UKEY_VISIBLE -> UKEY_EVICTING
     *  A handler installs the flow, then revalidator sweeps the ukey before
     *  the flow is dumped. Most likely the flow was installed; start trying
     *  to delete it.
     * UKEY_VISIBLE -> UKEY_EVICTED
     *  A handler attempts to install the flow, but the datapath rejects it.
     *  Consider that the datapath has already destroyed it.
     * UKEY_OPERATIONAL -> UKEY_EVICTING
     *  A revalidator decides to evict the datapath flow.
     * UKEY_EVICTING -> UKEY_EVICTED
     *  A revalidator has evicted the datapath flow.
     * UKEY_EVICTED -> UKEY_DELETED
     *  A revalidator has removed the ukey from the umap and is deleting it.
     */
    if (ukey->state == dst - 1 || (ukey->state == UKEY_VISIBLE &&
                                   dst < UKEY_DELETED)) {
        ukey->state = dst;
    } else {
        struct ds ds = DS_EMPTY_INITIALIZER;

        odp_format_ufid(&ukey->ufid, &ds);
        VLOG_WARN_RL(&rl, "Invalid state transition for ukey %s: %d -> %d",
                     ds_cstr(&ds), ukey->state, dst);
        ds_destroy(&ds);
    }

    ukey->state_thread = ovsthread_id_self();
    ukey->state_where = where;
}

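/* Worked example of the transition rules above (an illustrative sketch, not
 * upstream documentation): UKEY_VISIBLE -> UKEY_EVICTED is accepted even
 * though it skips UKEY_EVICTING, because any forward transition short of
 * UKEY_DELETED is allowed out of UKEY_VISIBLE.  UKEY_OPERATIONAL ->
 * UKEY_DELETED, on the other hand, is rejected and logged: it is neither a
 * single-step transition nor a transition out of UKEY_VISIBLE, so such a
 * ukey must pass through UKEY_EVICTING and UKEY_EVICTED first. */
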
static bool
ukey_install(struct udpif *udpif, struct udpif_key *ukey)
{
    bool installed;

    installed = ukey_install__(udpif, ukey);
    if (installed) {
        ovs_mutex_unlock(&ukey->mutex);
    }

    return installed;
}

/* Searches for a ukey in 'udpif->ukeys' that matches 'flow' and attempts to
 * lock the ukey. If the ukey does not exist, create it.
 *
 * Returns 0 on success, setting *result to the matching ukey and returning it
 * in a locked state. Otherwise, returns an errno and clears *result. EBUSY
 * indicates that another thread is handling this flow. Other errors indicate
 * an unexpected condition creating a new ukey.
 *
 * *error is an output parameter provided to appease the threadsafety analyser,
 * and its value matches the return value. */
static int
ukey_acquire(struct udpif *udpif, const struct dpif_flow *flow,
             struct udpif_key **result, int *error)
    OVS_TRY_LOCK(0, (*result)->mutex)
{
    struct udpif_key *ukey;
    int retval;

    ukey = ukey_lookup(udpif, &flow->ufid, flow->pmd_id);
    if (ukey) {
        retval = ovs_mutex_trylock(&ukey->mutex);
    } else {
        /* Usually we try to avoid installing flows from revalidator threads,
         * because locking on a umap may cause handler threads to block.
         * However there are certain cases, like when ovs-vswitchd is
         * restarted, where it is desirable to handle flows that exist in the
         * datapath gracefully (ie, don't just clear the datapath). */
        bool install;

        retval = ukey_create_from_dpif_flow(udpif, flow, &ukey);
        if (retval) {
            goto done;
        }
        install = ukey_install__(udpif, ukey);
        if (install) {
            retval = 0;
        } else {
            ukey_delete__(ukey);
            retval = EBUSY;
        }
    }

done:
    *error = retval;
    if (retval) {
        *result = NULL;
    } else {
        *result = ukey;
    }
    return retval;
}

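/* Illustrative usage sketch (this mirrors the dump loop in revalidate()
 * below; it is not a separate API):
 *
 *     struct udpif_key *ukey;
 *     int error;
 *
 *     if (ukey_acquire(udpif, f, &ukey, &error)) {
 *         // EBUSY: another thread owns this flow.  Other errors are
 *         // unexpected and may warrant deleting the datapath flow.
 *         return;
 *     }
 *     // ... read or update ukey state under ukey->mutex ...
 *     ovs_mutex_unlock(&ukey->mutex);
 */
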
static void
ukey_delete__(struct udpif_key *ukey)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    if (ukey) {
        if (ukey->key_recirc_id) {
            recirc_free_id(ukey->key_recirc_id);
        }
        recirc_refs_unref(&ukey->recircs);
        xlate_cache_delete(ukey->xcache);
        ofpbuf_delete(ovsrcu_get(struct ofpbuf *, &ukey->actions));
        ovs_mutex_destroy(&ukey->mutex);
        free(ukey);
    }
}

static void
ukey_delete(struct umap *umap, struct udpif_key *ukey)
    OVS_REQUIRES(umap->mutex)
{
    ovs_mutex_lock(&ukey->mutex);
    if (ukey->state < UKEY_DELETED) {
        cmap_remove(&umap->cmap, &ukey->cmap_node, ukey->hash);
        ovsrcu_postpone(ukey_delete__, ukey);
        transition_ukey(ukey, UKEY_DELETED);
    }
    ovs_mutex_unlock(&ukey->mutex);
}

static bool
should_revalidate(const struct udpif *udpif, uint64_t packets,
                  long long int used)
{
    long long int metric, now, duration;

    /* Always revalidate the first time a flow is dumped. */
    if (!used) {
        return true;
    }

    if (udpif->dump_duration < ofproto_max_revalidator / 2) {
        /* We are likely to handle full revalidation for the flows. */
        return true;
    }

    /* Calculate the mean time between seeing these packets. If this
     * exceeds the threshold, then delete the flow rather than performing
     * costly revalidation for flows that aren't being hit frequently.
     *
     * This is targeted at situations where the dump_duration is high (~1s),
     * and revalidation is triggered by a call to udpif_revalidate(). In
     * these situations, revalidation of all flows causes fluctuations in the
     * flow_limit due to the interaction with the dump_duration and max_idle.
     * This tends to result in deletion of low-throughput flows anyway, so
     * skip the revalidation and just delete those flows. */
    packets = MAX(packets, 1);
    now = MAX(used, time_msec());
    duration = now - used;
    metric = duration / packets;

    if (metric < 1000 / ofproto_min_revalidate_pps) {
        /* The flow is receiving more than min-revalidate-pps, so keep it. */
        return true;
    }
    return false;
}

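/* Worked example of the metric above (values are illustrative only): with
 * ofproto_min_revalidate_pps set to 5, the threshold is 1000 / 5 = 200 ms
 * per packet.  A flow that saw 100 packets over the last 10 seconds has a
 * metric of 10000 / 100 = 100 ms/packet and is revalidated, while a flow
 * that saw only 10 packets over the same period has a metric of 1000
 * ms/packet and is deleted instead of being revalidated. */
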
struct reval_context {
    /* Optional output parameters */
    struct flow_wildcards *wc;
    struct ofpbuf *odp_actions;
    struct netflow **netflow;
    struct xlate_cache *xcache;

    /* Required output parameters */
    struct xlate_out xout;
    struct flow flow;
};

/* Translates 'key' into a flow, populating 'ctx' as it goes along.
 *
 * Returns 0 on success, otherwise a positive errno value.
 *
 * The caller is responsible for uninitializing ctx->xout on success.
 */
static int
xlate_key(struct udpif *udpif, const struct nlattr *key, unsigned int len,
          const struct dpif_flow_stats *push, struct reval_context *ctx)
{
    struct ofproto_dpif *ofproto;
    ofp_port_t ofp_in_port;
    enum odp_key_fitness fitness;
    struct xlate_in xin;
    int error;

    fitness = odp_flow_key_to_flow(key, len, &ctx->flow, NULL);
    if (fitness == ODP_FIT_ERROR) {
        return EINVAL;
    }

    error = xlate_lookup(udpif->backer, &ctx->flow, &ofproto, NULL, NULL,
                         ctx->netflow, &ofp_in_port);
    if (error) {
        return error;
    }

    xlate_in_init(&xin, ofproto, ofproto_dpif_get_tables_version(ofproto),
                  &ctx->flow, ofp_in_port, NULL, push->tcp_flags,
                  NULL, ctx->wc, ctx->odp_actions);
    if (push->n_packets) {
        xin.resubmit_stats = push;
        xin.allow_side_effects = true;
    }
    xin.xcache = ctx->xcache;
    xlate_actions(&xin, &ctx->xout);
    if (fitness == ODP_FIT_TOO_LITTLE) {
        ctx->xout.slow |= SLOW_MATCH;
    }

    return 0;
}

static int
xlate_ukey(struct udpif *udpif, const struct udpif_key *ukey,
           uint16_t tcp_flags, struct reval_context *ctx)
{
    struct dpif_flow_stats push = {
        .tcp_flags = tcp_flags,
    };
    return xlate_key(udpif, ukey->key, ukey->key_len, &push, ctx);
}

static int
populate_xcache(struct udpif *udpif, struct udpif_key *ukey,
                uint16_t tcp_flags)
    OVS_REQUIRES(ukey->mutex)
{
    struct reval_context ctx = {
        .odp_actions = NULL,
        .netflow = NULL,
        .wc = NULL,
    };
    int error;

    ovs_assert(!ukey->xcache);
    ukey->xcache = ctx.xcache = xlate_cache_new();
    error = xlate_ukey(udpif, ukey, tcp_flags, &ctx);
    if (error) {
        return error;
    }
    xlate_out_uninit(&ctx.xout);

    return 0;
}

static enum reval_result
revalidate_ukey__(struct udpif *udpif, const struct udpif_key *ukey,
                  uint16_t tcp_flags, struct ofpbuf *odp_actions,
                  struct recirc_refs *recircs, struct xlate_cache *xcache)
{
    struct xlate_out *xoutp;
    struct netflow *netflow;
    struct flow_wildcards dp_mask, wc;
    enum reval_result result;
    struct reval_context ctx = {
        .odp_actions = odp_actions,
        .netflow = &netflow,
        .xcache = xcache,
        .wc = &wc,
    };

    result = UKEY_DELETE;
    xoutp = NULL;
    netflow = NULL;

    if (xlate_ukey(udpif, ukey, tcp_flags, &ctx)) {
        goto exit;
    }
    xoutp = &ctx.xout;

    if (xoutp->avoid_caching) {
        goto exit;
    }

    if (xoutp->slow) {
        struct ofproto_dpif *ofproto;
        ofp_port_t ofp_in_port;

        ofproto = xlate_lookup_ofproto(udpif->backer, &ctx.flow, &ofp_in_port,
                                       NULL);

        ofpbuf_clear(odp_actions);

        if (!ofproto) {
            goto exit;
        }

        compose_slow_path(udpif, xoutp, ctx.flow.in_port.odp_port,
                          ofp_in_port, odp_actions,
                          ofproto->up.slowpath_meter_id, &ofproto->uuid);
    }

    if (odp_flow_key_to_mask(ukey->mask, ukey->mask_len, &dp_mask, &ctx.flow,
                             NULL)
        == ODP_FIT_ERROR) {
        goto exit;
    }

    /* Do not modify if any bit is wildcarded by the installed datapath flow,
     * but not the newly revalidated wildcard mask (wc), i.e., if revalidation
     * tells that the datapath flow is now too generic and must be narrowed
     * down.  Note that we do not know if the datapath has ignored any of the
     * wildcarded bits, so we may be overly conservative here. */
    if (flow_wildcards_has_extra(&dp_mask, ctx.wc)) {
        goto exit;
    }

    if (!ofpbuf_equal(odp_actions,
                      ovsrcu_get(struct ofpbuf *, &ukey->actions))) {
        /* The datapath mask was OK, but the actions seem to have changed.
         * Let's modify it in place. */
        result = UKEY_MODIFY;
        /* Transfer recirc action ID references to the caller. */
        recirc_refs_swap(recircs, &xoutp->recircs);
        goto exit;
    }

    result = UKEY_KEEP;

exit:
    if (netflow && result == UKEY_DELETE) {
        netflow_flow_clear(netflow, &ctx.flow);
    }
    xlate_out_uninit(xoutp);
    return result;
}

/* Verifies that the datapath actions of 'ukey' are still correct, and pushes
 * 'stats' for it.
 *
 * Returns a recommended action for 'ukey', options include:
 *      UKEY_DELETE The ukey should be deleted.
 *      UKEY_KEEP   The ukey is fine as is.
 *      UKEY_MODIFY The ukey's actions should be changed but is otherwise
 *                  fine.  Callers should change the actions to those found
 *                  in the caller supplied 'odp_actions' buffer.  The
 *                  recirculation references can be found in 'recircs' and
 *                  must be handled by the caller.
 *
 * If the result is UKEY_MODIFY, then references to all recirc_ids used by the
 * new flow will be held within 'recircs' (which may be none).
 *
 * The caller is responsible for both initializing 'recircs' prior this call,
 * and ensuring any references are eventually freed.
 */
static enum reval_result
revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey,
                const struct dpif_flow_stats *stats,
                struct ofpbuf *odp_actions, uint64_t reval_seq,
                struct recirc_refs *recircs, bool offloaded)
    OVS_REQUIRES(ukey->mutex)
{
    bool need_revalidate = ukey->reval_seq != reval_seq;
    enum reval_result result = UKEY_DELETE;
    struct dpif_flow_stats push;

    ofpbuf_clear(odp_actions);

    push.used = stats->used;
    push.tcp_flags = stats->tcp_flags;
    push.n_packets = (stats->n_packets > ukey->stats.n_packets
                      ? stats->n_packets - ukey->stats.n_packets
                      : 0);
    push.n_bytes = (stats->n_bytes > ukey->stats.n_bytes
                    ? stats->n_bytes - ukey->stats.n_bytes
                    : 0);

    if (need_revalidate) {
        if (should_revalidate(udpif, push.n_packets, ukey->stats.used)) {
            if (!ukey->xcache) {
                ukey->xcache = xlate_cache_new();
            } else {
                xlate_cache_clear(ukey->xcache);
            }
            result = revalidate_ukey__(udpif, ukey, push.tcp_flags,
                                       odp_actions, recircs, ukey->xcache);
        } /* else delete; too expensive to revalidate */
    } else if (!push.n_packets || ukey->xcache
               || !populate_xcache(udpif, ukey, push.tcp_flags)) {
        result = UKEY_KEEP;
    }

    /* Stats for deleted flows will be attributed upon flow deletion. Skip. */
    if (result != UKEY_DELETE) {
        xlate_push_stats(ukey->xcache, &push, offloaded);
        ukey->stats = *stats;
        ukey->reval_seq = reval_seq;
    }

    return result;
}

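/* Illustrative note on the stats handling above: datapath flow stats are
 * cumulative, so 'push' is computed as the delta since the last dump.  For
 * example, if the previous dump recorded 1000 packets in ukey->stats and the
 * datapath now reports 1250, push.n_packets is 250.  The conditional
 * expressions clamp the delta to zero if the datapath counter appears to
 * have gone backwards, e.g. after a flow was re-installed. */
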
static void
delete_op_init__(struct udpif *udpif, struct ukey_op *op,
                 const struct dpif_flow *flow)
{
    op->ukey = NULL;
    op->dop.type = DPIF_OP_FLOW_DEL;
    op->dop.flow_del.key = flow->key;
    op->dop.flow_del.key_len = flow->key_len;
    op->dop.flow_del.ufid = flow->ufid_present ? &flow->ufid : NULL;
    op->dop.flow_del.pmd_id = flow->pmd_id;
    op->dop.flow_del.stats = &op->stats;
    op->dop.flow_del.terse = udpif_use_ufid(udpif);
}

static void
delete_op_init(struct udpif *udpif, struct ukey_op *op, struct udpif_key *ukey)
{
    op->ukey = ukey;
    op->dop.type = DPIF_OP_FLOW_DEL;
    op->dop.flow_del.key = ukey->key;
    op->dop.flow_del.key_len = ukey->key_len;
    op->dop.flow_del.ufid = ukey->ufid_present ? &ukey->ufid : NULL;
    op->dop.flow_del.pmd_id = ukey->pmd_id;
    op->dop.flow_del.stats = &op->stats;
    op->dop.flow_del.terse = udpif_use_ufid(udpif);
}

static void
put_op_init(struct ukey_op *op, struct udpif_key *ukey,
            enum dpif_flow_put_flags flags)
{
    op->ukey = ukey;
    op->dop.type = DPIF_OP_FLOW_PUT;
    op->dop.flow_put.flags = flags;
    op->dop.flow_put.key = ukey->key;
    op->dop.flow_put.key_len = ukey->key_len;
    op->dop.flow_put.mask = ukey->mask;
    op->dop.flow_put.mask_len = ukey->mask_len;
    op->dop.flow_put.ufid = ukey->ufid_present ? &ukey->ufid : NULL;
    op->dop.flow_put.pmd_id = ukey->pmd_id;
    op->dop.flow_put.stats = NULL;
    ukey_get_actions(ukey, &op->dop.flow_put.actions,
                     &op->dop.flow_put.actions_len);
}

/* Executes datapath operations 'ops' and attributes stats retrieved from the
 * datapath as part of those operations. */
static void
push_dp_ops(struct udpif *udpif, struct ukey_op *ops, size_t n_ops)
{
    struct dpif_op *opsp[REVALIDATE_MAX_BATCH];
    size_t i;

    ovs_assert(n_ops <= REVALIDATE_MAX_BATCH);
    for (i = 0; i < n_ops; i++) {
        opsp[i] = &ops[i].dop;
    }
    dpif_operate(udpif->dpif, opsp, n_ops, DPIF_OFFLOAD_AUTO);

    for (i = 0; i < n_ops; i++) {
        struct ukey_op *op = &ops[i];
        struct dpif_flow_stats *push, *stats, push_buf;

        stats = op->dop.flow_del.stats;
        push = &push_buf;

        if (op->dop.type != DPIF_OP_FLOW_DEL) {
            /* Only deleted flows need their stats pushed. */
            continue;
        }

        if (op->dop.error) {
            /* flow_del error, 'stats' is unusable. */
            if (op->ukey) {
                ovs_mutex_lock(&op->ukey->mutex);
                transition_ukey(op->ukey, UKEY_EVICTED);
                ovs_mutex_unlock(&op->ukey->mutex);
            }
            continue;
        }

        if (op->ukey) {
            ovs_mutex_lock(&op->ukey->mutex);
            transition_ukey(op->ukey, UKEY_EVICTED);
            push->used = MAX(stats->used, op->ukey->stats.used);
            push->tcp_flags = stats->tcp_flags | op->ukey->stats.tcp_flags;
            push->n_packets = stats->n_packets - op->ukey->stats.n_packets;
            push->n_bytes = stats->n_bytes - op->ukey->stats.n_bytes;
            ovs_mutex_unlock(&op->ukey->mutex);
        } else {
            push = stats;
        }

        if (push->n_packets || netflow_exists()) {
            const struct nlattr *key = op->dop.flow_del.key;
            size_t key_len = op->dop.flow_del.key_len;
            struct netflow *netflow;
            struct reval_context ctx = {
                .netflow = &netflow,
            };
            int error;

            if (op->ukey) {
                ovs_mutex_lock(&op->ukey->mutex);
                if (op->ukey->xcache) {
                    xlate_push_stats(op->ukey->xcache, push, false);
                    ovs_mutex_unlock(&op->ukey->mutex);
                    continue;
                }
                ovs_mutex_unlock(&op->ukey->mutex);
                key = op->ukey->key;
                key_len = op->ukey->key_len;
            }

            error = xlate_key(udpif, key, key_len, push, &ctx);
            if (error) {
                static struct vlog_rate_limit rll = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rll, "xlate_key failed (%s)!",
                             ovs_strerror(error));
            } else {
                xlate_out_uninit(&ctx.xout);
                if (netflow) {
                    netflow_flow_clear(netflow, &ctx.flow);
                }
            }
        }
    }
}

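/* Illustrative note: ops built by delete_op_init__() carry no ukey
 * (op->ukey is NULL), so the loop above pushes the raw datapath stats for
 * them and translates the flow key taken from the delete operation itself
 * rather than from a ukey. */
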
/* Executes datapath operations 'ops', attributes stats retrieved from the
 * datapath, and deletes ukeys corresponding to deleted flows. */
static void
push_ukey_ops(struct udpif *udpif, struct umap *umap,
              struct ukey_op *ops, size_t n_ops)
{
    int i;

    push_dp_ops(udpif, ops, n_ops);
    ovs_mutex_lock(&umap->mutex);
    for (i = 0; i < n_ops; i++) {
        if (ops[i].dop.type == DPIF_OP_FLOW_DEL) {
            ukey_delete(umap, ops[i].ukey);
        }
    }
    ovs_mutex_unlock(&umap->mutex);
}

static void
log_unexpected_flow(const struct dpif_flow *flow, int error)
{
    struct ds ds = DS_EMPTY_INITIALIZER;

    ds_put_format(&ds, "Failed to acquire udpif_key corresponding to "
                  "unexpected flow (%s): ", ovs_strerror(error));
    odp_format_ufid(&flow->ufid, &ds);

    static struct vlog_rate_limit rll = VLOG_RATE_LIMIT_INIT(10, 60);
    VLOG_WARN_RL(&rll, "%s", ds_cstr(&ds));

    ds_destroy(&ds);
}

static void
reval_op_init(struct ukey_op *op, enum reval_result result,
              struct udpif *udpif, struct udpif_key *ukey,
              struct recirc_refs *recircs, struct ofpbuf *odp_actions)
    OVS_REQUIRES(ukey->mutex)
{
    if (result == UKEY_DELETE) {
        delete_op_init(udpif, op, ukey);
        transition_ukey(ukey, UKEY_EVICTING);
    } else if (result == UKEY_MODIFY) {
        /* Store the new recircs. */
        recirc_refs_swap(&ukey->recircs, recircs);
        /* Release old recircs. */
        recirc_refs_unref(recircs);
        /* ukey->key_recirc_id remains, as the key is the same as before. */

        ukey_set_actions(ukey, odp_actions);
        put_op_init(op, ukey, DPIF_FP_MODIFY);
    }
}

static void
ukey_netdev_unref(struct udpif_key *ukey)
{
    if (!ukey->in_netdev) {
        return;
    }
    netdev_close(ukey->in_netdev);
    ukey->in_netdev = NULL;
}

/*
 * Given a udpif_key, get its input port (netdev) by parsing the flow keys
 * and actions. The flow may not contain flow attributes if it is a terse
 * dump; read its attributes from the ukey and then parse the flow to get
 * the port info. Save them in udpif_key.
 */
static void
ukey_to_flow_netdev(struct udpif *udpif, struct udpif_key *ukey)
{
    const char *dpif_type_str = dpif_normalize_type(dpif_type(udpif->dpif));
    const struct nlattr *k;
    unsigned int left;

    /* Remove existing references to netdev */
    ukey_netdev_unref(ukey);

    /* Find the input port and get a reference to its netdev */
    NL_ATTR_FOR_EACH (k, left, ukey->key, ukey->key_len) {
        enum ovs_key_attr type = nl_attr_type(k);

        if (type == OVS_KEY_ATTR_IN_PORT) {
            ukey->in_netdev = netdev_ports_get(nl_attr_get_odp_port(k),
                                               dpif_type_str);
        } else if (type == OVS_KEY_ATTR_TUNNEL) {
            struct flow_tnl tnl;
            enum odp_key_fitness res;

            if (ukey->in_netdev) {
                netdev_close(ukey->in_netdev);
                ukey->in_netdev = NULL;
            }
            res = odp_tun_key_from_attr(k, &tnl, NULL);
            if (res != ODP_FIT_ERROR) {
                ukey->in_netdev = flow_get_tunnel_netdev(&tnl);
                break;
            }
        }
    }
}

static uint64_t
udpif_flow_packet_delta(struct udpif_key *ukey, const struct dpif_flow *f)
{
    return f->stats.n_packets + ukey->flow_backlog_packets -
        ukey->flow_packets;
}

static long long int
udpif_flow_time_delta(struct udpif *udpif, struct udpif_key *ukey)
{
    return (udpif->dpif->current_ms - ukey->flow_time) / 1000;
}

/*
 * Save backlog packet count while switching modes
 * between offloaded and kernel datapaths.
 */
static void
udpif_set_ukey_backlog_packets(struct udpif_key *ukey)
{
    ukey->flow_backlog_packets = ukey->flow_packets;
}

/* Gather pps-rate for the given dpif_flow and save it in its ukey */
static void
udpif_update_flow_pps(struct udpif *udpif, struct udpif_key *ukey,
                      const struct dpif_flow *f)
{
    uint64_t pps;

    /* Update pps-rate only when we are close to rebalance interval */
    if (udpif->dpif->current_ms - ukey->flow_time < OFFL_REBAL_INTVL_MSEC) {
        return;
    }

    ukey->offloaded = f->attrs.offloaded;
    pps = udpif_flow_packet_delta(ukey, f) /
        udpif_flow_time_delta(udpif, ukey);
    ukey->flow_pps_rate = pps;
    ukey->flow_packets = ukey->flow_backlog_packets + f->stats.n_packets;
    ukey->flow_time = udpif->dpif->current_ms;
}

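/* Worked example (values are illustrative): if a flow accumulated 50000
 * packets, including backlog, since 'flow_time' and udpif_flow_time_delta()
 * reports 10 seconds, the stored flow_pps_rate is 5000.  The division
 * truncates and udpif_flow_time_delta() returns whole seconds, so rates are
 * coarse by design; they only need to order flows for rebalancing. */
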
static long long int
udpif_update_used(struct udpif *udpif, struct udpif_key *ukey,
                  struct dpif_flow_stats *stats)
    OVS_REQUIRES(ukey->mutex)
{
    if (!udpif->dump->terse) {
        return ukey->created;
    }

    if (stats->n_packets > ukey->stats.n_packets) {
        stats->used = udpif->dpif->current_ms;
    } else if (ukey->stats.used) {
        stats->used = ukey->stats.used;
    } else {
        stats->used = ukey->created;
    }
    return stats->used;
}

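/* Illustrative note on the fallbacks above: terse dumps omit per-flow 'used'
 * timestamps, so they are approximated from packet counters: any increase
 * since the previous dump stamps the flow as used at the current dump time;
 * otherwise the last recorded 'used' (or, failing that, the ukey creation
 * time) is reused.  The approximation is no coarser than one dump interval.
 * For non-terse dumps this function is only reached when the datapath
 * reported no 'used' at all, in which case the creation time is the best
 * available answer. */
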
static void
revalidate(struct revalidator *revalidator)
{
    uint64_t odp_actions_stub[1024 / 8];
    struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(odp_actions_stub);

    struct udpif *udpif = revalidator->udpif;
    struct dpif_flow_dump_thread *dump_thread;
    uint64_t dump_seq, reval_seq;
    bool kill_warn_print = true;
    unsigned int flow_limit;

    dump_seq = seq_read(udpif->dump_seq);
    reval_seq = seq_read(udpif->reval_seq);
    atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
    dump_thread = dpif_flow_dump_thread_create(udpif->dump);
    for (;;) {
        struct ukey_op ops[REVALIDATE_MAX_BATCH];
        int n_ops = 0;

        struct dpif_flow flows[REVALIDATE_MAX_BATCH];
        const struct dpif_flow *f;
        int n_dumped;

        long long int max_idle;
        long long int now;
        size_t kill_all_limit;
        size_t n_dp_flows;
        bool kill_them_all;

        n_dumped = dpif_flow_dump_next(dump_thread, flows, ARRAY_SIZE(flows));
        if (!n_dumped) {
            break;
        }

        now = time_msec();

        /* In normal operation we want to keep flows around until they have
         * been idle for 'ofproto_max_idle' milliseconds.  However:
         *
         *     - If the number of datapath flows climbs above 'flow_limit',
         *       drop that down to 100 ms to try to bring the flows down to
         *       the limit.
         *
         *     - If the number of datapath flows climbs above twice
         *       'flow_limit', delete all the datapath flows as an emergency
         *       measure.  (We reassess this condition for the next batch of
         *       datapath flows, so we will recover before all the flows are
         *       gone.) */
        n_dp_flows = udpif_get_n_flows(udpif);
        if (n_dp_flows >= flow_limit) {
            COVERAGE_INC(upcall_flow_limit_hit);
        }

        kill_them_all = false;
        kill_all_limit = flow_limit * 2;
        if (OVS_UNLIKELY(n_dp_flows > kill_all_limit)) {
            static struct vlog_rate_limit rlem = VLOG_RATE_LIMIT_INIT(1, 1);

            kill_them_all = true;
            COVERAGE_INC(upcall_flow_limit_kill);
            if (kill_warn_print) {
                kill_warn_print = false;
                VLOG_WARN_RL(&rlem,
                    "Number of datapath flows (%"PRIuSIZE") twice as high as "
                    "current dynamic flow limit (%"PRIuSIZE").  "
                    "Starting to delete flows unconditionally "
                    "as an emergency measure.", n_dp_flows, kill_all_limit);
            }
        }

        max_idle = n_dp_flows > flow_limit ? 100 : ofproto_max_idle;

        udpif->dpif->current_ms = time_msec();
        for (f = flows; f < &flows[n_dumped]; f++) {
            long long int used = f->stats.used;
            struct recirc_refs recircs = RECIRC_REFS_EMPTY_INITIALIZER;
            struct dpif_flow_stats stats = f->stats;
            enum reval_result result;
            struct udpif_key *ukey;
            bool already_dumped;
            int error;

            if (ukey_acquire(udpif, f, &ukey, &error)) {
                if (error == EBUSY) {
                    /* Another thread is processing this flow, so don't bother
                     * processing it. */
                    COVERAGE_INC(upcall_ukey_contention);
                } else {
                    log_unexpected_flow(f, error);
                    if (error != ENOENT) {
                        delete_op_init__(udpif, &ops[n_ops++], f);
                    }
                }
                continue;
            }

            already_dumped = ukey->dump_seq == dump_seq;
            if (already_dumped) {
                /* The flow has already been handled during this flow dump
                 * operation. Skip it. */
                if (ukey->xcache) {
                    COVERAGE_INC(dumped_duplicate_flow);
                } else {
                    COVERAGE_INC(dumped_new_flow);
                }
                ovs_mutex_unlock(&ukey->mutex);
                continue;
            }

            if (ukey->state <= UKEY_OPERATIONAL) {
                /* The flow is now confirmed to be in the datapath. */
                transition_ukey(ukey, UKEY_OPERATIONAL);
            } else {
                VLOG_INFO("Unexpected ukey transition from state %d "
                          "(last transitioned from thread %u at %s)",
                          ukey->state, ukey->state_thread, ukey->state_where);
                ovs_mutex_unlock(&ukey->mutex);
                continue;
            }

            if (!used) {
                used = udpif_update_used(udpif, ukey, &stats);
            }
            if (kill_them_all || (used && used < now - max_idle)) {
                result = UKEY_DELETE;
            } else {
                result = revalidate_ukey(udpif, ukey, &stats, &odp_actions,
                                         reval_seq, &recircs,
                                         f->attrs.offloaded);
            }
            ukey->dump_seq = dump_seq;

            if (netdev_is_offload_rebalance_policy_enabled() &&
                result != UKEY_DELETE) {
                udpif_update_flow_pps(udpif, ukey, f);
            }

            if (result != UKEY_KEEP) {
                /* Takes ownership of 'recircs'. */
                reval_op_init(&ops[n_ops++], result, udpif, ukey, &recircs,
                              &odp_actions);
            }
            ovs_mutex_unlock(&ukey->mutex);
        }

        if (n_ops) {
            /* Push datapath ops but defer ukey deletion to 'sweep' phase. */
            push_dp_ops(udpif, ops, n_ops);
        }
        ovsrcu_quiesce();
    }
    dpif_flow_dump_thread_destroy(dump_thread);
    ofpbuf_uninit(&odp_actions);
}

/* Pauses the 'revalidator', can only proceed after main thread
 * calls udpif_resume_revalidators(). */
static void
revalidator_pause(struct revalidator *revalidator)
{
    /* The first block is for sync'ing the pause with main thread. */
    ovs_barrier_block(&revalidator->udpif->pause_barrier);
    /* The second block is for pausing until main thread resumes. */
    ovs_barrier_block(&revalidator->udpif->pause_barrier);
}

static void
revalidator_sweep__(struct revalidator *revalidator, bool purge)
{
    struct udpif *udpif;
    uint64_t dump_seq, reval_seq;
    int slice;

    udpif = revalidator->udpif;
    dump_seq = seq_read(udpif->dump_seq);
    reval_seq = seq_read(udpif->reval_seq);
    slice = revalidator - udpif->revalidators;
    ovs_assert(slice < udpif->n_revalidators);

    for (int i = slice; i < N_UMAPS; i += udpif->n_revalidators) {
        uint64_t odp_actions_stub[1024 / 8];
        struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(odp_actions_stub);

        struct ukey_op ops[REVALIDATE_MAX_BATCH];
        struct udpif_key *ukey;
        struct umap *umap = &udpif->ukeys[i];
        size_t n_ops = 0;

        CMAP_FOR_EACH(ukey, cmap_node, &umap->cmap) {
            enum ukey_state ukey_state;

            /* Handler threads could be holding a ukey lock while it installs a
             * new flow, so don't hang around waiting for access to it. */
            if (ovs_mutex_trylock(&ukey->mutex)) {
                continue;
            }
            ukey_state = ukey->state;
            if (ukey_state == UKEY_OPERATIONAL
                || (ukey_state == UKEY_VISIBLE && purge)) {
                struct recirc_refs recircs = RECIRC_REFS_EMPTY_INITIALIZER;
                bool seq_mismatch = (ukey->dump_seq != dump_seq
                                     && ukey->reval_seq != reval_seq);
                enum reval_result result;

                if (purge) {
                    result = UKEY_DELETE;
                } else if (!seq_mismatch) {
                    result = UKEY_KEEP;
                } else {
                    struct dpif_flow_stats stats;
                    COVERAGE_INC(revalidate_missed_dp_flow);
                    memset(&stats, 0, sizeof stats);
                    result = revalidate_ukey(udpif, ukey, &stats, &odp_actions,
                                             reval_seq, &recircs, false);
                }
                if (result != UKEY_KEEP) {
                    /* Clears 'recircs' if filled by revalidate_ukey(). */
                    reval_op_init(&ops[n_ops++], result, udpif, ukey, &recircs,
                                  &odp_actions);
                }
            }
            ovs_mutex_unlock(&ukey->mutex);

            if (ukey_state == UKEY_EVICTED) {
                /* The common flow deletion case involves deletion of the flow
                 * during the dump phase and ukey deletion here. */
                ovs_mutex_lock(&umap->mutex);
                ukey_delete(umap, ukey);
                ovs_mutex_unlock(&umap->mutex);
            }

            if (n_ops == REVALIDATE_MAX_BATCH) {
                /* Update/delete missed flows and clean up corresponding ukeys
                 * if necessary. */
                push_ukey_ops(udpif, umap, ops, n_ops);
                n_ops = 0;
            }
        }

        if (n_ops) {
            push_ukey_ops(udpif, umap, ops, n_ops);
        }

        ofpbuf_uninit(&odp_actions);
        ovsrcu_quiesce();
    }
}

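/* Illustrative example of the umap slicing above: with 4 revalidators and
 * N_UMAPS of 512, revalidator 0 sweeps umaps 0, 4, 8, ..., revalidator 1
 * sweeps 1, 5, 9, ..., and so on, so each umap is owned by exactly one
 * revalidator during the sweep phase. */
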
static void
revalidator_sweep(struct revalidator *revalidator)
{
    revalidator_sweep__(revalidator, false);
}

static void
revalidator_purge(struct revalidator *revalidator)
{
    revalidator_sweep__(revalidator, true);
}

/* In reaction to dpif purge, purges all 'ukey's with same 'pmd_id'. */
static void
dp_purge_cb(void *aux, unsigned pmd_id)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct udpif *udpif = aux;
    size_t i;

    udpif_pause_revalidators(udpif);
    for (i = 0; i < N_UMAPS; i++) {
        struct ukey_op ops[REVALIDATE_MAX_BATCH];
        struct udpif_key *ukey;
        struct umap *umap = &udpif->ukeys[i];
        size_t n_ops = 0;

        CMAP_FOR_EACH(ukey, cmap_node, &umap->cmap) {
            if (ukey->pmd_id == pmd_id) {
                delete_op_init(udpif, &ops[n_ops++], ukey);
                transition_ukey(ukey, UKEY_EVICTING);

                if (n_ops == REVALIDATE_MAX_BATCH) {
                    push_ukey_ops(udpif, umap, ops, n_ops);
                    n_ops = 0;
                }
            }
        }

        if (n_ops) {
            push_ukey_ops(udpif, umap, ops, n_ops);
        }

        ovsrcu_quiesce();
    }
    udpif_resume_revalidators(udpif);
}

static void
upcall_unixctl_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
                    const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    uint64_t n_offloaded_flows;
    struct udpif *udpif;

    LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
        unsigned int flow_limit;
        bool ufid_enabled;
        size_t i;

        atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
        ufid_enabled = udpif_use_ufid(udpif);

        ds_put_format(&ds, "%s:\n", dpif_name(udpif->dpif));
        ds_put_format(&ds, " flows : (current %lu)"
                      " (avg %u) (max %u) (limit %u)\n", udpif_get_n_flows(udpif),
                      udpif->avg_n_flows, udpif->max_n_flows, flow_limit);
        if (!dpif_get_n_offloaded_flows(udpif->dpif, &n_offloaded_flows)) {
            ds_put_format(&ds, " offloaded flows : %"PRIu64"\n",
                          n_offloaded_flows);
        }
        ds_put_format(&ds, " dump duration : %lldms\n", udpif->dump_duration);
        ds_put_format(&ds, " ufid enabled : ");
        if (ufid_enabled) {
            ds_put_format(&ds, "true\n");
        } else {
            ds_put_format(&ds, "false\n");
        }
        ds_put_char(&ds, '\n');

        for (i = 0; i < n_revalidators; i++) {
            struct revalidator *revalidator = &udpif->revalidators[i];
            int j, elements = 0;

            for (j = i; j < N_UMAPS; j += n_revalidators) {
                elements += cmap_count(&udpif->ukeys[j].cmap);
            }
            ds_put_format(&ds, " %u: (keys %d)\n", revalidator->id, elements);
        }
    }

    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}

/* Disable using the megaflows.
 *
 * This command is only needed for advanced debugging, so it's not
 * documented in the man page. */
static void
upcall_unixctl_disable_megaflows(struct unixctl_conn *conn,
                                 int argc OVS_UNUSED,
                                 const char *argv[] OVS_UNUSED,
                                 void *aux OVS_UNUSED)
{
    atomic_store_relaxed(&enable_megaflows, false);
    udpif_flush_all_datapaths();
    unixctl_command_reply(conn, "megaflows disabled");
}

/* Re-enable using megaflows.
 *
 * This command is only needed for advanced debugging, so it's not
 * documented in the man page. */
static void
upcall_unixctl_enable_megaflows(struct unixctl_conn *conn,
                                int argc OVS_UNUSED,
                                const char *argv[] OVS_UNUSED,
                                void *aux OVS_UNUSED)
{
    atomic_store_relaxed(&enable_megaflows, true);
    udpif_flush_all_datapaths();
    unixctl_command_reply(conn, "megaflows enabled");
}

/* Disable skipping flow attributes during flow dump.
 *
 * This command is only needed for advanced debugging, so it's not
 * documented in the man page. */
static void
upcall_unixctl_disable_ufid(struct unixctl_conn *conn, int argc OVS_UNUSED,
                            const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    atomic_store_relaxed(&enable_ufid, false);
    unixctl_command_reply(conn, "Datapath dumping tersely using UFID disabled");
}

/* Re-enable skipping flow attributes during flow dump.
 *
 * This command is only needed for advanced debugging, so it's not documented
 * in the man page. */
static void
upcall_unixctl_enable_ufid(struct unixctl_conn *conn, int argc OVS_UNUSED,
                           const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    atomic_store_relaxed(&enable_ufid, true);
    unixctl_command_reply(conn, "Datapath dumping tersely using UFID enabled "
                          "for supported datapaths");
}

/* Set the flow limit.
 *
 * This command is only needed for advanced debugging, so it's not
 * documented in the man page. */
static void
upcall_unixctl_set_flow_limit(struct unixctl_conn *conn,
                              int argc OVS_UNUSED,
                              const char *argv[],
                              void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    struct udpif *udpif;
    unsigned int flow_limit = atoi(argv[1]);

    LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
        atomic_store_relaxed(&udpif->flow_limit, flow_limit);
    }
    ds_put_format(&ds, "set flow_limit to %u\n", flow_limit);
    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}

static void
upcall_unixctl_dump_wait(struct unixctl_conn *conn,
                         int argc OVS_UNUSED,
                         const char *argv[] OVS_UNUSED,
                         void *aux OVS_UNUSED)
{
    if (ovs_list_is_singleton(&all_udpifs)) {
        struct udpif *udpif = NULL;
        size_t len;

        udpif = OBJECT_CONTAINING(ovs_list_front(&all_udpifs), udpif, list_node);
        len = (udpif->n_conns + 1) * sizeof *udpif->conns;
        udpif->conn_seq = seq_read(udpif->dump_seq);
        udpif->conns = xrealloc(udpif->conns, len);
        udpif->conns[udpif->n_conns++] = conn;
    } else {
        unixctl_command_reply_error(conn, "can't wait on multiple udpifs.");
    }
}

static void
upcall_unixctl_purge(struct unixctl_conn *conn, int argc OVS_UNUSED,
                     const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    struct udpif *udpif;

    LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
        int n;

        for (n = 0; n < udpif->n_revalidators; n++) {
            revalidator_purge(&udpif->revalidators[n]);
        }
    }
    unixctl_command_reply(conn, "");
}

/* Flows are sorted in the following order:
 * netdev, flow state (offloaded/kernel path), flow_pps_rate.
 */
static int
flow_compare_rebalance(const void *elem1, const void *elem2)
{
    const struct udpif_key *f1 = *(struct udpif_key **)elem1;
    const struct udpif_key *f2 = *(struct udpif_key **)elem2;
    int64_t diff;

    if (f1->in_netdev < f2->in_netdev) {
        return -1;
    } else if (f1->in_netdev > f2->in_netdev) {
        return 1;
    }

    if (f1->offloaded != f2->offloaded) {
        return f2->offloaded - f1->offloaded;
    }

    diff = (f1->offloaded == true) ?
        f1->flow_pps_rate - f2->flow_pps_rate :
        f2->flow_pps_rate - f1->flow_pps_rate;

    return (diff < 0) ? -1 : 1;
}

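/* Illustrative example of the resulting qsort() order: all flows on netdev A
 * precede all flows on netdev B (compared by pointer); within one netdev,
 * offloaded flows precede pending ones; offloaded flows sort by ascending
 * pps-rate and pending flows by descending pps-rate, so the coldest
 * offloaded flow lines up against the hottest pending flow for the pairwise
 * comparison in rebalance_device(). */
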
/* Insert flows from pending array during rebalancing */
static int
rebalance_insert_pending(struct udpif *udpif, struct udpif_key **pending_flows,
                         int pending_count, int insert_count,
                         uint64_t rate_threshold)
{
    int count = 0;

    for (int i = 0; i < pending_count; i++) {
        struct udpif_key *flow = pending_flows[i];
        int err;

        /* Stop offloading pending flows if the insert count is
         * reached and the flow rate is less than the threshold
         */
        if (count >= insert_count && flow->flow_pps_rate < rate_threshold) {
            break;
        }

        /* Offload the flow to netdev */
        err = udpif_flow_program(udpif, flow, DPIF_OFFLOAD_ALWAYS);

        if (err == ENOSPC) {
            /* Stop if we are out of resources */
            break;
        }

        if (err) {
            continue;
        }

        /* Offload succeeded; delete it from the kernel datapath */
        udpif_flow_unprogram(udpif, flow, DPIF_OFFLOAD_NEVER);

        /* Change the state of the flow, adjust dpif counters */
        flow->offloaded = true;

        udpif_set_ukey_backlog_packets(flow);
        count++;
    }

    return count;
}

3133 rebalance_remove_offloaded(struct udpif
*udpif
,
3134 struct udpif_key
**offloaded_flows
,
3137 for (int i
= 0; i
< offload_count
; i
++) {
3138 struct udpif_key
*flow
= offloaded_flows
[i
];
3141 /* Install the flow into kernel path first */
3142 err
= udpif_flow_program(udpif
, flow
, DPIF_OFFLOAD_NEVER
);
3147 /* Success; now remove offloaded flow from netdev */
3148 err
= udpif_flow_unprogram(udpif
, flow
, DPIF_OFFLOAD_ALWAYS
);
3150 udpif_flow_unprogram(udpif
, flow
, DPIF_OFFLOAD_NEVER
);
3153 udpif_set_ukey_backlog_packets(flow
);
3154 flow
->offloaded
= false;
/*
 * Rebalance offloaded flows on a netdev that's in OOR state.
 *
 * The rebalancing is done in two phases. In the first phase, we check if
 * the pending flows can be offloaded (if some resources became available
 * in the meantime) by trying to offload each pending flow. If all pending
 * flows get successfully offloaded, the OOR state is cleared on the netdev
 * and there's nothing to rebalance.
 *
 * If some of the pending flows could not be offloaded, i.e, we still see
 * the OOR error, then we move to the second phase of rebalancing. In this
 * phase, the rebalancer compares pps-rate of an offloaded flow with the
 * least pps-rate with that of a pending flow with the highest pps-rate from
 * their respective sorted arrays. If pps-rate of the offloaded flow is less
 * than the pps-rate of the pending flow, then it deletes the offloaded flow
 * from the HW/netdev and adds it to kernel datapath and then offloads pending
 * to HW/netdev. This process is repeated for every pair of offloaded and
 * pending flows in the ordered list. The process stops when we encounter an
 * offloaded flow that has a higher pps-rate than the corresponding pending
 * flow. The entire rebalancing process is repeated in the next iteration.
 */
static bool
rebalance_device(struct udpif *udpif, struct udpif_key **offloaded_flows,
                 int offload_count, struct udpif_key **pending_flows,
                 int pending_count)
{
    /* Phase 1 */
    int num_inserted = rebalance_insert_pending(udpif, pending_flows,
                                                pending_count, pending_count,
                                                0);
    if (num_inserted) {
        VLOG_DBG("Offload rebalance: Phase1: inserted %d pending flows",
                 num_inserted);
    }

    /* Adjust pending array */
    pending_flows = &pending_flows[num_inserted];
    pending_count -= num_inserted;

    if (!pending_count) {
        /*
         * Successfully offloaded all pending flows. The device
         * is no longer in OOR state; done rebalancing this device.
         */
        return false;
    }

    /*
     * Phase 2; determine how many offloaded flows to churn.
     */
#define OFFL_REBAL_MAX_CHURN    1024
    int churn_count = 0;
    while (churn_count < OFFL_REBAL_MAX_CHURN && churn_count < offload_count
           && churn_count < pending_count) {
        if (pending_flows[churn_count]->flow_pps_rate <=
            offloaded_flows[churn_count]->flow_pps_rate) {
            break;
        }
        churn_count++;
    }

    if (churn_count) {
        VLOG_DBG("Offload rebalance: Phase2: removing %d offloaded flows",
                 churn_count);
    }

    /* Bail early if nothing to churn */
    if (!churn_count) {
        return true;
    }

    /* Remove offloaded flows */
    rebalance_remove_offloaded(udpif, offloaded_flows, churn_count);

    /* Adjust offloaded array */
    offloaded_flows = &offloaded_flows[churn_count];
    offload_count -= churn_count;

    /* Replace offloaded flows with pending flows */
    num_inserted = rebalance_insert_pending(udpif, pending_flows,
                                            pending_count, churn_count,
                                            offload_count ?
                                            offloaded_flows[0]->flow_pps_rate :
                                            0);
    if (num_inserted) {
        VLOG_DBG("Offload rebalance: Phase2: inserted %d pending flows",
                 num_inserted);
    }

    return true;
}

static struct udpif_key **
udpif_add_oor_flows(struct udpif_key **sort_flows, size_t *total_flow_count,
                    size_t *alloc_flow_count, struct udpif_key *ukey)
{
    if (*total_flow_count >= *alloc_flow_count) {
        sort_flows = x2nrealloc(sort_flows, alloc_flow_count, sizeof ukey);
    }
    sort_flows[(*total_flow_count)++] = ukey;
    return sort_flows;
}

/*
 * Build sort_flows[] initially with flows that
 * reference an 'OOR' netdev as their input port.
 */
static struct udpif_key **
udpif_build_oor_flows(struct udpif_key **sort_flows, size_t *total_flow_count,
                      size_t *alloc_flow_count, struct udpif_key *ukey,
                      int *oor_netdev_count)
{
    struct netdev *netdev;
    int count;

    /* Input netdev must be available for the flow */
    netdev = ukey->in_netdev;
    if (!netdev) {
        return sort_flows;
    }

    /* Is the in-netdev for this flow in OOR state ? */
    if (!netdev_get_hw_info(netdev, HW_INFO_TYPE_OOR)) {
        ukey_netdev_unref(ukey);
        return sort_flows;
    }

    /* Add the flow to sort_flows[] */
    sort_flows = udpif_add_oor_flows(sort_flows, total_flow_count,
                                     alloc_flow_count, ukey);
    if (ukey->offloaded) {
        count = netdev_get_hw_info(netdev, HW_INFO_TYPE_OFFL_COUNT);
        ovs_assert(count >= 0);
        if (count++ == 0) {
            (*oor_netdev_count)++;
        }
        netdev_set_hw_info(netdev, HW_INFO_TYPE_OFFL_COUNT, count);
    } else {
        count = netdev_get_hw_info(netdev, HW_INFO_TYPE_PEND_COUNT);
        ovs_assert(count >= 0);
        netdev_set_hw_info(netdev, HW_INFO_TYPE_PEND_COUNT, ++count);
    }

    return sort_flows;
}

/*
 * Rebalance offloaded flows on HW netdevs that are in OOR state.
 */
static void
udpif_flow_rebalance(struct udpif *udpif)
{
    struct udpif_key **sort_flows = NULL;
    size_t alloc_flow_count = 0;
    size_t total_flow_count = 0;
    int oor_netdev_count = 0;
    int offload_index = 0;
    int pending_index;

    /* Collect flows (offloaded and pending) that reference OOR netdevs */
    for (size_t i = 0; i < N_UMAPS; i++) {
        struct udpif_key *ukey;
        struct umap *umap = &udpif->ukeys[i];

        CMAP_FOR_EACH (ukey, cmap_node, &umap->cmap) {
            ukey_to_flow_netdev(udpif, ukey);
            sort_flows = udpif_build_oor_flows(sort_flows, &total_flow_count,
                                               &alloc_flow_count, ukey,
                                               &oor_netdev_count);
        }
    }

    /* Sort flows by OOR netdevs, state (offloaded/pending) and pps-rate */
    qsort(sort_flows, total_flow_count, sizeof(struct udpif_key *),
          flow_compare_rebalance);

    /*
     * We now have flows referencing OOR netdevs, that are sorted. We also
     * have a count of offloaded and pending flows on each of the netdevs
     * that are in OOR state. Now rebalance each oor-netdev.
     */
    while (oor_netdev_count) {
        struct netdev *netdev;
        int offload_count;
        int pending_count;
        bool oor;

        netdev = sort_flows[offload_index]->in_netdev;
        ovs_assert(netdev_get_hw_info(netdev, HW_INFO_TYPE_OOR) == true);
        VLOG_DBG("Offload rebalance: netdev: %s is OOR", netdev->name);

        offload_count = netdev_get_hw_info(netdev, HW_INFO_TYPE_OFFL_COUNT);
        pending_count = netdev_get_hw_info(netdev, HW_INFO_TYPE_PEND_COUNT);
        pending_index = offload_index + offload_count;

        oor = rebalance_device(udpif,
                               &sort_flows[offload_index], offload_count,
                               &sort_flows[pending_index], pending_count);
        netdev_set_hw_info(netdev, HW_INFO_TYPE_OOR, oor);

        offload_index = pending_index + pending_count;
        netdev_set_hw_info(netdev, HW_INFO_TYPE_OFFL_COUNT, 0);
        netdev_set_hw_info(netdev, HW_INFO_TYPE_PEND_COUNT, 0);
        oor_netdev_count--;
    }

    for (int i = 0; i < total_flow_count; i++) {
        struct udpif_key *ukey = sort_flows[i];
        ukey_netdev_unref(ukey);
    }
    free(sort_flows);
}

static int
udpif_flow_program(struct udpif *udpif, struct udpif_key *ukey,
                   enum dpif_offload_type offload_type)
{
    struct dpif_op *opsp;
    struct ukey_op uop;

    opsp = &uop.dop;
    put_op_init(&uop, ukey, DPIF_FP_CREATE);
    dpif_operate(udpif->dpif, &opsp, 1, offload_type);

    return opsp->error;
}

static int
udpif_flow_unprogram(struct udpif *udpif, struct udpif_key *ukey,
                     enum dpif_offload_type offload_type)
{
    struct dpif_op *opsp;
    struct ukey_op uop;

    opsp = &uop.dop;
    delete_op_init(udpif, &uop, ukey);
    dpif_operate(udpif->dpif, &opsp, 1, offload_type);

    return opsp->error;
}