/* lib/dpif.h -- dpif, the DataPath InterFace. */
1 /*
2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 /*
18 * dpif, the DataPath InterFace.
19 *
20 * In Open vSwitch terminology, a "datapath" is a flow-based software switch.
21 * A datapath has no intelligence of its own. Rather, it relies entirely on
22 * its client to set up flows. The datapath layer is core to the Open vSwitch
23 * software switch: one could say, without much exaggeration, that everything
24 * in ovs-vswitchd above dpif exists only to make the correct decisions
25 * interacting with dpif.
26 *
27 * Typically, the client of a datapath is the software switch module in
28 * "ovs-vswitchd", but other clients can be written. The "ovs-dpctl" utility
29 * is also a (simple) client.
30 *
31 *
32 * Overview
33 * ========
34 *
35 * The terms written in quotes below are defined in later sections.
36 *
37 * When a datapath "port" receives a packet, it extracts the headers (the
38 * "flow"). If the datapath's "flow table" contains a "flow entry" matching
39 * the packet, then it executes the "actions" in the flow entry and increments
40 * the flow's statistics. If there is no matching flow entry, the datapath
41 * instead appends the packet to an "upcall" queue.
42 *
43 *
44 * Ports
45 * =====
46 *
47 * A datapath has a set of ports that are analogous to the ports on an Ethernet
48 * switch. At the datapath level, each port has the following information
49 * associated with it:
50 *
51 * - A name, a short string that must be unique within the host. This is
52 * typically a name that would be familiar to the system administrator,
53 * e.g. "eth0" or "vif1.1", but it is otherwise arbitrary.
54 *
55 * - A 32-bit port number that must be unique within the datapath but is
56 * otherwise arbitrary. The port number is the most important identifier
57 * for a port in the datapath interface.
58 *
59 * - A type, a short string that identifies the kind of port. On a Linux
60 * host, typical types are "system" (for a network device such as eth0),
61 * "internal" (for a simulated port used to connect to the TCP/IP stack),
62 * and "gre" (for a GRE tunnel).
63 *
64 * - A Netlink PID for each upcall reading thread (see "Upcall Queuing and
65 * Ordering" below).
66 *
67 * The dpif interface has functions for adding and deleting ports. When a
68 * datapath implements these (e.g. as the Linux and netdev datapaths do), then
69 * Open vSwitch's ovs-vswitchd daemon can directly control what ports are used
70 * for switching. Some datapaths might not implement them, or implement them
71 * with restrictions on the types of ports that can be added or removed
72 * (e.g. on ESX), on systems where port membership can only be changed by some
73 * external entity.
74 *
75 * Each datapath must have a port, sometimes called the "local port", whose
76 * name is the same as the datapath itself, with port number 0. The local port
77 * cannot be deleted.
78 *
79 * Ports are available as "struct netdev"s. To obtain a "struct netdev *" for
80 * a port named 'name' with type 'port_type', in a datapath of type
 * 'datapath_type', call netdev_open(name, dpif_port_open_type(datapath_type,
 * port_type)).  The netdev can be used to get and set important data related to
83 * the port, such as:
84 *
85 * - MTU (netdev_get_mtu(), netdev_set_mtu()).
86 *
87 * - Ethernet address (netdev_get_etheraddr(), netdev_set_etheraddr()).
88 *
89 * - Statistics such as the number of packets and bytes transmitted and
90 * received (netdev_get_stats()).
91 *
92 * - Carrier status (netdev_get_carrier()).
93 *
94 * - Speed (netdev_get_features()).
95 *
96 * - QoS queue configuration (netdev_get_queue(), netdev_set_queue() and
97 * related functions.)
98 *
99 * - Arbitrary port-specific configuration parameters (netdev_get_config(),
100 * netdev_set_config()). An example of such a parameter is the IP
101 * endpoint for a GRE tunnel.
102 *
103 *
104 * Flow Table
105 * ==========
106 *
107 * The flow table is a collection of "flow entries". Each flow entry contains:
108 *
109 * - A "flow", that is, a summary of the headers in an Ethernet packet. The
110 * flow must be unique within the flow table. Flows are fine-grained
111 * entities that include L2, L3, and L4 headers. A single TCP connection
112 * consists of two flows, one in each direction.
113 *
114 * In Open vSwitch userspace, "struct flow" is the typical way to describe
115 * a flow, but the datapath interface uses a different data format to
116 * allow ABI forward- and backward-compatibility. datapath/README.md
117 * describes the rationale and design. Refer to OVS_KEY_ATTR_* and
118 * "struct ovs_key_*" in include/odp-netlink.h for details.
119 * lib/odp-util.h defines several functions for working with these flows.
120 *
121 * - A "mask" that, for each bit in the flow, specifies whether the datapath
122 * should consider the corresponding flow bit when deciding whether a
123 * given packet matches the flow entry. The original datapath design did
124 * not support matching: every flow entry was exact match. With the
125 * addition of a mask, the interface supports datapaths with a spectrum of
126 * wildcard matching capabilities, from those that only support exact
127 * matches to those that support bitwise wildcarding on the entire flow
128 * key, as well as datapaths with capabilities somewhere in between.
129 *
130 * Datapaths do not provide a way to query their wildcarding capabilities,
131 * nor is it expected that the client should attempt to probe for the
132 * details of their support. Instead, a client installs flows with masks
133 * that wildcard as many bits as acceptable. The datapath then actually
134 * wildcards as many of those bits as it can and changes the wildcard bits
135 * that it does not support into exact match bits. A datapath that can
136 * wildcard any bit, for example, would install the supplied mask, an
137 * exact-match only datapath would install an exact-match mask regardless
138 * of what mask the client supplied, and a datapath in the middle of the
139 * spectrum would selectively change some wildcard bits into exact match
140 * bits.
141 *
142 * Regardless of the requested or installed mask, the datapath retains the
143 * original flow supplied by the client. (It does not, for example, "zero
144 * out" the wildcarded bits.) This allows the client to unambiguously
145 * identify the flow entry in later flow table operations.
146 *
147 * The flow table does not have priorities; that is, all flow entries have
148 * equal priority. Detecting overlapping flow entries is expensive in
149 * general, so the datapath is not required to do it. It is primarily the
150 * client's responsibility not to install flow entries whose flow and mask
151 * combinations overlap.
152 *
153 * - A list of "actions" that tell the datapath what to do with packets
154 * within a flow. Some examples of actions are OVS_ACTION_ATTR_OUTPUT,
155 * which transmits the packet out a port, and OVS_ACTION_ATTR_SET, which
156 * modifies packet headers. Refer to OVS_ACTION_ATTR_* and "struct
157 * ovs_action_*" in include/odp-netlink.h for details. lib/odp-util.h
158 * defines several functions for working with datapath actions.
159 *
160 * The actions list may be empty. This indicates that nothing should be
161 * done to matching packets, that is, they should be dropped.
162 *
163 * (In case you are familiar with OpenFlow, datapath actions are analogous
164 * to OpenFlow actions.)
165 *
166 * - Statistics: the number of packets and bytes that the flow has
167 * processed, the last time that the flow processed a packet, and the
168 * union of all the TCP flags in packets processed by the flow. (The
169 * latter is 0 if the flow is not a TCP flow.)
170 *
171 * The datapath's client manages the flow table, primarily in reaction to
172 * "upcalls" (see below).
173 *
174 *
175 * Upcalls
176 * =======
177 *
178 * A datapath sometimes needs to notify its client that a packet was received.
179 * The datapath mechanism to do this is called an "upcall".
180 *
181 * Upcalls are used in two situations:
182 *
183 * - When a packet is received, but there is no matching flow entry in its
184 * flow table (a flow table "miss"), this causes an upcall of type
185 * DPIF_UC_MISS. These are called "miss" upcalls.
186 *
187 * - A datapath action of type OVS_ACTION_ATTR_USERSPACE causes an upcall of
188 * type DPIF_UC_ACTION. These are called "action" upcalls.
189 *
190 * An upcall contains an entire packet. There is no attempt to, e.g., copy
191 * only as much of the packet as normally needed to make a forwarding decision.
192 * Such an optimization is doable, but experimental prototypes showed it to be
193 * of little benefit because an upcall typically contains the first packet of a
194 * flow, which is usually short (e.g. a TCP SYN). Also, the entire packet can
195 * sometimes really be needed.
196 *
197 * After a client reads a given upcall, the datapath is finished with it, that
198 * is, the datapath doesn't maintain any lingering state past that point.
199 *
200 * The latency from the time that a packet arrives at a port to the time that
201 * it is received from dpif_recv() is critical in some benchmarks. For
202 * example, if this latency is 1 ms, then a netperf TCP_CRR test, which opens
203 * and closes TCP connections one at a time as quickly as it can, cannot
204 * possibly achieve more than 500 transactions per second, since every
205 * connection consists of two flows with 1-ms latency to set up each one.
206 *
207 * To receive upcalls, a client has to enable them with dpif_recv_set(). A
208 * datapath should generally support being opened multiple times (e.g. so that
209 * one may run "ovs-dpctl show" or "ovs-dpctl dump-flows" while "ovs-vswitchd"
210 * is also running) but need not support more than one of these clients
211 * enabling upcalls at once.
212 *
213 *
214 * Upcall Queuing and Ordering
215 * ---------------------------
216 *
217 * The datapath's client reads upcalls one at a time by calling dpif_recv().
218 * When more than one upcall is pending, the order in which the datapath
219 * presents upcalls to its client is important. The datapath's client does not
220 * directly control this order, so the datapath implementer must take care
221 * during design.
222 *
223 * The minimal behavior, suitable for initial testing of a datapath
224 * implementation, is that all upcalls are appended to a single queue, which is
225 * delivered to the client in order.
226 *
227 * The datapath should ensure that a high rate of upcalls from one particular
228 * port cannot cause upcalls from other sources to be dropped or unreasonably
229 * delayed. Otherwise, one port conducting a port scan or otherwise initiating
230 * high-rate traffic spanning many flows could suppress other traffic.
231 * Ideally, the datapath should present upcalls from each port in a "round
232 * robin" manner, to ensure fairness.
233 *
234 * The client has no control over "miss" upcalls and no insight into the
235 * datapath's implementation, so the datapath is entirely responsible for
236 * queuing and delivering them. On the other hand, the datapath has
237 * considerable freedom of implementation. One good approach is to maintain a
238 * separate queue for each port, to prevent any given port's upcalls from
239 * interfering with other ports' upcalls. If this is impractical, then another
240 * reasonable choice is to maintain some fixed number of queues and assign each
241 * port to one of them. Ports assigned to the same queue can then interfere
242 * with each other, but not with ports assigned to different queues. Other
243 * approaches are also possible.
244 *
245 * The client has some control over "action" upcalls: it can specify a 32-bit
246 * "Netlink PID" as part of the action. This terminology comes from the Linux
247 * datapath implementation, which uses a protocol called Netlink in which a PID
248 * designates a particular socket and the upcall data is delivered to the
249 * socket's receive queue. Generically, though, a Netlink PID identifies a
250 * queue for upcalls. The basic requirements on the datapath are:
251 *
252 * - The datapath must provide a Netlink PID associated with each port. The
253 * client can retrieve the PID with dpif_port_get_pid().
254 *
255 * - The datapath must provide a "special" Netlink PID not associated with
256 * any port. dpif_port_get_pid() also provides this PID. (ovs-vswitchd
257 * uses this PID to queue special packets that must not be lost even if a
258 * port is otherwise busy, such as packets used for tunnel monitoring.)
259 *
260 * The minimal behavior of dpif_port_get_pid() and the treatment of the Netlink
261 * PID in "action" upcalls is that dpif_port_get_pid() returns a constant value
262 * and all upcalls are appended to a single queue.
263 *
264 * The preferred behavior is:
265 *
266 * - Each port has a PID that identifies the queue used for "miss" upcalls
267 * on that port. (Thus, if each port has its own queue for "miss"
268 * upcalls, then each port has a different Netlink PID.)
269 *
270 * - "miss" upcalls for a given port and "action" upcalls that specify that
271 * port's Netlink PID add their upcalls to the same queue. The upcalls
272 * are delivered to the datapath's client in the order that the packets
273 * were received, regardless of whether the upcalls are "miss" or "action"
274 * upcalls.
275 *
276 * - Upcalls that specify the "special" Netlink PID are queued separately.
277 *
278 * Multiple threads may want to read upcalls simultaneously from a single
279 * datapath. To support multiple threads well, one extends the above preferred
280 * behavior:
281 *
282 * - Each port has multiple PIDs. The datapath distributes "miss" upcalls
283 * across the PIDs, ensuring that a given flow is mapped in a stable way
284 * to a single PID.
285 *
 *    - For "action" upcalls, the thread can specify its own Netlink PID or
 *      another thread's Netlink PID of the same port for offloading purposes
 *      (e.g. in a "round robin" manner).
289 *
290 *
291 * Packet Format
292 * =============
293 *
294 * The datapath interface works with packets in a particular form. This is the
295 * form taken by packets received via upcalls (i.e. by dpif_recv()). Packets
296 * supplied to the datapath for processing (i.e. to dpif_execute()) also take
297 * this form.
298 *
299 * A VLAN tag is represented by an 802.1Q header. If the layer below the
300 * datapath interface uses another representation, then the datapath interface
301 * must perform conversion.
302 *
303 * The datapath interface requires all packets to fit within the MTU. Some
304 * operating systems internally process packets larger than MTU, with features
305 * such as TSO and UFO. When such a packet passes through the datapath
306 * interface, it must be broken into multiple MTU or smaller sized packets for
307 * presentation as upcalls. (This does not happen often, because an upcall
308 * typically contains the first packet of a flow, which is usually short.)
309 *
310 * Some operating system TCP/IP stacks maintain packets in an unchecksummed or
311 * partially checksummed state until transmission. The datapath interface
312 * requires all host-generated packets to be fully checksummed (e.g. IP and TCP
313 * checksums must be correct). On such an OS, the datapath interface must fill
314 * in these checksums.
315 *
316 * Packets passed through the datapath interface must be at least 14 bytes
317 * long, that is, they must have a complete Ethernet header. They are not
318 * required to be padded to the minimum Ethernet length.
319 *
320 *
321 * Typical Usage
322 * =============
323 *
324 * Typically, the client of a datapath begins by configuring the datapath with
325 * a set of ports. Afterward, the client runs in a loop polling for upcalls to
326 * arrive.
327 *
328 * For each upcall received, the client examines the enclosed packet and
329 * figures out what should be done with it. For example, if the client
330 * implements a MAC-learning switch, then it searches the forwarding database
331 * for the packet's destination MAC and VLAN and determines the set of ports to
332 * which it should be sent. In any case, the client composes a set of datapath
333 * actions to properly dispatch the packet and then directs the datapath to
334 * execute those actions on the packet (e.g. with dpif_execute()).
335 *
336 * Most of the time, the actions that the client executed on the packet apply
337 * to every packet with the same flow. For example, the flow includes both
338 * destination MAC and VLAN ID (and much more), so this is true for the
339 * MAC-learning switch example above. In such a case, the client can also
340 * direct the datapath to treat any further packets in the flow in the same
341 * way, using dpif_flow_put() to add a new flow entry.
342 *
343 * Other tasks the client might need to perform, in addition to reacting to
344 * upcalls, include:
345 *
346 * - Periodically polling flow statistics, perhaps to supply to its own
347 * clients.
348 *
349 * - Deleting flow entries from the datapath that haven't been used
350 * recently, to save memory.
351 *
352 * - Updating flow entries whose actions should change. For example, if a
353 * MAC learning switch learns that a MAC has moved, then it must update
354 * the actions of flow entries that sent packets to the MAC at its old
355 * location.
356 *
357 * - Adding and removing ports to achieve a new configuration.
358 *
359 *
360 * Thread-safety
361 * =============
362 *
363 * Most of the dpif functions are fully thread-safe: they may be called from
364 * any number of threads on the same or different dpif objects. The exceptions
365 * are:
366 *
367 * - dpif_port_poll() and dpif_port_poll_wait() are conditionally
368 * thread-safe: they may be called from different threads only on
369 * different dpif objects.
370 *
371 * - dpif_flow_dump_next() is conditionally thread-safe: It may be called
372 * from different threads with the same 'struct dpif_flow_dump', but all
373 * other parameters must be different for each thread.
374 *
375 * - dpif_flow_dump_done() is conditionally thread-safe: All threads that
376 * share the same 'struct dpif_flow_dump' must have finished using it.
377 * This function must then be called exactly once for a particular
378 * dpif_flow_dump to finish the corresponding flow dump operation.
379 *
380 * - Functions that operate on 'struct dpif_port_dump' are conditionally
381 * thread-safe with respect to those objects. That is, one may dump ports
382 * from any number of threads at once, but each thread must use its own
383 * struct dpif_port_dump.
384 */
385 #ifndef DPIF_H
386 #define DPIF_H 1
387
388 #include <stdbool.h>
389 #include <stddef.h>
390 #include <stdint.h>
391
392 #include "dpdk.h"
393 #include "netdev.h"
394 #include "dp-packet.h"
395 #include "openflow/openflow.h"
396 #include "ovs-numa.h"
397 #include "packets.h"
398 #include "util.h"
399
400 #ifdef __cplusplus
401 extern "C" {
402 #endif
403
404 struct dpif;
405 struct dpif_class;
406 struct dpif_flow;
407 struct ds;
408 struct flow;
409 struct flow_wildcards;
410 struct nlattr;
411 struct sset;
412
/* Registration, unregistration, and enumeration of datapath providers
 * ("dpif classes"). */
int dp_register_provider(const struct dpif_class *);
int dp_unregister_provider(const char *type);
void dp_blacklist_provider(const char *type);
void dp_enumerate_types(struct sset *types);
const char *dpif_normalize_type(const char *);

/* Enumerating and parsing the names of datapaths of a given 'type'. */
int dp_enumerate_names(const char *type, struct sset *names);
void dp_parse_name(const char *datapath_name, char **name, char **type);

/* Opening, creating, and closing datapaths.  A datapath should generally
 * support being opened multiple times (see "Upcalls" above). */
int dpif_open(const char *name, const char *type, struct dpif **);
int dpif_create(const char *name, const char *type, struct dpif **);
int dpif_create_and_open(const char *name, const char *type, struct dpif **);
void dpif_close(struct dpif *);

/* Periodic processing.  NOTE(review): exact run/wait contract (what work
 * dpif_run() performs, and the meaning of its return value) is defined by the
 * implementation -- confirm against the provider before relying on it. */
bool dpif_run(struct dpif *);
void dpif_wait(struct dpif *);

/* Retrieving a datapath's name and type. */
const char *dpif_name(const struct dpif *);
const char *dpif_base_name(const struct dpif *);
const char *dpif_type(const struct dpif *);

/* Deleting a datapath. */
int dpif_delete(struct dpif *);
435
/* Statistics for a dpif as a whole.  Filled in by dpif_get_dp_stats(). */
struct dpif_dp_stats {
    uint64_t n_hit;             /* Number of flow table matches. */
    uint64_t n_missed;          /* Number of flow table misses. */
    uint64_t n_lost;            /* Number of misses not sent to userspace. */
    uint64_t n_flows;           /* Number of flows present. */
    uint64_t n_mask_hit;        /* Number of mega flow masks visited for
                                   flow table matches. */
    uint32_t n_masks;           /* Number of mega flow masks. */
};
/* Retrieves datapath-wide statistics into '*stats'. */
int dpif_get_dp_stats(const struct dpif *, struct dpif_dp_stats *);
447
448 \f
449 /* Port operations. */
450
/* Returns the type to pass to netdev_open() when opening a port of type
 * 'port_type' in a datapath of type 'datapath_type' (see "Ports" above). */
const char *dpif_port_open_type(const char *datapath_type,
                                const char *port_type);
/* Adding and deleting ports (see "Ports" above for restrictions some
 * datapaths place on these operations). */
int dpif_port_add(struct dpif *, struct netdev *, odp_port_t *port_nop);
int dpif_port_del(struct dpif *, odp_port_t port_no);
455
/* A port within a datapath (see "Ports" above).
 *
 * 'name' and 'type' are suitable for passing to netdev_open(). */
struct dpif_port {
    char *name;                 /* Network device name, e.g. "eth0". */
    char *type;                 /* Network device type, e.g. "system". */
    odp_port_t port_no;         /* Port number within datapath. */
};
/* Copying and disposing of a 'struct dpif_port'. */
void dpif_port_clone(struct dpif_port *, const struct dpif_port *);
void dpif_port_destroy(struct dpif_port *);
/* Looking up ports by device name or port number. */
bool dpif_port_exists(const struct dpif *dpif, const char *devname);
int dpif_port_query_by_number(const struct dpif *, odp_port_t port_no,
                              struct dpif_port *);
int dpif_port_query_by_name(const struct dpif *, const char *devname,
                            struct dpif_port *);
/* Copies the name of port 'port_no' into the 'name_size' bytes at 'name'. */
int dpif_port_get_name(struct dpif *, odp_port_t port_no,
                       char *name, size_t name_size);
/* Returns a Netlink PID for 'port_no' (see "Upcall Queuing and Ordering"
 * above).  NOTE(review): 'hash' presumably selects among a port's multiple
 * per-thread PIDs -- confirm against the provider implementations. */
uint32_t dpif_port_get_pid(const struct dpif *, odp_port_t port_no,
                           uint32_t hash);
475
/* State of a port dump, initialized by dpif_port_dump_start() and released by
 * dpif_port_dump_done().  Conditionally thread-safe: each thread must use its
 * own struct dpif_port_dump (see "Thread-safety" above). */
struct dpif_port_dump {
    const struct dpif *dpif;    /* Datapath whose ports are being dumped. */
    int error;                  /* Error status; see dpif_port_dump_done(). */
    void *state;                /* Private state of the dpif implementation. */
};
void dpif_port_dump_start(struct dpif_port_dump *, const struct dpif *);
bool dpif_port_dump_next(struct dpif_port_dump *, struct dpif_port *);
int dpif_port_dump_done(struct dpif_port_dump *);

/* Iterates through each DPIF_PORT in DPIF, using DUMP as state.
 *
 * Arguments all have pointer type.
 *
 * When the dump runs to completion, the macro calls dpif_port_dump_done()
 * itself (in the false branch of the condition).  If you break out of the
 * loop early, then you need to free the dump structure by hand using
 * dpif_port_dump_done(). */
#define DPIF_PORT_FOR_EACH(DPIF_PORT, DUMP, DPIF)   \
    for (dpif_port_dump_start(DUMP, DPIF);          \
         (dpif_port_dump_next(DUMP, DPIF_PORT)      \
          ? true                                    \
          : (dpif_port_dump_done(DUMP), false));    \
        )
497
/* Polling for changes in the datapath's port set.  Conditionally thread-safe:
 * call from different threads only on different dpif objects (see
 * "Thread-safety" above). */
int dpif_port_poll(const struct dpif *, char **devnamep);
void dpif_port_poll_wait(const struct dpif *);
500 \f
501 /* Flow table operations. */
502
/* Statistics for a single flow entry (see "Flow Table" above). */
struct dpif_flow_stats {
    uint64_t n_packets;         /* Number of packets processed by the flow. */
    uint64_t n_bytes;           /* Number of bytes processed by the flow. */
    long long int used;         /* Last time the flow processed a packet. */
    uint16_t tcp_flags;         /* Union of TCP flags in processed packets;
                                 * 0 if the flow is not a TCP flow. */
};

/* Extracting statistics from a packet, and formatting statistics into 'ds'. */
void dpif_flow_stats_extract(const struct flow *, const struct dp_packet *packet,
                             long long int used, struct dpif_flow_stats *);
void dpif_flow_stats_format(const struct dpif_flow_stats *, struct ds *);
513
/* Flags for dpif_flow_put() (see also 'struct dpif_flow_put' below). */
enum dpif_flow_put_flags {
    DPIF_FP_CREATE = 1 << 0,    /* Allow creating a new flow. */
    DPIF_FP_MODIFY = 1 << 1,    /* Allow modifying an existing flow. */
    DPIF_FP_ZERO_STATS = 1 << 2, /* Zero the stats of an existing flow. */
    DPIF_FP_PROBE = 1 << 3      /* Suppress error messages, if any. */
};
520
/* Probes whether the datapath supports a feature; 'name' identifies the probe
 * (NOTE(review): exact probe semantics live in the implementation --
 * confirm).  Related: the DPIF_FP_PROBE flag above. */
bool dpif_probe_feature(struct dpif *, const char *name,
                        const struct ofpbuf *key, const ovs_u128 *ufid);
/* Computes a 128-bit hash of the 'key_len' bytes at 'key' into '*hash'. */
void dpif_flow_hash(const struct dpif *, const void *key, size_t key_len,
                    ovs_u128 *hash);
/* Deletes all flows from the datapath's flow table. */
int dpif_flow_flush(struct dpif *);
/* Single-flow operations.  See the comments on 'struct dpif_flow_put',
 * 'struct dpif_flow_del', and 'struct dpif_flow_get' below for the meaning
 * of the corresponding parameters. */
int dpif_flow_put(struct dpif *, enum dpif_flow_put_flags,
                  const struct nlattr *key, size_t key_len,
                  const struct nlattr *mask, size_t mask_len,
                  const struct nlattr *actions, size_t actions_len,
                  const ovs_u128 *ufid, const unsigned pmd_id,
                  struct dpif_flow_stats *);
int dpif_flow_del(struct dpif *,
                  const struct nlattr *key, size_t key_len,
                  const ovs_u128 *ufid, const unsigned pmd_id,
                  struct dpif_flow_stats *);
int dpif_flow_get(struct dpif *,
                  const struct nlattr *key, size_t key_len,
                  const ovs_u128 *ufid, const unsigned pmd_id,
                  struct ofpbuf *, struct dpif_flow *);
540 \f
541 /* Flow dumping interface
542 * ======================
543 *
544 * This interface allows iteration through all of the flows currently installed
545 * in a datapath. It is somewhat complicated by two requirements:
546 *
547 * - Efficient support for dumping flows in parallel from multiple threads.
548 *
549 * - Allow callers to avoid making unnecessary copies of data returned by
550 * the interface across several flows in cases where the dpif
551 * implementation has to maintain a copy of that information anyhow.
552 * (That is, allow the client visibility into any underlying batching as
553 * part of its own batching.)
554 *
555 *
556 * Usage
557 * -----
558 *
559 * 1. Call dpif_flow_dump_create().
560 * 2. In each thread that participates in the dump (which may be just a single
561 * thread if parallelism isn't important):
562 * (a) Call dpif_flow_dump_thread_create().
563 * (b) Call dpif_flow_dump_next() repeatedly until it returns 0.
564 * (c) Call dpif_flow_dump_thread_destroy().
565 * 3. Call dpif_flow_dump_destroy().
566 *
567 * All error reporting is deferred to the call to dpif_flow_dump_destroy().
568 */
/* Creates and destroys a flow dump (step 1 and 3 of "Usage" above).  If
 * 'terse' is true, it is OK for the dump to skip sending/receiving full flow
 * info (cf. 'terse' in struct dpif_flow_del below). */
struct dpif_flow_dump *dpif_flow_dump_create(const struct dpif *, bool terse);
int dpif_flow_dump_destroy(struct dpif_flow_dump *);

/* Per-thread dump state for threads participating in a flow dump (step 2 of
 * "Usage" above). */
struct dpif_flow_dump_thread *dpif_flow_dump_thread_create(
    struct dpif_flow_dump *);
void dpif_flow_dump_thread_destroy(struct dpif_flow_dump_thread *);

/* 'pmd_id' value meaning "no particular poll mode driver thread"; expands to
 * OVS_CORE_UNSPEC from "ovs-numa.h". */
#define PMD_ID_NULL OVS_CORE_UNSPEC
577
/* A datapath flow as dumped by dpif_flow_dump_next(). */
struct dpif_flow {
    const struct nlattr *key;     /* Flow key, as OVS_KEY_ATTR_* attrs. */
    size_t key_len;               /* 'key' length in bytes. */
    const struct nlattr *mask;    /* Flow mask, as OVS_KEY_ATTR_* attrs. */
    size_t mask_len;              /* 'mask' length in bytes. */
    const struct nlattr *actions; /* Actions, as OVS_ACTION_ATTR_* attrs. */
    size_t actions_len;           /* 'actions' length in bytes. */
    ovs_u128 ufid;                /* Unique flow identifier. */
    bool ufid_present;            /* True if 'ufid' was provided by datapath.*/
    unsigned pmd_id;              /* Datapath poll mode driver id. */
    struct dpif_flow_stats stats; /* Flow statistics. */
};
/* Fetches up to 'max_flows' flows into 'flows[]'.  Returns 0 when the dump is
 * complete (see "Usage" above). */
int dpif_flow_dump_next(struct dpif_flow_dump_thread *,
                        struct dpif_flow *flows, int max_flows);

/* Recommended size in bytes for the 'buffer' used with dpif_flow_get() (see
 * 'struct dpif_flow_get' below). */
#define DPIF_FLOW_BUFSIZE 2048
595 \f
596 /* Operation batching interface.
597 *
598 * Some datapaths are faster at performing N operations together than the same
599 * N operations individually, hence an interface for batching.
600 */
601
/* The kind of a single batched operation (see "Operation batching interface"
 * above); each kind corresponds to one of the operation structs below. */
enum dpif_op_type {
    DPIF_OP_FLOW_PUT = 1,       /* Add or modify a flow (dpif_flow_put). */
    DPIF_OP_FLOW_DEL,           /* Delete a flow (dpif_flow_del). */
    DPIF_OP_EXECUTE,            /* Execute actions on a packet (dpif_execute). */
    DPIF_OP_FLOW_GET,           /* Query a flow entry (dpif_flow_get). */
};
608
/* Add or modify a flow.
 *
 * The flow is specified by the Netlink attributes with types OVS_KEY_ATTR_* in
 * the 'key_len' bytes starting at 'key'.  The associated actions are specified
 * by the Netlink attributes with types OVS_ACTION_ATTR_* in the 'actions_len'
 * bytes starting at 'actions'.
 *
 *   - If the flow's key does not exist in the dpif, then the flow will be
 *     added if 'flags' includes DPIF_FP_CREATE.  Otherwise the operation will
 *     fail with ENOENT.
 *
 *     If the operation succeeds, then 'stats', if nonnull, will be zeroed.
 *
 *   - If the flow's key does exist in the dpif, then the flow's actions will
 *     be updated if 'flags' includes DPIF_FP_MODIFY.  Otherwise the operation
 *     will fail with EEXIST.  If the flow's actions are updated, then its
 *     statistics will be zeroed if 'flags' includes DPIF_FP_ZERO_STATS, and
 *     left as-is otherwise.
 *
 *     If the operation succeeds, then 'stats', if nonnull, will be set to the
 *     flow's statistics before the update.
 *
 *   - If the datapath implements multiple pmd threads, each with its own
 *     flow table, 'pmd_id' should be used to specify the particular polling
 *     thread for the operation.
 */
struct dpif_flow_put {
    /* Input. */
    enum dpif_flow_put_flags flags; /* DPIF_FP_*. */
    const struct nlattr *key;       /* Flow to put. */
    size_t key_len;                 /* Length of 'key' in bytes. */
    const struct nlattr *mask;      /* Mask to put. */
    size_t mask_len;                /* Length of 'mask' in bytes. */
    const struct nlattr *actions;   /* Actions to perform on flow. */
    size_t actions_len;             /* Length of 'actions' in bytes. */
    const ovs_u128 *ufid;           /* Optional unique flow identifier. */
    unsigned pmd_id;                /* Datapath poll mode driver id. */

    /* Output. */
    struct dpif_flow_stats *stats;  /* Optional flow statistics. */
};
650
/* Delete a flow.
 *
 * The flow is specified by the Netlink attributes with types OVS_KEY_ATTR_* in
 * the 'key_len' bytes starting at 'key', or the unique identifier 'ufid'.  If
 * the flow was created using 'ufid', then 'ufid' must be specified to delete
 * the flow.  If both are specified, 'key' will be ignored for flow deletion.
 * Succeeds with status 0 if the flow is deleted, or fails with ENOENT if the
 * dpif does not contain such a flow.
 *
 * Callers should always provide the 'key' to improve dpif logging in the event
 * of errors or unexpected behaviour.
 *
 * If the datapath implements multiple polling threads, each with its own flow
 * table, 'pmd_id' should be used to specify the particular polling thread for
 * the operation.
 *
 * If the operation succeeds, then 'stats', if nonnull, will be set to the
 * flow's statistics before its deletion. */
struct dpif_flow_del {
    /* Input. */
    const struct nlattr *key;       /* Flow to delete. */
    size_t key_len;                 /* Length of 'key' in bytes. */
    const ovs_u128 *ufid;           /* Unique identifier of flow to delete. */
    bool terse;                     /* OK to skip sending/receiving full flow
                                     * info? */
    unsigned pmd_id;                /* Datapath poll mode driver id. */

    /* Output. */
    struct dpif_flow_stats *stats;  /* Optional flow statistics. */
};
681
/* Executes actions on a specified packet.
 *
 * Performs the 'actions_len' bytes of actions in 'actions' on the Ethernet
 * frame in 'packet' and on the packet metadata in 'md'.  May modify both
 * 'packet' and 'md'.
 *
 * Some dpif providers do not implement every action.  The Linux kernel
 * datapath, in particular, does not implement ARP field modification.  If
 * 'needs_help' is true, the dpif layer executes in userspace all of the
 * actions that it can, and for OVS_ACTION_ATTR_OUTPUT and
 * OVS_ACTION_ATTR_USERSPACE actions it passes the packet through to the dpif
 * implementation.
 *
 * This works even if 'actions_len' is too long for a Netlink attribute. */
struct dpif_execute {
    /* Input. */
    const struct nlattr *actions;   /* Actions to execute on packet. */
    size_t actions_len;             /* Length of 'actions' in bytes. */
    bool needs_help;                /* Execute actions in userspace where the
                                     * dpif lacks support?  (See above.) */
    bool probe;                     /* Suppress error messages. */
    unsigned int mtu;               /* Maximum transmission unit to fragment.
                                       0 if not a fragmented packet */
    const struct flow *flow;        /* Flow extracted from 'packet'. */

    /* Input, but possibly modified as a side effect of execution. */
    struct dp_packet *packet;       /* Packet to execute. */
};
709
/* Queries the dpif for a flow entry.
 *
 * The flow is specified by the Netlink attributes with types OVS_KEY_ATTR_* in
 * the 'key_len' bytes starting at 'key', or the unique identifier 'ufid'.  If
 * the flow was created using 'ufid', then 'ufid' must be specified to fetch
 * the flow.  If both are specified, 'key' will be ignored for the flow query.
 * 'buffer' must point to an initialized buffer, with a recommended size of
 * DPIF_FLOW_BUFSIZE bytes.
 *
 * On success, 'flow' will be populated with the mask, actions and stats for
 * the datapath flow corresponding to 'key'.  The mask and actions may point
 * within '*buffer', or may point at RCU-protected data.  Therefore, callers
 * that wish to hold these over quiescent periods must make a copy of these
 * fields before quiescing.
 *
 * Callers should always provide 'key' to improve dpif logging in the event of
 * errors or unexpected behaviour.
 *
 * If the datapath implements multiple polling threads, each with its own flow
 * table, 'pmd_id' should be used to specify the particular polling thread for
 * the operation.
 *
 * Succeeds with status 0 if the flow is fetched, or fails with ENOENT if no
 * such flow exists.  Other failures are indicated with a positive errno value.
 */
struct dpif_flow_get {
    /* Input. */
    const struct nlattr *key;       /* Flow to get, as OVS_KEY_ATTR_*
                                     * Netlink attributes. */
    size_t key_len;                 /* Length of 'key' in bytes. */
    const ovs_u128 *ufid;           /* Unique identifier of flow to get. */
    unsigned pmd_id;                /* Datapath poll mode driver id. */
    struct ofpbuf *buffer;          /* Storage for output parameters. */

    /* Output. */
    struct dpif_flow *flow;         /* Resulting flow from datapath. */
};
746
/* Synchronously performs the single operation described by 'dpif_execute'
 * (see the struct's comment); returns 0 on success or a positive errno. */
int dpif_execute(struct dpif *, struct dpif_execute *);
748
/* One operation in a batch passed to dpif_operate().
 *
 * 'type' selects which member of 'u' describes the operation.  'error' is
 * presumably filled in with the per-operation result (0 or a positive errno
 * value) by the dpif implementation -- confirm against dpif_operate(). */
struct dpif_op {
    enum dpif_op_type type;     /* Discriminator for 'u'. */
    int error;                  /* Per-operation status. */
    union {
        struct dpif_flow_put flow_put;
        struct dpif_flow_del flow_del;
        struct dpif_execute execute;
        struct dpif_flow_get flow_get;
    } u;
};
759
/* Executes the 'n_ops' operations in 'ops' as a batch (see struct dpif_op). */
void dpif_operate(struct dpif *, struct dpif_op **ops, size_t n_ops);
761 \f
762 /* Upcalls. */
763
/* Reasons the datapath passes a packet up to userspace. */
enum dpif_upcall_type {
    DPIF_UC_MISS,               /* Miss in flow table. */
    DPIF_UC_ACTION,             /* OVS_ACTION_ATTR_USERSPACE action. */
    DPIF_N_UC_TYPES             /* Number of upcall types; not a real type. */
};

/* Returns a human-readable name for an upcall type. */
const char *dpif_upcall_type_to_string(enum dpif_upcall_type);
771
/* A packet passed up from the datapath to userspace.
 *
 * The 'packet', 'key' and 'userdata' may point into data in a buffer
 * provided by the caller, so the buffer should be released only after the
 * upcall processing has been finished.
 *
 * While being processed, the 'packet' may be reallocated, so the packet must
 * be separately released with ofpbuf_uninit().
 */
struct dpif_upcall {
    /* All types. */
    struct dp_packet packet;    /* Packet data.  'dp_packet' should be the
                                 * first member to avoid a hole, because
                                 * 'rte_mbuf' in dp_packet is aligned at least
                                 * on a 64-byte boundary. */
    enum dpif_upcall_type type;
    struct nlattr *key;         /* Flow key. */
    size_t key_len;             /* Length of 'key' in bytes. */
    ovs_u128 ufid;              /* Unique flow identifier for 'key'. */
    struct nlattr *mru;         /* Maximum receive unit. */
    struct nlattr *cutlen;      /* Number of bytes to shrink from the end. */

    /* DPIF_UC_ACTION only. */
    struct nlattr *userdata;    /* Argument to OVS_ACTION_ATTR_USERSPACE. */
    struct nlattr *out_tun_key; /* Output tunnel key. */
    struct nlattr *actions;     /* Argument to OVS_ACTION_ATTR_USERSPACE. */
};
799
/* A callback to notify higher layer of dpif about to be purged, so that
 * higher layer could try reacting to this (e.g. grabbing all flow stats
 * before they are gone).  This function is currently implemented only by
 * dpif-netdev.
 *
 * The caller needs to provide the 'aux' pointer passed down by higher
 * layer from the dpif_register_dp_purge_cb() function and the 'pmd_id' of
 * the polling thread.
 */
typedef void dp_purge_callback(void *aux, unsigned pmd_id);

/* Registers 'cb' with its opaque 'aux' to be invoked on datapath purge. */
void dpif_register_dp_purge_cb(struct dpif *, dp_purge_callback *, void *aux);
812
/* A callback to process an upcall, currently implemented only by dpif-netdev.
 *
 * The caller provides the 'packet' and 'flow' to process, the corresponding
 * 'ufid' as generated by dpif_flow_hash(), the polling thread id 'pmd_id',
 * the 'type' of the upcall, and if 'type' is DPIF_UC_ACTION then the
 * 'userdata' attached to the action.
 *
 * The callback must fill in 'actions' with the datapath actions to apply to
 * 'packet'.  'wc' and 'put_actions' will either be both null or both nonnull.
 * If they are nonnull, then the caller will install a flow entry to process
 * all future packets that match 'flow' and 'wc'; the callback must store a
 * wildcard mask suitable for that purpose into 'wc'.  If the actions to store
 * into the flow entry are the same as 'actions', then the callback may leave
 * 'put_actions' empty; otherwise it must store the desired actions into
 * 'put_actions'.
 *
 * Returns 0 if successful, ENOSPC if the flow limit has been reached and no
 * flow should be installed, or otherwise a positive errno value. */
typedef int upcall_callback(const struct dp_packet *packet,
                            const struct flow *flow,
                            ovs_u128 *ufid,
                            unsigned pmd_id,
                            enum dpif_upcall_type type,
                            const struct nlattr *userdata,
                            struct ofpbuf *actions,
                            struct flow_wildcards *wc,
                            struct ofpbuf *put_actions,
                            void *aux);

/* Registers 'cb' with its opaque 'aux' to process upcalls. */
void dpif_register_upcall_cb(struct dpif *, upcall_callback *, void *aux);
843
/* Upcall configuration and reception. */
int dpif_recv_set(struct dpif *, bool enable);
int dpif_handlers_set(struct dpif *, uint32_t n_handlers);
int dpif_poll_threads_set(struct dpif *, const char *cmask);
int dpif_port_set_config(struct dpif *, odp_port_t, const struct smap *cfg);
/* Receives one upcall for 'handler_id' into '*upcall', using the ofpbuf as
 * backing storage (see struct dpif_upcall's buffer-lifetime comment). */
int dpif_recv(struct dpif *, uint32_t handler_id, struct dpif_upcall *,
              struct ofpbuf *);
void dpif_recv_purge(struct dpif *);
void dpif_recv_wait(struct dpif *, uint32_t handler_id);
void dpif_enable_upcall(struct dpif *);
void dpif_disable_upcall(struct dpif *);

void dpif_print_packet(struct dpif *, struct dpif_upcall *);
856 \f
857 /* Miscellaneous. */
858
/* Stores the datapath's NetFlow engine identifiers into '*engine_type' and
 * '*engine_id'. */
void dpif_get_netflow_ids(const struct dpif *,
                          uint8_t *engine_type, uint8_t *engine_id);

/* Translates 'queue_id' into '*priority'; returns 0 or a positive errno. */
int dpif_queue_to_priority(const struct dpif *, uint32_t queue_id,
                           uint32_t *priority);

/* NOTE(review): returned string is presumably heap-allocated -- confirm
 * whether the caller must free it. */
char *dpif_get_dp_version(const struct dpif *);
bool dpif_supports_tnl_push_pop(const struct dpif *);
867 #ifdef __cplusplus
868 }
869 #endif
870
871 #endif /* dpif.h */