/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2016, 2017 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "dpif-netdev.h"

#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <net/if.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <unistd.h>

#include "bitmap.h"
#include "cmap.h"
#include "conntrack.h"
#include "coverage.h"
#include "ct-dpif.h"
#include "csum.h"
#include "dp-packet.h"
#include "dpif.h"
#include "dpif-netdev-perf.h"
#include "dpif-provider.h"
#include "dummy.h"
#include "fat-rwlock.h"
#include "flow.h"
#include "hmapx.h"
#include "id-pool.h"
#include "latch.h"
#include "netdev.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "netlink.h"
#include "odp-execute.h"
#include "odp-util.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/match.h"
#include "openvswitch/ofp-parse.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/ofpbuf.h"
#include "openvswitch/shash.h"
#include "openvswitch/vlog.h"
#include "ovs-numa.h"
#include "ovs-rcu.h"
#include "packets.h"
#include "openvswitch/poll-loop.h"
#include "pvector.h"
#include "random.h"
#include "seq.h"
#include "smap.h"
#include "sset.h"
#include "timeval.h"
#include "tnl-neigh-cache.h"
#include "tnl-ports.h"
#include "unixctl.h"
#include "util.h"
VLOG_DEFINE_THIS_MODULE(dpif_netdev);

#define FLOW_DUMP_MAX_BATCH 50
/* Use a per-thread recirc_depth to prevent recirculation loops. */
#define MAX_RECIRC_DEPTH 6
DEFINE_STATIC_PER_THREAD_DATA(uint32_t, recirc_depth, 0)

/* Use instant packet send by default. */
#define DEFAULT_TX_FLUSH_INTERVAL 0

/* Configuration parameters. */
enum { MAX_FLOWS = 65536 };     /* Maximum number of flows in flow table. */
enum { MAX_METERS = 65536 };    /* Maximum number of meters. */
enum { MAX_BANDS = 8 };         /* Maximum number of bands per meter. */
enum { N_METER_LOCKS = 64 };    /* Number of locks guarding the meters. */
/* Protects against changes to 'dp_netdevs'. */
static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dp_netdev's. */
static struct shash dp_netdevs OVS_GUARDED_BY(dp_netdev_mutex)
    = SHASH_INITIALIZER(&dp_netdevs);

static struct vlog_rate_limit upcall_rl = VLOG_RATE_LIMIT_INIT(600, 600);

#define DP_NETDEV_CS_SUPPORTED_MASK (CS_NEW | CS_ESTABLISHED | CS_RELATED \
                                     | CS_INVALID | CS_REPLY_DIR | CS_TRACKED \
                                     | CS_SRC_NAT | CS_DST_NAT)
#define DP_NETDEV_CS_UNSUPPORTED_MASK (~(uint32_t)DP_NETDEV_CS_SUPPORTED_MASK)

static struct odp_support dp_netdev_support = {
    .max_vlan_headers = SIZE_MAX,
    .max_mpls_depth = SIZE_MAX,
    .recirc = true,
    .ct_state = true,
    .ct_zone = true,
    .ct_mark = true,
    .ct_label = true,
    .ct_state_nat = true,
    .ct_orig_tuple = true,
    .ct_orig_tuple6 = true,
};

/* Stores a miniflow with inline values. */

struct netdev_flow_key {
    uint32_t hash;      /* Hash function differs for different users. */
    uint32_t len;       /* Length of the following miniflow (incl. map). */
    struct miniflow mf;
    uint64_t buf[FLOW_MAX_PACKET_U64S];
};

/* Exact match cache for frequently used flows
 *
 * The cache uses a 32-bit hash of the packet (which can be the RSS hash) to
 * search its entries for a miniflow that matches exactly the miniflow of the
 * packet.  It stores the 'dpcls_rule' (rule) that matches the miniflow.
 *
 * A cache entry holds a reference to its 'dp_netdev_flow'.
 *
 * A miniflow with a given hash can be in one of EM_FLOW_HASH_SEGS different
 * entries.  The 32-bit hash is split into EM_FLOW_HASH_SEGS values (each of
 * them is EM_FLOW_HASH_SHIFT bits wide and the remainder is thrown away).
 * Each value is the index of a cache entry where the miniflow could be.
 *
 *
 * Thread-safety
 * =============
 *
 * Each pmd_thread has its own private exact match cache.
 * If dp_netdev_input is not called from a pmd thread, a mutex is used.
 */

#define EM_FLOW_HASH_SHIFT 13
#define EM_FLOW_HASH_ENTRIES (1u << EM_FLOW_HASH_SHIFT)
#define EM_FLOW_HASH_MASK (EM_FLOW_HASH_ENTRIES - 1)
#define EM_FLOW_HASH_SEGS 2

/* Default EMC insert probability is 1 / DEFAULT_EM_FLOW_INSERT_INV_PROB. */
#define DEFAULT_EM_FLOW_INSERT_INV_PROB 100
#define DEFAULT_EM_FLOW_INSERT_MIN (UINT32_MAX /                     \
                                    DEFAULT_EM_FLOW_INSERT_INV_PROB)
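
/* Example (illustrative): with the default inverse probability of 100, an
 * EMC insertion is attempted only when a random 32-bit value drawn for the
 * packet is at most UINT32_MAX / 100, i.e. with probability 1/100.  The
 * actual comparison against 'emc_insert_min' happens on the EMC insertion
 * path further below, which is not shown in this excerpt. */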

struct emc_entry {
    struct dp_netdev_flow *flow;
    struct netdev_flow_key key;   /* key.hash used for emc hash value. */
};

struct emc_cache {
    struct emc_entry entries[EM_FLOW_HASH_ENTRIES];
    int sweep_idx;                /* For emc_cache_slow_sweep(). */
};

/* Iterate in the exact match cache through every entry that might contain a
 * miniflow with hash 'HASH'. */
#define EMC_FOR_EACH_POS_WITH_HASH(EMC, CURRENT_ENTRY, HASH)                 \
    for (uint32_t i__ = 0, srch_hash__ = (HASH);                             \
         (CURRENT_ENTRY) = &(EMC)->entries[srch_hash__ & EM_FLOW_HASH_MASK], \
             i__ < EM_FLOW_HASH_SEGS;                                        \
         i__++, srch_hash__ >>= EM_FLOW_HASH_SHIFT)
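
/* Example (illustrative sketch, not the datapath's actual lookup code): a
 * caller probes the at most EM_FLOW_HASH_SEGS candidate entries for a key
 * like this:
 *
 *     struct emc_entry *entry;
 *
 *     EMC_FOR_EACH_POS_WITH_HASH (cache, entry, key->hash) {
 *         if (entry->key.hash == key->hash && emc_entry_alive(entry)) {
 *             ... compare the stored miniflow against the packet's ...
 *         }
 *     }
 *
 * With EM_FLOW_HASH_SHIFT of 13, a 32-bit hash probes
 * entries[hash & EM_FLOW_HASH_MASK] and then
 * entries[(hash >> EM_FLOW_HASH_SHIFT) & EM_FLOW_HASH_MASK]. */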
\f
/* Simple non-wildcarding single-priority classifier. */

/* Time in microseconds between successive optimizations of the dpcls
 * subtable vector. */
#define DPCLS_OPTIMIZATION_INTERVAL 1000000LL

/* Length in microseconds of the interval over which the rxq processing
 * cycles used for rxq to pmd assignment are measured and stored. */
#define PMD_RXQ_INTERVAL_LEN 10000000LL

/* Number of intervals for which cycles are stored
 * and used during rxq to pmd assignment. */
#define PMD_RXQ_INTERVAL_MAX 6
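
/* Example: with the values above, each rxq keeps PMD_RXQ_INTERVAL_MAX (6)
 * intervals of PMD_RXQ_INTERVAL_LEN (10 s) each, so rxq to pmd assignment
 * is based on up to the last 60 seconds of measured processing cycles. */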

struct dpcls {
    struct cmap_node node;      /* Within dp_netdev_pmd_thread.classifiers. */
    odp_port_t in_port;
    struct cmap subtables_map;
    struct pvector subtables;
};

/* A rule to be inserted to the classifier. */
struct dpcls_rule {
    struct cmap_node cmap_node;   /* Within struct dpcls_subtable 'rules'. */
    struct netdev_flow_key *mask; /* Subtable's mask. */
    struct netdev_flow_key flow;  /* Matching key. */
    /* 'flow' must be the last field, additional space is allocated here. */
};

static void dpcls_init(struct dpcls *);
static void dpcls_destroy(struct dpcls *);
static void dpcls_sort_subtable_vector(struct dpcls *);
static void dpcls_insert(struct dpcls *, struct dpcls_rule *,
                         const struct netdev_flow_key *mask);
static void dpcls_remove(struct dpcls *, struct dpcls_rule *);
static bool dpcls_lookup(struct dpcls *cls,
                         const struct netdev_flow_key keys[],
                         struct dpcls_rule **rules, size_t cnt,
                         int *num_lookups_p);
\f
/* Set of supported meter flags. */
#define DP_SUPPORTED_METER_FLAGS_MASK \
    (OFPMF13_STATS | OFPMF13_PKTPS | OFPMF13_KBPS | OFPMF13_BURST)

/* Set of supported meter band types. */
#define DP_SUPPORTED_METER_BAND_TYPES \
    ( 1 << OFPMBT13_DROP )

struct dp_meter_band {
    struct ofputil_meter_band up; /* type, prec_level, pad, rate, burst_size */
    uint32_t bucket; /* In 1/1000 packets (for PKTPS), or in bits (for KBPS). */
    uint64_t packet_count;
    uint64_t byte_count;
};

struct dp_meter {
    uint16_t flags;
    uint16_t n_bands;
    uint32_t max_delta_t;
    uint64_t used;
    uint64_t packet_count;
    uint64_t byte_count;
    struct dp_meter_band bands[];
};

/* Datapath based on the network device interface from netdev.h.
 *
 *
 * Thread-safety
 * =============
 *
 * Some members, marked 'const', are immutable.  Accessing other members
 * requires synchronization, as noted in more detail below.
 *
 * Acquisition order is, from outermost to innermost:
 *
 *    dp_netdev_mutex (global)
 *    port_mutex
 *    non_pmd_mutex
 */
struct dp_netdev {
    const struct dpif_class *const class;
    const char *const name;
    struct dpif *dpif;
    struct ovs_refcount ref_cnt;
    atomic_flag destroyed;

    /* Ports.
     *
     * Any lookup into 'ports' or any access to the dp_netdev_ports found
     * through 'ports' requires taking 'port_mutex'. */
    struct ovs_mutex port_mutex;
    struct hmap ports;
    struct seq *port_seq;       /* Incremented whenever a port changes. */

    /* The time that a packet can wait in output batch for sending. */
    atomic_uint32_t tx_flush_interval;

    /* Meters. */
    struct ovs_mutex meter_locks[N_METER_LOCKS];
    struct dp_meter *meters[MAX_METERS]; /* Meter bands. */

    /* Probability of EMC insertions is a factor of 'emc_insert_min'. */
    OVS_ALIGNED_VAR(CACHE_LINE_SIZE) atomic_uint32_t emc_insert_min;
    /* Enable collection of PMD performance metrics. */
    atomic_bool pmd_perf_metrics;

    /* Protects access to ofproto-dpif-upcall interface during revalidator
     * thread synchronization. */
    struct fat_rwlock upcall_rwlock;
    upcall_callback *upcall_cb; /* Callback function for executing upcalls. */
    void *upcall_aux;

    /* Callback function for notifying the purging of dp flows (during
     * pmd thread reset or deletion). */
    dp_purge_callback *dp_purge_cb;
    void *dp_purge_aux;

    /* Stores all 'struct dp_netdev_pmd_thread's. */
    struct cmap poll_threads;
    /* Id pool for per-thread static_tx_qid. */
    struct id_pool *tx_qid_pool;
    struct ovs_mutex tx_qid_pool_mutex;

    /* Protects access to the 'struct dp_netdev_pmd_thread'
     * instance for the non-pmd thread. */
    struct ovs_mutex non_pmd_mutex;

    /* Each pmd thread will store its pointer to
     * 'struct dp_netdev_pmd_thread' in 'per_pmd_key'. */
    ovsthread_key_t per_pmd_key;

    struct seq *reconfigure_seq;
    uint64_t last_reconfigure_seq;

    /* CPU mask for pinning pmd threads. */
    char *pmd_cmask;

    uint64_t last_tnl_conf_seq;

    struct conntrack conntrack;
};

static void meter_lock(const struct dp_netdev *dp, uint32_t meter_id)
    OVS_ACQUIRES(dp->meter_locks[meter_id % N_METER_LOCKS])
{
    ovs_mutex_lock(&dp->meter_locks[meter_id % N_METER_LOCKS]);
}

static void meter_unlock(const struct dp_netdev *dp, uint32_t meter_id)
    OVS_RELEASES(dp->meter_locks[meter_id % N_METER_LOCKS])
{
    ovs_mutex_unlock(&dp->meter_locks[meter_id % N_METER_LOCKS]);
}
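
/* Illustrative use of the helpers above: each meter id hashes onto one of
 * N_METER_LOCKS mutexes, so two distinct meters may share a lock (e.g. with
 * 64 locks, meter ids 6 and 70 both map to lock 6):
 *
 *     meter_lock(dp, meter_id);
 *     ... read or modify dp->meters[meter_id] ...
 *     meter_unlock(dp, meter_id);
 */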

static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp,
                                                    odp_port_t)
    OVS_REQUIRES(dp->port_mutex);

enum rxq_cycles_counter_type {
    RXQ_CYCLES_PROC_CURR,       /* Cycles spent successfully polling and
                                   processing packets during the current
                                   interval. */
    RXQ_CYCLES_PROC_HIST,       /* Total cycles of all intervals that are used
                                   during rxq to pmd assignment. */
    RXQ_N_CYCLES
};

#define XPS_TIMEOUT 500000LL    /* In microseconds. */

/* Contained by struct dp_netdev_port's 'rxqs' member. */
struct dp_netdev_rxq {
    struct dp_netdev_port *port;
    struct netdev_rxq *rx;
    unsigned core_id;                  /* Core to which this queue should be
                                          pinned.  OVS_CORE_UNSPEC if the
                                          queue doesn't need to be pinned to a
                                          particular core. */
    unsigned intrvl_idx;               /* Write index for 'cycles_intrvl'. */
    struct dp_netdev_pmd_thread *pmd;  /* pmd thread that polls this queue. */
    bool is_vhost;                     /* True if this is an rxq of a vhost
                                          port. */

    /* Counters of cycles spent successfully polling and processing pkts. */
    atomic_ullong cycles[RXQ_N_CYCLES];
    /* We store PMD_RXQ_INTERVAL_MAX intervals of data for an rxq and then
       sum them to yield the cycles used for an rxq. */
    atomic_ullong cycles_intrvl[PMD_RXQ_INTERVAL_MAX];
};

/* A port in a netdev-based datapath. */
struct dp_netdev_port {
    odp_port_t port_no;
    bool dynamic_txqs;          /* If true XPS will be used. */
    bool need_reconfigure;      /* True if we should reconfigure netdev. */
    struct netdev *netdev;
    struct hmap_node node;      /* Node in dp_netdev's 'ports'. */
    struct netdev_saved_flags *sf;
    struct dp_netdev_rxq *rxqs;
    unsigned n_rxq;             /* Number of elements in 'rxqs'. */
    unsigned *txq_used;         /* Number of threads that use each tx queue. */
    struct ovs_mutex txq_used_mutex;
    char *type;                 /* Port type as requested by user. */
    char *rxq_affinity_list;    /* Requested affinity of rx queues. */
};

/* Contained by struct dp_netdev_flow's 'stats' member. */
struct dp_netdev_flow_stats {
    atomic_llong used;             /* Last used time, in monotonic msecs. */
    atomic_ullong packet_count;    /* Number of packets matched. */
    atomic_ullong byte_count;      /* Number of bytes matched. */
    atomic_uint16_t tcp_flags;     /* Bitwise-OR of seen tcp_flags values. */
};

/* A flow in 'dp_netdev_pmd_thread's 'flow_table'.
 *
 *
 * Thread-safety
 * =============
 *
 * Except near the beginning or ending of its lifespan, rule 'rule' belongs to
 * its pmd thread's classifier.  The text below calls this classifier 'cls'.
 *
 * Motivation
 * ----------
 *
 * The thread safety rules described here for "struct dp_netdev_flow" are
 * motivated by two goals:
 *
 *    - Prevent threads that read members of "struct dp_netdev_flow" from
 *      reading bad data due to changes by some thread concurrently modifying
 *      those members.
 *
 *    - Prevent two threads making changes to members of a given "struct
 *      dp_netdev_flow" from interfering with each other.
 *
 *
 * Rules
 * -----
 *
 * A flow 'flow' may be accessed without a risk of being freed during an RCU
 * grace period.  Code that needs to hold onto a flow for a while
 * should try incrementing 'flow->ref_cnt' with dp_netdev_flow_ref().
 *
 * 'flow->ref_cnt' protects 'flow' from being freed.  It doesn't protect the
 * flow from being deleted from 'cls' and it doesn't protect members of 'flow'
 * from modification.
 *
 * Some members, marked 'const', are immutable.  Accessing other members
 * requires synchronization, as noted in more detail below.
 */
struct dp_netdev_flow {
    const struct flow flow;      /* Unmasked flow that created this entry. */
    /* Hash table index by unmasked flow. */
    const struct cmap_node node; /* In owning dp_netdev_pmd_thread's */
                                 /* 'flow_table'. */
    const ovs_u128 ufid;         /* Unique flow identifier. */
    const unsigned pmd_id;       /* The 'core_id' of pmd thread owning this */
                                 /* flow. */

    /* Number of references.
     * The classifier owns one reference.
     * Any thread trying to keep a rule from being freed should hold its own
     * reference. */
    struct ovs_refcount ref_cnt;

    bool dead;

    /* Statistics. */
    struct dp_netdev_flow_stats stats;

    /* Actions. */
    OVSRCU_TYPE(struct dp_netdev_actions *) actions;

    /* While processing a group of input packets, the datapath uses the next
     * member to store a pointer to the output batch for the flow.  It is
     * reset after the batch has been sent out (See dp_netdev_queue_batches(),
     * packet_batch_per_flow_init() and packet_batch_per_flow_execute()). */
    struct packet_batch_per_flow *batch;

    /* Packet classification. */
    struct dpcls_rule cr;        /* In owning dp_netdev's 'cls'. */
    /* 'cr' must be the last member. */
};

static void dp_netdev_flow_unref(struct dp_netdev_flow *);
static bool dp_netdev_flow_ref(struct dp_netdev_flow *);
static int dpif_netdev_flow_from_nlattrs(const struct nlattr *, uint32_t,
                                         struct flow *, bool);
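
/* Typical (illustrative) use of the reference counting rules described
 * above:
 *
 *     if (dp_netdev_flow_ref(flow)) {
 *         ... use 'flow' beyond the current RCU grace period ...
 *         dp_netdev_flow_unref(flow);
 *     }
 *
 * Without taking a reference, 'flow' may only be used safely within the
 * current RCU grace period. */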

/* A set of datapath actions within a "struct dp_netdev_flow".
 *
 *
 * Thread-safety
 * =============
 *
 * A struct dp_netdev_actions 'actions' is protected with RCU. */
struct dp_netdev_actions {
    /* These members are immutable: they do not change during the struct's
     * lifetime. */
    unsigned int size;          /* Size of 'actions', in bytes. */
    struct nlattr actions[];    /* Sequence of OVS_ACTION_ATTR_* attributes. */
};

struct dp_netdev_actions *dp_netdev_actions_create(const struct nlattr *,
                                                   size_t);
struct dp_netdev_actions *dp_netdev_flow_get_actions(
    const struct dp_netdev_flow *);
static void dp_netdev_actions_free(struct dp_netdev_actions *);

struct polled_queue {
    struct dp_netdev_rxq *rxq;
    odp_port_t port_no;
};

/* Contained by struct dp_netdev_pmd_thread's 'poll_list' member. */
struct rxq_poll {
    struct dp_netdev_rxq *rxq;
    struct hmap_node node;
};

/* Contained by struct dp_netdev_pmd_thread's 'send_port_cache',
 * 'tnl_port_cache' or 'tx_ports'. */
struct tx_port {
    struct dp_netdev_port *port;
    int qid;
    long long last_used;
    struct hmap_node node;
    long long flush_time;
    struct dp_packet_batch output_pkts;
    struct dp_netdev_rxq *output_pkts_rxqs[NETDEV_MAX_BURST];
};

/* A set of properties for the current processing loop that is not directly
 * associated with the pmd thread itself, but with the packets being
 * processed or the short-term system configuration (for example, time).
 * Contained by struct dp_netdev_pmd_thread's 'ctx' member. */
struct dp_netdev_pmd_thread_ctx {
    /* Latest measured time.  See 'pmd_thread_ctx_time_update()'. */
    long long now;
    /* RX queue from which last packet was received. */
    struct dp_netdev_rxq *last_rxq;
};
/* PMD: Poll mode drivers.  A PMD accesses devices by polling, to eliminate
 * the performance overhead of interrupt processing.  Therefore netdev cannot
 * implement rx-wait for these devices; dpif-netdev needs to poll these
 * devices to check their receive buffers.  A pmd thread does the polling for
 * devices assigned to itself.
 *
 * DPDK uses PMDs for accessing NICs.
 *
 * Note, the instance with cpu core id NON_PMD_CORE_ID is reserved for
 * I/O of all non-pmd threads.  There will be no actual thread created
 * for that instance.
 *
 * Each struct has its own flow cache and a classifier per managed ingress
 * port.  For packets received on an ingress port, a lookup is done in the
 * corresponding PMD thread's flow cache and, in case of a miss, in the
 * port's classifier.  Packets are executed with the found actions in either
 * case.
 */
struct dp_netdev_pmd_thread {
    struct dp_netdev *dp;
    struct ovs_refcount ref_cnt;    /* Every reference must be refcount'ed. */
    struct cmap_node node;          /* In 'dp->poll_threads'. */

    pthread_cond_t cond;            /* For synchronizing pmd thread reload. */
    struct ovs_mutex cond_mutex;    /* Mutex for condition variable. */

    /* Per thread exact-match cache.  Note, the instance for cpu core
     * NON_PMD_CORE_ID can be accessed by multiple threads, and thus
     * needs to be protected by 'non_pmd_mutex'.  Every other instance
     * will only be accessed by its own pmd thread. */
    struct emc_cache flow_cache;

    /* Flow-Table and classifiers
     *
     * Writers of 'flow_table' must take the 'flow_mutex'.  Corresponding
     * changes to 'classifiers' must be made while still holding the
     * 'flow_mutex'.
     */
    struct ovs_mutex flow_mutex;
    struct cmap flow_table OVS_GUARDED; /* Flow table. */

    /* One classifier per in_port polled by the pmd. */
    struct cmap classifiers;
    /* Periodically sort subtable vectors according to hit frequencies. */
    long long int next_optimization;
    /* End of the next time interval for which processing cycles
       are stored for each polled rxq. */
    long long int rxq_next_cycle_store;

    /* Last interval timestamp. */
    uint64_t intrvl_tsc_prev;
    /* Last interval cycles. */
    atomic_ullong intrvl_cycles;

    /* Current context of the PMD thread. */
    struct dp_netdev_pmd_thread_ctx ctx;

    struct latch exit_latch;        /* For terminating the pmd thread. */
    struct seq *reload_seq;
    uint64_t last_reload_seq;
    atomic_bool reload;             /* Do we need to reload ports? */
    pthread_t thread;
    unsigned core_id;               /* CPU core id of this pmd thread. */
    int numa_id;                    /* numa node id of this pmd thread. */
    bool isolated;

    /* Queue id used by this pmd thread to send packets on all netdevs if
     * XPS is disabled for this netdev.  All static_tx_qid's are unique and
     * less than 'cmap_count(dp->poll_threads)'. */
    uint32_t static_tx_qid;

    /* Number of filled output batches. */
    int n_output_batches;

    struct ovs_mutex port_mutex;    /* Mutex for 'poll_list' and 'tx_ports'. */
    /* List of rx queues to poll. */
    struct hmap poll_list OVS_GUARDED;
    /* Map of 'tx_port's used for transmission.  Written by the main thread,
     * read by the pmd thread. */
    struct hmap tx_ports OVS_GUARDED;

    /* These are thread-local copies of 'tx_ports'.  One contains only tunnel
     * ports (that support push_tunnel/pop_tunnel), the other contains ports
     * with at least one txq (that support send).  A port can be in both.
     *
     * There are two separate maps to make sure that we don't try to execute
     * OUTPUT on a device which has 0 txqs or PUSH/POP on a non-tunnel device.
     *
     * The instances for cpu core NON_PMD_CORE_ID can be accessed by multiple
     * threads, and thus need to be protected by 'non_pmd_mutex'.  Every
     * other instance will only be accessed by its own pmd thread. */
    struct hmap tnl_port_cache;
    struct hmap send_port_cache;

    /* Keep track of detailed PMD performance statistics. */
    struct pmd_perf_stats perf_stats;

    /* Set to true if the pmd thread needs to be reloaded. */
    bool need_reload;
};

/* Interface to netdev-based datapath. */
struct dpif_netdev {
    struct dpif dpif;
    struct dp_netdev *dp;
    uint64_t last_port_seq;
};

static int get_port_by_number(struct dp_netdev *dp, odp_port_t port_no,
                              struct dp_netdev_port **portp)
    OVS_REQUIRES(dp->port_mutex);
static int get_port_by_name(struct dp_netdev *dp, const char *devname,
                            struct dp_netdev_port **portp)
    OVS_REQUIRES(dp->port_mutex);
static void dp_netdev_free(struct dp_netdev *)
    OVS_REQUIRES(dp_netdev_mutex);
static int do_add_port(struct dp_netdev *dp, const char *devname,
                       const char *type, odp_port_t port_no)
    OVS_REQUIRES(dp->port_mutex);
static void do_del_port(struct dp_netdev *dp, struct dp_netdev_port *)
    OVS_REQUIRES(dp->port_mutex);
static int dpif_netdev_open(const struct dpif_class *, const char *name,
                            bool create, struct dpif **);
static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
                                      struct dp_packet_batch *,
                                      bool should_steal,
                                      const struct flow *flow,
                                      const struct nlattr *actions,
                                      size_t actions_len);
static void dp_netdev_input(struct dp_netdev_pmd_thread *,
                            struct dp_packet_batch *, odp_port_t port_no);
static void dp_netdev_recirculate(struct dp_netdev_pmd_thread *,
                                  struct dp_packet_batch *);

static void dp_netdev_disable_upcall(struct dp_netdev *);
static void dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd,
                                    struct dp_netdev *dp, unsigned core_id,
                                    int numa_id);
static void dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_set_nonpmd(struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex);

static void *pmd_thread_main(void *);
static struct dp_netdev_pmd_thread *dp_netdev_get_pmd(struct dp_netdev *dp,
                                                      unsigned core_id);
static struct dp_netdev_pmd_thread *
dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos);
static void dp_netdev_del_pmd(struct dp_netdev *dp,
                              struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_destroy_all_pmds(struct dp_netdev *dp, bool non_pmd);
static void dp_netdev_pmd_clear_ports(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread *pmd,
                                         struct dp_netdev_port *port)
    OVS_REQUIRES(pmd->port_mutex);
static void dp_netdev_del_port_tx_from_pmd(struct dp_netdev_pmd_thread *pmd,
                                           struct tx_port *tx)
    OVS_REQUIRES(pmd->port_mutex);
static void dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd,
                                     struct dp_netdev_rxq *rxq)
    OVS_REQUIRES(pmd->port_mutex);
static void dp_netdev_del_rxq_from_pmd(struct dp_netdev_pmd_thread *pmd,
                                       struct rxq_poll *poll)
    OVS_REQUIRES(pmd->port_mutex);
static int
dp_netdev_pmd_flush_output_packets(struct dp_netdev_pmd_thread *pmd,
                                   bool force);

static void reconfigure_datapath(struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex);
static bool dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd);
static void pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd)
    OVS_REQUIRES(pmd->port_mutex);
static inline void
dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
                           struct polled_queue *poll_list, int poll_cnt);
static void
dp_netdev_rxq_set_cycles(struct dp_netdev_rxq *rx,
                         enum rxq_cycles_counter_type type,
                         unsigned long long cycles);
static uint64_t
dp_netdev_rxq_get_cycles(struct dp_netdev_rxq *rx,
                         enum rxq_cycles_counter_type type);
static void
dp_netdev_rxq_set_intrvl_cycles(struct dp_netdev_rxq *rx,
                                unsigned long long cycles);
static uint64_t
dp_netdev_rxq_get_intrvl_cycles(struct dp_netdev_rxq *rx, unsigned idx);
static void
dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
                               bool purge);
static int dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread *pmd,
                                      struct tx_port *tx);

static inline bool emc_entry_alive(struct emc_entry *ce);
static void emc_clear_entry(struct emc_entry *ce);

static void dp_netdev_request_reconfigure(struct dp_netdev *dp);
static inline bool
pmd_perf_metrics_enabled(const struct dp_netdev_pmd_thread *pmd);

static void
emc_cache_init(struct emc_cache *flow_cache)
{
    int i;

    flow_cache->sweep_idx = 0;
    for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
        flow_cache->entries[i].flow = NULL;
        flow_cache->entries[i].key.hash = 0;
        flow_cache->entries[i].key.len = sizeof(struct miniflow);
        flowmap_init(&flow_cache->entries[i].key.mf.map);
    }
}

static void
emc_cache_uninit(struct emc_cache *flow_cache)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
        emc_clear_entry(&flow_cache->entries[i]);
    }
}

/* Check and clear dead flow references slowly (one entry at each
 * invocation). */
static void
emc_cache_slow_sweep(struct emc_cache *flow_cache)
{
    struct emc_entry *entry = &flow_cache->entries[flow_cache->sweep_idx];

    if (!emc_entry_alive(entry)) {
        emc_clear_entry(entry);
    }
    flow_cache->sweep_idx = (flow_cache->sweep_idx + 1) & EM_FLOW_HASH_MASK;
}
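
/* Example: since each call checks a single entry, a full sweep of the cache
 * takes EM_FLOW_HASH_ENTRIES (8192) invocations; the '& EM_FLOW_HASH_MASK'
 * above wraps 'sweep_idx' back to 0 once the end is reached. */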

/* Updates the time in the PMD thread's context and should be called in three
 * cases:
 *
 *     1. PMD structure initialization:
 *         - dp_netdev_configure_pmd()
 *
 *     2. Before processing of the new packet batch:
 *         - dpif_netdev_execute()
 *         - dp_netdev_process_rxq_port()
 *
 *     3. At least once per polling iteration in main polling threads if no
 *        packets received on current iteration:
 *         - dpif_netdev_run()
 *         - pmd_thread_main()
 *
 * 'pmd->ctx.now' should be used without update in all other cases if
 * possible. */
static inline void
pmd_thread_ctx_time_update(struct dp_netdev_pmd_thread *pmd)
{
    pmd->ctx.now = time_usec();
}

/* Returns true if 'dpif' is a netdev or dummy dpif, false otherwise. */
bool
dpif_is_netdev(const struct dpif *dpif)
{
    return dpif->dpif_class->open == dpif_netdev_open;
}

static struct dpif_netdev *
dpif_netdev_cast(const struct dpif *dpif)
{
    ovs_assert(dpif_is_netdev(dpif));
    return CONTAINER_OF(dpif, struct dpif_netdev, dpif);
}

static struct dp_netdev *
get_dp_netdev(const struct dpif *dpif)
{
    return dpif_netdev_cast(dpif)->dp;
}
\f
enum pmd_info_type {
    PMD_INFO_SHOW_STATS,        /* Show how cpu cycles are spent. */
    PMD_INFO_CLEAR_STATS,       /* Set the cycles count to 0. */
    PMD_INFO_SHOW_RXQ,          /* Show poll lists of pmd threads. */
    PMD_INFO_PERF_SHOW,         /* Show pmd performance details. */
};

static void
format_pmd_thread(struct ds *reply, struct dp_netdev_pmd_thread *pmd)
{
    ds_put_cstr(reply, (pmd->core_id == NON_PMD_CORE_ID)
                       ? "main thread" : "pmd thread");
    if (pmd->numa_id != OVS_NUMA_UNSPEC) {
        ds_put_format(reply, " numa_id %d", pmd->numa_id);
    }
    if (pmd->core_id != OVS_CORE_UNSPEC && pmd->core_id != NON_PMD_CORE_ID) {
        ds_put_format(reply, " core_id %u", pmd->core_id);
    }
    ds_put_cstr(reply, ":\n");
}

static void
pmd_info_show_stats(struct ds *reply,
                    struct dp_netdev_pmd_thread *pmd)
{
    uint64_t stats[PMD_N_STATS];
    uint64_t total_cycles, total_packets;
    double passes_per_pkt = 0;
    double lookups_per_hit = 0;
    double packets_per_batch = 0;

    pmd_perf_read_counters(&pmd->perf_stats, stats);
    total_cycles = stats[PMD_CYCLES_ITER_IDLE]
                   + stats[PMD_CYCLES_ITER_BUSY];
    total_packets = stats[PMD_STAT_RECV];

    format_pmd_thread(reply, pmd);

    if (total_packets > 0) {
        passes_per_pkt = (total_packets + stats[PMD_STAT_RECIRC])
                         / (double) total_packets;
    }
    if (stats[PMD_STAT_MASKED_HIT] > 0) {
        lookups_per_hit = stats[PMD_STAT_MASKED_LOOKUP]
                          / (double) stats[PMD_STAT_MASKED_HIT];
    }
    if (stats[PMD_STAT_SENT_BATCHES] > 0) {
        packets_per_batch = stats[PMD_STAT_SENT_PKTS]
                            / (double) stats[PMD_STAT_SENT_BATCHES];
    }

    ds_put_format(reply,
                  "  packets received: %"PRIu64"\n"
                  "  packet recirculations: %"PRIu64"\n"
                  "  avg. datapath passes per packet: %.02f\n"
                  "  emc hits: %"PRIu64"\n"
                  "  megaflow hits: %"PRIu64"\n"
                  "  avg. subtable lookups per megaflow hit: %.02f\n"
                  "  miss with success upcall: %"PRIu64"\n"
                  "  miss with failed upcall: %"PRIu64"\n"
                  "  avg. packets per output batch: %.02f\n",
                  total_packets, stats[PMD_STAT_RECIRC],
                  passes_per_pkt, stats[PMD_STAT_EXACT_HIT],
                  stats[PMD_STAT_MASKED_HIT], lookups_per_hit,
                  stats[PMD_STAT_MISS], stats[PMD_STAT_LOST],
                  packets_per_batch);

    if (total_cycles == 0) {
        return;
    }

    ds_put_format(reply,
                  "  idle cycles: %"PRIu64" (%.02f%%)\n"
                  "  processing cycles: %"PRIu64" (%.02f%%)\n",
                  stats[PMD_CYCLES_ITER_IDLE],
                  stats[PMD_CYCLES_ITER_IDLE] / (double) total_cycles * 100,
                  stats[PMD_CYCLES_ITER_BUSY],
                  stats[PMD_CYCLES_ITER_BUSY] / (double) total_cycles * 100);

    if (total_packets == 0) {
        return;
    }

    ds_put_format(reply,
                  "  avg cycles per packet: %.02f (%"PRIu64"/%"PRIu64")\n",
                  total_cycles / (double) total_packets,
                  total_cycles, total_packets);

    ds_put_format(reply,
                  "  avg processing cycles per packet: "
                  "%.02f (%"PRIu64"/%"PRIu64")\n",
                  stats[PMD_CYCLES_ITER_BUSY] / (double) total_packets,
                  stats[PMD_CYCLES_ITER_BUSY], total_packets);
}

static void
pmd_info_show_perf(struct ds *reply,
                   struct dp_netdev_pmd_thread *pmd,
                   struct pmd_perf_params *par)
{
    if (pmd->core_id != NON_PMD_CORE_ID) {
        char *time_str =
                xastrftime_msec("%H:%M:%S.###", time_wall_msec(), true);
        long long now = time_msec();
        double duration = (now - pmd->perf_stats.start_ms) / 1000.0;

        ds_put_cstr(reply, "\n");
        ds_put_format(reply, "Time: %s\n", time_str);
        ds_put_format(reply, "Measurement duration: %.3f s\n", duration);
        ds_put_cstr(reply, "\n");
        format_pmd_thread(reply, pmd);
        ds_put_cstr(reply, "\n");
        pmd_perf_format_overall_stats(reply, &pmd->perf_stats, duration);
        if (pmd_perf_metrics_enabled(pmd)) {
            /* Prevent parallel clearing of perf metrics. */
            ovs_mutex_lock(&pmd->perf_stats.clear_mutex);
            if (par->histograms) {
                ds_put_cstr(reply, "\n");
                pmd_perf_format_histograms(reply, &pmd->perf_stats);
            }
            if (par->iter_hist_len > 0) {
                ds_put_cstr(reply, "\n");
                pmd_perf_format_iteration_history(reply, &pmd->perf_stats,
                                                  par->iter_hist_len);
            }
            if (par->ms_hist_len > 0) {
                ds_put_cstr(reply, "\n");
                pmd_perf_format_ms_history(reply, &pmd->perf_stats,
                                           par->ms_hist_len);
            }
            ovs_mutex_unlock(&pmd->perf_stats.clear_mutex);
        }
        free(time_str);
    }
}

static int
compare_poll_list(const void *a_, const void *b_)
{
    const struct rxq_poll *a = a_;
    const struct rxq_poll *b = b_;

    const char *namea = netdev_rxq_get_name(a->rxq->rx);
    const char *nameb = netdev_rxq_get_name(b->rxq->rx);

    int cmp = strcmp(namea, nameb);
    if (!cmp) {
        return netdev_rxq_get_queue_id(a->rxq->rx)
               - netdev_rxq_get_queue_id(b->rxq->rx);
    } else {
        return cmp;
    }
}

static void
sorted_poll_list(struct dp_netdev_pmd_thread *pmd, struct rxq_poll **list,
                 size_t *n)
{
    struct rxq_poll *ret, *poll;
    size_t i;

    *n = hmap_count(&pmd->poll_list);
    if (!*n) {
        ret = NULL;
    } else {
        ret = xcalloc(*n, sizeof *ret);
        i = 0;
        HMAP_FOR_EACH (poll, node, &pmd->poll_list) {
            ret[i] = *poll;
            i++;
        }
        ovs_assert(i == *n);
        qsort(ret, *n, sizeof *ret, compare_poll_list);
    }

    *list = ret;
}

static void
pmd_info_show_rxq(struct ds *reply, struct dp_netdev_pmd_thread *pmd)
{
    if (pmd->core_id != NON_PMD_CORE_ID) {
        struct rxq_poll *list;
        size_t n_rxq;
        uint64_t total_cycles = 0;

        ds_put_format(reply,
                      "pmd thread numa_id %d core_id %u:\n  isolated : %s\n",
                      pmd->numa_id, pmd->core_id, (pmd->isolated)
                                                  ? "true" : "false");

        ovs_mutex_lock(&pmd->port_mutex);
        sorted_poll_list(pmd, &list, &n_rxq);

        /* Get the total pmd cycles for an interval. */
        atomic_read_relaxed(&pmd->intrvl_cycles, &total_cycles);
        /* Estimate the cycles to cover all intervals. */
        total_cycles *= PMD_RXQ_INTERVAL_MAX;

        for (int i = 0; i < n_rxq; i++) {
            struct dp_netdev_rxq *rxq = list[i].rxq;
            const char *name = netdev_rxq_get_name(rxq->rx);
            uint64_t proc_cycles = 0;

            for (int j = 0; j < PMD_RXQ_INTERVAL_MAX; j++) {
                proc_cycles += dp_netdev_rxq_get_intrvl_cycles(rxq, j);
            }
            ds_put_format(reply, "  port: %-16s  queue-id: %2d", name,
                          netdev_rxq_get_queue_id(list[i].rxq->rx));
            ds_put_format(reply, "  pmd usage: ");
            if (total_cycles) {
                ds_put_format(reply, "%2"PRIu64"",
                              proc_cycles * 100 / total_cycles);
                ds_put_cstr(reply, " %");
            } else {
                ds_put_format(reply, "%s", "NOT AVAIL");
            }
            ds_put_cstr(reply, "\n");
        }
        ovs_mutex_unlock(&pmd->port_mutex);
        free(list);
    }
}

static int
compare_poll_thread_list(const void *a_, const void *b_)
{
    const struct dp_netdev_pmd_thread *a, *b;

    a = *(struct dp_netdev_pmd_thread **)a_;
    b = *(struct dp_netdev_pmd_thread **)b_;

    if (a->core_id < b->core_id) {
        return -1;
    }
    if (a->core_id > b->core_id) {
        return 1;
    }
    return 0;
}

/* Create a sorted list of pmds from the dp->poll_threads cmap.  We can use
 * this list, as long as we do not go to quiescent state. */
static void
sorted_poll_thread_list(struct dp_netdev *dp,
                        struct dp_netdev_pmd_thread ***list,
                        size_t *n)
{
    struct dp_netdev_pmd_thread *pmd;
    struct dp_netdev_pmd_thread **pmd_list;
    size_t k = 0, n_pmds;

    n_pmds = cmap_count(&dp->poll_threads);
    pmd_list = xcalloc(n_pmds, sizeof *pmd_list);

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        if (k >= n_pmds) {
            break;
        }
        pmd_list[k++] = pmd;
    }

    qsort(pmd_list, k, sizeof *pmd_list, compare_poll_thread_list);

    *list = pmd_list;
    *n = k;
}

static void
dpif_netdev_pmd_rebalance(struct unixctl_conn *conn, int argc,
                          const char *argv[], void *aux OVS_UNUSED)
{
    struct ds reply = DS_EMPTY_INITIALIZER;
    struct dp_netdev *dp = NULL;

    ovs_mutex_lock(&dp_netdev_mutex);

    if (argc == 2) {
        dp = shash_find_data(&dp_netdevs, argv[1]);
    } else if (shash_count(&dp_netdevs) == 1) {
        /* There's only one datapath. */
        dp = shash_first(&dp_netdevs)->data;
    }

    if (!dp) {
        ovs_mutex_unlock(&dp_netdev_mutex);
        unixctl_command_reply_error(conn,
                                    "please specify an existing datapath");
        return;
    }

    dp_netdev_request_reconfigure(dp);
    ovs_mutex_unlock(&dp_netdev_mutex);
    ds_put_cstr(&reply, "pmd rxq rebalance requested.\n");
    unixctl_command_reply(conn, ds_cstr(&reply));
    ds_destroy(&reply);
}

static void
dpif_netdev_pmd_info(struct unixctl_conn *conn, int argc, const char *argv[],
                     void *aux)
{
    struct ds reply = DS_EMPTY_INITIALIZER;
    struct dp_netdev_pmd_thread **pmd_list;
    struct dp_netdev *dp = NULL;
    enum pmd_info_type type = *(enum pmd_info_type *) aux;
    unsigned int core_id;
    bool filter_on_pmd = false;
    size_t n;

    ovs_mutex_lock(&dp_netdev_mutex);

    while (argc > 1) {
        if (!strcmp(argv[1], "-pmd") && argc > 2) {
            if (str_to_uint(argv[2], 10, &core_id)) {
                filter_on_pmd = true;
            }
            argc -= 2;
            argv += 2;
        } else {
            dp = shash_find_data(&dp_netdevs, argv[1]);
            argc -= 1;
            argv += 1;
        }
    }

    if (!dp) {
        if (shash_count(&dp_netdevs) == 1) {
            /* There's only one datapath. */
            dp = shash_first(&dp_netdevs)->data;
        } else {
            ovs_mutex_unlock(&dp_netdev_mutex);
            unixctl_command_reply_error(conn,
                                        "please specify an existing datapath");
            return;
        }
    }

    sorted_poll_thread_list(dp, &pmd_list, &n);
    for (size_t i = 0; i < n; i++) {
        struct dp_netdev_pmd_thread *pmd = pmd_list[i];
        if (!pmd) {
            break;
        }
        if (filter_on_pmd && pmd->core_id != core_id) {
            continue;
        }
        if (type == PMD_INFO_SHOW_RXQ) {
            pmd_info_show_rxq(&reply, pmd);
        } else if (type == PMD_INFO_CLEAR_STATS) {
            pmd_perf_stats_clear(&pmd->perf_stats);
        } else if (type == PMD_INFO_SHOW_STATS) {
            pmd_info_show_stats(&reply, pmd);
        } else if (type == PMD_INFO_PERF_SHOW) {
            pmd_info_show_perf(&reply, pmd, (struct pmd_perf_params *)aux);
        }
    }
    free(pmd_list);

    ovs_mutex_unlock(&dp_netdev_mutex);

    unixctl_command_reply(conn, ds_cstr(&reply));
    ds_destroy(&reply);
}

static void
pmd_perf_show_cmd(struct unixctl_conn *conn, int argc,
                  const char *argv[],
                  void *aux OVS_UNUSED)
{
    struct pmd_perf_params par;
    long int it_hist = 0, ms_hist = 0;
    par.histograms = true;

    while (argc > 1) {
        if (!strcmp(argv[1], "-nh")) {
            par.histograms = false;
            argc -= 1;
            argv += 1;
        } else if (!strcmp(argv[1], "-it") && argc > 2) {
            it_hist = strtol(argv[2], NULL, 10);
            if (it_hist < 0) {
                it_hist = 0;
            } else if (it_hist > HISTORY_LEN) {
                it_hist = HISTORY_LEN;
            }
            argc -= 2;
            argv += 2;
        } else if (!strcmp(argv[1], "-ms") && argc > 2) {
            ms_hist = strtol(argv[2], NULL, 10);
            if (ms_hist < 0) {
                ms_hist = 0;
            } else if (ms_hist > HISTORY_LEN) {
                ms_hist = HISTORY_LEN;
            }
            argc -= 2;
            argv += 2;
        } else {
            break;
        }
    }
    par.iter_hist_len = it_hist;
    par.ms_hist_len = ms_hist;
    par.command_type = PMD_INFO_PERF_SHOW;
    dpif_netdev_pmd_info(conn, argc, argv, &par);
}
\f
static int
dpif_netdev_init(void)
{
    static enum pmd_info_type show_aux = PMD_INFO_SHOW_STATS,
                              clear_aux = PMD_INFO_CLEAR_STATS,
                              poll_aux = PMD_INFO_SHOW_RXQ;

    unixctl_command_register("dpif-netdev/pmd-stats-show", "[-pmd core] [dp]",
                             0, 3, dpif_netdev_pmd_info,
                             (void *)&show_aux);
    unixctl_command_register("dpif-netdev/pmd-stats-clear", "[-pmd core] [dp]",
                             0, 3, dpif_netdev_pmd_info,
                             (void *)&clear_aux);
    unixctl_command_register("dpif-netdev/pmd-rxq-show", "[-pmd core] [dp]",
                             0, 3, dpif_netdev_pmd_info,
                             (void *)&poll_aux);
    unixctl_command_register("dpif-netdev/pmd-perf-show",
                             "[-nh] [-it iter-history-len]"
                             " [-ms ms-history-len]"
                             " [-pmd core] [dp]",
                             0, 8, pmd_perf_show_cmd,
                             NULL);
    unixctl_command_register("dpif-netdev/pmd-rxq-rebalance", "[dp]",
                             0, 1, dpif_netdev_pmd_rebalance,
                             NULL);
    unixctl_command_register("dpif-netdev/pmd-perf-log-set",
                             "on|off [-b before] [-a after] [-e|-ne] "
                             "[-us usec] [-q qlen]",
                             0, 10, pmd_perf_log_set_cmd,
                             NULL);
    return 0;
}
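
/* The commands registered above are invoked through ovs-appctl, for example
 * (the datapath argument may be omitted when only one datapath exists):
 *
 *     ovs-appctl dpif-netdev/pmd-stats-show
 *     ovs-appctl dpif-netdev/pmd-rxq-show -pmd 3
 */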

static int
dpif_netdev_enumerate(struct sset *all_dps,
                      const struct dpif_class *dpif_class)
{
    struct shash_node *node;

    ovs_mutex_lock(&dp_netdev_mutex);
    SHASH_FOR_EACH(node, &dp_netdevs) {
        struct dp_netdev *dp = node->data;
        if (dpif_class != dp->class) {
            /* 'dp_netdevs' contains both "netdev" and "dummy" dpifs.
             * If the class doesn't match, skip this dpif. */
            continue;
        }
        sset_add(all_dps, node->name);
    }
    ovs_mutex_unlock(&dp_netdev_mutex);

    return 0;
}

static bool
dpif_netdev_class_is_dummy(const struct dpif_class *class)
{
    return class != &dpif_netdev_class;
}

static const char *
dpif_netdev_port_open_type(const struct dpif_class *class, const char *type)
{
    return strcmp(type, "internal") ? type
           : dpif_netdev_class_is_dummy(class) ? "dummy-internal"
           : "tap";
}

static struct dpif *
create_dpif_netdev(struct dp_netdev *dp)
{
    uint16_t netflow_id = hash_string(dp->name, 0);
    struct dpif_netdev *dpif;

    ovs_refcount_ref(&dp->ref_cnt);

    dpif = xmalloc(sizeof *dpif);
    dpif_init(&dpif->dpif, dp->class, dp->name, netflow_id >> 8, netflow_id);
    dpif->dp = dp;
    dpif->last_port_seq = seq_read(dp->port_seq);

    return &dpif->dpif;
}

/* Choose an unused, non-zero port number and return it on success.
 * Return ODPP_NONE on failure. */
static odp_port_t
choose_port(struct dp_netdev *dp, const char *name)
    OVS_REQUIRES(dp->port_mutex)
{
    uint32_t port_no;

    if (dp->class != &dpif_netdev_class) {
        const char *p;
        int start_no = 0;

        /* If the port name begins with "br", start the number search at
         * 100 to make writing tests easier. */
        if (!strncmp(name, "br", 2)) {
            start_no = 100;
        }

        /* If the port name contains a number, try to assign that port number.
         * This can make writing unit tests easier because port numbers are
         * predictable. */
        for (p = name; *p != '\0'; p++) {
            if (isdigit((unsigned char) *p)) {
                port_no = start_no + strtol(p, NULL, 10);
                if (port_no > 0 && port_no != odp_to_u32(ODPP_NONE)
                    && !dp_netdev_lookup_port(dp, u32_to_odp(port_no))) {
                    return u32_to_odp(port_no);
                }
                break;
            }
        }
    }

    for (port_no = 1; port_no <= UINT16_MAX; port_no++) {
        if (!dp_netdev_lookup_port(dp, u32_to_odp(port_no))) {
            return u32_to_odp(port_no);
        }
    }

    return ODPP_NONE;
}
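
/* Examples for choose_port() on a dummy datapath: "eth10" requests port
 * number 10, while "br0" starts the search at 100 and thus requests 100.
 * If the requested number is already taken (or the name contains no
 * digits), the first free number starting from 1 is used instead. */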

static int
create_dp_netdev(const char *name, const struct dpif_class *class,
                 struct dp_netdev **dpp)
    OVS_REQUIRES(dp_netdev_mutex)
{
    struct dp_netdev *dp;
    int error;

    dp = xzalloc(sizeof *dp);
    shash_add(&dp_netdevs, name, dp);

    *CONST_CAST(const struct dpif_class **, &dp->class) = class;
    *CONST_CAST(const char **, &dp->name) = xstrdup(name);
    ovs_refcount_init(&dp->ref_cnt);
    atomic_flag_clear(&dp->destroyed);

    ovs_mutex_init(&dp->port_mutex);
    hmap_init(&dp->ports);
    dp->port_seq = seq_create();
    fat_rwlock_init(&dp->upcall_rwlock);

    dp->reconfigure_seq = seq_create();
    dp->last_reconfigure_seq = seq_read(dp->reconfigure_seq);

    for (int i = 0; i < N_METER_LOCKS; ++i) {
        ovs_mutex_init_adaptive(&dp->meter_locks[i]);
    }

    /* Disable upcalls by default. */
    dp_netdev_disable_upcall(dp);
    dp->upcall_aux = NULL;
    dp->upcall_cb = NULL;

    conntrack_init(&dp->conntrack);

    atomic_init(&dp->emc_insert_min, DEFAULT_EM_FLOW_INSERT_MIN);
    atomic_init(&dp->tx_flush_interval, DEFAULT_TX_FLUSH_INTERVAL);

    cmap_init(&dp->poll_threads);

    ovs_mutex_init(&dp->tx_qid_pool_mutex);
    /* We need 1 Tx queue for each possible core + 1 for non-PMD threads. */
    dp->tx_qid_pool = id_pool_create(0, ovs_numa_get_n_cores() + 1);

    ovs_mutex_init_recursive(&dp->non_pmd_mutex);
    ovsthread_key_create(&dp->per_pmd_key, NULL);

    ovs_mutex_lock(&dp->port_mutex);
    /* non-PMD will be created before all other threads and will
     * allocate static_tx_qid = 0. */
    dp_netdev_set_nonpmd(dp);

    error = do_add_port(dp, name, dpif_netdev_port_open_type(dp->class,
                                                             "internal"),
                        ODPP_LOCAL);
    ovs_mutex_unlock(&dp->port_mutex);
    if (error) {
        dp_netdev_free(dp);
        return error;
    }

    dp->last_tnl_conf_seq = seq_read(tnl_conf_seq);
    *dpp = dp;
    return 0;
}

static void
dp_netdev_request_reconfigure(struct dp_netdev *dp)
{
    seq_change(dp->reconfigure_seq);
}

static bool
dp_netdev_is_reconf_required(struct dp_netdev *dp)
{
    return seq_read(dp->reconfigure_seq) != dp->last_reconfigure_seq;
}

static int
dpif_netdev_open(const struct dpif_class *class, const char *name,
                 bool create, struct dpif **dpifp)
{
    struct dp_netdev *dp;
    int error;

    ovs_mutex_lock(&dp_netdev_mutex);
    dp = shash_find_data(&dp_netdevs, name);
    if (!dp) {
        error = create ? create_dp_netdev(name, class, &dp) : ENODEV;
    } else {
        error = (dp->class != class ? EINVAL
                 : create ? EEXIST
                 : 0);
    }
    if (!error) {
        *dpifp = create_dpif_netdev(dp);
        dp->dpif = *dpifp;
    }
    ovs_mutex_unlock(&dp_netdev_mutex);

    return error;
}

static void
dp_netdev_destroy_upcall_lock(struct dp_netdev *dp)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    /* Check that upcalls are disabled, i.e. that the rwlock is taken. */
    ovs_assert(fat_rwlock_tryrdlock(&dp->upcall_rwlock));

    /* Before freeing a lock we should release it. */
    fat_rwlock_unlock(&dp->upcall_rwlock);
    fat_rwlock_destroy(&dp->upcall_rwlock);
}

static void
dp_delete_meter(struct dp_netdev *dp, uint32_t meter_id)
    OVS_REQUIRES(dp->meter_locks[meter_id % N_METER_LOCKS])
{
    if (dp->meters[meter_id]) {
        free(dp->meters[meter_id]);
        dp->meters[meter_id] = NULL;
    }
}

/* Requires dp_netdev_mutex so that we can't get a new reference to 'dp'
 * through the 'dp_netdevs' shash while freeing 'dp'. */
static void
dp_netdev_free(struct dp_netdev *dp)
    OVS_REQUIRES(dp_netdev_mutex)
{
    struct dp_netdev_port *port, *next;

    shash_find_and_delete(&dp_netdevs, dp->name);

    ovs_mutex_lock(&dp->port_mutex);
    HMAP_FOR_EACH_SAFE (port, next, node, &dp->ports) {
        do_del_port(dp, port);
    }
    ovs_mutex_unlock(&dp->port_mutex);

    dp_netdev_destroy_all_pmds(dp, true);
    cmap_destroy(&dp->poll_threads);

    ovs_mutex_destroy(&dp->tx_qid_pool_mutex);
    id_pool_destroy(dp->tx_qid_pool);

    ovs_mutex_destroy(&dp->non_pmd_mutex);
    ovsthread_key_delete(dp->per_pmd_key);

    conntrack_destroy(&dp->conntrack);

    seq_destroy(dp->reconfigure_seq);

    seq_destroy(dp->port_seq);
    hmap_destroy(&dp->ports);
    ovs_mutex_destroy(&dp->port_mutex);

    /* Upcalls must be disabled at this point. */
    dp_netdev_destroy_upcall_lock(dp);

    int i;

    for (i = 0; i < MAX_METERS; ++i) {
        meter_lock(dp, i);
        dp_delete_meter(dp, i);
        meter_unlock(dp, i);
    }
    for (i = 0; i < N_METER_LOCKS; ++i) {
        ovs_mutex_destroy(&dp->meter_locks[i]);
    }

    free(dp->pmd_cmask);
    free(CONST_CAST(char *, dp->name));
    free(dp);
}

static void
dp_netdev_unref(struct dp_netdev *dp)
{
    if (dp) {
        /* Take dp_netdev_mutex so that, if dp->ref_cnt falls to zero, we can't
         * get a new reference to 'dp' through the 'dp_netdevs' shash. */
        ovs_mutex_lock(&dp_netdev_mutex);
        if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) {
            dp_netdev_free(dp);
        }
        ovs_mutex_unlock(&dp_netdev_mutex);
    }
}

static void
dpif_netdev_close(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    dp_netdev_unref(dp);
    free(dpif);
}

static int
dpif_netdev_destroy(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    if (!atomic_flag_test_and_set(&dp->destroyed)) {
        if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) {
            /* Can't happen: 'dpif' still owns a reference to 'dp'. */
            OVS_NOT_REACHED();
        }
    }

    return 0;
}

/* Add 'n' to the atomic variable 'var' non-atomically and using relaxed
 * load/store semantics.  While the increment is not atomic, the load and
 * store operations are, making it impossible to read inconsistent values.
 *
 * This is used to update thread local stats counters. */
static void
non_atomic_ullong_add(atomic_ullong *var, unsigned long long n)
{
    unsigned long long tmp;

    atomic_read_relaxed(var, &tmp);
    tmp += n;
    atomic_store_relaxed(var, tmp);
}
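
/* Illustrative use of non_atomic_ullong_add(): a pmd thread is the only
 * writer of its own counters, so the non-atomic read-modify-write above is
 * safe, while other threads reading the same counter with
 * atomic_read_relaxed() may observe a slightly stale value but never a
 * torn one. */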

static int
dpif_netdev_get_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_pmd_thread *pmd;
    uint64_t pmd_stats[PMD_N_STATS];

    stats->n_flows = stats->n_hit = stats->n_missed = stats->n_lost = 0;
    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        stats->n_flows += cmap_count(&pmd->flow_table);
        pmd_perf_read_counters(&pmd->perf_stats, pmd_stats);
        stats->n_hit += pmd_stats[PMD_STAT_EXACT_HIT];
        stats->n_hit += pmd_stats[PMD_STAT_MASKED_HIT];
        stats->n_missed += pmd_stats[PMD_STAT_MISS];
        stats->n_lost += pmd_stats[PMD_STAT_LOST];
    }
    stats->n_masks = UINT32_MAX;
    stats->n_mask_hit = UINT64_MAX;

    return 0;
}

static void
dp_netdev_reload_pmd__(struct dp_netdev_pmd_thread *pmd)
{
    if (pmd->core_id == NON_PMD_CORE_ID) {
        ovs_mutex_lock(&pmd->dp->non_pmd_mutex);
        ovs_mutex_lock(&pmd->port_mutex);
        pmd_load_cached_ports(pmd);
        ovs_mutex_unlock(&pmd->port_mutex);
        ovs_mutex_unlock(&pmd->dp->non_pmd_mutex);
        return;
    }

    ovs_mutex_lock(&pmd->cond_mutex);
    seq_change(pmd->reload_seq);
    atomic_store_relaxed(&pmd->reload, true);
    ovs_mutex_cond_wait(&pmd->cond, &pmd->cond_mutex);
    ovs_mutex_unlock(&pmd->cond_mutex);
}

static uint32_t
hash_port_no(odp_port_t port_no)
{
    return hash_int(odp_to_u32(port_no), 0);
}

static int
port_create(const char *devname, const char *type,
            odp_port_t port_no, struct dp_netdev_port **portp)
{
    struct netdev_saved_flags *sf;
    struct dp_netdev_port *port;
    enum netdev_flags flags;
    struct netdev *netdev;
    int error;

    *portp = NULL;

    /* Open and validate network device. */
    error = netdev_open(devname, type, &netdev);
    if (error) {
        return error;
    }
    /* XXX reject non-Ethernet devices */

    netdev_get_flags(netdev, &flags);
    if (flags & NETDEV_LOOPBACK) {
        VLOG_ERR("%s: cannot add a loopback device", devname);
        error = EINVAL;
        goto out;
    }

    error = netdev_turn_flags_on(netdev, NETDEV_PROMISC, &sf);
    if (error) {
        VLOG_ERR("%s: cannot set promisc flag", devname);
        goto out;
    }

    port = xzalloc(sizeof *port);
    port->port_no = port_no;
    port->netdev = netdev;
    port->type = xstrdup(type);
    port->sf = sf;
    port->need_reconfigure = true;
    ovs_mutex_init(&port->txq_used_mutex);

    *portp = port;

    return 0;

out:
    netdev_close(netdev);
    return error;
}

static int
do_add_port(struct dp_netdev *dp, const char *devname, const char *type,
            odp_port_t port_no)
    OVS_REQUIRES(dp->port_mutex)
{
    struct dp_netdev_port *port;
    int error;

    /* Reject devices already in 'dp'. */
    if (!get_port_by_name(dp, devname, &port)) {
        return EEXIST;
    }

    error = port_create(devname, type, port_no, &port);
    if (error) {
        return error;
    }

    hmap_insert(&dp->ports, &port->node, hash_port_no(port_no));
    seq_change(dp->port_seq);

    reconfigure_datapath(dp);

    return 0;
}

static int
dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev,
                     odp_port_t *port_nop)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
    const char *dpif_port;
    odp_port_t port_no;
    int error;

    ovs_mutex_lock(&dp->port_mutex);
    dpif_port = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
    if (*port_nop != ODPP_NONE) {
        port_no = *port_nop;
        error = dp_netdev_lookup_port(dp, *port_nop) ? EBUSY : 0;
    } else {
        port_no = choose_port(dp, dpif_port);
        error = port_no == ODPP_NONE ? EFBIG : 0;
    }
    if (!error) {
        *port_nop = port_no;
        error = do_add_port(dp, dpif_port, netdev_get_type(netdev), port_no);
    }
    ovs_mutex_unlock(&dp->port_mutex);

    return error;
}

static int
dpif_netdev_port_del(struct dpif *dpif, odp_port_t port_no)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    int error;

    ovs_mutex_lock(&dp->port_mutex);
    if (port_no == ODPP_LOCAL) {
        error = EINVAL;
    } else {
        struct dp_netdev_port *port;

        error = get_port_by_number(dp, port_no, &port);
        if (!error) {
            do_del_port(dp, port);
        }
    }
    ovs_mutex_unlock(&dp->port_mutex);

    return error;
}

static bool
is_valid_port_number(odp_port_t port_no)
{
    return port_no != ODPP_NONE;
}

static struct dp_netdev_port *
dp_netdev_lookup_port(const struct dp_netdev *dp, odp_port_t port_no)
    OVS_REQUIRES(dp->port_mutex)
{
    struct dp_netdev_port *port;

    HMAP_FOR_EACH_WITH_HASH (port, node, hash_port_no(port_no), &dp->ports) {
        if (port->port_no == port_no) {
            return port;
        }
    }
    return NULL;
}
1762
1763 static int
1764 get_port_by_number(struct dp_netdev *dp,
1765 odp_port_t port_no, struct dp_netdev_port **portp)
1766 OVS_REQUIRES(dp->port_mutex)
1767 {
1768 if (!is_valid_port_number(port_no)) {
1769 *portp = NULL;
1770 return EINVAL;
1771 } else {
1772 *portp = dp_netdev_lookup_port(dp, port_no);
1773 return *portp ? 0 : ENODEV;
1774 }
1775 }
1776
1777 static void
1778 port_destroy(struct dp_netdev_port *port)
1779 {
1780 if (!port) {
1781 return;
1782 }
1783
1784 netdev_close(port->netdev);
1785 netdev_restore_flags(port->sf);
1786
1787 for (unsigned i = 0; i < port->n_rxq; i++) {
1788 netdev_rxq_close(port->rxqs[i].rx);
1789 }
1790 ovs_mutex_destroy(&port->txq_used_mutex);
1791 free(port->rxq_affinity_list);
1792 free(port->txq_used);
1793 free(port->rxqs);
1794 free(port->type);
1795 free(port);
1796 }
1797
1798 static int
1799 get_port_by_name(struct dp_netdev *dp,
1800 const char *devname, struct dp_netdev_port **portp)
1801 OVS_REQUIRES(dp->port_mutex)
1802 {
1803 struct dp_netdev_port *port;
1804
1805 HMAP_FOR_EACH (port, node, &dp->ports) {
1806 if (!strcmp(netdev_get_name(port->netdev), devname)) {
1807 *portp = port;
1808 return 0;
1809 }
1810 }
1811
1812 /* Callers of dpif_netdev_port_query_by_name() expect ENODEV for a
1813 * non-existent port. */
1814 return ENODEV;
1815 }
1816
1817 /* Returns 'true' if there is a port with a pmd netdev. */
1818 static bool
1819 has_pmd_port(struct dp_netdev *dp)
1820 OVS_REQUIRES(dp->port_mutex)
1821 {
1822 struct dp_netdev_port *port;
1823
1824 HMAP_FOR_EACH (port, node, &dp->ports) {
1825 if (netdev_is_pmd(port->netdev)) {
1826 return true;
1827 }
1828 }
1829
1830 return false;
1831 }
1832
1833 static void
1834 do_del_port(struct dp_netdev *dp, struct dp_netdev_port *port)
1835 OVS_REQUIRES(dp->port_mutex)
1836 {
1837 hmap_remove(&dp->ports, &port->node);
1838 seq_change(dp->port_seq);
1839
1840 reconfigure_datapath(dp);
1841
1842 port_destroy(port);
1843 }
1844
1845 static void
1846 answer_port_query(const struct dp_netdev_port *port,
1847 struct dpif_port *dpif_port)
1848 {
1849 dpif_port->name = xstrdup(netdev_get_name(port->netdev));
1850 dpif_port->type = xstrdup(port->type);
1851 dpif_port->port_no = port->port_no;
1852 }
1853
1854 static int
1855 dpif_netdev_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
1856 struct dpif_port *dpif_port)
1857 {
1858 struct dp_netdev *dp = get_dp_netdev(dpif);
1859 struct dp_netdev_port *port;
1860 int error;
1861
1862 ovs_mutex_lock(&dp->port_mutex);
1863 error = get_port_by_number(dp, port_no, &port);
1864 if (!error && dpif_port) {
1865 answer_port_query(port, dpif_port);
1866 }
1867 ovs_mutex_unlock(&dp->port_mutex);
1868
1869 return error;
1870 }
1871
1872 static int
1873 dpif_netdev_port_query_by_name(const struct dpif *dpif, const char *devname,
1874 struct dpif_port *dpif_port)
1875 {
1876 struct dp_netdev *dp = get_dp_netdev(dpif);
1877 struct dp_netdev_port *port;
1878 int error;
1879
1880 ovs_mutex_lock(&dp->port_mutex);
1881 error = get_port_by_name(dp, devname, &port);
1882 if (!error && dpif_port) {
1883 answer_port_query(port, dpif_port);
1884 }
1885 ovs_mutex_unlock(&dp->port_mutex);
1886
1887 return error;
1888 }
1889
1890 static void
1891 dp_netdev_flow_free(struct dp_netdev_flow *flow)
1892 {
1893 dp_netdev_actions_free(dp_netdev_flow_get_actions(flow));
1894 free(flow);
1895 }
1896
1897 static void dp_netdev_flow_unref(struct dp_netdev_flow *flow)
1898 {
1899 if (ovs_refcount_unref_relaxed(&flow->ref_cnt) == 1) {
1900 ovsrcu_postpone(dp_netdev_flow_free, flow);
1901 }
1902 }
1903
1904 static uint32_t
1905 dp_netdev_flow_hash(const ovs_u128 *ufid)
1906 {
1907 return ufid->u32[0];
1908 }
1909
1910 static inline struct dpcls *
1911 dp_netdev_pmd_lookup_dpcls(struct dp_netdev_pmd_thread *pmd,
1912 odp_port_t in_port)
1913 {
1914 struct dpcls *cls;
1915 uint32_t hash = hash_port_no(in_port);
1916 CMAP_FOR_EACH_WITH_HASH (cls, node, hash, &pmd->classifiers) {
1917 if (cls->in_port == in_port) {
1918 /* Port classifier already exists. */
1919 return cls;
1920 }
1921 }
1922 return NULL;
1923 }
1924
1925 static inline struct dpcls *
1926 dp_netdev_pmd_find_dpcls(struct dp_netdev_pmd_thread *pmd,
1927 odp_port_t in_port)
1928 OVS_REQUIRES(pmd->flow_mutex)
1929 {
1930 struct dpcls *cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port);
1931 uint32_t hash = hash_port_no(in_port);
1932
1933 if (!cls) {
1934 /* Create a new classifier for in_port. */
1935 cls = xmalloc(sizeof(*cls));
1936 dpcls_init(cls);
1937 cls->in_port = in_port;
1938 cmap_insert(&pmd->classifiers, &cls->node, hash);
1939 VLOG_DBG("Creating dpcls %p for in_port %d", cls, in_port);
1940 }
1941 return cls;
1942 }
1943
1944 static void
1945 dp_netdev_pmd_remove_flow(struct dp_netdev_pmd_thread *pmd,
1946 struct dp_netdev_flow *flow)
1947 OVS_REQUIRES(pmd->flow_mutex)
1948 {
1949 struct cmap_node *node = CONST_CAST(struct cmap_node *, &flow->node);
1950 struct dpcls *cls;
1951 odp_port_t in_port = flow->flow.in_port.odp_port;
1952
1953 cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port);
1954 ovs_assert(cls != NULL);
1955 dpcls_remove(cls, &flow->cr);
1956 cmap_remove(&pmd->flow_table, node, dp_netdev_flow_hash(&flow->ufid));
1957 flow->dead = true;
1958
1959 dp_netdev_flow_unref(flow);
1960 }
1961
1962 static void
1963 dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd)
1964 {
1965 struct dp_netdev_flow *netdev_flow;
1966
1967 ovs_mutex_lock(&pmd->flow_mutex);
1968 CMAP_FOR_EACH (netdev_flow, node, &pmd->flow_table) {
1969 dp_netdev_pmd_remove_flow(pmd, netdev_flow);
1970 }
1971 ovs_mutex_unlock(&pmd->flow_mutex);
1972 }
1973
1974 static int
1975 dpif_netdev_flow_flush(struct dpif *dpif)
1976 {
1977 struct dp_netdev *dp = get_dp_netdev(dpif);
1978 struct dp_netdev_pmd_thread *pmd;
1979
1980 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
1981 dp_netdev_pmd_flow_flush(pmd);
1982 }
1983
1984 return 0;
1985 }
1986
1987 struct dp_netdev_port_state {
1988 struct hmap_position position;
1989 char *name;
1990 };
1991
1992 static int
1993 dpif_netdev_port_dump_start(const struct dpif *dpif OVS_UNUSED, void **statep)
1994 {
1995 *statep = xzalloc(sizeof(struct dp_netdev_port_state));
1996 return 0;
1997 }
1998
1999 static int
2000 dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,
2001 struct dpif_port *dpif_port)
2002 {
2003 struct dp_netdev_port_state *state = state_;
2004 struct dp_netdev *dp = get_dp_netdev(dpif);
2005 struct hmap_node *node;
2006 int retval;
2007
2008 ovs_mutex_lock(&dp->port_mutex);
2009 node = hmap_at_position(&dp->ports, &state->position);
2010 if (node) {
2011 struct dp_netdev_port *port;
2012
2013 port = CONTAINER_OF(node, struct dp_netdev_port, node);
2014
2015 free(state->name);
2016 state->name = xstrdup(netdev_get_name(port->netdev));
2017 dpif_port->name = state->name;
2018 dpif_port->type = port->type;
2019 dpif_port->port_no = port->port_no;
2020
2021 retval = 0;
2022 } else {
2023 retval = EOF;
2024 }
2025 ovs_mutex_unlock(&dp->port_mutex);
2026
2027 return retval;
2028 }
2029
2030 static int
2031 dpif_netdev_port_dump_done(const struct dpif *dpif OVS_UNUSED, void *state_)
2032 {
2033 struct dp_netdev_port_state *state = state_;
2034 free(state->name);
2035 free(state);
2036 return 0;
2037 }
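
/* Usage sketch (editorial addition, not in the original source): callers
 * reach these three callbacks through the generic dpif layer, roughly:
 *
 *     void *state;
 *     struct dpif_port port;
 *     dpif_netdev_port_dump_start(dpif, &state);
 *     while (!dpif_netdev_port_dump_next(dpif, state, &port)) {
 *         ... use port.name, port.type, port.port_no ...
 *     }
 *     dpif_netdev_port_dump_done(dpif, state);
 *
 * dump_next() returns 0 for each port and EOF once the table is
 * exhausted, so the loop above visits every port exactly once. */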
2038
2039 static int
2040 dpif_netdev_port_poll(const struct dpif *dpif_, char **devnamep OVS_UNUSED)
2041 {
2042 struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);
2043 uint64_t new_port_seq;
2044 int error;
2045
2046 new_port_seq = seq_read(dpif->dp->port_seq);
2047 if (dpif->last_port_seq != new_port_seq) {
2048 dpif->last_port_seq = new_port_seq;
2049 error = ENOBUFS;
2050 } else {
2051 error = EAGAIN;
2052 }
2053
2054 return error;
2055 }
2056
2057 static void
2058 dpif_netdev_port_poll_wait(const struct dpif *dpif_)
2059 {
2060 struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);
2061
2062 seq_wait(dpif->dp->port_seq, dpif->last_port_seq);
2063 }
2064
2065 static struct dp_netdev_flow *
2066 dp_netdev_flow_cast(const struct dpcls_rule *cr)
2067 {
2068 return cr ? CONTAINER_OF(cr, struct dp_netdev_flow, cr) : NULL;
2069 }
2070
2071 static bool dp_netdev_flow_ref(struct dp_netdev_flow *flow)
2072 {
2073 return ovs_refcount_try_ref_rcu(&flow->ref_cnt);
2074 }
2075
2076 /* netdev_flow_key utilities.
2077 *
2078 * netdev_flow_key is basically a miniflow. We use these functions
2079 * (netdev_flow_key_clone, netdev_flow_key_equal, ...) instead of the miniflow
2080 * functions (miniflow_clone_inline, miniflow_equal, ...), because:
2081 *
2082 * - Since we are dealing exclusively with miniflows created by
2083 * miniflow_extract(), if the map is different the miniflow is different.
2084 * Therefore we can be faster by comparing the map and the miniflow in a
2085 * single memcmp().
2086 * - These functions can be inlined by the compiler. */
2087
2088 /* Given the number of bits set in miniflow's maps, returns the size of the
2089 * 'netdev_flow_key.mf'. */
2090 static inline size_t
2091 netdev_flow_key_size(size_t flow_u64s)
2092 {
2093 return sizeof(struct miniflow) + MINIFLOW_VALUES_SIZE(flow_u64s);
2094 }
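
/* Worked example (editorial, assuming MINIFLOW_VALUES_SIZE(n) expands to
 * n * sizeof(uint64_t)): a miniflow with 5 populated u64 values needs
 * sizeof(struct miniflow) + 40 bytes of storage in 'netdev_flow_key'. */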
2095
2096 static inline bool
2097 netdev_flow_key_equal(const struct netdev_flow_key *a,
2098 const struct netdev_flow_key *b)
2099 {
2100 /* 'b->len' may not be set yet. */
2101 return a->hash == b->hash && !memcmp(&a->mf, &b->mf, a->len);
2102 }
2103
2104 /* Used to compare 'netdev_flow_key' in the exact match cache to a miniflow.
2105 * The maps are compared bitwise, so both 'key->mf' and 'mf' must have been
2106 * generated by miniflow_extract. */
2107 static inline bool
2108 netdev_flow_key_equal_mf(const struct netdev_flow_key *key,
2109 const struct miniflow *mf)
2110 {
2111 return !memcmp(&key->mf, mf, key->len);
2112 }
2113
2114 static inline void
2115 netdev_flow_key_clone(struct netdev_flow_key *dst,
2116 const struct netdev_flow_key *src)
2117 {
2118 memcpy(dst, src,
2119 offsetof(struct netdev_flow_key, mf) + src->len);
2120 }
2121
2122 /* Initialize a netdev_flow_key 'mask' from 'match'. */
2123 static inline void
2124 netdev_flow_mask_init(struct netdev_flow_key *mask,
2125 const struct match *match)
2126 {
2127 uint64_t *dst = miniflow_values(&mask->mf);
2128 struct flowmap fmap;
2129 uint32_t hash = 0;
2130 size_t idx;
2131
2132 /* Only check masks that make sense for the flow. */
2133 flow_wc_map(&match->flow, &fmap);
2134 flowmap_init(&mask->mf.map);
2135
2136 FLOWMAP_FOR_EACH_INDEX(idx, fmap) {
2137 uint64_t mask_u64 = flow_u64_value(&match->wc.masks, idx);
2138
2139 if (mask_u64) {
2140 flowmap_set(&mask->mf.map, idx, 1);
2141 *dst++ = mask_u64;
2142 hash = hash_add64(hash, mask_u64);
2143 }
2144 }
2145
2146 map_t map;
2147
2148 FLOWMAP_FOR_EACH_MAP (map, mask->mf.map) {
2149 hash = hash_add64(hash, map);
2150 }
2151
2152 size_t n = dst - miniflow_get_values(&mask->mf);
2153
2154 mask->hash = hash_finish(hash, n * 8);
2155 mask->len = netdev_flow_key_size(n);
2156 }
2157
2158 /* Initializes 'dst' as a copy of 'flow' masked with 'mask'. */
2159 static inline void
2160 netdev_flow_key_init_masked(struct netdev_flow_key *dst,
2161 const struct flow *flow,
2162 const struct netdev_flow_key *mask)
2163 {
2164 uint64_t *dst_u64 = miniflow_values(&dst->mf);
2165 const uint64_t *mask_u64 = miniflow_get_values(&mask->mf);
2166 uint32_t hash = 0;
2167 uint64_t value;
2168
2169 dst->len = mask->len;
2170 dst->mf = mask->mf; /* Copy maps. */
2171
2172 FLOW_FOR_EACH_IN_MAPS(value, flow, mask->mf.map) {
2173 *dst_u64 = value & *mask_u64++;
2174 hash = hash_add64(hash, *dst_u64++);
2175 }
2176 dst->hash = hash_finish(hash,
2177 (dst_u64 - miniflow_get_values(&dst->mf)) * 8);
2178 }
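
/* Worked example (editorial): with a single-u64 mask of value
 * 0xffff00000000ffff and a flow u64 of 0x123456789abcdef0, the value
 * stored above is 0x123400000000def0. Only masked values feed the hash,
 * so flows that differ solely in wildcarded bits hash identically. */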
2179
2180 /* Iterate through netdev_flow_key TNL u64 values specified by 'FLOWMAP'. */
2181 #define NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(VALUE, KEY, FLOWMAP) \
2182 MINIFLOW_FOR_EACH_IN_FLOWMAP(VALUE, &(KEY)->mf, FLOWMAP)
2183
2184 /* Returns a hash value for the bits of 'key' where there are 1-bits in
2185 * 'mask'. */
2186 static inline uint32_t
2187 netdev_flow_key_hash_in_mask(const struct netdev_flow_key *key,
2188 const struct netdev_flow_key *mask)
2189 {
2190 const uint64_t *p = miniflow_get_values(&mask->mf);
2191 uint32_t hash = 0;
2192 uint64_t value;
2193
2194 NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, key, mask->mf.map) {
2195 hash = hash_add64(hash, value & *p++);
2196 }
2197
2198 return hash_finish(hash, (p - miniflow_get_values(&mask->mf)) * 8);
2199 }
2200
2201 static inline bool
2202 emc_entry_alive(struct emc_entry *ce)
2203 {
2204 return ce->flow && !ce->flow->dead;
2205 }
2206
2207 static void
2208 emc_clear_entry(struct emc_entry *ce)
2209 {
2210 if (ce->flow) {
2211 dp_netdev_flow_unref(ce->flow);
2212 ce->flow = NULL;
2213 }
2214 }
2215
2216 static inline void
2217 emc_change_entry(struct emc_entry *ce, struct dp_netdev_flow *flow,
2218 const struct netdev_flow_key *key)
2219 {
2220 if (ce->flow != flow) {
2221 if (ce->flow) {
2222 dp_netdev_flow_unref(ce->flow);
2223 }
2224
2225 if (dp_netdev_flow_ref(flow)) {
2226 ce->flow = flow;
2227 } else {
2228 ce->flow = NULL;
2229 }
2230 }
2231 if (key) {
2232 netdev_flow_key_clone(&ce->key, key);
2233 }
2234 }
2235
2236 static inline void
2237 emc_insert(struct emc_cache *cache, const struct netdev_flow_key *key,
2238 struct dp_netdev_flow *flow)
2239 {
2240 struct emc_entry *to_be_replaced = NULL;
2241 struct emc_entry *current_entry;
2242
2243 EMC_FOR_EACH_POS_WITH_HASH(cache, current_entry, key->hash) {
2244 if (netdev_flow_key_equal(&current_entry->key, key)) {
2245 /* We found the entry with the 'mf' miniflow */
2246 emc_change_entry(current_entry, flow, NULL);
2247 return;
2248 }
2249
2250 /* Replacement policy: put the flow in an empty (not alive) entry, or,
2251 * failing that, in the candidate entry with the lowest key hash. */
2252 if (!to_be_replaced
2253 || (emc_entry_alive(to_be_replaced)
2254 && !emc_entry_alive(current_entry))
2255 || current_entry->key.hash < to_be_replaced->key.hash) {
2256 to_be_replaced = current_entry;
2257 }
2258 }
2259 /* We didn't find the miniflow in the cache.
2260 * The 'to_be_replaced' entry is where the new flow will be stored */
2261
2262 emc_change_entry(to_be_replaced, flow, key);
2263 }
2264
2265 static inline void
2266 emc_probabilistic_insert(struct dp_netdev_pmd_thread *pmd,
2267 const struct netdev_flow_key *key,
2268 struct dp_netdev_flow *flow)
2269 {
2270 /* Insert an entry into the EMC based on probability value 'min'. By
2271 * default the value is UINT32_MAX / 100, which yields an insertion
2272 * probability of 1/100, i.e. 1%. */
2273
2274 uint32_t min;
2275 atomic_read_relaxed(&pmd->dp->emc_insert_min, &min);
2276
2277 if (min && random_uint32() <= min) {
2278 emc_insert(&pmd->flow_cache, key, flow);
2279 }
2280 }
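
/* Editorial sketch (this helper is not part of the original source): how
 * the 'min' threshold above relates to an inverse probability. A random
 * uint32_t is <= UINT32_MAX / inv_prob roughly once every 'inv_prob'
 * draws, so inv_prob == 100 gives the default ~1% insertion rate. */
static inline uint32_t
emc_insert_threshold_sketch(uint32_t inv_prob)
{
    /* 0 disables insertion entirely, matching 'emc_insert_min' == 0. */
    return inv_prob ? UINT32_MAX / inv_prob : 0;
}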
2281
2282 static inline struct dp_netdev_flow *
2283 emc_lookup(struct emc_cache *cache, const struct netdev_flow_key *key)
2284 {
2285 struct emc_entry *current_entry;
2286
2287 EMC_FOR_EACH_POS_WITH_HASH(cache, current_entry, key->hash) {
2288 if (current_entry->key.hash == key->hash
2289 && emc_entry_alive(current_entry)
2290 && netdev_flow_key_equal_mf(&current_entry->key, &key->mf)) {
2291
2292 /* We found the entry with the 'key->mf' miniflow */
2293 return current_entry->flow;
2294 }
2295 }
2296
2297 return NULL;
2298 }
2299
2300 static struct dp_netdev_flow *
2301 dp_netdev_pmd_lookup_flow(struct dp_netdev_pmd_thread *pmd,
2302 const struct netdev_flow_key *key,
2303 int *lookup_num_p)
2304 {
2305 struct dpcls *cls;
2306 struct dpcls_rule *rule;
2307 odp_port_t in_port = u32_to_odp(MINIFLOW_GET_U32(&key->mf,
2308 in_port.odp_port));
2309 struct dp_netdev_flow *netdev_flow = NULL;
2310
2311 cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port);
2312 if (OVS_LIKELY(cls)) {
2313 dpcls_lookup(cls, key, &rule, 1, lookup_num_p);
2314 netdev_flow = dp_netdev_flow_cast(rule);
2315 }
2316 return netdev_flow;
2317 }
2318
2319 static struct dp_netdev_flow *
2320 dp_netdev_pmd_find_flow(const struct dp_netdev_pmd_thread *pmd,
2321 const ovs_u128 *ufidp, const struct nlattr *key,
2322 size_t key_len)
2323 {
2324 struct dp_netdev_flow *netdev_flow;
2325 struct flow flow;
2326 ovs_u128 ufid;
2327
2328 /* If a UFID is not provided, determine one based on the key. */
2329 if (!ufidp && key && key_len
2330 && !dpif_netdev_flow_from_nlattrs(key, key_len, &flow, false)) {
2331 dpif_flow_hash(pmd->dp->dpif, &flow, sizeof flow, &ufid);
2332 ufidp = &ufid;
2333 }
2334
2335 if (ufidp) {
2336 CMAP_FOR_EACH_WITH_HASH (netdev_flow, node, dp_netdev_flow_hash(ufidp),
2337 &pmd->flow_table) {
2338 if (ovs_u128_equals(netdev_flow->ufid, *ufidp)) {
2339 return netdev_flow;
2340 }
2341 }
2342 }
2343
2344 return NULL;
2345 }
2346
2347 static void
2348 get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow_,
2349 struct dpif_flow_stats *stats)
2350 {
2351 struct dp_netdev_flow *netdev_flow;
2352 unsigned long long n;
2353 long long used;
2354 uint16_t flags;
2355
2356 netdev_flow = CONST_CAST(struct dp_netdev_flow *, netdev_flow_);
2357
2358 atomic_read_relaxed(&netdev_flow->stats.packet_count, &n);
2359 stats->n_packets = n;
2360 atomic_read_relaxed(&netdev_flow->stats.byte_count, &n);
2361 stats->n_bytes = n;
2362 atomic_read_relaxed(&netdev_flow->stats.used, &used);
2363 stats->used = used;
2364 atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags);
2365 stats->tcp_flags = flags;
2366 }
2367
2368 /* Converts to the dpif_flow format, using 'key_buf' and 'mask_buf' for
2369 * storing the netlink-formatted key/mask. 'key_buf' may be the same as
2370 * 'mask_buf'. Actions will be returned without copying, by relying on RCU to
2371 * protect them. */
2372 static void
2373 dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow *netdev_flow,
2374 struct ofpbuf *key_buf, struct ofpbuf *mask_buf,
2375 struct dpif_flow *flow, bool terse)
2376 {
2377 if (terse) {
2378 memset(flow, 0, sizeof *flow);
2379 } else {
2380 struct flow_wildcards wc;
2381 struct dp_netdev_actions *actions;
2382 size_t offset;
2383 struct odp_flow_key_parms odp_parms = {
2384 .flow = &netdev_flow->flow,
2385 .mask = &wc.masks,
2386 .support = dp_netdev_support,
2387 };
2388
2389 miniflow_expand(&netdev_flow->cr.mask->mf, &wc.masks);
2390 /* in_port is exact-matched, but we have left it out of the mask for
2391 * optimization reasons. Add in_port back to the mask. */
2392 wc.masks.in_port.odp_port = ODPP_NONE;
2393
2394 /* Key */
2395 offset = key_buf->size;
2396 flow->key = ofpbuf_tail(key_buf);
2397 odp_flow_key_from_flow(&odp_parms, key_buf);
2398 flow->key_len = key_buf->size - offset;
2399
2400 /* Mask */
2401 offset = mask_buf->size;
2402 flow->mask = ofpbuf_tail(mask_buf);
2403 odp_parms.key_buf = key_buf;
2404 odp_flow_key_from_mask(&odp_parms, mask_buf);
2405 flow->mask_len = mask_buf->size - offset;
2406
2407 /* Actions */
2408 actions = dp_netdev_flow_get_actions(netdev_flow);
2409 flow->actions = actions->actions;
2410 flow->actions_len = actions->size;
2411 }
2412
2413 flow->ufid = netdev_flow->ufid;
2414 flow->ufid_present = true;
2415 flow->pmd_id = netdev_flow->pmd_id;
2416 get_dpif_flow_stats(netdev_flow, &flow->stats);
2417 }
2418
2419 static int
2420 dpif_netdev_mask_from_nlattrs(const struct nlattr *key, uint32_t key_len,
2421 const struct nlattr *mask_key,
2422 uint32_t mask_key_len, const struct flow *flow,
2423 struct flow_wildcards *wc, bool probe)
2424 {
2425 enum odp_key_fitness fitness;
2426
2427 fitness = odp_flow_key_to_mask(mask_key, mask_key_len, wc, flow);
2428 if (fitness) {
2429 if (!probe) {
2430 /* This should not happen: it indicates that
2431 * odp_flow_key_from_mask() and odp_flow_key_to_mask()
2432 * disagree on the acceptable form of a mask. Log the problem
2433 * as an error, with enough details to enable debugging. */
2434 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2435
2436 if (!VLOG_DROP_ERR(&rl)) {
2437 struct ds s;
2438
2439 ds_init(&s);
2440 odp_flow_format(key, key_len, mask_key, mask_key_len, NULL, &s,
2441 true);
2442 VLOG_ERR("internal error parsing flow mask %s (%s)",
2443 ds_cstr(&s), odp_key_fitness_to_string(fitness));
2444 ds_destroy(&s);
2445 }
2446 }
2447
2448 return EINVAL;
2449 }
2450
2451 return 0;
2452 }
2453
2454 static int
2455 dpif_netdev_flow_from_nlattrs(const struct nlattr *key, uint32_t key_len,
2456 struct flow *flow, bool probe)
2457 {
2458 if (odp_flow_key_to_flow(key, key_len, flow)) {
2459 if (!probe) {
2460 /* This should not happen: it indicates that
2461 * odp_flow_key_from_flow() and odp_flow_key_to_flow() disagree on
2462 * the acceptable form of a flow. Log the problem as an error,
2463 * with enough details to enable debugging. */
2464 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2465
2466 if (!VLOG_DROP_ERR(&rl)) {
2467 struct ds s;
2468
2469 ds_init(&s);
2470 odp_flow_format(key, key_len, NULL, 0, NULL, &s, true);
2471 VLOG_ERR("internal error parsing flow key %s", ds_cstr(&s));
2472 ds_destroy(&s);
2473 }
2474 }
2475
2476 return EINVAL;
2477 }
2478
2479 if (flow->ct_state & DP_NETDEV_CS_UNSUPPORTED_MASK) {
2480 return EINVAL;
2481 }
2482
2483 return 0;
2484 }
2485
2486 static int
2487 dpif_netdev_flow_get(const struct dpif *dpif, const struct dpif_flow_get *get)
2488 {
2489 struct dp_netdev *dp = get_dp_netdev(dpif);
2490 struct dp_netdev_flow *netdev_flow;
2491 struct dp_netdev_pmd_thread *pmd;
2492 struct hmapx to_find = HMAPX_INITIALIZER(&to_find);
2493 struct hmapx_node *node;
2494 int error = EINVAL;
2495
2496 if (get->pmd_id == PMD_ID_NULL) {
2497 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
2498 if (dp_netdev_pmd_try_ref(pmd) && !hmapx_add(&to_find, pmd)) {
2499 dp_netdev_pmd_unref(pmd);
2500 }
2501 }
2502 } else {
2503 pmd = dp_netdev_get_pmd(dp, get->pmd_id);
2504 if (!pmd) {
2505 goto out;
2506 }
2507 hmapx_add(&to_find, pmd);
2508 }
2509
2510 if (!hmapx_count(&to_find)) {
2511 goto out;
2512 }
2513
2514 HMAPX_FOR_EACH (node, &to_find) {
2515 pmd = (struct dp_netdev_pmd_thread *) node->data;
2516 netdev_flow = dp_netdev_pmd_find_flow(pmd, get->ufid, get->key,
2517 get->key_len);
2518 if (netdev_flow) {
2519 dp_netdev_flow_to_dpif_flow(netdev_flow, get->buffer, get->buffer,
2520 get->flow, false);
2521 error = 0;
2522 break;
2523 } else {
2524 error = ENOENT;
2525 }
2526 }
2527
2528 HMAPX_FOR_EACH (node, &to_find) {
2529 pmd = (struct dp_netdev_pmd_thread *) node->data;
2530 dp_netdev_pmd_unref(pmd);
2531 }
2532 out:
2533 hmapx_destroy(&to_find);
2534 return error;
2535 }
2536
2537 static struct dp_netdev_flow *
2538 dp_netdev_flow_add(struct dp_netdev_pmd_thread *pmd,
2539 struct match *match, const ovs_u128 *ufid,
2540 const struct nlattr *actions, size_t actions_len)
2541 OVS_REQUIRES(pmd->flow_mutex)
2542 {
2543 struct dp_netdev_flow *flow;
2544 struct netdev_flow_key mask;
2545 struct dpcls *cls;
2546
2547 /* Make sure in_port is exact-matched before we read it. */
2548 ovs_assert(match->wc.masks.in_port.odp_port == ODPP_NONE);
2549 odp_port_t in_port = match->flow.in_port.odp_port;
2550
2551 /* As we select the dpcls based on the port number, each netdev flow
2552 * belonging to the same dpcls will have the same odp_port value.
2553 * For performance reasons we wildcard odp_port here in the mask. In the
2554 * typical case dp_hash is also wildcarded, and the resulting 8-byte
2555 * chunk {dp_hash, in_port} will be ignored by netdev_flow_mask_init() and
2556 * will not be part of the subtable mask.
2557 * This will speed up the hash computation during dpcls_lookup() because
2558 * there is one less call to hash_add64() in this case. */
2559 match->wc.masks.in_port.odp_port = 0;
2560 netdev_flow_mask_init(&mask, match);
2561 match->wc.masks.in_port.odp_port = ODPP_NONE;
2562
2563 /* Make sure wc does not have metadata. */
2564 ovs_assert(!FLOWMAP_HAS_FIELD(&mask.mf.map, metadata)
2565 && !FLOWMAP_HAS_FIELD(&mask.mf.map, regs));
2566
2567 /* Do not allocate extra space. */
2568 flow = xmalloc(sizeof *flow - sizeof flow->cr.flow.mf + mask.len);
2569 memset(&flow->stats, 0, sizeof flow->stats);
2570 flow->dead = false;
2571 flow->batch = NULL;
2572 *CONST_CAST(unsigned *, &flow->pmd_id) = pmd->core_id;
2573 *CONST_CAST(struct flow *, &flow->flow) = match->flow;
2574 *CONST_CAST(ovs_u128 *, &flow->ufid) = *ufid;
2575 ovs_refcount_init(&flow->ref_cnt);
2576 ovsrcu_set(&flow->actions, dp_netdev_actions_create(actions, actions_len));
2577
2578 netdev_flow_key_init_masked(&flow->cr.flow, &match->flow, &mask);
2579
2580 /* Select dpcls for in_port. Relies on in_port to be exact match. */
2581 cls = dp_netdev_pmd_find_dpcls(pmd, in_port);
2582 dpcls_insert(cls, &flow->cr, &mask);
2583
2584 cmap_insert(&pmd->flow_table, CONST_CAST(struct cmap_node *, &flow->node),
2585 dp_netdev_flow_hash(&flow->ufid));
2586
2587 if (OVS_UNLIKELY(!VLOG_DROP_DBG((&upcall_rl)))) {
2588 struct ds ds = DS_EMPTY_INITIALIZER;
2589 struct ofpbuf key_buf, mask_buf;
2590 struct odp_flow_key_parms odp_parms = {
2591 .flow = &match->flow,
2592 .mask = &match->wc.masks,
2593 .support = dp_netdev_support,
2594 };
2595
2596 ofpbuf_init(&key_buf, 0);
2597 ofpbuf_init(&mask_buf, 0);
2598
2599 odp_flow_key_from_flow(&odp_parms, &key_buf);
2600 odp_parms.key_buf = &key_buf;
2601 odp_flow_key_from_mask(&odp_parms, &mask_buf);
2602
2603 ds_put_cstr(&ds, "flow_add: ");
2604 odp_format_ufid(ufid, &ds);
2605 ds_put_cstr(&ds, " ");
2606 odp_flow_format(key_buf.data, key_buf.size,
2607 mask_buf.data, mask_buf.size,
2608 NULL, &ds, false);
2609 ds_put_cstr(&ds, ", actions:");
2610 format_odp_actions(&ds, actions, actions_len, NULL);
2611
2612 VLOG_DBG("%s", ds_cstr(&ds));
2613
2614 ofpbuf_uninit(&key_buf);
2615 ofpbuf_uninit(&mask_buf);
2616
2617 /* Add a printout of the actual match installed. */
2618 struct match m;
2619 ds_clear(&ds);
2620 ds_put_cstr(&ds, "flow match: ");
2621 miniflow_expand(&flow->cr.flow.mf, &m.flow);
2622 miniflow_expand(&flow->cr.mask->mf, &m.wc.masks);
2623 memset(&m.tun_md, 0, sizeof m.tun_md);
2624 match_format(&m, NULL, &ds, OFP_DEFAULT_PRIORITY);
2625
2626 VLOG_DBG("%s", ds_cstr(&ds));
2627
2628 ds_destroy(&ds);
2629 }
2630
2631 return flow;
2632 }
2633
2634 static int
2635 flow_put_on_pmd(struct dp_netdev_pmd_thread *pmd,
2636 struct netdev_flow_key *key,
2637 struct match *match,
2638 ovs_u128 *ufid,
2639 const struct dpif_flow_put *put,
2640 struct dpif_flow_stats *stats)
2641 {
2642 struct dp_netdev_flow *netdev_flow;
2643 int error = 0;
2644
2645 if (stats) {
2646 memset(stats, 0, sizeof *stats);
2647 }
2648
2649 ovs_mutex_lock(&pmd->flow_mutex);
2650 netdev_flow = dp_netdev_pmd_lookup_flow(pmd, key, NULL);
2651 if (!netdev_flow) {
2652 if (put->flags & DPIF_FP_CREATE) {
2653 if (cmap_count(&pmd->flow_table) < MAX_FLOWS) {
2654 dp_netdev_flow_add(pmd, match, ufid, put->actions,
2655 put->actions_len);
2656 error = 0;
2657 } else {
2658 error = EFBIG;
2659 }
2660 } else {
2661 error = ENOENT;
2662 }
2663 } else {
2664 if (put->flags & DPIF_FP_MODIFY) {
2665 struct dp_netdev_actions *new_actions;
2666 struct dp_netdev_actions *old_actions;
2667
2668 new_actions = dp_netdev_actions_create(put->actions,
2669 put->actions_len);
2670
2671 old_actions = dp_netdev_flow_get_actions(netdev_flow);
2672 ovsrcu_set(&netdev_flow->actions, new_actions);
2673
2674 if (stats) {
2675 get_dpif_flow_stats(netdev_flow, stats);
2676 }
2677 if (put->flags & DPIF_FP_ZERO_STATS) {
2678 /* XXX: The userspace datapath uses thread local statistics
2679 * (for flows), which should be updated only by the owning
2680 * thread. Since we cannot write on stats memory here,
2681 * we choose not to support this flag. Please note:
2682 * - This feature is currently used only by dpctl commands with
2683 * option --clear.
2684 * - Should the need arise, this operation can be implemented
2685 * by keeping a base value (to be updated here) for each
2686 * counter and subtracting it before outputting the stats. */
2687 error = EOPNOTSUPP;
2688 }
2689
2690 ovsrcu_postpone(dp_netdev_actions_free, old_actions);
2691 } else if (put->flags & DPIF_FP_CREATE) {
2692 error = EEXIST;
2693 } else {
2694 /* Overlapping flow. */
2695 error = EINVAL;
2696 }
2697 }
2698 ovs_mutex_unlock(&pmd->flow_mutex);
2699 return error;
2700 }
2701
2702 static int
2703 dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put)
2704 {
2705 struct dp_netdev *dp = get_dp_netdev(dpif);
2706 struct netdev_flow_key key, mask;
2707 struct dp_netdev_pmd_thread *pmd;
2708 struct match match;
2709 ovs_u128 ufid;
2710 int error;
2711 bool probe = put->flags & DPIF_FP_PROBE;
2712
2713 if (put->stats) {
2714 memset(put->stats, 0, sizeof *put->stats);
2715 }
2716 error = dpif_netdev_flow_from_nlattrs(put->key, put->key_len, &match.flow,
2717 probe);
2718 if (error) {
2719 return error;
2720 }
2721 error = dpif_netdev_mask_from_nlattrs(put->key, put->key_len,
2722 put->mask, put->mask_len,
2723 &match.flow, &match.wc, probe);
2724 if (error) {
2725 return error;
2726 }
2727
2728 if (put->ufid) {
2729 ufid = *put->ufid;
2730 } else {
2731 dpif_flow_hash(dpif, &match.flow, sizeof match.flow, &ufid);
2732 }
2733
2734 /* Must produce a netdev_flow_key for lookup.
2735 * Use the same method that was employed to create the key when adding
2736 * the flow to the dpcls, to make sure they match. */
2737 netdev_flow_mask_init(&mask, &match);
2738 netdev_flow_key_init_masked(&key, &match.flow, &mask);
2739
2740 if (put->pmd_id == PMD_ID_NULL) {
2741 if (cmap_count(&dp->poll_threads) == 0) {
2742 return EINVAL;
2743 }
2744 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
2745 struct dpif_flow_stats pmd_stats;
2746 int pmd_error;
2747
2748 pmd_error = flow_put_on_pmd(pmd, &key, &match, &ufid, put,
2749 &pmd_stats);
2750 if (pmd_error) {
2751 error = pmd_error;
2752 } else if (put->stats) {
2753 put->stats->n_packets += pmd_stats.n_packets;
2754 put->stats->n_bytes += pmd_stats.n_bytes;
2755 put->stats->used = MAX(put->stats->used, pmd_stats.used);
2756 put->stats->tcp_flags |= pmd_stats.tcp_flags;
2757 }
2758 }
2759 } else {
2760 pmd = dp_netdev_get_pmd(dp, put->pmd_id);
2761 if (!pmd) {
2762 return EINVAL;
2763 }
2764 error = flow_put_on_pmd(pmd, &key, &match, &ufid, put, put->stats);
2765 dp_netdev_pmd_unref(pmd);
2766 }
2767
2768 return error;
2769 }
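
/* Hypothetical helper (editorial, not called anywhere): condenses the
 * two-step key construction used above so the recipe is explicit.
 * Building the mask first, then the masked key, guarantees the same
 * 'len' and 'hash' as the key installed by dp_netdev_flow_add(). */
static inline void
build_lookup_key_sketch(const struct match *match,
                        struct netdev_flow_key *key)
{
    struct netdev_flow_key mask;

    netdev_flow_mask_init(&mask, match);
    netdev_flow_key_init_masked(key, &match->flow, &mask);
}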
2770
2771 static int
2772 flow_del_on_pmd(struct dp_netdev_pmd_thread *pmd,
2773 struct dpif_flow_stats *stats,
2774 const struct dpif_flow_del *del)
2775 {
2776 struct dp_netdev_flow *netdev_flow;
2777 int error = 0;
2778
2779 ovs_mutex_lock(&pmd->flow_mutex);
2780 netdev_flow = dp_netdev_pmd_find_flow(pmd, del->ufid, del->key,
2781 del->key_len);
2782 if (netdev_flow) {
2783 if (stats) {
2784 get_dpif_flow_stats(netdev_flow, stats);
2785 }
2786 dp_netdev_pmd_remove_flow(pmd, netdev_flow);
2787 } else {
2788 error = ENOENT;
2789 }
2790 ovs_mutex_unlock(&pmd->flow_mutex);
2791
2792 return error;
2793 }
2794
2795 static int
2796 dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del)
2797 {
2798 struct dp_netdev *dp = get_dp_netdev(dpif);
2799 struct dp_netdev_pmd_thread *pmd;
2800 int error = 0;
2801
2802 if (del->stats) {
2803 memset(del->stats, 0, sizeof *del->stats);
2804 }
2805
2806 if (del->pmd_id == PMD_ID_NULL) {
2807 if (cmap_count(&dp->poll_threads) == 0) {
2808 return EINVAL;
2809 }
2810 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
2811 struct dpif_flow_stats pmd_stats;
2812 int pmd_error;
2813
2814 pmd_error = flow_del_on_pmd(pmd, &pmd_stats, del);
2815 if (pmd_error) {
2816 error = pmd_error;
2817 } else if (del->stats) {
2818 del->stats->n_packets += pmd_stats.n_packets;
2819 del->stats->n_bytes += pmd_stats.n_bytes;
2820 del->stats->used = MAX(del->stats->used, pmd_stats.used);
2821 del->stats->tcp_flags |= pmd_stats.tcp_flags;
2822 }
2823 }
2824 } else {
2825 pmd = dp_netdev_get_pmd(dp, del->pmd_id);
2826 if (!pmd) {
2827 return EINVAL;
2828 }
2829 error = flow_del_on_pmd(pmd, del->stats, del);
2830 dp_netdev_pmd_unref(pmd);
2831 }
2832
2833
2834 return error;
2835 }
2836
2837 struct dpif_netdev_flow_dump {
2838 struct dpif_flow_dump up;
2839 struct cmap_position poll_thread_pos;
2840 struct cmap_position flow_pos;
2841 struct dp_netdev_pmd_thread *cur_pmd;
2842 int status;
2843 struct ovs_mutex mutex;
2844 };
2845
2846 static struct dpif_netdev_flow_dump *
2847 dpif_netdev_flow_dump_cast(struct dpif_flow_dump *dump)
2848 {
2849 return CONTAINER_OF(dump, struct dpif_netdev_flow_dump, up);
2850 }
2851
2852 static struct dpif_flow_dump *
2853 dpif_netdev_flow_dump_create(const struct dpif *dpif_, bool terse,
2854 char *type OVS_UNUSED)
2855 {
2856 struct dpif_netdev_flow_dump *dump;
2857
2858 dump = xzalloc(sizeof *dump);
2859 dpif_flow_dump_init(&dump->up, dpif_);
2860 dump->up.terse = terse;
2861 ovs_mutex_init(&dump->mutex);
2862
2863 return &dump->up;
2864 }
2865
2866 static int
2867 dpif_netdev_flow_dump_destroy(struct dpif_flow_dump *dump_)
2868 {
2869 struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_);
2870
2871 ovs_mutex_destroy(&dump->mutex);
2872 free(dump);
2873 return 0;
2874 }
2875
2876 struct dpif_netdev_flow_dump_thread {
2877 struct dpif_flow_dump_thread up;
2878 struct dpif_netdev_flow_dump *dump;
2879 struct odputil_keybuf keybuf[FLOW_DUMP_MAX_BATCH];
2880 struct odputil_keybuf maskbuf[FLOW_DUMP_MAX_BATCH];
2881 };
2882
2883 static struct dpif_netdev_flow_dump_thread *
2884 dpif_netdev_flow_dump_thread_cast(struct dpif_flow_dump_thread *thread)
2885 {
2886 return CONTAINER_OF(thread, struct dpif_netdev_flow_dump_thread, up);
2887 }
2888
2889 static struct dpif_flow_dump_thread *
2890 dpif_netdev_flow_dump_thread_create(struct dpif_flow_dump *dump_)
2891 {
2892 struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_);
2893 struct dpif_netdev_flow_dump_thread *thread;
2894
2895 thread = xmalloc(sizeof *thread);
2896 dpif_flow_dump_thread_init(&thread->up, &dump->up);
2897 thread->dump = dump;
2898 return &thread->up;
2899 }
2900
2901 static void
2902 dpif_netdev_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_)
2903 {
2904 struct dpif_netdev_flow_dump_thread *thread
2905 = dpif_netdev_flow_dump_thread_cast(thread_);
2906
2907 free(thread);
2908 }
2909
2910 static int
2911 dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
2912 struct dpif_flow *flows, int max_flows)
2913 {
2914 struct dpif_netdev_flow_dump_thread *thread
2915 = dpif_netdev_flow_dump_thread_cast(thread_);
2916 struct dpif_netdev_flow_dump *dump = thread->dump;
2917 struct dp_netdev_flow *netdev_flows[FLOW_DUMP_MAX_BATCH];
2918 int n_flows = 0;
2919 int i;
2920
2921 ovs_mutex_lock(&dump->mutex);
2922 if (!dump->status) {
2923 struct dpif_netdev *dpif = dpif_netdev_cast(thread->up.dpif);
2924 struct dp_netdev *dp = get_dp_netdev(&dpif->dpif);
2925 struct dp_netdev_pmd_thread *pmd = dump->cur_pmd;
2926 int flow_limit = MIN(max_flows, FLOW_DUMP_MAX_BATCH);
2927
2928 /* On the first call to dump_next(), extract the first pmd thread.
2929 * If there is no pmd thread, return immediately. */
2930 if (!pmd) {
2931 pmd = dp_netdev_pmd_get_next(dp, &dump->poll_thread_pos);
2932 if (!pmd) {
2933 ovs_mutex_unlock(&dump->mutex);
2934 return n_flows;
2935
2936 }
2937 }
2938
2939 do {
2940 for (n_flows = 0; n_flows < flow_limit; n_flows++) {
2941 struct cmap_node *node;
2942
2943 node = cmap_next_position(&pmd->flow_table, &dump->flow_pos);
2944 if (!node) {
2945 break;
2946 }
2947 netdev_flows[n_flows] = CONTAINER_OF(node,
2948 struct dp_netdev_flow,
2949 node);
2950 }
2951 /* When the current pmd thread is finished being dumped, move
2952 * on to the next one. */
2953 if (n_flows < flow_limit) {
2954 memset(&dump->flow_pos, 0, sizeof dump->flow_pos);
2955 dp_netdev_pmd_unref(pmd);
2956 pmd = dp_netdev_pmd_get_next(dp, &dump->poll_thread_pos);
2957 if (!pmd) {
2958 dump->status = EOF;
2959 break;
2960 }
2961 }
2962 /* Keeps the reference for the next caller. */
2963 dump->cur_pmd = pmd;
2964
2965 /* If the current dump is empty, do not exit the loop, since the
2966 * remaining pmds could have flows to be dumped. Just dump again
2967 * on the new 'pmd'. */
2968 } while (!n_flows);
2969 }
2970 ovs_mutex_unlock(&dump->mutex);
2971
2972 for (i = 0; i < n_flows; i++) {
2973 struct odputil_keybuf *maskbuf = &thread->maskbuf[i];
2974 struct odputil_keybuf *keybuf = &thread->keybuf[i];
2975 struct dp_netdev_flow *netdev_flow = netdev_flows[i];
2976 struct dpif_flow *f = &flows[i];
2977 struct ofpbuf key, mask;
2978
2979 ofpbuf_use_stack(&key, keybuf, sizeof *keybuf);
2980 ofpbuf_use_stack(&mask, maskbuf, sizeof *maskbuf);
2981 dp_netdev_flow_to_dpif_flow(netdev_flow, &key, &mask, f,
2982 dump->up.terse);
2983 }
2984
2985 return n_flows;
2986 }
2987
2988 static int
2989 dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute)
2990 OVS_NO_THREAD_SAFETY_ANALYSIS
2991 {
2992 struct dp_netdev *dp = get_dp_netdev(dpif);
2993 struct dp_netdev_pmd_thread *pmd;
2994 struct dp_packet_batch pp;
2995
2996 if (dp_packet_size(execute->packet) < ETH_HEADER_LEN ||
2997 dp_packet_size(execute->packet) > UINT16_MAX) {
2998 return EINVAL;
2999 }
3000
3001 /* Tries to find the 'pmd'. If NULL is returned, that means
3002 * the current thread is a non-pmd thread and should use
3003 * dp_netdev_get_pmd(dp, NON_PMD_CORE_ID). */
3004 pmd = ovsthread_getspecific(dp->per_pmd_key);
3005 if (!pmd) {
3006 pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID);
3007 if (!pmd) {
3008 return EBUSY;
3009 }
3010 }
3011
3012 if (execute->probe) {
3013 /* If this is part of a probe, drop the packet, since executing
3014 * the action may actually cause spurious packets to be sent into
3015 * the network. */
3016 if (pmd->core_id == NON_PMD_CORE_ID) {
3017 dp_netdev_pmd_unref(pmd);
3018 }
3019 return 0;
3020 }
3021
3022 /* If the current thread is a non-pmd thread, acquires
3023 * the 'non_pmd_mutex'. */
3024 if (pmd->core_id == NON_PMD_CORE_ID) {
3025 ovs_mutex_lock(&dp->non_pmd_mutex);
3026 }
3027
3028 /* Update current time in PMD context. */
3029 pmd_thread_ctx_time_update(pmd);
3030
3031 /* The action processing expects the RSS hash to be valid, because
3032 * it's always initialized at the beginning of datapath processing.
3033 * In this case, though, 'execute->packet' may not have gone through
3034 * the datapath at all, it may have been generated by the upper layer
3035 * (OpenFlow packet-out, BFD frame, ...). */
3036 if (!dp_packet_rss_valid(execute->packet)) {
3037 dp_packet_set_rss_hash(execute->packet,
3038 flow_hash_5tuple(execute->flow, 0));
3039 }
3040
3041 dp_packet_batch_init_packet(&pp, execute->packet);
3042 dp_netdev_execute_actions(pmd, &pp, false, execute->flow,
3043 execute->actions, execute->actions_len);
3044 dp_netdev_pmd_flush_output_packets(pmd, true);
3045
3046 if (pmd->core_id == NON_PMD_CORE_ID) {
3047 ovs_mutex_unlock(&dp->non_pmd_mutex);
3048 dp_netdev_pmd_unref(pmd);
3049 }
3050
3051 return 0;
3052 }
3053
3054 static void
3055 dpif_netdev_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops)
3056 {
3057 size_t i;
3058
3059 for (i = 0; i < n_ops; i++) {
3060 struct dpif_op *op = ops[i];
3061
3062 switch (op->type) {
3063 case DPIF_OP_FLOW_PUT:
3064 op->error = dpif_netdev_flow_put(dpif, &op->flow_put);
3065 break;
3066
3067 case DPIF_OP_FLOW_DEL:
3068 op->error = dpif_netdev_flow_del(dpif, &op->flow_del);
3069 break;
3070
3071 case DPIF_OP_EXECUTE:
3072 op->error = dpif_netdev_execute(dpif, &op->execute);
3073 break;
3074
3075 case DPIF_OP_FLOW_GET:
3076 op->error = dpif_netdev_flow_get(dpif, &op->flow_get);
3077 break;
3078 }
3079 }
3080 }
3081
3082 /* Applies datapath configuration from the database. Some of the changes are
3083 * actually applied in dpif_netdev_run(). */
3084 static int
3085 dpif_netdev_set_config(struct dpif *dpif, const struct smap *other_config)
3086 {
3087 struct dp_netdev *dp = get_dp_netdev(dpif);
3088 const char *cmask = smap_get(other_config, "pmd-cpu-mask");
3089 unsigned long long insert_prob =
3090 smap_get_ullong(other_config, "emc-insert-inv-prob",
3091 DEFAULT_EM_FLOW_INSERT_INV_PROB);
3092 uint32_t insert_min, cur_min;
3093 uint32_t tx_flush_interval, cur_tx_flush_interval;
3094
3095 tx_flush_interval = smap_get_int(other_config, "tx-flush-interval",
3096 DEFAULT_TX_FLUSH_INTERVAL);
3097 atomic_read_relaxed(&dp->tx_flush_interval, &cur_tx_flush_interval);
3098 if (tx_flush_interval != cur_tx_flush_interval) {
3099 atomic_store_relaxed(&dp->tx_flush_interval, tx_flush_interval);
3100 VLOG_INFO("Flushing interval for tx queues set to %"PRIu32" us",
3101 tx_flush_interval);
3102 }
3103
3104 if (!nullable_string_is_equal(dp->pmd_cmask, cmask)) {
3105 free(dp->pmd_cmask);
3106 dp->pmd_cmask = nullable_xstrdup(cmask);
3107 dp_netdev_request_reconfigure(dp);
3108 }
3109
3110 atomic_read_relaxed(&dp->emc_insert_min, &cur_min);
3111 if (insert_prob <= UINT32_MAX) {
3112 insert_min = insert_prob == 0 ? 0 : UINT32_MAX / insert_prob;
3113 } else {
3114 insert_min = DEFAULT_EM_FLOW_INSERT_MIN;
3115 insert_prob = DEFAULT_EM_FLOW_INSERT_INV_PROB;
3116 }
3117
3118 if (insert_min != cur_min) {
3119 atomic_store_relaxed(&dp->emc_insert_min, insert_min);
3120 if (insert_min == 0) {
3121 VLOG_INFO("EMC has been disabled");
3122 } else {
3123 VLOG_INFO("EMC insertion probability changed to 1/%llu (~%.2f%%)",
3124 insert_prob, (100 / (float)insert_prob));
3125 }
3126 }
3127
3128 bool perf_enabled = smap_get_bool(other_config, "pmd-perf-metrics", false);
3129 bool cur_perf_enabled;
3130 atomic_read_relaxed(&dp->pmd_perf_metrics, &cur_perf_enabled);
3131 if (perf_enabled != cur_perf_enabled) {
3132 atomic_store_relaxed(&dp->pmd_perf_metrics, perf_enabled);
3133 if (perf_enabled) {
3134 VLOG_INFO("PMD performance metrics collection enabled");
3135 } else {
3136 VLOG_INFO("PMD performance metrics collection disabled");
3137 }
3138 }
3139
3140 return 0;
3141 }
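
/* Example (editorial): these knobs come from the Open_vSwitch table's
 * other_config column, e.g.:
 *
 *     ovs-vsctl set Open_vSwitch . other_config:tx-flush-interval=50
 *     ovs-vsctl set Open_vSwitch . other_config:emc-insert-inv-prob=20
 *
 * An inverse probability of 20 yields insert_min = UINT32_MAX / 20,
 * i.e. a ~5% EMC insertion probability. */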
3142
3143 /* Parses affinity list and returns result in 'core_ids'. */
3144 static int
3145 parse_affinity_list(const char *affinity_list, unsigned *core_ids, int n_rxq)
3146 {
3147 unsigned i;
3148 char *list, *copy, *key, *value;
3149 int error = 0;
3150
3151 for (i = 0; i < n_rxq; i++) {
3152 core_ids[i] = OVS_CORE_UNSPEC;
3153 }
3154
3155 if (!affinity_list) {
3156 return 0;
3157 }
3158
3159 list = copy = xstrdup(affinity_list);
3160
3161 while (ofputil_parse_key_value(&list, &key, &value)) {
3162 int rxq_id, core_id;
3163
3164 if (!str_to_int(key, 0, &rxq_id) || rxq_id < 0
3165 || !str_to_int(value, 0, &core_id) || core_id < 0) {
3166 error = EINVAL;
3167 break;
3168 }
3169
3170 if (rxq_id < n_rxq) {
3171 core_ids[rxq_id] = core_id;
3172 }
3173 }
3174
3175 free(copy);
3176 return error;
3177 }
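
/* Usage sketch (editorial; this function is illustrative only): the
 * affinity list is a comma-separated set of "rxq:core" pairs. */
static inline void
parse_affinity_list_example(void)
{
    unsigned core_ids[2];

    /* "0:3,1:7" pins rx queue 0 to core 3 and rx queue 1 to core 7;
     * queues not mentioned would keep OVS_CORE_UNSPEC. */
    if (!parse_affinity_list("0:3,1:7", core_ids, 2)) {
        ovs_assert(core_ids[0] == 3 && core_ids[1] == 7);
    }
}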
3178
3179 /* Parses 'affinity_list' and applies configuration if it is valid. */
3180 static int
3181 dpif_netdev_port_set_rxq_affinity(struct dp_netdev_port *port,
3182 const char *affinity_list)
3183 {
3184 unsigned *core_ids, i;
3185 int error = 0;
3186
3187 core_ids = xmalloc(port->n_rxq * sizeof *core_ids);
3188 if (parse_affinity_list(affinity_list, core_ids, port->n_rxq)) {
3189 error = EINVAL;
3190 goto exit;
3191 }
3192
3193 for (i = 0; i < port->n_rxq; i++) {
3194 port->rxqs[i].core_id = core_ids[i];
3195 }
3196
3197 exit:
3198 free(core_ids);
3199 return error;
3200 }
3201
3202 /* Changes the affinity of port's rx queues. The changes are actually applied
3203 * in dpif_netdev_run(). */
3204 static int
3205 dpif_netdev_port_set_config(struct dpif *dpif, odp_port_t port_no,
3206 const struct smap *cfg)
3207 {
3208 struct dp_netdev *dp = get_dp_netdev(dpif);
3209 struct dp_netdev_port *port;
3210 int error = 0;
3211 const char *affinity_list = smap_get(cfg, "pmd-rxq-affinity");
3212
3213 ovs_mutex_lock(&dp->port_mutex);
3214 error = get_port_by_number(dp, port_no, &port);
3215 if (error || !netdev_is_pmd(port->netdev)
3216 || nullable_string_is_equal(affinity_list, port->rxq_affinity_list)) {
3217 goto unlock;
3218 }
3219
3220 error = dpif_netdev_port_set_rxq_affinity(port, affinity_list);
3221 if (error) {
3222 goto unlock;
3223 }
3224 free(port->rxq_affinity_list);
3225 port->rxq_affinity_list = nullable_xstrdup(affinity_list);
3226
3227 dp_netdev_request_reconfigure(dp);
3228 unlock:
3229 ovs_mutex_unlock(&dp->port_mutex);
3230 return error;
3231 }
3232
3233 static int
3234 dpif_netdev_queue_to_priority(const struct dpif *dpif OVS_UNUSED,
3235 uint32_t queue_id, uint32_t *priority)
3236 {
3237 *priority = queue_id;
3238 return 0;
3239 }
3240
3241 \f
3242 /* Creates and returns a new 'struct dp_netdev_actions', whose actions are
3243 * a copy of the 'size' bytes of 'actions' input parameters. */
3244 struct dp_netdev_actions *
3245 dp_netdev_actions_create(const struct nlattr *actions, size_t size)
3246 {
3247 struct dp_netdev_actions *netdev_actions;
3248
3249 netdev_actions = xmalloc(sizeof *netdev_actions + size);
3250 memcpy(netdev_actions->actions, actions, size);
3251 netdev_actions->size = size;
3252
3253 return netdev_actions;
3254 }
3255
3256 struct dp_netdev_actions *
3257 dp_netdev_flow_get_actions(const struct dp_netdev_flow *flow)
3258 {
3259 return ovsrcu_get(struct dp_netdev_actions *, &flow->actions);
3260 }
3261
3262 static void
3263 dp_netdev_actions_free(struct dp_netdev_actions *actions)
3264 {
3265 free(actions);
3266 }
3267 \f
3268 static void
3269 dp_netdev_rxq_set_cycles(struct dp_netdev_rxq *rx,
3270 enum rxq_cycles_counter_type type,
3271 unsigned long long cycles)
3272 {
3273 atomic_store_relaxed(&rx->cycles[type], cycles);
3274 }
3275
3276 static void
3277 dp_netdev_rxq_add_cycles(struct dp_netdev_rxq *rx,
3278 enum rxq_cycles_counter_type type,
3279 unsigned long long cycles)
3280 {
3281 non_atomic_ullong_add(&rx->cycles[type], cycles);
3282 }
3283
3284 static uint64_t
3285 dp_netdev_rxq_get_cycles(struct dp_netdev_rxq *rx,
3286 enum rxq_cycles_counter_type type)
3287 {
3288 unsigned long long processing_cycles;
3289 atomic_read_relaxed(&rx->cycles[type], &processing_cycles);
3290 return processing_cycles;
3291 }
3292
3293 static void
3294 dp_netdev_rxq_set_intrvl_cycles(struct dp_netdev_rxq *rx,
3295 unsigned long long cycles)
3296 {
3297 unsigned int idx = rx->intrvl_idx++ % PMD_RXQ_INTERVAL_MAX;
3298 atomic_store_relaxed(&rx->cycles_intrvl[idx], cycles);
3299 }
3300
3301 static uint64_t
3302 dp_netdev_rxq_get_intrvl_cycles(struct dp_netdev_rxq *rx, unsigned idx)
3303 {
3304 unsigned long long processing_cycles;
3305 atomic_read_relaxed(&rx->cycles_intrvl[idx], &processing_cycles);
3306 return processing_cycles;
3307 }
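
/* Editorial note: 'cycles_intrvl' acts as a ring holding the last
 * PMD_RXQ_INTERVAL_MAX interval measurements; rxq_scheduling() below sums
 * every slot into RXQ_CYCLES_PROC_HIST before sorting queues. */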
3308
3309 #if ATOMIC_ALWAYS_LOCK_FREE_8B
3310 static inline bool
3311 pmd_perf_metrics_enabled(const struct dp_netdev_pmd_thread *pmd)
3312 {
3313 bool pmd_perf_enabled;
3314 atomic_read_relaxed(&pmd->dp->pmd_perf_metrics, &pmd_perf_enabled);
3315 return pmd_perf_enabled;
3316 }
3317 #else
3318 /* If stores and reads of 64-bit integers are not atomic, the full PMD
3319 * performance metrics are not available as locked access to 64 bit
3320 * integers would be prohibitively expensive. */
3321 static inline bool
3322 pmd_perf_metrics_enabled(const struct dp_netdev_pmd_thread *pmd OVS_UNUSED)
3323 {
3324 return false;
3325 }
3326 #endif
3327
3328 static int
3329 dp_netdev_pmd_flush_output_on_port(struct dp_netdev_pmd_thread *pmd,
3330 struct tx_port *p)
3331 {
3332 int i;
3333 int tx_qid;
3334 int output_cnt;
3335 bool dynamic_txqs;
3336 struct cycle_timer timer;
3337 uint64_t cycles;
3338 uint32_t tx_flush_interval;
3339
3340 cycle_timer_start(&pmd->perf_stats, &timer);
3341
3342 dynamic_txqs = p->port->dynamic_txqs;
3343 if (dynamic_txqs) {
3344 tx_qid = dpif_netdev_xps_get_tx_qid(pmd, p);
3345 } else {
3346 tx_qid = pmd->static_tx_qid;
3347 }
3348
3349 output_cnt = dp_packet_batch_size(&p->output_pkts);
3350 ovs_assert(output_cnt > 0);
3351
3352 netdev_send(p->port->netdev, tx_qid, &p->output_pkts, dynamic_txqs);
3353 dp_packet_batch_init(&p->output_pkts);
3354
3355 /* Update time of the next flush. */
3356 atomic_read_relaxed(&pmd->dp->tx_flush_interval, &tx_flush_interval);
3357 p->flush_time = pmd->ctx.now + tx_flush_interval;
3358
3359 ovs_assert(pmd->n_output_batches > 0);
3360 pmd->n_output_batches--;
3361
3362 pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_SENT_PKTS, output_cnt);
3363 pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_SENT_BATCHES, 1);
3364
3365 /* Distribute send cycles evenly among transmitted packets and assign to
3366 * their respective rx queues. */
3367 cycles = cycle_timer_stop(&pmd->perf_stats, &timer) / output_cnt;
3368 for (i = 0; i < output_cnt; i++) {
3369 if (p->output_pkts_rxqs[i]) {
3370 dp_netdev_rxq_add_cycles(p->output_pkts_rxqs[i],
3371 RXQ_CYCLES_PROC_CURR, cycles);
3372 }
3373 }
3374
3375 return output_cnt;
3376 }
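
/* Worked example (editorial): a flushed batch of 32 packets whose send
 * took 6400 cycles attributes 6400 / 32 = 200 cycles to the rx queue of
 * each packet via dp_netdev_rxq_add_cycles() above. */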
3377
3378 static int
3379 dp_netdev_pmd_flush_output_packets(struct dp_netdev_pmd_thread *pmd,
3380 bool force)
3381 {
3382 struct tx_port *p;
3383 int output_cnt = 0;
3384
3385 if (!pmd->n_output_batches) {
3386 return 0;
3387 }
3388
3389 HMAP_FOR_EACH (p, node, &pmd->send_port_cache) {
3390 if (!dp_packet_batch_is_empty(&p->output_pkts)
3391 && (force || pmd->ctx.now >= p->flush_time)) {
3392 output_cnt += dp_netdev_pmd_flush_output_on_port(pmd, p);
3393 }
3394 }
3395 return output_cnt;
3396 }
3397
3398 static int
3399 dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
3400 struct dp_netdev_rxq *rxq,
3401 odp_port_t port_no)
3402 {
3403 struct pmd_perf_stats *s = &pmd->perf_stats;
3404 struct dp_packet_batch batch;
3405 struct cycle_timer timer;
3406 int error;
3407 int batch_cnt = 0;
3408 int rem_qlen = 0, *qlen_p = NULL;
3409 uint64_t cycles;
3410
3411 /* Measure duration for polling and processing rx burst. */
3412 cycle_timer_start(&pmd->perf_stats, &timer);
3413
3414 pmd->ctx.last_rxq = rxq;
3415 dp_packet_batch_init(&batch);
3416
3417 /* Fetch the rx queue length only for vhostuser ports. */
3418 if (pmd_perf_metrics_enabled(pmd) && rxq->is_vhost) {
3419 qlen_p = &rem_qlen;
3420 }
3421
3422 error = netdev_rxq_recv(rxq->rx, &batch, qlen_p);
3423 if (!error) {
3424 /* At least one packet received. */
3425 *recirc_depth_get() = 0;
3426 pmd_thread_ctx_time_update(pmd);
3427 batch_cnt = batch.count;
3428 if (pmd_perf_metrics_enabled(pmd)) {
3429 /* Update batch histogram. */
3430 s->current.batches++;
3431 histogram_add_sample(&s->pkts_per_batch, batch_cnt);
3432 /* Update the maximum vhost rx queue fill level. */
3433 if (rxq->is_vhost && rem_qlen >= 0) {
3434 uint32_t qfill = batch_cnt + rem_qlen;
3435 if (qfill > s->current.max_vhost_qfill) {
3436 s->current.max_vhost_qfill = qfill;
3437 }
3438 }
3439 }
3440 /* Process packet batch. */
3441 dp_netdev_input(pmd, &batch, port_no);
3442
3443 /* Assign processing cycles to rx queue. */
3444 cycles = cycle_timer_stop(&pmd->perf_stats, &timer);
3445 dp_netdev_rxq_add_cycles(rxq, RXQ_CYCLES_PROC_CURR, cycles);
3446
3447 dp_netdev_pmd_flush_output_packets(pmd, false);
3448 } else {
3449 /* Discard cycles. */
3450 cycle_timer_stop(&pmd->perf_stats, &timer);
3451 if (error != EAGAIN && error != EOPNOTSUPP) {
3452 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
3453
3454 VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
3455 netdev_rxq_get_name(rxq->rx), ovs_strerror(error));
3456 }
3457 }
3458
3459 pmd->ctx.last_rxq = NULL;
3460
3461 return batch_cnt;
3462 }
3463
3464 static struct tx_port *
3465 tx_port_lookup(const struct hmap *hmap, odp_port_t port_no)
3466 {
3467 struct tx_port *tx;
3468
3469 HMAP_FOR_EACH_IN_BUCKET (tx, node, hash_port_no(port_no), hmap) {
3470 if (tx->port->port_no == port_no) {
3471 return tx;
3472 }
3473 }
3474
3475 return NULL;
3476 }
3477
3478 static int
3479 port_reconfigure(struct dp_netdev_port *port)
3480 {
3481 struct netdev *netdev = port->netdev;
3482 int i, err;
3483
3484 /* Closes the existing 'rxq's. */
3485 for (i = 0; i < port->n_rxq; i++) {
3486 netdev_rxq_close(port->rxqs[i].rx);
3487 port->rxqs[i].rx = NULL;
3488 }
3489 unsigned last_nrxq = port->n_rxq;
3490 port->n_rxq = 0;
3491
3492 /* Allows 'netdev' to apply the pending configuration changes. */
3493 if (netdev_is_reconf_required(netdev) || port->need_reconfigure) {
3494 err = netdev_reconfigure(netdev);
3495 if (err && (err != EOPNOTSUPP)) {
3496 VLOG_ERR("Failed to set interface %s new configuration",
3497 netdev_get_name(netdev));
3498 return err;
3499 }
3500 }
3501 /* If the netdev_reconfigure() above succeeds, reopens the 'rxq's. */
3502 port->rxqs = xrealloc(port->rxqs,
3503 sizeof *port->rxqs * netdev_n_rxq(netdev));
3504 /* Realloc 'used' counters for tx queues. */
3505 free(port->txq_used);
3506 port->txq_used = xcalloc(netdev_n_txq(netdev), sizeof *port->txq_used);
3507
3508 for (i = 0; i < netdev_n_rxq(netdev); i++) {
3509 bool new_queue = i >= last_nrxq;
3510 if (new_queue) {
3511 memset(&port->rxqs[i], 0, sizeof port->rxqs[i]);
3512 }
3513
3514 port->rxqs[i].port = port;
3515 port->rxqs[i].is_vhost = !strncmp(port->type, "dpdkvhost", 9);
3516
3517 err = netdev_rxq_open(netdev, &port->rxqs[i].rx, i);
3518 if (err) {
3519 return err;
3520 }
3521 port->n_rxq++;
3522 }
3523
3524 /* Parse affinity list to apply configuration for new queues. */
3525 dpif_netdev_port_set_rxq_affinity(port, port->rxq_affinity_list);
3526
3527 /* If reconfiguration was successful, mark it as such so we can use it. */
3528 port->need_reconfigure = false;
3529
3530 return 0;
3531 }
3532
3533 struct rr_numa_list {
3534 struct hmap numas; /* Contains 'struct rr_numa' */
3535 };
3536
3537 struct rr_numa {
3538 struct hmap_node node;
3539
3540 int numa_id;
3541
3542 /* Non-isolated pmds on numa node 'numa_id'. */
3543 struct dp_netdev_pmd_thread **pmds;
3544 int n_pmds;
3545
3546 int cur_index;
3547 bool idx_inc;
3548 };
3549
3550 static struct rr_numa *
3551 rr_numa_list_lookup(struct rr_numa_list *rr, int numa_id)
3552 {
3553 struct rr_numa *numa;
3554
3555 HMAP_FOR_EACH_WITH_HASH (numa, node, hash_int(numa_id, 0), &rr->numas) {
3556 if (numa->numa_id == numa_id) {
3557 return numa;
3558 }
3559 }
3560
3561 return NULL;
3562 }
3563
3564 /* Returns the next node in numa list following 'numa' in round-robin fashion.
3565 * Returns first node if 'numa' is a null pointer or the last node in 'rr'.
3566 * Returns NULL if 'rr' numa list is empty. */
3567 static struct rr_numa *
3568 rr_numa_list_next(struct rr_numa_list *rr, const struct rr_numa *numa)
3569 {
3570 struct hmap_node *node = NULL;
3571
3572 if (numa) {
3573 node = hmap_next(&rr->numas, &numa->node);
3574 }
3575 if (!node) {
3576 node = hmap_first(&rr->numas);
3577 }
3578
3579 return (node) ? CONTAINER_OF(node, struct rr_numa, node) : NULL;
3580 }
3581
3582 static void
3583 rr_numa_list_populate(struct dp_netdev *dp, struct rr_numa_list *rr)
3584 {
3585 struct dp_netdev_pmd_thread *pmd;
3586 struct rr_numa *numa;
3587
3588 hmap_init(&rr->numas);
3589
3590 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
3591 if (pmd->core_id == NON_PMD_CORE_ID || pmd->isolated) {
3592 continue;
3593 }
3594
3595 numa = rr_numa_list_lookup(rr, pmd->numa_id);
3596 if (!numa) {
3597 numa = xzalloc(sizeof *numa);
3598 numa->numa_id = pmd->numa_id;
3599 hmap_insert(&rr->numas, &numa->node, hash_int(pmd->numa_id, 0));
3600 }
3601 numa->n_pmds++;
3602 numa->pmds = xrealloc(numa->pmds, numa->n_pmds * sizeof *numa->pmds);
3603 numa->pmds[numa->n_pmds - 1] = pmd;
3604 /* At least one pmd, so initialize cur_index and idx_inc. */
3605 numa->cur_index = 0;
3606 numa->idx_inc = true;
3607 }
3608 }
3609
3610 /* Returns the next pmd from the numa node in
3611 * incrementing or decrementing order. */
3612 static struct dp_netdev_pmd_thread *
3613 rr_numa_get_pmd(struct rr_numa *numa)
3614 {
3615 int numa_idx = numa->cur_index;
3616
3617 if (numa->idx_inc == true) {
3618 /* Incrementing through list of pmds. */
3619 if (numa->cur_index == numa->n_pmds-1) {
3620 /* Reached the last pmd. */
3621 numa->idx_inc = false;
3622 } else {
3623 numa->cur_index++;
3624 }
3625 } else {
3626 /* Decrementing through list of pmds. */
3627 if (numa->cur_index == 0) {
3628 /* Reached the first pmd. */
3629 numa->idx_inc = true;
3630 } else {
3631 numa->cur_index--;
3632 }
3633 }
3634 return numa->pmds[numa_idx];
3635 }
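
/* Worked example (editorial): with three pmds, successive calls return
 * indexes 0, 1, 2, 2, 1, 0, 0, 1, ... The endpoints repeat because the
 * direction flips on the call that returns them; sweeping back and forth
 * spreads queues across the list more evenly than wrapping around. */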
3636
3637 static void
3638 rr_numa_list_destroy(struct rr_numa_list *rr)
3639 {
3640 struct rr_numa *numa;
3641
3642 HMAP_FOR_EACH_POP (numa, node, &rr->numas) {
3643 free(numa->pmds);
3644 free(numa);
3645 }
3646 hmap_destroy(&rr->numas);
3647 }
3648
3649 /* Sort rx queues by the processing cycles they are consuming. */
3650 static int
3651 compare_rxq_cycles(const void *a, const void *b)
3652 {
3653 struct dp_netdev_rxq *qa;
3654 struct dp_netdev_rxq *qb;
3655 uint64_t cycles_qa, cycles_qb;
3656
3657 qa = *(struct dp_netdev_rxq **) a;
3658 qb = *(struct dp_netdev_rxq **) b;
3659
3660 cycles_qa = dp_netdev_rxq_get_cycles(qa, RXQ_CYCLES_PROC_HIST);
3661 cycles_qb = dp_netdev_rxq_get_cycles(qb, RXQ_CYCLES_PROC_HIST);
3662
3663 if (cycles_qa != cycles_qb) {
3664 return (cycles_qa < cycles_qb) ? 1 : -1;
3665 } else {
3666 /* Cycles are the same, so tiebreak on port/queue id.
3667 * Tiebreaking (as opposed to returning 0) ensures consistent
3668 * sort results across multiple OSes. */
3669 uint32_t port_qa = odp_to_u32(qa->port->port_no);
3670 uint32_t port_qb = odp_to_u32(qb->port->port_no);
3671 if (port_qa != port_qb) {
3672 return port_qa > port_qb ? 1 : -1;
3673 } else {
3674 return netdev_rxq_get_queue_id(qa->rx)
3675 - netdev_rxq_get_queue_id(qb->rx);
3676 }
3677 }
3678 }
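
/* Editorial note: this comparator makes qsort() order queues from most to
 * least consumed cycles, e.g. cycle histories {10, 30, 20} sort as
 * {30, 20, 10}; see the qsort() call in rxq_scheduling() below. */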
3679
3680 /* Assigns pmds to queues.  If 'pinned' is true, assigns pmds to pinned
3681 * queues and marks those pmds as isolated.  Otherwise, assigns non-isolated
3682 * pmds to unpinned queues.
3683 *
3684 * If 'pinned' is false, the queues are sorted by the processing cycles they
3685 * consume and then assigned to pmds in round-robin order.
3686 *
3687 * The function doesn't touch the pmd threads, it just stores the assignment
3688 * in the 'pmd' member of each rxq. */
3689 static void
3690 rxq_scheduling(struct dp_netdev *dp, bool pinned) OVS_REQUIRES(dp->port_mutex)
3691 {
3692 struct dp_netdev_port *port;
3693 struct rr_numa_list rr;
3694 struct rr_numa *non_local_numa = NULL;
3695 struct dp_netdev_rxq **rxqs = NULL;
3696 int n_rxqs = 0;
3697 struct rr_numa *numa = NULL;
3698 int numa_id;
3699
3700 HMAP_FOR_EACH (port, node, &dp->ports) {
3701 if (!netdev_is_pmd(port->netdev)) {
3702 continue;
3703 }
3704
3705 for (int qid = 0; qid < port->n_rxq; qid++) {
3706 struct dp_netdev_rxq *q = &port->rxqs[qid];
3707
3708 if (pinned && q->core_id != OVS_CORE_UNSPEC) {
3709 struct dp_netdev_pmd_thread *pmd;
3710
3711 pmd = dp_netdev_get_pmd(dp, q->core_id);
3712 if (!pmd) {
3713 VLOG_WARN("There is no PMD thread on core %d. Queue "
3714 "%d on port \'%s\' will not be polled.",
3715 q->core_id, qid, netdev_get_name(port->netdev));
3716 } else {
3717 q->pmd = pmd;
3718 pmd->isolated = true;
3719 dp_netdev_pmd_unref(pmd);
3720 }
3721 } else if (!pinned && q->core_id == OVS_CORE_UNSPEC) {
3722 uint64_t cycle_hist = 0;
3723
3724 if (n_rxqs == 0) {
3725 rxqs = xmalloc(sizeof *rxqs);
3726 } else {
3727 rxqs = xrealloc(rxqs, sizeof *rxqs * (n_rxqs + 1));
3728 }
3729 /* Sum the queue intervals and store the cycle history. */
3730 for (unsigned i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) {
3731 cycle_hist += dp_netdev_rxq_get_intrvl_cycles(q, i);
3732 }
3733 dp_netdev_rxq_set_cycles(q, RXQ_CYCLES_PROC_HIST, cycle_hist);
3734
3735 /* Store the queue. */
3736 rxqs[n_rxqs++] = q;
3737 }
3738 }
3739 }
3740
3741 if (n_rxqs > 1) {
3742 /* Sort the queues in order of the processing cycles
3743 * they consumed during their last pmd interval. */
3744 qsort(rxqs, n_rxqs, sizeof *rxqs, compare_rxq_cycles);
3745 }
3746
3747 rr_numa_list_populate(dp, &rr);
3748 /* Assign the sorted queues to pmds in round robin. */
3749 for (int i = 0; i < n_rxqs; i++) {
3750 numa_id = netdev_get_numa_id(rxqs[i]->port->netdev);
3751 numa = rr_numa_list_lookup(&rr, numa_id);
3752 if (!numa) {
3753 /* There are no pmds on the queue's local NUMA node.
3754 * Round-robin on the NUMA nodes that do have pmds. */
3755 non_local_numa = rr_numa_list_next(&rr, non_local_numa);
3756 if (!non_local_numa) {
3757 VLOG_ERR("There is no available (non-isolated) pmd "
3758 "thread for port \'%s\' queue %d. This queue "
3759 "will not be polled. Is pmd-cpu-mask set to "
3760 "zero? Or are all PMDs isolated to other "
3761 "queues?", netdev_rxq_get_name(rxqs[i]->rx),
3762 netdev_rxq_get_queue_id(rxqs[i]->rx));
3763 continue;
3764 }
3765 rxqs[i]->pmd = rr_numa_get_pmd(non_local_numa);
3766 VLOG_WARN("There's no available (non-isolated) pmd thread "
3767 "on numa node %d. Queue %d on port \'%s\' will "
3768 "be assigned to the pmd on core %d "
3769 "(numa node %d). Expect reduced performance.",
3770 numa_id, netdev_rxq_get_queue_id(rxqs[i]->rx),
3771 netdev_rxq_get_name(rxqs[i]->rx),
3772 rxqs[i]->pmd->core_id, rxqs[i]->pmd->numa_id);
3773 } else {
3774 rxqs[i]->pmd = rr_numa_get_pmd(numa);
3775 VLOG_INFO("Core %d on numa node %d assigned port \'%s\' "
3776 "rx queue %d (measured processing cycles %"PRIu64").",
3777 rxqs[i]->pmd->core_id, numa_id,
3778 netdev_rxq_get_name(rxqs[i]->rx),
3779 netdev_rxq_get_queue_id(rxqs[i]->rx),
3780 dp_netdev_rxq_get_cycles(rxqs[i], RXQ_CYCLES_PROC_HIST));
3781 }
3782 }
3783
3784 rr_numa_list_destroy(&rr);
3785 free(rxqs);
3786 }
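/* Worked example (illustrative numbers): four unpinned queues measuring
 * {100, 80, 60, 40} cycles and two non-isolated pmds A and B on the local
 * NUMA node.  Sorted busiest-first and assigned via the back-and-forth
 * round robin above, the queues land on A, B, B, A, giving each pmd a
 * total of 140 cycles. */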
3787
3788 static void
3789 reload_affected_pmds(struct dp_netdev *dp)
3790 {
3791 struct dp_netdev_pmd_thread *pmd;
3792
3793 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
3794 if (pmd->need_reload) {
3795 dp_netdev_reload_pmd__(pmd);
3796 pmd->need_reload = false;
3797 }
3798 }
3799 }
3800
3801 static void
3802 reconfigure_pmd_threads(struct dp_netdev *dp)
3803 OVS_REQUIRES(dp->port_mutex)
3804 {
3805 struct dp_netdev_pmd_thread *pmd;
3806 struct ovs_numa_dump *pmd_cores;
3807 struct ovs_numa_info_core *core;
3808 struct hmapx to_delete = HMAPX_INITIALIZER(&to_delete);
3809 struct hmapx_node *node;
3810 bool changed = false;
3811 bool need_to_adjust_static_tx_qids = false;
3812
3813 /* The pmd threads should be started only if there's a pmd port in the
3814 * datapath. If the user didn't provide any "pmd-cpu-mask", we start
3815 * NR_PMD_THREADS per numa node. */
3816 if (!has_pmd_port(dp)) {
3817 pmd_cores = ovs_numa_dump_n_cores_per_numa(0);
3818 } else if (dp->pmd_cmask && dp->pmd_cmask[0]) {
3819 pmd_cores = ovs_numa_dump_cores_with_cmask(dp->pmd_cmask);
3820 } else {
3821 pmd_cores = ovs_numa_dump_n_cores_per_numa(NR_PMD_THREADS);
3822 }
3823
3824 /* We need to adjust 'static_tx_qid's only if we're reducing the number of
3825 * PMD threads; the '- 1' below excludes the non-pmd thread from the count.
* Otherwise, new threads will allocate all the freed ids. */
3826 if (ovs_numa_dump_count(pmd_cores) < cmap_count(&dp->poll_threads) - 1) {
3827 /* Adjustment is required to keep 'static_tx_qid's sequential and
3828 * avoid possible issues, for example, imbalanced tx queue usage
3829 * and unnecessary locking caused by remapping on netdev level. */
3830 need_to_adjust_static_tx_qids = true;
3831 }
3832
3833 /* Check for unwanted pmd threads */
3834 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
3835 if (pmd->core_id == NON_PMD_CORE_ID) {
3836 continue;
3837 }
3838 if (!ovs_numa_dump_contains_core(pmd_cores, pmd->numa_id,
3839 pmd->core_id)) {
3840 hmapx_add(&to_delete, pmd);
3841 } else if (need_to_adjust_static_tx_qids) {
3842 pmd->need_reload = true;
3843 }
3844 }
3845
3846 HMAPX_FOR_EACH (node, &to_delete) {
3847 pmd = (struct dp_netdev_pmd_thread *) node->data;
3848 VLOG_INFO("PMD thread on numa_id: %d, core id: %2d destroyed.",
3849 pmd->numa_id, pmd->core_id);
3850 dp_netdev_del_pmd(dp, pmd);
3851 }
3852 changed = !hmapx_is_empty(&to_delete);
3853 hmapx_destroy(&to_delete);
3854
3855 if (need_to_adjust_static_tx_qids) {
3856 /* 'static_tx_qid's are not sequential now.
3857 * Reload remaining threads to fix this. */
3858 reload_affected_pmds(dp);
3859 }
3860
3861 /* Check for required new pmd threads */
3862 FOR_EACH_CORE_ON_DUMP(core, pmd_cores) {
3863 pmd = dp_netdev_get_pmd(dp, core->core_id);
3864 if (!pmd) {
3865 pmd = xzalloc(sizeof *pmd);
3866 dp_netdev_configure_pmd(pmd, dp, core->core_id, core->numa_id);
3867 pmd->thread = ovs_thread_create("pmd", pmd_thread_main, pmd);
3868 VLOG_INFO("PMD thread on numa_id: %d, core id: %2d created.",
3869 pmd->numa_id, pmd->core_id);
3870 changed = true;
3871 } else {
3872 dp_netdev_pmd_unref(pmd);
3873 }
3874 }
3875
3876 if (changed) {
3877 struct ovs_numa_info_numa *numa;
3878
3879 /* Log the number of pmd threads per numa node. */
3880 FOR_EACH_NUMA_ON_DUMP (numa, pmd_cores) {
3881 VLOG_INFO("There are %"PRIuSIZE" pmd threads on numa node %d",
3882 numa->n_cores, numa->numa_id);
3883 }
3884 }
3885
3886 ovs_numa_dump_destroy(pmd_cores);
3887 }
3888
3889 static void
3890 pmd_remove_stale_ports(struct dp_netdev *dp,
3891 struct dp_netdev_pmd_thread *pmd)
3892 OVS_EXCLUDED(pmd->port_mutex)
3893 OVS_REQUIRES(dp->port_mutex)
3894 {
3895 struct rxq_poll *poll, *poll_next;
3896 struct tx_port *tx, *tx_next;
3897
3898 ovs_mutex_lock(&pmd->port_mutex);
3899 HMAP_FOR_EACH_SAFE (poll, poll_next, node, &pmd->poll_list) {
3900 struct dp_netdev_port *port = poll->rxq->port;
3901
3902 if (port->need_reconfigure
3903 || !hmap_contains(&dp->ports, &port->node)) {
3904 dp_netdev_del_rxq_from_pmd(pmd, poll);
3905 }
3906 }
3907 HMAP_FOR_EACH_SAFE (tx, tx_next, node, &pmd->tx_ports) {
3908 struct dp_netdev_port *port = tx->port;
3909
3910 if (port->need_reconfigure
3911 || !hmap_contains(&dp->ports, &port->node)) {
3912 dp_netdev_del_port_tx_from_pmd(pmd, tx);
3913 }
3914 }
3915 ovs_mutex_unlock(&pmd->port_mutex);
3916 }
3917
3918 /* Must be called each time a port is added/removed or the cmask changes.
3919 * This creates and destroys pmd threads, reconfigures ports, opens their
3920 * rxqs and assigns all rxqs/txqs to pmd threads. */
3921 static void
3922 reconfigure_datapath(struct dp_netdev *dp)
3923 OVS_REQUIRES(dp->port_mutex)
3924 {
3925 struct dp_netdev_pmd_thread *pmd;
3926 struct dp_netdev_port *port;
3927 int wanted_txqs;
3928
3929 dp->last_reconfigure_seq = seq_read(dp->reconfigure_seq);
3930
3931 /* Step 1: Adjust the pmd threads based on the datapath ports, the cores
3932 * on the system and the user configuration. */
3933 reconfigure_pmd_threads(dp);
3934
3935 wanted_txqs = cmap_count(&dp->poll_threads);
3936
3937 /* The number of pmd threads might have changed, or a port can be new:
3938 * adjust the txqs. */
3939 HMAP_FOR_EACH (port, node, &dp->ports) {
3940 netdev_set_tx_multiq(port->netdev, wanted_txqs);
3941 }
3942
3943 /* Step 2: Remove from the pmd threads ports that have been removed or
3944 * need reconfiguration. */
3945
3946 /* Check for all the ports that need reconfiguration. We cache this in
3947 * 'port->need_reconfigure', because netdev_is_reconf_required() can
3948 * change at any time. */
3949 HMAP_FOR_EACH (port, node, &dp->ports) {
3950 if (netdev_is_reconf_required(port->netdev)) {
3951 port->need_reconfigure = true;
3952 }
3953 }
3954
3955 /* Remove from the pmd threads all the ports that have been deleted or
3956 * need reconfiguration. */
3957 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
3958 pmd_remove_stale_ports(dp, pmd);
3959 }
3960
3961 /* Reload affected pmd threads. We must wait for the pmd threads before
3962 * reconfiguring the ports, because a port cannot be reconfigured while
3963 * it's being used. */
3964 reload_affected_pmds(dp);
3965
3966 /* Step 3: Reconfigure ports. */
3967
3968 /* We only reconfigure the ports that we determined above, because they're
3969 * not being used by any pmd thread at the moment. If a port fails to
3970 * reconfigure we remove it from the datapath. */
3971 struct dp_netdev_port *next_port;
3972 HMAP_FOR_EACH_SAFE (port, next_port, node, &dp->ports) {
3973 int err;
3974
3975 if (!port->need_reconfigure) {
3976 continue;
3977 }
3978
3979 err = port_reconfigure(port);
3980 if (err) {
3981 hmap_remove(&dp->ports, &port->node);
3982 seq_change(dp->port_seq);
3983 port_destroy(port);
3984 } else {
3985 port->dynamic_txqs = netdev_n_txq(port->netdev) < wanted_txqs;
3986 }
3987 }
3988
3989 /* Step 4: Compute new rxq scheduling. We don't touch the pmd threads
3990 * for now, we just update the 'pmd' pointer in each rxq to point to the
3991 * wanted thread according to the scheduling policy. */
3992
3993 /* Reset all the pmd threads to non-isolated. */
3994 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
3995 pmd->isolated = false;
3996 }
3997
3998 /* Reset all the queues to unassigned */
3999 HMAP_FOR_EACH (port, node, &dp->ports) {
4000 for (int i = 0; i < port->n_rxq; i++) {
4001 port->rxqs[i].pmd = NULL;
4002 }
4003 }
4004
4005 /* Add pinned queues and mark pmd threads isolated. */
4006 rxq_scheduling(dp, true);
4007
4008 /* Add non-pinned queues. */
4009 rxq_scheduling(dp, false);
4010
4011 /* Step 5: Remove queues not compliant with new scheduling. */
4012 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
4013 struct rxq_poll *poll, *poll_next;
4014
4015 ovs_mutex_lock(&pmd->port_mutex);
4016 HMAP_FOR_EACH_SAFE (poll, poll_next, node, &pmd->poll_list) {
4017 if (poll->rxq->pmd != pmd) {
4018 dp_netdev_del_rxq_from_pmd(pmd, poll);
4019 }
4020 }
4021 ovs_mutex_unlock(&pmd->port_mutex);
4022 }
4023
4024 /* Reload affected pmd threads. We must wait for the pmd threads to remove
4025 * the old queues before readding them, otherwise a queue can be polled by
4026 * two threads at the same time. */
4027 reload_affected_pmds(dp);
4028
4029 /* Step 6: Add queues from scheduling, if they're not there already. */
4030 HMAP_FOR_EACH (port, node, &dp->ports) {
4031 if (!netdev_is_pmd(port->netdev)) {
4032 continue;
4033 }
4034
4035 for (int qid = 0; qid < port->n_rxq; qid++) {
4036 struct dp_netdev_rxq *q = &port->rxqs[qid];
4037
4038 if (q->pmd) {
4039 ovs_mutex_lock(&q->pmd->port_mutex);
4040 dp_netdev_add_rxq_to_pmd(q->pmd, q);
4041 ovs_mutex_unlock(&q->pmd->port_mutex);
4042 }
4043 }
4044 }
4045
4046 /* Add every port to the tx cache of every pmd thread, if it's not
4047 * there already and if this pmd has at least one rxq to poll. */
4048 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
4049 ovs_mutex_lock(&pmd->port_mutex);
4050 if (hmap_count(&pmd->poll_list) || pmd->core_id == NON_PMD_CORE_ID) {
4051 HMAP_FOR_EACH (port, node, &dp->ports) {
4052 dp_netdev_add_port_tx_to_pmd(pmd, port);
4053 }
4054 }
4055 ovs_mutex_unlock(&pmd->port_mutex);
4056 }
4057
4058 /* Reload affected pmd threads. */
4059 reload_affected_pmds(dp);
4060 }
4061
4062 /* Returns true if one of the netdevs in 'dp' requires a reconfiguration */
4063 static bool
4064 ports_require_restart(const struct dp_netdev *dp)
4065 OVS_REQUIRES(dp->port_mutex)
4066 {
4067 struct dp_netdev_port *port;
4068
4069 HMAP_FOR_EACH (port, node, &dp->ports) {
4070 if (netdev_is_reconf_required(port->netdev)) {
4071 return true;
4072 }
4073 }
4074
4075 return false;
4076 }
4077
4078 /* Returns true if the datapath flows need to be revalidated. */
4079 static bool
4080 dpif_netdev_run(struct dpif *dpif)
4081 {
4082 struct dp_netdev_port *port;
4083 struct dp_netdev *dp = get_dp_netdev(dpif);
4084 struct dp_netdev_pmd_thread *non_pmd;
4085 uint64_t new_tnl_seq;
4086 bool need_to_flush = true;
4087
4088 ovs_mutex_lock(&dp->port_mutex);
4089 non_pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID);
4090 if (non_pmd) {
4091 ovs_mutex_lock(&dp->non_pmd_mutex);
4092 HMAP_FOR_EACH (port, node, &dp->ports) {
4093 if (!netdev_is_pmd(port->netdev)) {
4094 int i;
4095
4096 for (i = 0; i < port->n_rxq; i++) {
4097 if (dp_netdev_process_rxq_port(non_pmd,
4098 &port->rxqs[i],
4099 port->port_no)) {
4100 need_to_flush = false;
4101 }
4102 }
4103 }
4104 }
4105 if (need_to_flush) {
4106 /* We didn't receive anything in the process loop.
4107 * Check if we need to send something.
4108 * There were no time updates during the current iteration. */
4109 pmd_thread_ctx_time_update(non_pmd);
4110 dp_netdev_pmd_flush_output_packets(non_pmd, false);
4111 }
4112
4113 dpif_netdev_xps_revalidate_pmd(non_pmd, false);
4114 ovs_mutex_unlock(&dp->non_pmd_mutex);
4115
4116 dp_netdev_pmd_unref(non_pmd);
4117 }
4118
4119 if (dp_netdev_is_reconf_required(dp) || ports_require_restart(dp)) {
4120 reconfigure_datapath(dp);
4121 }
4122 ovs_mutex_unlock(&dp->port_mutex);
4123
4124 tnl_neigh_cache_run();
4125 tnl_port_map_run();
4126 new_tnl_seq = seq_read(tnl_conf_seq);
4127
4128 if (dp->last_tnl_conf_seq != new_tnl_seq) {
4129 dp->last_tnl_conf_seq = new_tnl_seq;
4130 return true;
4131 }
4132 return false;
4133 }
4134
4135 static void
4136 dpif_netdev_wait(struct dpif *dpif)
4137 {
4138 struct dp_netdev_port *port;
4139 struct dp_netdev *dp = get_dp_netdev(dpif);
4140
4141 ovs_mutex_lock(&dp_netdev_mutex);
4142 ovs_mutex_lock(&dp->port_mutex);
4143 HMAP_FOR_EACH (port, node, &dp->ports) {
4144 netdev_wait_reconf_required(port->netdev);
4145 if (!netdev_is_pmd(port->netdev)) {
4146 int i;
4147
4148 for (i = 0; i < port->n_rxq; i++) {
4149 netdev_rxq_wait(port->rxqs[i].rx);
4150 }
4151 }
4152 }
4153 ovs_mutex_unlock(&dp->port_mutex);
4154 ovs_mutex_unlock(&dp_netdev_mutex);
4155 seq_wait(tnl_conf_seq, dp->last_tnl_conf_seq);
4156 }
4157
4158 static void
4159 pmd_free_cached_ports(struct dp_netdev_pmd_thread *pmd)
4160 {
4161 struct tx_port *tx_port_cached;
4162
4163 /* Flush all the queued packets. */
4164 dp_netdev_pmd_flush_output_packets(pmd, true);
4165 /* Free all used tx queue ids. */
4166 dpif_netdev_xps_revalidate_pmd(pmd, true);
4167
4168 HMAP_FOR_EACH_POP (tx_port_cached, node, &pmd->tnl_port_cache) {
4169 free(tx_port_cached);
4170 }
4171 HMAP_FOR_EACH_POP (tx_port_cached, node, &pmd->send_port_cache) {
4172 free(tx_port_cached);
4173 }
4174 }
4175
4176 /* Copies ports from 'pmd->tx_ports' (shared with the main thread) to
4177 * thread-local copies. Copy to 'pmd->tnl_port_cache' if it is a tunnel
4178 * device, otherwise to 'pmd->send_port_cache' if the port has at least
4179 * one txq. */
4180 static void
4181 pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd)
4182 OVS_REQUIRES(pmd->port_mutex)
4183 {
4184 struct tx_port *tx_port, *tx_port_cached;
4185
4186 pmd_free_cached_ports(pmd);
4187 hmap_shrink(&pmd->send_port_cache);
4188 hmap_shrink(&pmd->tnl_port_cache);
4189
4190 HMAP_FOR_EACH (tx_port, node, &pmd->tx_ports) {
4191 if (netdev_has_tunnel_push_pop(tx_port->port->netdev)) {
4192 tx_port_cached = xmemdup(tx_port, sizeof *tx_port_cached);
4193 hmap_insert(&pmd->tnl_port_cache, &tx_port_cached->node,
4194 hash_port_no(tx_port_cached->port->port_no));
4195 }
4196
4197 if (netdev_n_txq(tx_port->port->netdev)) {
4198 tx_port_cached = xmemdup(tx_port, sizeof *tx_port_cached);
4199 hmap_insert(&pmd->send_port_cache, &tx_port_cached->node,
4200 hash_port_no(tx_port_cached->port->port_no));
4201 }
4202 }
4203 }
4204
4205 static void
4206 pmd_alloc_static_tx_qid(struct dp_netdev_pmd_thread *pmd)
4207 {
4208 ovs_mutex_lock(&pmd->dp->tx_qid_pool_mutex);
4209 if (!id_pool_alloc_id(pmd->dp->tx_qid_pool, &pmd->static_tx_qid)) {
4210 VLOG_ABORT("static_tx_qid allocation failed for PMD on core %2d"
4211 ", numa_id %d.", pmd->core_id, pmd->numa_id);
4212 }
4213 ovs_mutex_unlock(&pmd->dp->tx_qid_pool_mutex);
4214
4215 VLOG_DBG("static_tx_qid = %d allocated for PMD thread on core %2d"
4216 ", numa_id %d.", pmd->static_tx_qid, pmd->core_id, pmd->numa_id);
4217 }
4218
4219 static void
4220 pmd_free_static_tx_qid(struct dp_netdev_pmd_thread *pmd)
4221 {
4222 ovs_mutex_lock(&pmd->dp->tx_qid_pool_mutex);
4223 id_pool_free_id(pmd->dp->tx_qid_pool, pmd->static_tx_qid);
4224 ovs_mutex_unlock(&pmd->dp->tx_qid_pool_mutex);
4225 }
4226
4227 static int
4228 pmd_load_queues_and_ports(struct dp_netdev_pmd_thread *pmd,
4229 struct polled_queue **ppoll_list)
4230 {
4231 struct polled_queue *poll_list = *ppoll_list;
4232 struct rxq_poll *poll;
4233 int i;
4234
4235 ovs_mutex_lock(&pmd->port_mutex);
4236 poll_list = xrealloc(poll_list, hmap_count(&pmd->poll_list)
4237 * sizeof *poll_list);
4238
4239 i = 0;
4240 HMAP_FOR_EACH (poll, node, &pmd->poll_list) {
4241 poll_list[i].rxq = poll->rxq;
4242 poll_list[i].port_no = poll->rxq->port->port_no;
4243 i++;
4244 }
4245
4246 pmd_load_cached_ports(pmd);
4247
4248 ovs_mutex_unlock(&pmd->port_mutex);
4249
4250 *ppoll_list = poll_list;
4251 return i;
4252 }
4253
4254 static void *
4255 pmd_thread_main(void *f_)
4256 {
4257 struct dp_netdev_pmd_thread *pmd = f_;
4258 struct pmd_perf_stats *s = &pmd->perf_stats;
4259 unsigned int lc = 0;
4260 struct polled_queue *poll_list;
4261 bool exiting;
4262 int poll_cnt;
4263 int i;
4264 int process_packets = 0;
4265
4266 poll_list = NULL;
4267
4268 /* Stores the pmd thread's 'pmd' to 'per_pmd_key'. */
4269 ovsthread_setspecific(pmd->dp->per_pmd_key, pmd);
4270 ovs_numa_thread_setaffinity_core(pmd->core_id);
4271 dpdk_set_lcore_id(pmd->core_id);
4272 poll_cnt = pmd_load_queues_and_ports(pmd, &poll_list);
4273 emc_cache_init(&pmd->flow_cache);
4274 reload:
4275 pmd_alloc_static_tx_qid(pmd);
4276
4277 /* List port/core affinity */
4278 for (i = 0; i < poll_cnt; i++) {
4279 VLOG_DBG("Core %d processing port \'%s\' with queue-id %d\n",
4280 pmd->core_id, netdev_rxq_get_name(poll_list[i].rxq->rx),
4281 netdev_rxq_get_queue_id(poll_list[i].rxq->rx));
4282 /* Reset the rxq current cycles counter. */
4283 dp_netdev_rxq_set_cycles(poll_list[i].rxq, RXQ_CYCLES_PROC_CURR, 0);
4284 }
4285
4286 if (!poll_cnt) {
4287 while (seq_read(pmd->reload_seq) == pmd->last_reload_seq) {
4288 seq_wait(pmd->reload_seq, pmd->last_reload_seq);
4289 poll_block();
4290 }
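/* Setting 'lc' to UINT_MAX forces the maintenance branch ('lc++ > 1024')
 * on the first loop iteration below, so a pending reload request is
 * noticed immediately. */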
4291 lc = UINT_MAX;
4292 }
4293
4294 pmd->intrvl_tsc_prev = 0;
4295 atomic_store_relaxed(&pmd->intrvl_cycles, 0);
4296 cycles_counter_update(s);
4297 /* Protect pmd stats from external clearing while polling. */
4298 ovs_mutex_lock(&pmd->perf_stats.stats_mutex);
4299 for (;;) {
4300 uint64_t rx_packets = 0, tx_packets = 0;
4301
4302 pmd_perf_start_iteration(s);
4303
4304 for (i = 0; i < poll_cnt; i++) {
4305 process_packets =
4306 dp_netdev_process_rxq_port(pmd, poll_list[i].rxq,
4307 poll_list[i].port_no);
4308 rx_packets += process_packets;
4309 }
4310
4311 if (!rx_packets) {
4312 /* We didn't receive anything in the process loop.
4313 * Check if we need to send something.
4314 * There were no time updates during the current iteration. */
4315 pmd_thread_ctx_time_update(pmd);
4316 tx_packets = dp_netdev_pmd_flush_output_packets(pmd, false);
4317 }
4318
4319 if (lc++ > 1024) {
4320 bool reload;
4321
4322 lc = 0;
4323
4324 coverage_try_clear();
4325 dp_netdev_pmd_try_optimize(pmd, poll_list, poll_cnt);
4326 if (!ovsrcu_try_quiesce()) {
4327 emc_cache_slow_sweep(&pmd->flow_cache);
4328 }
4329
4330 atomic_read_relaxed(&pmd->reload, &reload);
4331 if (reload) {
4332 break;
4333 }
4334 }
4335 pmd_perf_end_iteration(s, rx_packets, tx_packets,
4336 pmd_perf_metrics_enabled(pmd));
4337 }
4338 ovs_mutex_unlock(&pmd->perf_stats.stats_mutex);
4339
4340 poll_cnt = pmd_load_queues_and_ports(pmd, &poll_list);
4341 exiting = latch_is_set(&pmd->exit_latch);
4342 /* Signal here to make sure the pmd finishes
4343 * reloading the updated configuration. */
4344 dp_netdev_pmd_reload_done(pmd);
4345
4346 pmd_free_static_tx_qid(pmd);
4347
4348 if (!exiting) {
4349 goto reload;
4350 }
4351
4352 emc_cache_uninit(&pmd->flow_cache);
4353 free(poll_list);
4354 pmd_free_cached_ports(pmd);
4355 return NULL;
4356 }
4357
4358 static void
4359 dp_netdev_disable_upcall(struct dp_netdev *dp)
4360 OVS_ACQUIRES(dp->upcall_rwlock)
4361 {
4362 fat_rwlock_wrlock(&dp->upcall_rwlock);
4363 }
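/* Note: while this write lock is held, fat_rwlock_tryrdlock() in
 * fast_path_processing() fails, so flow misses are dropped instead of
 * being passed up as upcalls. */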
4364
4365 \f
4366 /* Meters */
4367 static void
4368 dpif_netdev_meter_get_features(const struct dpif *dpif OVS_UNUSED,
4369 struct ofputil_meter_features *features)
4370 {
4371 features->max_meters = MAX_METERS;
4372 features->band_types = DP_SUPPORTED_METER_BAND_TYPES;
4373 features->capabilities = DP_SUPPORTED_METER_FLAGS_MASK;
4374 features->max_bands = MAX_BANDS;
4375 features->max_color = 0;
4376 }
4377
4378 /* Applies the meter identified by 'meter_id' to 'packets_', dropping packets that exceed a band rate. */
4379 static void
4380 dp_netdev_run_meter(struct dp_netdev *dp, struct dp_packet_batch *packets_,
4381 uint32_t meter_id, long long int now)
4382 {
4383 struct dp_meter *meter;
4384 struct dp_meter_band *band;
4385 struct dp_packet *packet;
4386 long long int long_delta_t; /* msec */
4387 uint32_t delta_t; /* msec */
4388 const size_t cnt = dp_packet_batch_size(packets_);
4389 uint32_t bytes, volume;
4390 int exceeded_band[NETDEV_MAX_BURST];
4391 uint32_t exceeded_rate[NETDEV_MAX_BURST];
4392 int exceeded_pkt = cnt; /* First packet that exceeded a band rate. */
4393
4394 if (meter_id >= MAX_METERS) {
4395 return;
4396 }
4397
4398 meter_lock(dp, meter_id);
4399 meter = dp->meters[meter_id];
4400 if (!meter) {
4401 goto out;
4402 }
4403
4404 /* Initialize as negative values. */
4405 memset(exceeded_band, 0xff, cnt * sizeof *exceeded_band);
4406 /* Initialize as zeroes. */
4407 memset(exceeded_rate, 0, cnt * sizeof *exceeded_rate);
4408
4409 /* All packets will hit the meter at the same time. */
4410 long_delta_t = (now - meter->used) / 1000; /* msec */
4411
4412 /* Make sure delta_t will not be too large, so that bucket will not
4413 * wrap around below. */
4414 delta_t = (long_delta_t > (long long int)meter->max_delta_t)
4415 ? meter->max_delta_t : (uint32_t)long_delta_t;
4416
4417 /* Update meter stats. */
4418 meter->used = now;
4419 meter->packet_count += cnt;
4420 bytes = 0;
4421 DP_PACKET_BATCH_FOR_EACH (i, packet, packets_) {
4422 bytes += dp_packet_size(packet);
4423 }
4424 meter->byte_count += bytes;
4425
4426 /* Meters can operate in terms of packets per second or kilobits per
4427 * second. */
4428 if (meter->flags & OFPMF13_PKTPS) {
4429 /* Rate in packets/second, bucket 1/1000 packets. */
4430 /* msec * packets/sec = 1/1000 packets. */
4431 volume = cnt * 1000; /* Take 'cnt' packets from the bucket. */
4432 } else {
4433 /* Rate in kbps, bucket in bits. */
4434 /* msec * kbps = bits */
4435 volume = bytes * 8;
4436 }
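/* Unit check (illustrative numbers): in OFPMF13_PKTPS mode a 32-packet
 * batch gives volume = 32000 bucket units (1/1000 packets each); in kbps
 * mode a single 1500-byte packet contributes 12000 bits.  A band bucket
 * refills by delta_t (msec) * rate, e.g. 10 msec at 1000 kbps adds
 * 10000 bits. */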
4437
4438 /* Update all bands and find the one hit with the highest rate for each
4439 * packet (if any). */
4440 for (int m = 0; m < meter->n_bands; ++m) {
4441 band = &meter->bands[m];
4442
4443 /* Update band's bucket. */
4444 band->bucket += delta_t * band->up.rate;
4445 if (band->bucket > band->up.burst_size) {
4446 band->bucket = band->up.burst_size;
4447 }
4448
4449 /* Drain the bucket for all the packets, if possible. */
4450 if (band->bucket >= volume) {
4451 band->bucket -= volume;
4452 } else {
4453 int band_exceeded_pkt;
4454
4455 /* Band limit hit, must process packet-by-packet. */
4456 if (meter->flags & OFPMF13_PKTPS) {
4457 band_exceeded_pkt = band->bucket / 1000;
4458 band->bucket %= 1000; /* Remainder stays in bucket. */
4459
4460 /* Update the exceeding band for each exceeding packet.
4461 * (Only one band will be fired by a packet, and that
4462 * can be different for each packet.) */
4463 for (int i = band_exceeded_pkt; i < cnt; i++) {
4464 if (band->up.rate > exceeded_rate[i]) {
4465 exceeded_rate[i] = band->up.rate;
4466 exceeded_band[i] = m;
4467 }
4468 }
4469 } else {
4470 /* Packet sizes differ, must process one-by-one. */
4471 band_exceeded_pkt = cnt;
4472 DP_PACKET_BATCH_FOR_EACH (i, packet, packets_) {
4473 uint32_t bits = dp_packet_size(packet) * 8;
4474
4475 if (band->bucket >= bits) {
4476 band->bucket -= bits;
4477 } else {
4478 if (i < band_exceeded_pkt) {
4479 band_exceeded_pkt = i;
4480 }
4481 /* Update the exceeding band for the exceeding packet.
4482 * (Only one band will be fired by a packet, and that
4483 * can be different for each packet.) */
4484 if (band->up.rate > exceeded_rate[i]) {
4485 exceeded_rate[i] = band->up.rate;
4486 exceeded_band[i] = m;
4487 }
4488 }
4489 }
4490 }
4491 /* Remember the first exceeding packet. */
4492 if (exceeded_pkt > band_exceeded_pkt) {
4493 exceeded_pkt = band_exceeded_pkt;
4494 }
4495 }
4496 }
4497
4498 /* Fire the highest rate band exceeded by each packet.
4499 * Drop packets if needed by swapping them to the end of the batch,
4500 * where they will be ignored. */
4501 size_t j;
4502 DP_PACKET_BATCH_REFILL_FOR_EACH (j, cnt, packet, packets_) {
4503 if (exceeded_band[j] >= 0) {
4504 /* Meter drop packet. */
4505 band = &meter->bands[exceeded_band[j]];
4506 band->packet_count += 1;
4507 band->byte_count += dp_packet_size(packet);
4508
4509 dp_packet_delete(packet);
4510 } else {
4511 /* Meter accepts packet. */
4512 dp_packet_batch_refill(packets_, packet, j);
4513 }
4514 }
4515 out:
4516 meter_unlock(dp, meter_id);
4517 }
4518
4519 /* Meter set/get/del processing is still single-threaded. */
4520 static int
4521 dpif_netdev_meter_set(struct dpif *dpif, ofproto_meter_id *meter_id,
4522 struct ofputil_meter_config *config)
4523 {
4524 struct dp_netdev *dp = get_dp_netdev(dpif);
4525 uint32_t mid = meter_id->uint32;
4526 struct dp_meter *meter;
4527 int i;
4528
4529 if (mid >= MAX_METERS) {
4530 return EFBIG; /* Meter_id out of range. */
4531 }
4532
4533 if (config->flags & ~DP_SUPPORTED_METER_FLAGS_MASK ||
4534 !(config->flags & (OFPMF13_KBPS | OFPMF13_PKTPS))) {
4535 return EBADF; /* Unsupported flags set */
4536 }
4537
4538 /* Validate the number of bands. */
4539 if (config->n_bands == 0 || config->n_bands > MAX_BANDS) {
4540 return EINVAL; /* Invalid number of bands. */
4541 }
4542
4543 /* Validate rates */
4544 for (i = 0; i < config->n_bands; i++) {
4545 if (config->bands[i].rate == 0) {
4546 return EDOM; /* Rate must be non-zero. */
4547 }
4548 }
4549
4550 for (i = 0; i < config->n_bands; ++i) {
4551 switch (config->bands[i].type) {
4552 case OFPMBT13_DROP:
4553 break;
4554 default:
4555 return ENODEV; /* Unsupported band type */
4556 }
4557 }
4558
4559 /* Allocate meter */
4560 meter = xzalloc(sizeof *meter
4561 + config->n_bands * sizeof(struct dp_meter_band));
4562 if (meter) {
4563 meter->flags = config->flags;
4564 meter->n_bands = config->n_bands;
4565 meter->max_delta_t = 0;
4566 meter->used = time_usec();
4567
4568 /* set up bands */
4569 for (i = 0; i < config->n_bands; ++i) {
4570 uint32_t band_max_delta_t;
4571
4572 /* Set burst size to a workable value if none specified. */
4573 if (config->bands[i].burst_size == 0) {
4574 config->bands[i].burst_size = config->bands[i].rate;
4575 }
4576
4577 meter->bands[i].up = config->bands[i];
4578 /* Convert burst size to the bucket units: */
4579 /* pkts => 1/1000 packets, kilobits => bits. */
4580 meter->bands[i].up.burst_size *= 1000;
4581 /* Initialize bucket to empty. */
4582 meter->bands[i].bucket = 0;
4583
4584 /* Figure out max delta_t that is enough to fill any bucket. */
4585 band_max_delta_t
4586 = meter->bands[i].up.burst_size / meter->bands[i].up.rate;
4587 if (band_max_delta_t > meter->max_delta_t) {
4588 meter->max_delta_t = band_max_delta_t;
4589 }
4590 }
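/* Example with assumed numbers: a kbps band with rate 1000 and a
 * configured burst_size of 1000 kilobits is stored as a 1,000,000-bit
 * bucket; band_max_delta_t = 1,000,000 / 1000 = 1000, i.e. 1000 msec of
 * idle time is enough to refill that bucket completely. */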
4591
4592 meter_lock(dp, mid);
4593 dp_delete_meter(dp, mid); /* Free existing meter, if any */
4594 dp->meters[mid] = meter;
4595 meter_unlock(dp, mid);
4596
4597 return 0;
4598 }
4599 return ENOMEM;
4600 }
4601
4602 static int
4603 dpif_netdev_meter_get(const struct dpif *dpif,
4604 ofproto_meter_id meter_id_,
4605 struct ofputil_meter_stats *stats, uint16_t n_bands)
4606 {
4607 const struct dp_netdev *dp = get_dp_netdev(dpif);
4608 const struct dp_meter *meter;
4609 uint32_t meter_id = meter_id_.uint32;
4610
4611 if (meter_id >= MAX_METERS) {
4612 return EFBIG;
4613 }
4614 meter = dp->meters[meter_id];
4615 if (!meter) {
4616 return ENOENT;
4617 }
4618 if (stats) {
4619 int i = 0;
4620
4621 meter_lock(dp, meter_id);
4622 stats->packet_in_count = meter->packet_count;
4623 stats->byte_in_count = meter->byte_count;
4624
4625 for (i = 0; i < n_bands && i < meter->n_bands; ++i) {
4626 stats->bands[i].packet_count = meter->bands[i].packet_count;
4627 stats->bands[i].byte_count = meter->bands[i].byte_count;
4628 }
4629 meter_unlock(dp, meter_id);
4630
4631 stats->n_bands = i;
4632 }
4633 return 0;
4634 }
4635
4636 static int
4637 dpif_netdev_meter_del(struct dpif *dpif,
4638 ofproto_meter_id meter_id_,
4639 struct ofputil_meter_stats *stats, uint16_t n_bands)
4640 {
4641 struct dp_netdev *dp = get_dp_netdev(dpif);
4642 int error;
4643
4644 error = dpif_netdev_meter_get(dpif, meter_id_, stats, n_bands);
4645 if (!error) {
4646 uint32_t meter_id = meter_id_.uint32;
4647
4648 meter_lock(dp, meter_id);
4649 dp_delete_meter(dp, meter_id);
4650 meter_unlock(dp, meter_id);
4651 }
4652 return error;
4653 }
4654
4655 \f
4656 static void
4657 dpif_netdev_disable_upcall(struct dpif *dpif)
4658 OVS_NO_THREAD_SAFETY_ANALYSIS
4659 {
4660 struct dp_netdev *dp = get_dp_netdev(dpif);
4661 dp_netdev_disable_upcall(dp);
4662 }
4663
4664 static void
4665 dp_netdev_enable_upcall(struct dp_netdev *dp)
4666 OVS_RELEASES(dp->upcall_rwlock)
4667 {
4668 fat_rwlock_unlock(&dp->upcall_rwlock);
4669 }
4670
4671 static void
4672 dpif_netdev_enable_upcall(struct dpif *dpif)
4673 OVS_NO_THREAD_SAFETY_ANALYSIS
4674 {
4675 struct dp_netdev *dp = get_dp_netdev(dpif);
4676 dp_netdev_enable_upcall(dp);
4677 }
4678
4679 static void
4680 dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd)
4681 {
4682 ovs_mutex_lock(&pmd->cond_mutex);
4683 atomic_store_relaxed(&pmd->reload, false);
4684 pmd->last_reload_seq = seq_read(pmd->reload_seq);
4685 xpthread_cond_signal(&pmd->cond);
4686 ovs_mutex_unlock(&pmd->cond_mutex);
4687 }
4688
4689 /* Finds and refs the dp_netdev_pmd_thread on core 'core_id'.  Returns
4690 * the pointer on success, otherwise NULL (it can return NULL even if
4691 * 'core_id' is NON_PMD_CORE_ID).
4692 *
4693 * The caller must unref the returned reference. */
4694 static struct dp_netdev_pmd_thread *
4695 dp_netdev_get_pmd(struct dp_netdev *dp, unsigned core_id)
4696 {
4697 struct dp_netdev_pmd_thread *pmd;
4698 const struct cmap_node *pnode;
4699
4700 pnode = cmap_find(&dp->poll_threads, hash_int(core_id, 0));
4701 if (!pnode) {
4702 return NULL;
4703 }
4704 pmd = CONTAINER_OF(pnode, struct dp_netdev_pmd_thread, node);
4705
4706 return dp_netdev_pmd_try_ref(pmd) ? pmd : NULL;
4707 }
4708
4709 /* Sets the 'struct dp_netdev_pmd_thread' for non-pmd threads. */
4710 static void
4711 dp_netdev_set_nonpmd(struct dp_netdev *dp)
4712 OVS_REQUIRES(dp->port_mutex)
4713 {
4714 struct dp_netdev_pmd_thread *non_pmd;
4715
4716 non_pmd = xzalloc(sizeof *non_pmd);
4717 dp_netdev_configure_pmd(non_pmd, dp, NON_PMD_CORE_ID, OVS_NUMA_UNSPEC);
4718 }
4719
4720 /* The caller must have a valid pointer to 'pmd'. */
4721 static bool
4722 dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd)
4723 {
4724 return ovs_refcount_try_ref_rcu(&pmd->ref_cnt);
4725 }
4726
4727 static void
4728 dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd)
4729 {
4730 if (pmd && ovs_refcount_unref(&pmd->ref_cnt) == 1) {
4731 ovsrcu_postpone(dp_netdev_destroy_pmd, pmd);
4732 }
4733 }
4734
4735 /* Given cmap position 'pos', tries to ref the next node.  If try_ref()
4736 * fails, keeps checking for the next node until reaching the end of cmap.
4737 *
4738 * The caller must unref the returned reference. */
4739 static struct dp_netdev_pmd_thread *
4740 dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos)
4741 {
4742 struct dp_netdev_pmd_thread *next;
4743
4744 do {
4745 struct cmap_node *node;
4746
4747 node = cmap_next_position(&dp->poll_threads, pos);
4748 next = node ? CONTAINER_OF(node, struct dp_netdev_pmd_thread, node)
4749 : NULL;
4750 } while (next && !dp_netdev_pmd_try_ref(next));
4751
4752 return next;
4753 }
4754
4755 /* Configures the 'pmd' based on the input argument. */
4756 static void
4757 dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
4758 unsigned core_id, int numa_id)
4759 {
4760 pmd->dp = dp;
4761 pmd->core_id = core_id;
4762 pmd->numa_id = numa_id;
4763 pmd->need_reload = false;
4764 pmd->n_output_batches = 0;
4765
4766 ovs_refcount_init(&pmd->ref_cnt);
4767 latch_init(&pmd->exit_latch);
4768 pmd->reload_seq = seq_create();
4769 pmd->last_reload_seq = seq_read(pmd->reload_seq);
4770 atomic_init(&pmd->reload, false);
4771 xpthread_cond_init(&pmd->cond, NULL);
4772 ovs_mutex_init(&pmd->cond_mutex);
4773 ovs_mutex_init(&pmd->flow_mutex);
4774 ovs_mutex_init(&pmd->port_mutex);
4775 cmap_init(&pmd->flow_table);
4776 cmap_init(&pmd->classifiers);
4777 pmd->ctx.last_rxq = NULL;
4778 pmd_thread_ctx_time_update(pmd);
4779 pmd->next_optimization = pmd->ctx.now + DPCLS_OPTIMIZATION_INTERVAL;
4780 pmd->rxq_next_cycle_store = pmd->ctx.now + PMD_RXQ_INTERVAL_LEN;
4781 hmap_init(&pmd->poll_list);
4782 hmap_init(&pmd->tx_ports);
4783 hmap_init(&pmd->tnl_port_cache);
4784 hmap_init(&pmd->send_port_cache);
4785 /* Initialize 'flow_cache' here since there is no
4786 * actual thread created for NON_PMD_CORE_ID. */
4787 if (core_id == NON_PMD_CORE_ID) {
4788 emc_cache_init(&pmd->flow_cache);
4789 pmd_alloc_static_tx_qid(pmd);
4790 }
4791 pmd_perf_stats_init(&pmd->perf_stats);
4792 cmap_insert(&dp->poll_threads, CONST_CAST(struct cmap_node *, &pmd->node),
4793 hash_int(core_id, 0));
4794 }
4795
4796 static void
4797 dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd)
4798 {
4799 struct dpcls *cls;
4800
4801 dp_netdev_pmd_flow_flush(pmd);
4802 hmap_destroy(&pmd->send_port_cache);
4803 hmap_destroy(&pmd->tnl_port_cache);
4804 hmap_destroy(&pmd->tx_ports);
4805 hmap_destroy(&pmd->poll_list);
4806 /* All flows (including their dpcls_rules) have been deleted already */
4807 CMAP_FOR_EACH (cls, node, &pmd->classifiers) {
4808 dpcls_destroy(cls);
4809 ovsrcu_postpone(free, cls);
4810 }
4811 cmap_destroy(&pmd->classifiers);
4812 cmap_destroy(&pmd->flow_table);
4813 ovs_mutex_destroy(&pmd->flow_mutex);
4814 latch_destroy(&pmd->exit_latch);
4815 seq_destroy(pmd->reload_seq);
4816 xpthread_cond_destroy(&pmd->cond);
4817 ovs_mutex_destroy(&pmd->cond_mutex);
4818 ovs_mutex_destroy(&pmd->port_mutex);
4819 free(pmd);
4820 }
4821
4822 /* Stops the pmd thread, removes it from the 'dp->poll_threads',
4823 * and unrefs the struct. */
4824 static void
4825 dp_netdev_del_pmd(struct dp_netdev *dp, struct dp_netdev_pmd_thread *pmd)
4826 {
4827 /* NON_PMD_CORE_ID doesn't have a thread, so we don't have to synchronize,
4828 * but extra cleanup is necessary */
4829 if (pmd->core_id == NON_PMD_CORE_ID) {
4830 ovs_mutex_lock(&dp->non_pmd_mutex);
4831 emc_cache_uninit(&pmd->flow_cache);
4832 pmd_free_cached_ports(pmd);
4833 pmd_free_static_tx_qid(pmd);
4834 ovs_mutex_unlock(&dp->non_pmd_mutex);
4835 } else {
4836 latch_set(&pmd->exit_latch);
4837 dp_netdev_reload_pmd__(pmd);
4838 xpthread_join(pmd->thread, NULL);
4839 }
4840
4841 dp_netdev_pmd_clear_ports(pmd);
4842
4843 /* Purges the pmd's flows after stopping the thread, but before
4844 * destroying the flows, so that the flow stats can be collected. */
4845 if (dp->dp_purge_cb) {
4846 dp->dp_purge_cb(dp->dp_purge_aux, pmd->core_id);
4847 }
4848 cmap_remove(&pmd->dp->poll_threads, &pmd->node, hash_int(pmd->core_id, 0));
4849 dp_netdev_pmd_unref(pmd);
4850 }
4851
4852 /* Destroys all pmd threads.  If 'non_pmd' is true it also destroys the
4853 * non-pmd thread. */
4854 static void
4855 dp_netdev_destroy_all_pmds(struct dp_netdev *dp, bool non_pmd)
4856 {
4857 struct dp_netdev_pmd_thread *pmd;
4858 struct dp_netdev_pmd_thread **pmd_list;
4859 size_t k = 0, n_pmds;
4860
4861 n_pmds = cmap_count(&dp->poll_threads);
4862 pmd_list = xcalloc(n_pmds, sizeof *pmd_list);
4863
4864 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
4865 if (!non_pmd && pmd->core_id == NON_PMD_CORE_ID) {
4866 continue;
4867 }
4868 /* We cannot call dp_netdev_del_pmd(), since it alters
4869 * 'dp->poll_threads' (while we're iterating it) and it
4870 * might quiesce. */
4871 ovs_assert(k < n_pmds);
4872 pmd_list[k++] = pmd;
4873 }
4874
4875 for (size_t i = 0; i < k; i++) {
4876 dp_netdev_del_pmd(dp, pmd_list[i]);
4877 }
4878 free(pmd_list);
4879 }
4880
4881 /* Deletes all rx queues from pmd->poll_list and all the ports from
4882 * pmd->tx_ports. */
4883 static void
4884 dp_netdev_pmd_clear_ports(struct dp_netdev_pmd_thread *pmd)
4885 {
4886 struct rxq_poll *poll;
4887 struct tx_port *port;
4888
4889 ovs_mutex_lock(&pmd->port_mutex);
4890 HMAP_FOR_EACH_POP (poll, node, &pmd->poll_list) {
4891 free(poll);
4892 }
4893 HMAP_FOR_EACH_POP (port, node, &pmd->tx_ports) {
4894 free(port);
4895 }
4896 ovs_mutex_unlock(&pmd->port_mutex);
4897 }
4898
4899 /* Adds rx queue to poll_list of PMD thread, if it's not there already. */
4900 static void
4901 dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd,
4902 struct dp_netdev_rxq *rxq)
4903 OVS_REQUIRES(pmd->port_mutex)
4904 {
4905 int qid = netdev_rxq_get_queue_id(rxq->rx);
4906 uint32_t hash = hash_2words(odp_to_u32(rxq->port->port_no), qid);
4907 struct rxq_poll *poll;
4908
4909 HMAP_FOR_EACH_WITH_HASH (poll, node, hash, &pmd->poll_list) {
4910 if (poll->rxq == rxq) {
4911 /* 'rxq' is already polled by this thread. Do nothing. */
4912 return;
4913 }
4914 }
4915
4916 poll = xmalloc(sizeof *poll);
4917 poll->rxq = rxq;
4918 hmap_insert(&pmd->poll_list, &poll->node, hash);
4919
4920 pmd->need_reload = true;
4921 }
4922
4923 /* Delete 'poll' from poll_list of PMD thread. */
4924 static void
4925 dp_netdev_del_rxq_from_pmd(struct dp_netdev_pmd_thread *pmd,
4926 struct rxq_poll *poll)
4927 OVS_REQUIRES(pmd->port_mutex)
4928 {
4929 hmap_remove(&pmd->poll_list, &poll->node);
4930 free(poll);
4931
4932 pmd->need_reload = true;
4933 }
4934
4935 /* Add 'port' to the tx port cache of 'pmd', which must be reloaded for the
4936 * changes to take effect. */
4937 static void
4938 dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread *pmd,
4939 struct dp_netdev_port *port)
4940 OVS_REQUIRES(pmd->port_mutex)
4941 {
4942 struct tx_port *tx;
4943
4944 tx = tx_port_lookup(&pmd->tx_ports, port->port_no);
4945 if (tx) {
4946 /* 'port' is already in this thread's tx cache.  Do nothing. */
4947 return;
4948 }
4949
4950 tx = xzalloc(sizeof *tx);
4951
4952 tx->port = port;
4953 tx->qid = -1;
4954 tx->flush_time = 0LL;
4955 dp_packet_batch_init(&tx->output_pkts);
4956
4957 hmap_insert(&pmd->tx_ports, &tx->node, hash_port_no(tx->port->port_no));
4958 pmd->need_reload = true;
4959 }
4960
4961 /* Del 'tx' from the tx port cache of 'pmd', which must be reloaded for the
4962 * changes to take effect. */
4963 static void
4964 dp_netdev_del_port_tx_from_pmd(struct dp_netdev_pmd_thread *pmd,
4965 struct tx_port *tx)
4966 OVS_REQUIRES(pmd->port_mutex)
4967 {
4968 hmap_remove(&pmd->tx_ports, &tx->node);
4969 free(tx);
4970 pmd->need_reload = true;
4971 }
4972 \f
4973 static char *
4974 dpif_netdev_get_datapath_version(void)
4975 {
4976 return xstrdup("<built-in>");
4977 }
4978
4979 static void
4980 dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow, int cnt, int size,
4981 uint16_t tcp_flags, long long now)
4982 {
4983 uint16_t flags;
4984
4985 atomic_store_relaxed(&netdev_flow->stats.used, now);
4986 non_atomic_ullong_add(&netdev_flow->stats.packet_count, cnt);
4987 non_atomic_ullong_add(&netdev_flow->stats.byte_count, size);
4988 atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags);
4989 flags |= tcp_flags;
4990 atomic_store_relaxed(&netdev_flow->stats.tcp_flags, flags);
4991 }
4992
4993 static int
4994 dp_netdev_upcall(struct dp_netdev_pmd_thread *pmd, struct dp_packet *packet_,
4995 struct flow *flow, struct flow_wildcards *wc, ovs_u128 *ufid,
4996 enum dpif_upcall_type type, const struct nlattr *userdata,
4997 struct ofpbuf *actions, struct ofpbuf *put_actions)
4998 {
4999 struct dp_netdev *dp = pmd->dp;
5000
5001 if (OVS_UNLIKELY(!dp->upcall_cb)) {
5002 return ENODEV;
5003 }
5004
5005 if (OVS_UNLIKELY(!VLOG_DROP_DBG(&upcall_rl))) {
5006 struct ds ds = DS_EMPTY_INITIALIZER;
5007 char *packet_str;
5008 struct ofpbuf key;
5009 struct odp_flow_key_parms odp_parms = {
5010 .flow = flow,
5011 .mask = wc ? &wc->masks : NULL,
5012 .support = dp_netdev_support,
5013 };
5014
5015 ofpbuf_init(&key, 0);
5016 odp_flow_key_from_flow(&odp_parms, &key);
5017 packet_str = ofp_dp_packet_to_string(packet_);
5018
5019 odp_flow_key_format(key.data, key.size, &ds);
5020
5021 VLOG_DBG("%s: %s upcall:\n%s\n%s", dp->name,
5022 dpif_upcall_type_to_string(type), ds_cstr(&ds), packet_str);
5023
5024 ofpbuf_uninit(&key);
5025 free(packet_str);
5026
5027 ds_destroy(&ds);
5028 }
5029
5030 return dp->upcall_cb(packet_, flow, ufid, pmd->core_id, type, userdata,
5031 actions, wc, put_actions, dp->upcall_aux);
5032 }
5033
5034 static inline uint32_t
5035 dpif_netdev_packet_get_rss_hash_orig_pkt(struct dp_packet *packet,
5036 const struct miniflow *mf)
5037 {
5038 uint32_t hash;
5039
5040 if (OVS_LIKELY(dp_packet_rss_valid(packet))) {
5041 hash = dp_packet_get_rss_hash(packet);
5042 } else {
5043 hash = miniflow_hash_5tuple(mf, 0);
5044 dp_packet_set_rss_hash(packet, hash);
5045 }
5046
5047 return hash;
5048 }
5049
5050 static inline uint32_t
5051 dpif_netdev_packet_get_rss_hash(struct dp_packet *packet,
5052 const struct miniflow *mf)
5053 {
5054 uint32_t hash, recirc_depth;
5055
5056 if (OVS_LIKELY(dp_packet_rss_valid(packet))) {
5057 hash = dp_packet_get_rss_hash(packet);
5058 } else {
5059 hash = miniflow_hash_5tuple(mf, 0);
5060 dp_packet_set_rss_hash(packet, hash);
5061 }
5062
5063 /* The RSS hash must account for the recirculation depth to avoid
5064 * collisions in the exact match cache. */
5065 recirc_depth = *recirc_depth_get_unsafe();
5066 if (OVS_UNLIKELY(recirc_depth)) {
5067 hash = hash_finish(hash, recirc_depth);
5068 dp_packet_set_rss_hash(packet, hash);
5069 }
5070 return hash;
5071 }
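/* Note: folding the recirculation depth into the hash keeps pre- and
 * post-recirculation copies of the same packet (identical 5-tuple,
 * different flows) from colliding on the same EMC entry. */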
5072
5073 struct packet_batch_per_flow {
5074 unsigned int byte_count;
5075 uint16_t tcp_flags;
5076 struct dp_netdev_flow *flow;
5077
5078 struct dp_packet_batch array;
5079 };
5080
5081 static inline void
5082 packet_batch_per_flow_update(struct packet_batch_per_flow *batch,
5083 struct dp_packet *packet,
5084 const struct miniflow *mf)
5085 {
5086 batch->byte_count += dp_packet_size(packet);
5087 batch->tcp_flags |= miniflow_get_tcp_flags(mf);
5088 batch->array.packets[batch->array.count++] = packet;
5089 }
5090
5091 static inline void
5092 packet_batch_per_flow_init(struct packet_batch_per_flow *batch,
5093 struct dp_netdev_flow *flow)
5094 {
5095 flow->batch = batch;
5096
5097 batch->flow = flow;
5098 dp_packet_batch_init(&batch->array);
5099 batch->byte_count = 0;
5100 batch->tcp_flags = 0;
5101 }
5102
5103 static inline void
5104 packet_batch_per_flow_execute(struct packet_batch_per_flow *batch,
5105 struct dp_netdev_pmd_thread *pmd)
5106 {
5107 struct dp_netdev_actions *actions;
5108 struct dp_netdev_flow *flow = batch->flow;
5109
5110 dp_netdev_flow_used(flow, batch->array.count, batch->byte_count,
5111 batch->tcp_flags, pmd->ctx.now / 1000);
5112
5113 actions = dp_netdev_flow_get_actions(flow);
5114
5115 dp_netdev_execute_actions(pmd, &batch->array, true, &flow->flow,
5116 actions->actions, actions->size);
5117 }
5118
5119 static inline void
5120 dp_netdev_queue_batches(struct dp_packet *pkt,
5121 struct dp_netdev_flow *flow, const struct miniflow *mf,
5122 struct packet_batch_per_flow *batches,
5123 size_t *n_batches)
5124 {
5125 struct packet_batch_per_flow *batch = flow->batch;
5126
5127 if (OVS_UNLIKELY(!batch)) {
5128 batch = &batches[(*n_batches)++];
5129 packet_batch_per_flow_init(batch, flow);
5130 }
5131
5132 packet_batch_per_flow_update(batch, pkt, mf);
5133 }
5134
5135 /* Tries to process all ('cnt') the 'packets' using only the exact match cache
5136 * 'pmd->flow_cache'.  If a flow is not found for a packet 'packets[i]', the
5137 * miniflow is copied into 'keys' and the packet pointer is moved to the
5138 * beginning of the 'packets' array.
5139 *
5140 * The function returns the number of packets that need to be processed in the
5141 * 'packets' array (they have been moved to the beginning of the vector).
5142 *
5143 * For performance reasons a caller may choose not to initialize the metadata
5144 * in 'packets_'. If 'md_is_valid' is false, the metadata in 'packets'
5145 * is not valid and must be initialized by this function using 'port_no'.
5146 * If 'md_is_valid' is true, the metadata is already valid and 'port_no'
5147 * will be ignored.
5148 */
5149 static inline size_t
5150 emc_processing(struct dp_netdev_pmd_thread *pmd,
5151 struct dp_packet_batch *packets_,
5152 struct netdev_flow_key *keys,
5153 struct packet_batch_per_flow batches[], size_t *n_batches,
5154 bool md_is_valid, odp_port_t port_no)
5155 {
5156 struct emc_cache *flow_cache = &pmd->flow_cache;
5157 struct netdev_flow_key *key = &keys[0];
5158 size_t n_missed = 0, n_dropped = 0;
5159 struct dp_packet *packet;
5160 const size_t cnt = dp_packet_batch_size(packets_);
5161 uint32_t cur_min;
5162 int i;
5163
5164 atomic_read_relaxed(&pmd->dp->emc_insert_min, &cur_min);
5165 pmd_perf_update_counter(&pmd->perf_stats,
5166 md_is_valid ? PMD_STAT_RECIRC : PMD_STAT_RECV,
5167 cnt);
5168
5169 DP_PACKET_BATCH_REFILL_FOR_EACH (i, cnt, packet, packets_) {
5170 struct dp_netdev_flow *flow;
5171
5172 if (OVS_UNLIKELY(dp_packet_size(packet) < ETH_HEADER_LEN)) {
5173 dp_packet_delete(packet);
5174 n_dropped++;
5175 continue;
5176 }
5177
5178 if (i != cnt - 1) {
5179 struct dp_packet **packets = packets_->packets;
5180 /* Prefetch next packet data and metadata. */
5181 OVS_PREFETCH(dp_packet_data(packets[i+1]));
5182 pkt_metadata_prefetch_init(&packets[i+1]->md);
5183 }
5184
5185 if (!md_is_valid) {
5186 pkt_metadata_init(&packet->md, port_no);
5187 }
5188 miniflow_extract(packet, &key->mf);
5189 key->len = 0; /* Not computed yet. */
5190 /* If EMC is disabled skip hash computation and emc_lookup */
5191 if (cur_min) {
5192 if (!md_is_valid) {
5193 key->hash = dpif_netdev_packet_get_rss_hash_orig_pkt(packet,
5194 &key->mf);
5195 } else {
5196 key->hash = dpif_netdev_packet_get_rss_hash(packet, &key->mf);
5197 }
5198 flow = emc_lookup(flow_cache, key);
5199 } else {
5200 flow = NULL;
5201 }
5202 if (OVS_LIKELY(flow)) {
5203 dp_netdev_queue_batches(packet, flow, &key->mf, batches,
5204 n_batches);
5205 } else {
5206 /* Exact match cache missed. Group missed packets together at
5207 * the beginning of the 'packets' array. */
5208 dp_packet_batch_refill(packets_, packet, i);
5209 /* 'key[n_missed]' contains the key of the current packet and it
5210 * must be returned to the caller. The next key should be extracted
5211 * to 'keys[n_missed + 1]'. */
5212 key = &keys[++n_missed];
5213 }
5214 }
5215
5216 pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_EXACT_HIT,
5217 cnt - n_dropped - n_missed);
5218
5219 return dp_packet_batch_size(packets_);
5220 }
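/* Note: after the refill loop above, the batch holds only the EMC
 * misses, so the value returned equals 'n_missed' and their keys sit in
 * keys[0..n_missed-1]. */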
5221
5222 static inline int
5223 handle_packet_upcall(struct dp_netdev_pmd_thread *pmd,
5224 struct dp_packet *packet,
5225 const struct netdev_flow_key *key,
5226 struct ofpbuf *actions, struct ofpbuf *put_actions)
5227 {
5228 struct ofpbuf *add_actions;
5229 struct dp_packet_batch b;
5230 struct match match;
5231 ovs_u128 ufid;
5232 int error;
5233 uint64_t cycles = cycles_counter_update(&pmd->perf_stats);
5234
5235 match.tun_md.valid = false;
5236 miniflow_expand(&key->mf, &match.flow);
5237
5238 ofpbuf_clear(actions);
5239 ofpbuf_clear(put_actions);
5240
5241 dpif_flow_hash(pmd->dp->dpif, &match.flow, sizeof match.flow, &ufid);
5242 error = dp_netdev_upcall(pmd, packet, &match.flow, &match.wc,
5243 &ufid, DPIF_UC_MISS, NULL, actions,
5244 put_actions);
5245 if (OVS_UNLIKELY(error && error != ENOSPC)) {
5246 dp_packet_delete(packet);
5247 return error;
5248 }
5249
5250 /* The Netlink encoding of datapath flow keys cannot express
5251 * wildcarding the presence of a VLAN tag. Instead, a missing VLAN
5252 * tag is interpreted as exact match on the fact that there is no
5253 * VLAN. Unless we refactor a lot of code that translates between
5254 * Netlink and struct flow representations, we have to do the same
5255 * here. */
5256 if (!match.wc.masks.vlans[0].tci) {
5257 match.wc.masks.vlans[0].tci = htons(0xffff);
5258 }
5259
5260 /* We can't allow the packet batching in the next loop to execute
5261 * the actions. Otherwise, if there are any slow path actions,
5262 * we'll send the packet up twice. */
5263 dp_packet_batch_init_packet(&b, packet);
5264 dp_netdev_execute_actions(pmd, &b, true, &match.flow,
5265 actions->data, actions->size);
5266
5267 add_actions = put_actions->size ? put_actions : actions;
5268 if (OVS_LIKELY(error != ENOSPC)) {
5269 struct dp_netdev_flow *netdev_flow;
5270
5271 /* XXX: There's a race window where a flow covering this packet
5272 * could have already been installed since we last did the flow
5273 * lookup before upcall. This could be solved by moving the
5274 * mutex lock outside the loop, but that's an awful long time
5275 * to be locking everyone out of making flow installs. If we
5276 * move to a per-core classifier, it would be reasonable. */
5277 ovs_mutex_lock(&pmd->flow_mutex);
5278 netdev_flow = dp_netdev_pmd_lookup_flow(pmd, key, NULL);
5279 if (OVS_LIKELY(!netdev_flow)) {
5280 netdev_flow = dp_netdev_flow_add(pmd, &match, &ufid,
5281 add_actions->data,
5282 add_actions->size);
5283 }
5284 ovs_mutex_unlock(&pmd->flow_mutex);
5285 emc_probabilistic_insert(pmd, key, netdev_flow);
5286 }
5287 if (pmd_perf_metrics_enabled(pmd)) {
5288 /* Update upcall stats. */
5289 cycles = cycles_counter_update(&pmd->perf_stats) - cycles;
5290 struct pmd_perf_stats *s = &pmd->perf_stats;
5291 s->current.upcalls++;
5292 s->current.upcall_cycles += cycles;
5293 histogram_add_sample(&s->cycles_per_upcall, cycles);
5294 }
5295 return error;
5296 }
5297
5298 static inline void
5299 fast_path_processing(struct dp_netdev_pmd_thread *pmd,
5300 struct dp_packet_batch *packets_,
5301 struct netdev_flow_key *keys,
5302 struct packet_batch_per_flow batches[],
5303 size_t *n_batches,
5304 odp_port_t in_port)
5305 {
5306 const size_t cnt = dp_packet_batch_size(packets_);
5307 #if !defined(__CHECKER__) && !defined(_WIN32)
5308 const size_t PKT_ARRAY_SIZE = cnt;
5309 #else
5310 /* Sparse or MSVC doesn't like variable length array. */
5311 enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
5312 #endif
5313 struct dp_packet *packet;
5314 struct dpcls *cls;
5315 struct dpcls_rule *rules[PKT_ARRAY_SIZE];
5316 struct dp_netdev *dp = pmd->dp;
5317 int upcall_ok_cnt = 0, upcall_fail_cnt = 0;
5318 int lookup_cnt = 0, add_lookup_cnt;
5319 bool any_miss;
5320
5321 for (size_t i = 0; i < cnt; i++) {
5322 /* Key length is needed in all cases; the hash is computed on demand. */
5323 keys[i].len = netdev_flow_key_size(miniflow_n_values(&keys[i].mf));
5324 }
5325 /* Get the classifier for the in_port */
5326 cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port);
5327 if (OVS_LIKELY(cls)) {
5328 any_miss = !dpcls_lookup(cls, keys, rules, cnt, &lookup_cnt);
5329 } else {
5330 any_miss = true;
5331 memset(rules, 0, sizeof(rules));
5332 }
5333 if (OVS_UNLIKELY(any_miss) && !fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
5334 uint64_t actions_stub[512 / 8], slow_stub[512 / 8];
5335 struct ofpbuf actions, put_actions;
5336
5337 ofpbuf_use_stub(&actions, actions_stub, sizeof actions_stub);
5338 ofpbuf_use_stub(&put_actions, slow_stub, sizeof slow_stub);
5339
5340 DP_PACKET_BATCH_FOR_EACH (i, packet, packets_) {
5341 struct dp_netdev_flow *netdev_flow;
5342
5343 if (OVS_LIKELY(rules[i])) {
5344 continue;
5345 }
5346
5347 /* It's possible that an earlier slow path execution installed
5348 * a rule covering this flow. In this case, it's a lot cheaper
5349 * to catch it here than execute a miss. */
5350 netdev_flow = dp_netdev_pmd_lookup_flow(pmd, &keys[i],
5351 &add_lookup_cnt);
5352 if (netdev_flow) {
5353 lookup_cnt += add_lookup_cnt;
5354 rules[i] = &netdev_flow->cr;
5355 continue;
5356 }
5357
5358 int error = handle_packet_upcall(pmd, packet, &keys[i],
5359 &actions, &put_actions);
5360
5361 if (OVS_UNLIKELY(error)) {
5362 upcall_fail_cnt++;
5363 } else {
5364 upcall_ok_cnt++;
5365 }
5366 }
5367
5368 ofpbuf_uninit(&actions);
5369 ofpbuf_uninit(&put_actions);
5370 fat_rwlock_unlock(&dp->upcall_rwlock);
5371 } else if (OVS_UNLIKELY(any_miss)) {
5372 DP_PACKET_BATCH_FOR_EACH (i, packet, packets_) {
5373 if (OVS_UNLIKELY(!rules[i])) {
5374 dp_packet_delete(packet);
5375 upcall_fail_cnt++;
5376 }
5377 }
5378 }
5379
5380 DP_PACKET_BATCH_FOR_EACH (i, packet, packets_) {
5381 struct dp_netdev_flow *flow;
5382
5383 if (OVS_UNLIKELY(!rules[i])) {
5384 continue;
5385 }
5386
5387 flow = dp_netdev_flow_cast(rules[i]);
5388
5389 emc_probabilistic_insert(pmd, &keys[i], flow);
5390 dp_netdev_queue_batches(packet, flow, &keys[i].mf, batches, n_batches);
5391 }
5392
5393 pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_MASKED_HIT,
5394 cnt - upcall_ok_cnt - upcall_fail_cnt);
5395 pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_MASKED_LOOKUP,
5396 lookup_cnt);
5397 pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_MISS,
5398 upcall_ok_cnt);
5399 pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_LOST,
5400 upcall_fail_cnt);
5401 }
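/* Note: MASKED_HIT + MISS + LOST above add up to 'cnt', so every packet
 * that entered the fast path is counted exactly once.  MASKED_LOOKUP
 * instead counts dpcls subtable lookups and may exceed the packet
 * count. */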
5402
5403 /* Packets enter the datapath from a port (or from recirculation) here.
5404 *
5405 * When 'md_is_valid' is true the metadata in 'packets' are already valid.
5406 * When false the metadata in 'packets' need to be initialized. */
5407 static void
5408 dp_netdev_input__(struct dp_netdev_pmd_thread *pmd,
5409 struct dp_packet_batch *packets,
5410 bool md_is_valid, odp_port_t port_no)
5411 {
5412 #if !defined(__CHECKER__) && !defined(_WIN32)
5413 const size_t PKT_ARRAY_SIZE = dp_packet_batch_size(packets);
5414 #else
5415 /* Sparse or MSVC doesn't like variable length array. */
5416 enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
5417 #endif
5418 OVS_ALIGNED_VAR(CACHE_LINE_SIZE)
5419 struct netdev_flow_key keys[PKT_ARRAY_SIZE];
5420 struct packet_batch_per_flow batches[PKT_ARRAY_SIZE];
5421 size_t n_batches;
5422 odp_port_t in_port;
5423
5424 n_batches = 0;
5425 emc_processing(pmd, packets, keys, batches, &n_batches,
5426 md_is_valid, port_no);
5427 if (!dp_packet_batch_is_empty(packets)) {
5428 /* Get ingress port from first packet's metadata. */
5429 in_port = packets->packets[0]->md.in_port.odp_port;
5430 fast_path_processing(pmd, packets, keys,
5431 batches, &n_batches, in_port);
5432 }
5433
5434 /* All the flow batches need to be reset before any call to
5435 * packet_batch_per_flow_execute() as it could potentially trigger
5436 * recirculation. When a packet matching flow 'j' happens to be
5437 * recirculated, the nested call to dp_netdev_input__() could potentially
5438 * classify the packet as matching another flow - say 'k'. It could happen
5439 * that in the previous call to dp_netdev_input__() that same flow 'k' had
5440 * already its own batches[k] still waiting to be served. So if its
5441 * 'batch' member is not reset, the recirculated packet would be wrongly
5442 * appended to batches[k] of the 1st call to dp_netdev_input__(). */
5443 size_t i;
5444 for (i = 0; i < n_batches; i++) {
5445 batches[i].flow->batch = NULL;
5446 }
5447
5448 for (i = 0; i < n_batches; i++) {
5449 packet_batch_per_flow_execute(&batches[i], pmd);
5450 }
5451 }
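
/* Worked example of the batch-reset hazard described above ('j' and 'k'
 * are illustrative): suppose call #1 queued packets into batches[k] for
 * flow 'k' and then executed batches[j], whose actions recirculate a
 * packet. The nested call #2 classifies that packet as flow 'k' again.
 * If 'k'->batch still pointed at call #1's batches[k],
 * dp_netdev_queue_batches() would append the packet there, but call #2
 * returns before that batch is executed, so the packet would be handled
 * out of order by the outer call. Resetting every flow's 'batch' member
 * to NULL first forces the nested call to open its own batch. */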
5452
5453 static void
5454 dp_netdev_input(struct dp_netdev_pmd_thread *pmd,
5455 struct dp_packet_batch *packets,
5456 odp_port_t port_no)
5457 {
5458 dp_netdev_input__(pmd, packets, false, port_no);
5459 }
5460
5461 static void
5462 dp_netdev_recirculate(struct dp_netdev_pmd_thread *pmd,
5463 struct dp_packet_batch *packets)
5464 {
5465 dp_netdev_input__(pmd, packets, true, 0);
5466 }
5467
5468 struct dp_netdev_execute_aux {
5469 struct dp_netdev_pmd_thread *pmd;
5470 const struct flow *flow;
5471 };
5472
5473 static void
5474 dpif_netdev_register_dp_purge_cb(struct dpif *dpif, dp_purge_callback *cb,
5475 void *aux)
5476 {
5477 struct dp_netdev *dp = get_dp_netdev(dpif);
5478 dp->dp_purge_aux = aux;
5479 dp->dp_purge_cb = cb;
5480 }
5481
5482 static void
5483 dpif_netdev_register_upcall_cb(struct dpif *dpif, upcall_callback *cb,
5484 void *aux)
5485 {
5486 struct dp_netdev *dp = get_dp_netdev(dpif);
5487 dp->upcall_aux = aux;
5488 dp->upcall_cb = cb;
5489 }
5490
5491 static void
5492 dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
5493 bool purge)
5494 {
5495 struct tx_port *tx;
5496 struct dp_netdev_port *port;
5497 long long interval;
5498
5499 HMAP_FOR_EACH (tx, node, &pmd->send_port_cache) {
5500 if (!tx->port->dynamic_txqs) {
5501 continue;
5502 }
5503 interval = pmd->ctx.now - tx->last_used;
5504 if (tx->qid >= 0 && (purge || interval >= XPS_TIMEOUT)) {
5505 port = tx->port;
5506 ovs_mutex_lock(&port->txq_used_mutex);
5507 port->txq_used[tx->qid]--;
5508 ovs_mutex_unlock(&port->txq_used_mutex);
5509 tx->qid = -1;
5510 }
5511 }
5512 }
5513
5514 static int
5515 dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread *pmd,
5516 struct tx_port *tx)
5517 {
5518 struct dp_netdev_port *port;
5519 long long interval;
5520 int i, min_cnt, min_qid;
5521
5522 interval = pmd->ctx.now - tx->last_used;
5523 tx->last_used = pmd->ctx.now;
5524
5525 if (OVS_LIKELY(tx->qid >= 0 && interval < XPS_TIMEOUT)) {
5526 return tx->qid;
5527 }
5528
5529 port = tx->port;
5530
5531 ovs_mutex_lock(&port->txq_used_mutex);
5532 if (tx->qid >= 0) {
5533 port->txq_used[tx->qid]--;
5534 tx->qid = -1;
5535 }
5536
5537 min_cnt = -1;
5538 min_qid = 0;
5539 for (i = 0; i < netdev_n_txq(port->netdev); i++) {
5540 if (port->txq_used[i] < min_cnt || min_cnt == -1) {
5541 min_cnt = port->txq_used[i];
5542 min_qid = i;
5543 }
5544 }
5545
5546 port->txq_used[min_qid]++;
5547 tx->qid = min_qid;
5548
5549 ovs_mutex_unlock(&port->txq_used_mutex);
5550
5551 dpif_netdev_xps_revalidate_pmd(pmd, false);
5552
5553 VLOG_DBG("Core %d: New TX queue ID %d for port \'%s\'.",
5554 pmd->core_id, tx->qid, netdev_get_name(tx->port->netdev));
5555 return min_qid;
5556 }
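
/* Example of the dynamic txq selection above: with netdev_n_txq() == 4
 * and txq_used[] == {2, 0, 1, 0}, the scan picks qid 1, the first queue
 * with the minimum use count, increments txq_used[1] and caches the
 * result in tx->qid until XPS_TIMEOUT expires. A minimal model of just
 * the selection loop (hypothetical helper, not part of this file):
 *
 *     static int
 *     xps_pick_least_used(const int *used, int n_txq)
 *     {
 *         int min_cnt = -1, min_qid = 0;
 *
 *         for (int i = 0; i < n_txq; i++) {
 *             if (used[i] < min_cnt || min_cnt == -1) {
 *                 min_cnt = used[i];
 *                 min_qid = i;
 *             }
 *         }
 *         return min_qid;
 *     }
 */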
5557
5558 static struct tx_port *
5559 pmd_tnl_port_cache_lookup(const struct dp_netdev_pmd_thread *pmd,
5560 odp_port_t port_no)
5561 {
5562 return tx_port_lookup(&pmd->tnl_port_cache, port_no);
5563 }
5564
5565 static struct tx_port *
5566 pmd_send_port_cache_lookup(const struct dp_netdev_pmd_thread *pmd,
5567 odp_port_t port_no)
5568 {
5569 return tx_port_lookup(&pmd->send_port_cache, port_no);
5570 }
5571
5572 static int
5573 push_tnl_action(const struct dp_netdev_pmd_thread *pmd,
5574 const struct nlattr *attr,
5575 struct dp_packet_batch *batch)
5576 {
5577 struct tx_port *tun_port;
5578 const struct ovs_action_push_tnl *data;
5579 int err;
5580
5581 data = nl_attr_get(attr);
5582
5583 tun_port = pmd_tnl_port_cache_lookup(pmd, data->tnl_port);
5584 if (!tun_port) {
5585 err = -EINVAL;
5586 goto error;
5587 }
5588 err = netdev_push_header(tun_port->port->netdev, batch, data);
5589 if (!err) {
5590 return 0;
5591 }
5592 error:
5593 dp_packet_delete_batch(batch, true);
5594 return err;
5595 }
5596
5597 static void
5598 dp_execute_userspace_action(struct dp_netdev_pmd_thread *pmd,
5599 struct dp_packet *packet, bool should_steal,
5600 struct flow *flow, ovs_u128 *ufid,
5601 struct ofpbuf *actions,
5602 const struct nlattr *userdata)
5603 {
5604 struct dp_packet_batch b;
5605 int error;
5606
5607 ofpbuf_clear(actions);
5608
5609 error = dp_netdev_upcall(pmd, packet, flow, NULL, ufid,
5610 DPIF_UC_ACTION, userdata, actions,
5611 NULL);
5612 if (!error || error == ENOSPC) {
5613 dp_packet_batch_init_packet(&b, packet);
5614 dp_netdev_execute_actions(pmd, &b, should_steal, flow,
5615 actions->data, actions->size);
5616 } else if (should_steal) {
5617 dp_packet_delete(packet);
5618 }
5619 }
5620
5621 static void
5622 dp_execute_cb(void *aux_, struct dp_packet_batch *packets_,
5623 const struct nlattr *a, bool should_steal)
5624 OVS_NO_THREAD_SAFETY_ANALYSIS
5625 {
5626 struct dp_netdev_execute_aux *aux = aux_;
5627 uint32_t *depth = recirc_depth_get();
5628 struct dp_netdev_pmd_thread *pmd = aux->pmd;
5629 struct dp_netdev *dp = pmd->dp;
5630 int type = nl_attr_type(a);
5631 struct tx_port *p;
5632
5633 switch ((enum ovs_action_attr)type) {
5634 case OVS_ACTION_ATTR_OUTPUT:
5635 p = pmd_send_port_cache_lookup(pmd, nl_attr_get_odp_port(a));
5636 if (OVS_LIKELY(p)) {
5637 struct dp_packet *packet;
5638 struct dp_packet_batch out;
5639
5640 if (!should_steal) {
5641 dp_packet_batch_clone(&out, packets_);
5642 dp_packet_batch_reset_cutlen(packets_);
5643 packets_ = &out;
5644 }
5645 dp_packet_batch_apply_cutlen(packets_);
5646
5647 #ifdef DPDK_NETDEV
5648 if (OVS_UNLIKELY(!dp_packet_batch_is_empty(&p->output_pkts)
5649 && packets_->packets[0]->source
5650 != p->output_pkts.packets[0]->source)) {
5651 /* XXX: netdev-dpdk assumes that all packets in a single
5652 * output batch have the same source. Flush here to
5653 * avoid memory access issues. */
5654 dp_netdev_pmd_flush_output_on_port(pmd, p);
5655 }
5656 #endif
5657 if (dp_packet_batch_size(&p->output_pkts)
5658 + dp_packet_batch_size(packets_) > NETDEV_MAX_BURST) {
5659 /* Flush here to avoid overflow. */
5660 dp_netdev_pmd_flush_output_on_port(pmd, p);
5661 }
5662
5663 if (dp_packet_batch_is_empty(&p->output_pkts)) {
5664 pmd->n_output_batches++;
5665 }
5666
5667 DP_PACKET_BATCH_FOR_EACH (i, packet, packets_) {
5668 p->output_pkts_rxqs[dp_packet_batch_size(&p->output_pkts)] =
5669 pmd->ctx.last_rxq;
5670 dp_packet_batch_add(&p->output_pkts, packet);
5671 }
5672 return;
5673 }
5674 break;
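
/* Sizing note for the output batching above (NETDEV_MAX_BURST is the
 * per-port queue capacity, commonly 32): if p->output_pkts already holds
 * 20 packets and this batch carries 16, then 20 + 16 > 32 flushes the 20
 * queued packets first and the 16 new ones are queued afterwards. Nothing
 * is dropped here; overflow only causes an earlier-than-scheduled flush. */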
5675
5676 case OVS_ACTION_ATTR_TUNNEL_PUSH:
5677 if (should_steal) {
5678 /* We were asked to push a tunnel header, but we also need to take
5679 * ownership of these packets. Since the caller will not use the
5680 * result anyway, we can skip performing the action and just
5681 * break to free the batch. */
5682 break;
5683 }
5684 dp_packet_batch_apply_cutlen(packets_);
5685 push_tnl_action(pmd, a, packets_);
5686 return;
5687
5688 case OVS_ACTION_ATTR_TUNNEL_POP:
5689 if (*depth < MAX_RECIRC_DEPTH) {
5690 struct dp_packet_batch *orig_packets_ = packets_;
5691 odp_port_t portno = nl_attr_get_odp_port(a);
5692
5693 p = pmd_tnl_port_cache_lookup(pmd, portno);
5694 if (p) {
5695 struct dp_packet_batch tnl_pkt;
5696
5697 if (!should_steal) {
5698 dp_packet_batch_clone(&tnl_pkt, packets_);
5699 packets_ = &tnl_pkt;
5700 dp_packet_batch_reset_cutlen(orig_packets_);
5701 }
5702
5703 dp_packet_batch_apply_cutlen(packets_);
5704
5705 netdev_pop_header(p->port->netdev, packets_);
5706 if (dp_packet_batch_is_empty(packets_)) {
5707 return;
5708 }
5709
5710 struct dp_packet *packet;
5711 DP_PACKET_BATCH_FOR_EACH (i, packet, packets_) {
5712 packet->md.in_port.odp_port = portno;
5713 }
5714
5715 (*depth)++;
5716 dp_netdev_recirculate(pmd, packets_);
5717 (*depth)--;
5718 return;
5719 }
5720 }
5721 break;
5722
5723 case OVS_ACTION_ATTR_USERSPACE:
5724 if (!fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
5725 struct dp_packet_batch *orig_packets_ = packets_;
5726 const struct nlattr *userdata;
5727 struct dp_packet_batch usr_pkt;
5728 struct ofpbuf actions;
5729 struct flow flow;
5730 ovs_u128 ufid;
5731 bool clone = false;
5732
5733 userdata = nl_attr_find_nested(a, OVS_USERSPACE_ATTR_USERDATA);
5734 ofpbuf_init(&actions, 0);
5735
5736 if (packets_->trunc) {
5737 if (!should_steal) {
5738 dp_packet_batch_clone(&usr_pkt, packets_);
5739 packets_ = &usr_pkt;
5740 clone = true;
5741 dp_packet_batch_reset_cutlen(orig_packets_);
5742 }
5743
5744 dp_packet_batch_apply_cutlen(packets_);
5745 }
5746
5747 struct dp_packet *packet;
5748 DP_PACKET_BATCH_FOR_EACH (i, packet, packets_) {
5749 flow_extract(packet, &flow);
5750 dpif_flow_hash(dp->dpif, &flow, sizeof flow, &ufid);
5751 dp_execute_userspace_action(pmd, packet, should_steal, &flow,
5752 &ufid, &actions, userdata);
5753 }
5754
5755 if (clone) {
5756 dp_packet_delete_batch(packets_, true);
5757 }
5758
5759 ofpbuf_uninit(&actions);
5760 fat_rwlock_unlock(&dp->upcall_rwlock);
5761
5762 return;
5763 }
5764 break;
5765
5766 case OVS_ACTION_ATTR_RECIRC:
5767 if (*depth < MAX_RECIRC_DEPTH) {
5768 struct dp_packet_batch recirc_pkts;
5769
5770 if (!should_steal) {
5771 dp_packet_batch_clone(&recirc_pkts, packets_);
5772 packets_ = &recirc_pkts;
5773 }
5774
5775 struct dp_packet *packet;
5776 DP_PACKET_BATCH_FOR_EACH (i, packet, packets_) {
5777 packet->md.recirc_id = nl_attr_get_u32(a);
5778 }
5779
5780 (*depth)++;
5781 dp_netdev_recirculate(pmd, packets_);
5782 (*depth)--;
5783
5784 return;
5785 }
5786
5787 VLOG_WARN("Packet dropped. Max recirculation depth exceeded.");
5788 break;
5789
5790 case OVS_ACTION_ATTR_CT: {
5791 const struct nlattr *b;
5792 bool force = false;
5793 bool commit = false;
5794 unsigned int left;
5795 uint16_t zone = 0;
5796 const char *helper = NULL;
5797 const uint32_t *setmark = NULL;
5798 const struct ovs_key_ct_labels *setlabel = NULL;
5799 struct nat_action_info_t nat_action_info;
5800 struct nat_action_info_t *nat_action_info_ref = NULL;
5801 bool nat_config = false;
5802
5803 NL_ATTR_FOR_EACH_UNSAFE (b, left, nl_attr_get(a),
5804 nl_attr_get_size(a)) {
5805 enum ovs_ct_attr sub_type = nl_attr_type(b);
5806
5807 switch (sub_type) {
5808 case OVS_CT_ATTR_FORCE_COMMIT:
5809 force = true;
5810 /* fall through. */
5811 case OVS_CT_ATTR_COMMIT:
5812 commit = true;
5813 break;
5814 case OVS_CT_ATTR_ZONE:
5815 zone = nl_attr_get_u16(b);
5816 break;
5817 case OVS_CT_ATTR_HELPER:
5818 helper = nl_attr_get_string(b);
5819 break;
5820 case OVS_CT_ATTR_MARK:
5821 setmark = nl_attr_get(b);
5822 break;
5823 case OVS_CT_ATTR_LABELS:
5824 setlabel = nl_attr_get(b);
5825 break;
5826 case OVS_CT_ATTR_EVENTMASK:
5827 /* Silently ignored, as the userspace datapath does not
5828 * generate netlink events. */
5829 break;
5830 case OVS_CT_ATTR_NAT: {
5831 const struct nlattr *b_nest;
5832 unsigned int left_nest;
5833 bool ip_min_specified = false;
5834 bool proto_num_min_specified = false;
5835 bool ip_max_specified = false;
5836 bool proto_num_max_specified = false;
5837 memset(&nat_action_info, 0, sizeof nat_action_info);
5838 nat_action_info_ref = &nat_action_info;
5839
5840 NL_NESTED_FOR_EACH_UNSAFE (b_nest, left_nest, b) {
5841 enum ovs_nat_attr sub_type_nest = nl_attr_type(b_nest);
5842
5843 switch (sub_type_nest) {
5844 case OVS_NAT_ATTR_SRC:
5845 case OVS_NAT_ATTR_DST:
5846 nat_config = true;
5847 nat_action_info.nat_action |=
5848 ((sub_type_nest == OVS_NAT_ATTR_SRC)
5849 ? NAT_ACTION_SRC : NAT_ACTION_DST);
5850 break;
5851 case OVS_NAT_ATTR_IP_MIN:
5852 memcpy(&nat_action_info.min_addr,
5853 nl_attr_get(b_nest),
5854 nl_attr_get_size(b_nest));
5855 ip_min_specified = true;
5856 break;
5857 case OVS_NAT_ATTR_IP_MAX:
5858 memcpy(&nat_action_info.max_addr,
5859 nl_attr_get(b_nest),
5860 nl_attr_get_size(b_nest));
5861 ip_max_specified = true;
5862 break;
5863 case OVS_NAT_ATTR_PROTO_MIN:
5864 nat_action_info.min_port =
5865 nl_attr_get_u16(b_nest);
5866 proto_num_min_specified = true;
5867 break;
5868 case OVS_NAT_ATTR_PROTO_MAX:
5869 nat_action_info.max_port =
5870 nl_attr_get_u16(b_nest);
5871 proto_num_max_specified = true;
5872 break;
5873 case OVS_NAT_ATTR_PERSISTENT:
5874 case OVS_NAT_ATTR_PROTO_HASH:
5875 case OVS_NAT_ATTR_PROTO_RANDOM:
5876 break;
5877 case OVS_NAT_ATTR_UNSPEC:
5878 case __OVS_NAT_ATTR_MAX:
5879 OVS_NOT_REACHED();
5880 }
5881 }
5882
5883 if (ip_min_specified && !ip_max_specified) {
5884 nat_action_info.max_addr = nat_action_info.min_addr;
5885 }
5886 if (proto_num_min_specified && !proto_num_max_specified) {
5887 nat_action_info.max_port = nat_action_info.min_port;
5888 }
5889 if (proto_num_min_specified || proto_num_max_specified) {
5890 if (nat_action_info.nat_action & NAT_ACTION_SRC) {
5891 nat_action_info.nat_action |= NAT_ACTION_SRC_PORT;
5892 } else if (nat_action_info.nat_action & NAT_ACTION_DST) {
5893 nat_action_info.nat_action |= NAT_ACTION_DST_PORT;
5894 }
5895 }
5896 break;
5897 }
5898 case OVS_CT_ATTR_UNSPEC:
5899 case __OVS_CT_ATTR_MAX:
5900 OVS_NOT_REACHED();
5901 }
5902 }
5903
5904 /* We won't be able to function properly in this case, hence
5905 * complain loudly. */
5906 if (nat_config && !commit) {
5907 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
5908 VLOG_WARN_RL(&rl, "NAT specified without commit.");
5909 }
5910
5911 conntrack_execute(&dp->conntrack, packets_, aux->flow->dl_type, force,
5912 commit, zone, setmark, setlabel, aux->flow->tp_src,
5913 aux->flow->tp_dst, helper, nat_action_info_ref,
5914 pmd->ctx.now / 1000);
5915 break;
5916 }
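
/* NAT range defaulting in the parser above, with hypothetical attribute
 * values: an OVS_CT_ATTR_NAT nest carrying only OVS_NAT_ATTR_SRC and
 * OVS_NAT_ATTR_IP_MIN = 10.0.0.1 yields the address range
 * [10.0.0.1, 10.0.0.1], since max_addr is copied from min_addr when
 * OVS_NAT_ATTR_IP_MAX is absent. Likewise a lone OVS_NAT_ATTR_PROTO_MIN
 * of 4000 yields the port range [4000, 4000] and, because the nest chose
 * SNAT, also sets NAT_ACTION_SRC_PORT. */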
5917
5918 case OVS_ACTION_ATTR_METER:
5919 dp_netdev_run_meter(pmd->dp, packets_, nl_attr_get_u32(a),
5920 pmd->ctx.now);
5921 break;
5922
5923 case OVS_ACTION_ATTR_PUSH_VLAN:
5924 case OVS_ACTION_ATTR_POP_VLAN:
5925 case OVS_ACTION_ATTR_PUSH_MPLS:
5926 case OVS_ACTION_ATTR_POP_MPLS:
5927 case OVS_ACTION_ATTR_SET:
5928 case OVS_ACTION_ATTR_SET_MASKED:
5929 case OVS_ACTION_ATTR_SAMPLE:
5930 case OVS_ACTION_ATTR_HASH:
5931 case OVS_ACTION_ATTR_UNSPEC:
5932 case OVS_ACTION_ATTR_TRUNC:
5933 case OVS_ACTION_ATTR_PUSH_ETH:
5934 case OVS_ACTION_ATTR_POP_ETH:
5935 case OVS_ACTION_ATTR_CLONE:
5936 case OVS_ACTION_ATTR_PUSH_NSH:
5937 case OVS_ACTION_ATTR_POP_NSH:
5938 case OVS_ACTION_ATTR_CT_CLEAR:
5939 case __OVS_ACTION_ATTR_MAX:
5940 OVS_NOT_REACHED();
5941 }
5942
5943 dp_packet_delete_batch(packets_, should_steal);
5944 }
5945
5946 static void
5947 dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
5948 struct dp_packet_batch *packets,
5949 bool should_steal, const struct flow *flow,
5950 const struct nlattr *actions, size_t actions_len)
5951 {
5952 struct dp_netdev_execute_aux aux = { pmd, flow };
5953
5954 odp_execute_actions(&aux, packets, should_steal, actions,
5955 actions_len, dp_execute_cb);
5956 }
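
/* Hedged usage sketch: the same pattern dp_execute_userspace_action()
 * uses above to run an upcall's returned actions on a single packet
 * ('pmd', 'packet', 'flow' and 'actions' assumed to be in scope):
 *
 *     struct dp_packet_batch b;
 *
 *     dp_packet_batch_init_packet(&b, packet);
 *     dp_netdev_execute_actions(pmd, &b, true, flow,
 *                               actions->data, actions->size);
 */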
5957
5958 struct dp_netdev_ct_dump {
5959 struct ct_dpif_dump_state up;
5960 struct conntrack_dump dump;
5961 struct conntrack *ct;
5962 struct dp_netdev *dp;
5963 };
5964
5965 static int
5966 dpif_netdev_ct_dump_start(struct dpif *dpif, struct ct_dpif_dump_state **dump_,
5967 const uint16_t *pzone, int *ptot_bkts)
5968 {
5969 struct dp_netdev *dp = get_dp_netdev(dpif);
5970 struct dp_netdev_ct_dump *dump;
5971
5972 dump = xzalloc(sizeof *dump);
5973 dump->dp = dp;
5974 dump->ct = &dp->conntrack;
5975
5976 conntrack_dump_start(&dp->conntrack, &dump->dump, pzone, ptot_bkts);
5977
5978 *dump_ = &dump->up;
5979
5980 return 0;
5981 }
5982
5983 static int
5984 dpif_netdev_ct_dump_next(struct dpif *dpif OVS_UNUSED,
5985 struct ct_dpif_dump_state *dump_,
5986 struct ct_dpif_entry *entry)
5987 {
5988 struct dp_netdev_ct_dump *dump;
5989
5990 INIT_CONTAINER(dump, dump_, up);
5991
5992 return conntrack_dump_next(&dump->dump, entry);
5993 }
5994
5995 static int
5996 dpif_netdev_ct_dump_done(struct dpif *dpif OVS_UNUSED,
5997 struct ct_dpif_dump_state *dump_)
5998 {
5999 struct dp_netdev_ct_dump *dump;
6000 int err;
6001
6002 INIT_CONTAINER(dump, dump_, up);
6003
6004 err = conntrack_dump_done(&dump->dump);
6005
6006 free(dump);
6007
6008 return err;
6009 }
6010
6011 static int
6012 dpif_netdev_ct_flush(struct dpif *dpif, const uint16_t *zone,
6013 const struct ct_dpif_tuple *tuple)
6014 {
6015 struct dp_netdev *dp = get_dp_netdev(dpif);
6016
6017 if (tuple) {
6018 return conntrack_flush_tuple(&dp->conntrack, tuple, zone ? *zone : 0);
6019 }
6020 return conntrack_flush(&dp->conntrack, zone);
6021 }
6022
6023 static int
6024 dpif_netdev_ct_set_maxconns(struct dpif *dpif, uint32_t maxconns)
6025 {
6026 struct dp_netdev *dp = get_dp_netdev(dpif);
6027
6028 return conntrack_set_maxconns(&dp->conntrack, maxconns);
6029 }
6030
6031 static int
6032 dpif_netdev_ct_get_maxconns(struct dpif *dpif, uint32_t *maxconns)
6033 {
6034 struct dp_netdev *dp = get_dp_netdev(dpif);
6035
6036 return conntrack_get_maxconns(&dp->conntrack, maxconns);
6037 }
6038
6039 static int
6040 dpif_netdev_ct_get_nconns(struct dpif *dpif, uint32_t *nconns)
6041 {
6042 struct dp_netdev *dp = get_dp_netdev(dpif);
6043
6044 return conntrack_get_nconns(&dp->conntrack, nconns);
6045 }
6046
6047 const struct dpif_class dpif_netdev_class = {
6048 "netdev",
6049 dpif_netdev_init,
6050 dpif_netdev_enumerate,
6051 dpif_netdev_port_open_type,
6052 dpif_netdev_open,
6053 dpif_netdev_close,
6054 dpif_netdev_destroy,
6055 dpif_netdev_run,
6056 dpif_netdev_wait,
6057 dpif_netdev_get_stats,
6058 dpif_netdev_port_add,
6059 dpif_netdev_port_del,
6060 dpif_netdev_port_set_config,
6061 dpif_netdev_port_query_by_number,
6062 dpif_netdev_port_query_by_name,
6063 NULL, /* port_get_pid */
6064 dpif_netdev_port_dump_start,
6065 dpif_netdev_port_dump_next,
6066 dpif_netdev_port_dump_done,
6067 dpif_netdev_port_poll,
6068 dpif_netdev_port_poll_wait,
6069 dpif_netdev_flow_flush,
6070 dpif_netdev_flow_dump_create,
6071 dpif_netdev_flow_dump_destroy,
6072 dpif_netdev_flow_dump_thread_create,
6073 dpif_netdev_flow_dump_thread_destroy,
6074 dpif_netdev_flow_dump_next,
6075 dpif_netdev_operate,
6076 NULL, /* recv_set */
6077 NULL, /* handlers_set */
6078 dpif_netdev_set_config,
6079 dpif_netdev_queue_to_priority,
6080 NULL, /* recv */
6081 NULL, /* recv_wait */
6082 NULL, /* recv_purge */
6083 dpif_netdev_register_dp_purge_cb,
6084 dpif_netdev_register_upcall_cb,
6085 dpif_netdev_enable_upcall,
6086 dpif_netdev_disable_upcall,
6087 dpif_netdev_get_datapath_version,
6088 dpif_netdev_ct_dump_start,
6089 dpif_netdev_ct_dump_next,
6090 dpif_netdev_ct_dump_done,
6091 dpif_netdev_ct_flush,
6092 dpif_netdev_ct_set_maxconns,
6093 dpif_netdev_ct_get_maxconns,
6094 dpif_netdev_ct_get_nconns,
6095 dpif_netdev_meter_get_features,
6096 dpif_netdev_meter_set,
6097 dpif_netdev_meter_get,
6098 dpif_netdev_meter_del,
6099 };
6100
6101 static void
6102 dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED,
6103 const char *argv[], void *aux OVS_UNUSED)
6104 {
6105 struct dp_netdev_port *port;
6106 struct dp_netdev *dp;
6107 odp_port_t port_no;
6108
6109 ovs_mutex_lock(&dp_netdev_mutex);
6110 dp = shash_find_data(&dp_netdevs, argv[1]);
6111 if (!dp || !dpif_netdev_class_is_dummy(dp->class)) {
6112 ovs_mutex_unlock(&dp_netdev_mutex);
6113 unixctl_command_reply_error(conn, "unknown datapath or not a dummy");
6114 return;
6115 }
6116 ovs_refcount_ref(&dp->ref_cnt);
6117 ovs_mutex_unlock(&dp_netdev_mutex);
6118
6119 ovs_mutex_lock(&dp->port_mutex);
6120 if (get_port_by_name(dp, argv[2], &port)) {
6121 unixctl_command_reply_error(conn, "unknown port");
6122 goto exit;
6123 }
6124
6125 port_no = u32_to_odp(atoi(argv[3]));
6126 if (!port_no || port_no == ODPP_NONE) {
6127 unixctl_command_reply_error(conn, "bad port number");
6128 goto exit;
6129 }
6130 if (dp_netdev_lookup_port(dp, port_no)) {
6131 unixctl_command_reply_error(conn, "port number already in use");
6132 goto exit;
6133 }
6134
6135 /* Remove port. */
6136 hmap_remove(&dp->ports, &port->node);
6137 reconfigure_datapath(dp);
6138
6139 /* Reinsert with new port number. */
6140 port->port_no = port_no;
6141 hmap_insert(&dp->ports, &port->node, hash_port_no(port_no));
6142 reconfigure_datapath(dp);
6143
6144 seq_change(dp->port_seq);
6145 unixctl_command_reply(conn, NULL);
6146
6147 exit:
6148 ovs_mutex_unlock(&dp->port_mutex);
6149 dp_netdev_unref(dp);
6150 }
6151
6152 static void
6153 dpif_dummy_register__(const char *type)
6154 {
6155 struct dpif_class *class;
6156
6157 class = xmalloc(sizeof *class);
6158 *class = dpif_netdev_class;
6159 class->type = xstrdup(type);
6160 dp_register_provider(class);
6161 }
6162
6163 static void
6164 dpif_dummy_override(const char *type)
6165 {
6166 int error;
6167
6168 /*
6169 * Ignore EAFNOSUPPORT to allow --enable-dummy=system with
6170 * a userland-only build. It's useful for the testsuite.
6171 */
6172 error = dp_unregister_provider(type);
6173 if (error == 0 || error == EAFNOSUPPORT) {
6174 dpif_dummy_register__(type);
6175 }
6176 }
6177
6178 void
6179 dpif_dummy_register(enum dummy_level level)
6180 {
6181 if (level == DUMMY_OVERRIDE_ALL) {
6182 struct sset types;
6183 const char *type;
6184
6185 sset_init(&types);
6186 dp_enumerate_types(&types);
6187 SSET_FOR_EACH (type, &types) {
6188 dpif_dummy_override(type);
6189 }
6190 sset_destroy(&types);
6191 } else if (level == DUMMY_OVERRIDE_SYSTEM) {
6192 dpif_dummy_override("system");
6193 }
6194
6195 dpif_dummy_register__("dummy");
6196
6197 unixctl_command_register("dpif-dummy/change-port-number",
6198 "dp port new-number",
6199 3, 3, dpif_dummy_change_port_number, NULL);
6200 }
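
/* Example invocation of the command registered above (datapath and port
 * names are illustrative):
 *
 *     ovs-appctl dpif-dummy/change-port-number dp0 p1 42
 *
 * which moves port 'p1' of dummy datapath 'dp0' to ODP port number 42,
 * failing if the number is 0, ODPP_NONE, or already in use. */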
6201 \f
6202 /* Datapath Classifier. */
6203
6204 /* A set of rules that all have the same fields wildcarded. */
6205 struct dpcls_subtable {
6206 /* The fields are only used by writers. */
6207 struct cmap_node cmap_node OVS_GUARDED; /* Within dpcls 'subtables_map'. */
6208
6209 /* These fields are accessed by readers. */
6210 struct cmap rules; /* Contains "struct dpcls_rule"s. */
6211 uint32_t hit_cnt; /* Number of match hits in subtable in current
6212 optimization interval. */
6213 struct netdev_flow_key mask; /* Wildcards for fields (const). */
6214 /* 'mask' must be the last field, additional space is allocated here. */
6215 };
6216
6217 /* Initializes 'cls' as a classifier that initially contains no classification
6218 * rules. */
6219 static void
6220 dpcls_init(struct dpcls *cls)
6221 {
6222 cmap_init(&cls->subtables_map);
6223 pvector_init(&cls->subtables);
6224 }
6225
6226 static void
6227 dpcls_destroy_subtable(struct dpcls *cls, struct dpcls_subtable *subtable)
6228 {
6229 VLOG_DBG("Destroying subtable %p for in_port %d", subtable, cls->in_port);
6230 pvector_remove(&cls->subtables, subtable);
6231 cmap_remove(&cls->subtables_map, &subtable->cmap_node,
6232 subtable->mask.hash);
6233 cmap_destroy(&subtable->rules);
6234 ovsrcu_postpone(free, subtable);
6235 }
6236
6237 /* Destroys 'cls'. Rules within 'cls', if any, are not freed; this is the
6238 * caller's responsibility.
6239 * May only be called after all the readers have been terminated. */
6240 static void
6241 dpcls_destroy(struct dpcls *cls)
6242 {
6243 if (cls) {
6244 struct dpcls_subtable *subtable;
6245
6246 CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
6247 ovs_assert(cmap_count(&subtable->rules) == 0);
6248 dpcls_destroy_subtable(cls, subtable);
6249 }
6250 cmap_destroy(&cls->subtables_map);
6251 pvector_destroy(&cls->subtables);
6252 }
6253 }
6254
6255 static struct dpcls_subtable *
6256 dpcls_create_subtable(struct dpcls *cls, const struct netdev_flow_key *mask)
6257 {
6258 struct dpcls_subtable *subtable;
6259
6260 /* Need to add one. */
6261 subtable = xmalloc(sizeof *subtable
6262 - sizeof subtable->mask.mf + mask->len);
6263 cmap_init(&subtable->rules);
6264 subtable->hit_cnt = 0;
6265 netdev_flow_key_clone(&subtable->mask, mask);
6266 cmap_insert(&cls->subtables_map, &subtable->cmap_node, mask->hash);
6267 /* Add the new subtable at the end of the pvector (with no hits yet) */
6268 pvector_insert(&cls->subtables, subtable, 0);
6269 VLOG_DBG("Creating %"PRIuSIZE". subtable %p for in_port %d",
6270 cmap_count(&cls->subtables_map), subtable, cls->in_port);
6271 pvector_publish(&cls->subtables);
6272
6273 return subtable;
6274 }
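
/* Allocation note (a sketch, not normative): 'mask' is deliberately the
 * last member of struct dpcls_subtable so that the xmalloc() above can
 * extend its inline miniflow storage. The size expression swaps the
 * placeholder 'mask.mf' for the actual 'mask->len' bytes of miniflow
 * (map plus packed values) that netdev_flow_key_clone() then copies in. */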
6275
6276 static inline struct dpcls_subtable *
6277 dpcls_find_subtable(struct dpcls *cls, const struct netdev_flow_key *mask)
6278 {
6279 struct dpcls_subtable *subtable;
6280
6281 CMAP_FOR_EACH_WITH_HASH (subtable, cmap_node, mask->hash,
6282 &cls->subtables_map) {
6283 if (netdev_flow_key_equal(&subtable->mask, mask)) {
6284 return subtable;
6285 }
6286 }
6287 return dpcls_create_subtable(cls, mask);
6288 }
6289
6290
6291 /* Periodically sort the dpcls subtable vectors according to hit counts */
6292 static void
6293 dpcls_sort_subtable_vector(struct dpcls *cls)
6294 {
6295 struct pvector *pvec = &cls->subtables;
6296 struct dpcls_subtable *subtable;
6297
6298 PVECTOR_FOR_EACH (subtable, pvec) {
6299 pvector_change_priority(pvec, subtable, subtable->hit_cnt);
6300 subtable->hit_cnt = 0;
6301 }
6302 pvector_publish(pvec);
6303 }
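
/* Example of the effect: if subtable A counted 900 hits and subtable B
 * counted 100 during the last interval, pvector_change_priority() ranks
 * A above B, so dpcls_lookup()'s PVECTOR_FOR_EACH visits A first in the
 * next interval. Hit counts restart from zero, so stale traffic patterns
 * age out after a single interval. */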
6304
6305 static inline void
6306 dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
6307 struct polled_queue *poll_list, int poll_cnt)
6308 {
6309 struct dpcls *cls;
6310
6311 if (pmd->ctx.now > pmd->rxq_next_cycle_store) {
6312 uint64_t curr_tsc;
6313 /* Get the cycles that were used to process each queue and store. */
6314 for (unsigned i = 0; i < poll_cnt; i++) {
6315 uint64_t rxq_cyc_curr = dp_netdev_rxq_get_cycles(poll_list[i].rxq,
6316 RXQ_CYCLES_PROC_CURR);
6317 dp_netdev_rxq_set_intrvl_cycles(poll_list[i].rxq, rxq_cyc_curr);
6318 dp_netdev_rxq_set_cycles(poll_list[i].rxq, RXQ_CYCLES_PROC_CURR,
6319 0);
6320 }
6321 curr_tsc = cycles_counter_update(&pmd->perf_stats);
6322 if (pmd->intrvl_tsc_prev) {
6323 /* There is a prev timestamp, store a new intrvl cycle count. */
6324 atomic_store_relaxed(&pmd->intrvl_cycles,
6325 curr_tsc - pmd->intrvl_tsc_prev);
6326 }
6327 pmd->intrvl_tsc_prev = curr_tsc;
6328 /* Start new measuring interval */
6329 pmd->rxq_next_cycle_store = pmd->ctx.now + PMD_RXQ_INTERVAL_LEN;
6330 }
6331
6332 if (pmd->ctx.now > pmd->next_optimization) {
6333 /* Try to obtain the flow lock to block out revalidator threads.
6334 * If not possible, just try next time. */
6335 if (!ovs_mutex_trylock(&pmd->flow_mutex)) {
6336 /* Optimize each classifier */
6337 CMAP_FOR_EACH (cls, node, &pmd->classifiers) {
6338 dpcls_sort_subtable_vector(cls);
6339 }
6340 ovs_mutex_unlock(&pmd->flow_mutex);
6341 /* Start new measuring interval */
6342 pmd->next_optimization = pmd->ctx.now
6343 + DPCLS_OPTIMIZATION_INTERVAL;
6344 }
6345 }
6346 }
6347
6348 /* Insert 'rule' into 'cls'. */
6349 static void
6350 dpcls_insert(struct dpcls *cls, struct dpcls_rule *rule,
6351 const struct netdev_flow_key *mask)
6352 {
6353 struct dpcls_subtable *subtable = dpcls_find_subtable(cls, mask);
6354
6355 /* Refer to subtable's mask, also for later removal. */
6356 rule->mask = &subtable->mask;
6357 cmap_insert(&subtable->rules, &rule->cmap_node, rule->flow.hash);
6358 }
6359
6360 /* Removes 'rule' from 'cls', also destructing the 'rule'. */
6361 static void
6362 dpcls_remove(struct dpcls *cls, struct dpcls_rule *rule)
6363 {
6364 struct dpcls_subtable *subtable;
6365
6366 ovs_assert(rule->mask);
6367
6368 /* Get subtable from reference in rule->mask. */
6369 INIT_CONTAINER(subtable, rule->mask, mask);
6370 if (cmap_remove(&subtable->rules, &rule->cmap_node, rule->flow.hash)
6371 == 0) {
6372 /* Delete empty subtable. */
6373 dpcls_destroy_subtable(cls, subtable);
6374 pvector_publish(&cls->subtables);
6375 }
6376 }
6377
6378 /* Returns true if 'target' satisfies 'rule', that is, if for each 1-bit
6379 * in the rule's mask the corresponding bits in 'target' and in the rule's
6380 * flow value are the same. */
6380 static inline bool
6381 dpcls_rule_matches_key(const struct dpcls_rule *rule,
6382 const struct netdev_flow_key *target)
6383 {
6384 const uint64_t *keyp = miniflow_get_values(&rule->flow.mf);
6385 const uint64_t *maskp = miniflow_get_values(&rule->mask->mf);
6386 uint64_t value;
6387
6388 NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, target, rule->flow.mf.map) {
6389 if (OVS_UNLIKELY((value & *maskp++) != *keyp++)) {
6390 return false;
6391 }
6392 }
6393 return true;
6394 }
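
/* Worked example with a single 64-bit unit (values are illustrative):
 * with a mask word of 0x0f0f, a rule value word of 0x000f and a 'target'
 * word of 0x00ff, (0x00ff & 0x0f0f) == 0x000f, so the rule matches even
 * though the wildcarded high nibbles differ. Any mismatch in a masked
 * bit returns false immediately. */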
6395
6396 /* For each miniflow in 'keys' performs a classifier lookup writing the result
6397 * into the corresponding slot in 'rules'. If a particular entry in 'keys' is
6398 * NULL it is skipped.
6399 *
6400 * This function is optimized for use in the userspace datapath and therefore
6401 * does not implement a lot of features available in the standard
6402 * classifier_lookup() function. Specifically, it does not implement
6403 * priorities, instead returning any rule which matches the flow.
6404 *
6405 * Returns true if all miniflows found a corresponding rule. */
6406 static bool
6407 dpcls_lookup(struct dpcls *cls, const struct netdev_flow_key keys[],
6408 struct dpcls_rule **rules, const size_t cnt,
6409 int *num_lookups_p)
6410 {
6411 /* The received 'cnt' miniflows are the search-keys that will be processed
6412 * to find a matching entry in the available subtables. The number of
6413 * bits in map_type must be at least NETDEV_MAX_BURST (asserted below). */
6414 typedef uint32_t map_type;
6415 #define MAP_BITS (sizeof(map_type) * CHAR_BIT)
6416 BUILD_ASSERT_DECL(MAP_BITS >= NETDEV_MAX_BURST);
6417
6418 struct dpcls_subtable *subtable;
6419
6420 map_type keys_map = TYPE_MAXIMUM(map_type); /* Set all bits. */
6421 map_type found_map;
6422 uint32_t hashes[MAP_BITS];
6423 const struct cmap_node *nodes[MAP_BITS];
6424
6425 if (cnt != MAP_BITS) {
6426 keys_map >>= MAP_BITS - cnt; /* Clear extra bits. */
6427 }
6428 memset(rules, 0, cnt * sizeof *rules);
6429
6430 int lookups_match = 0, subtable_pos = 1;
6431
6432 /* The Datapath classifier - aka dpcls - is composed of subtables.
6433 * Subtables are dynamically created as needed when new rules are inserted.
6434 * Each subtable collects rules with matches on a specific subset of packet
6435 * fields as defined by the subtable's mask. We proceed to process every
6436 * search-key against each subtable, but when a match is found for a
6437 * search-key, the search for that key can stop because the rules are
6438 * non-overlapping. */
6439 PVECTOR_FOR_EACH (subtable, &cls->subtables) {
6440 int i;
6441
6442 /* Compute hashes for the remaining keys. Each search-key is
6443 * masked with the subtable's mask to avoid hashing the wildcarded
6444 * bits. */
6445 ULLONG_FOR_EACH_1(i, keys_map) {
6446 hashes[i] = netdev_flow_key_hash_in_mask(&keys[i],
6447 &subtable->mask);
6448 }
6449 /* Lookup. */
6450 found_map = cmap_find_batch(&subtable->rules, keys_map, hashes, nodes);
6451 /* Check results. When the i-th bit of found_map is set, it means
6452 * that a set of nodes with a matching hash value was found for the
6453 * i-th search-key. Due to possible hash collisions we need to check
6454 * which of the found rules, if any, really matches our masked
6455 * search-key. */
6456 ULLONG_FOR_EACH_1(i, found_map) {
6457 struct dpcls_rule *rule;
6458
6459 CMAP_NODE_FOR_EACH (rule, cmap_node, nodes[i]) {
6460 if (OVS_LIKELY(dpcls_rule_matches_key(rule, &keys[i]))) {
6461 rules[i] = rule;
6462 /* Even at 20 Mpps the 32-bit hit_cnt cannot wrap
6463 * within a one-second optimization interval. */
6464 subtable->hit_cnt++;
6465 lookups_match += subtable_pos;
6466 goto next;
6467 }
6468 }
6469 /* None of the found rules was a match. Reset the i-th bit to
6470 * keep searching this key in the next subtable. */
6471 ULLONG_SET0(found_map, i); /* Did not match. */
6472 next:
6473 ; /* Keep Sparse happy. */
6474 }
6475 keys_map &= ~found_map; /* Clear the found rules. */
6476 if (!keys_map) {
6477 if (num_lookups_p) {
6478 *num_lookups_p = lookups_match;
6479 }
6480 return true; /* All found. */
6481 }
6482 subtable_pos++;
6483 }
6484 if (num_lookups_p) {
6485 *num_lookups_p = lookups_match;
6486 }
6487 return false; /* Some misses. */
6488 }
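
/* Bitmap walk-through for dpcls_lookup() above, assuming cnt == 4:
 * keys_map starts as 0b1111. Suppose the first subtable produces hash
 * matches found_map == 0b0111, but the rule check rejects key 2, leaving
 * found_map == 0b0011; keys_map &= ~found_map keeps 0b1100, so only keys
 * 2 and 3 are hashed against the second subtable. Each hit adds the
 * 1-based position of the resolving subtable to lookups_match, which is
 * what fast_path_processing() reports as PMD_STAT_MASKED_LOOKUP. */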