/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2016, 2017 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "dpif-netdev.h"

#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <unistd.h>

#ifdef DPDK_NETDEV
#include <rte_cycles.h>
#endif

#include "bitmap.h"
#include "cmap.h"
#include "conntrack.h"
#include "coverage.h"
#include "ct-dpif.h"
#include "csum.h"
#include "dp-packet.h"
#include "dpif.h"
#include "dpif-provider.h"
#include "dummy.h"
#include "fat-rwlock.h"
#include "flow.h"
#include "hmapx.h"
#include "id-pool.h"
#include "latch.h"
#include "netdev.h"
#include "netdev-vport.h"
#include "netlink.h"
#include "odp-execute.h"
#include "odp-util.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/match.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/ofp-util.h"
#include "openvswitch/ofpbuf.h"
#include "openvswitch/shash.h"
#include "openvswitch/vlog.h"
#include "ovs-numa.h"
#include "ovs-rcu.h"
#include "packets.h"
#include "openvswitch/poll-loop.h"
#include "pvector.h"
#include "random.h"
#include "seq.h"
#include "smap.h"
#include "sset.h"
#include "timeval.h"
#include "tnl-neigh-cache.h"
#include "tnl-ports.h"
#include "unixctl.h"
#include "util.h"

VLOG_DEFINE_THIS_MODULE(dpif_netdev);

#define FLOW_DUMP_MAX_BATCH 50
/* Use per thread recirc_depth to prevent recirculation loop. */
#define MAX_RECIRC_DEPTH 5
DEFINE_STATIC_PER_THREAD_DATA(uint32_t, recirc_depth, 0)

/* Configuration parameters. */
enum { MAX_FLOWS = 65536 };     /* Maximum number of flows in flow table. */
enum { MAX_METERS = 65536 };    /* Maximum number of meters. */
enum { MAX_BANDS = 8 };         /* Maximum number of bands / meter. */
enum { N_METER_LOCKS = 64 };    /* Number of locks striped across the meters. */

/* Protects against changes to 'dp_netdevs'. */
static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dp_netdev's. */
static struct shash dp_netdevs OVS_GUARDED_BY(dp_netdev_mutex)
    = SHASH_INITIALIZER(&dp_netdevs);

static struct vlog_rate_limit upcall_rl = VLOG_RATE_LIMIT_INIT(600, 600);

#define DP_NETDEV_CS_SUPPORTED_MASK (CS_NEW | CS_ESTABLISHED | CS_RELATED \
                                     | CS_INVALID | CS_REPLY_DIR | CS_TRACKED \
                                     | CS_SRC_NAT | CS_DST_NAT)
#define DP_NETDEV_CS_UNSUPPORTED_MASK (~(uint32_t)DP_NETDEV_CS_SUPPORTED_MASK)

static struct odp_support dp_netdev_support = {
    .max_vlan_headers = SIZE_MAX,
    .max_mpls_depth = SIZE_MAX,
    .recirc = true,
    .ct_state = true,
    .ct_zone = true,
    .ct_mark = true,
    .ct_label = true,
    .ct_state_nat = true,
    .ct_orig_tuple = true,
    .ct_orig_tuple6 = true,
};

/* Stores a miniflow with inline values */

struct netdev_flow_key {
    uint32_t hash;       /* Hash function differs for different users. */
    uint32_t len;        /* Length of the following miniflow (incl. map). */
    struct miniflow mf;
    uint64_t buf[FLOW_MAX_PACKET_U64S];
};

/* Exact match cache for frequently used flows
 *
 * The cache uses a 32-bit hash of the packet (which can be the RSS hash) to
 * search its entries for a miniflow that matches exactly the miniflow of the
 * packet. It stores the 'dpcls_rule' (rule) that matches the miniflow.
 *
 * A cache entry holds a reference to its 'dp_netdev_flow'.
 *
 * A miniflow with a given hash can be in one of EM_FLOW_HASH_SEGS different
 * entries. The 32-bit hash is split into EM_FLOW_HASH_SEGS values (each of
 * them is EM_FLOW_HASH_SHIFT bits wide and the remainder is thrown away). Each
 * value is the index of a cache entry where the miniflow could be.
 *
 *
 * Thread-safety
 * =============
 *
 * Each pmd_thread has its own private exact match cache.
 * If dp_netdev_input is not called from a pmd thread, a mutex is used.
 */

#define EM_FLOW_HASH_SHIFT 13
#define EM_FLOW_HASH_ENTRIES (1u << EM_FLOW_HASH_SHIFT)
#define EM_FLOW_HASH_MASK (EM_FLOW_HASH_ENTRIES - 1)
#define EM_FLOW_HASH_SEGS 2

/* Default EMC insert probability is 1 / DEFAULT_EM_FLOW_INSERT_INV_PROB */
#define DEFAULT_EM_FLOW_INSERT_INV_PROB 100
#define DEFAULT_EM_FLOW_INSERT_MIN (UINT32_MAX /                     \
                                    DEFAULT_EM_FLOW_INSERT_INV_PROB)

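/* Illustrative sketch (not part of the original file): the EMC insertion
 * decision made elsewhere in this file is probabilistic.  With the default
 * inverse probability of 100, 'emc_insert_min' is UINT32_MAX / 100, so a
 * uniformly distributed 32-bit random value falls at or below it roughly
 * once in every 100 tries:
 *
 *     uint32_t min;
 *     atomic_read_relaxed(&pmd->dp->emc_insert_min, &min);
 *     if (min && random_uint32() <= min) {
 *         ...insert the flow into this pmd's EMC...
 *     }
 */
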
struct emc_entry {
    struct dp_netdev_flow *flow;
    struct netdev_flow_key key;   /* key.hash used for emc hash value. */
};

struct emc_cache {
    struct emc_entry entries[EM_FLOW_HASH_ENTRIES];
    int sweep_idx;                /* For emc_cache_slow_sweep(). */
};

/* Iterate in the exact match cache through every entry that might contain a
 * miniflow with hash 'HASH'. */
#define EMC_FOR_EACH_POS_WITH_HASH(EMC, CURRENT_ENTRY, HASH)                 \
    for (uint32_t i__ = 0, srch_hash__ = (HASH);                             \
         (CURRENT_ENTRY) = &(EMC)->entries[srch_hash__ & EM_FLOW_HASH_MASK], \
         i__ < EM_FLOW_HASH_SEGS;                                            \
         i__++, srch_hash__ >>= EM_FLOW_HASH_SHIFT)
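
/* Worked example (illustrative, not from the original file): with
 * EM_FLOW_HASH_SHIFT 13 and EM_FLOW_HASH_SEGS 2, a packet hash of
 * 0x0001A2B3 probes two cache slots:
 *
 *     segment 0:  0x0001A2B3        & 0x1FFF == 0x02B3
 *     segment 1: (0x0001A2B3 >> 13) & 0x1FFF == 0x000D
 *
 * Any higher-order hash bits beyond the two segments are discarded. */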

/* Simple non-wildcarding single-priority classifier. */

/* Time in ms between successive optimizations of the dpcls subtable vector */
#define DPCLS_OPTIMIZATION_INTERVAL 1000

/* Time in ms of the interval in which rxq processing cycles used in
 * rxq to pmd assignments is measured and stored. */
#define PMD_RXQ_INTERVAL_LEN 10000

/* Number of intervals for which cycles are stored
 * and used during rxq to pmd assignment. */
#define PMD_RXQ_INTERVAL_MAX 6

struct dpcls {
    struct cmap_node node;      /* Within dp_netdev_pmd_thread.classifiers */
    odp_port_t in_port;
    struct cmap subtables_map;
    struct pvector subtables;
};

/* A rule to be inserted to the classifier. */
struct dpcls_rule {
    struct cmap_node cmap_node;   /* Within struct dpcls_subtable 'rules'. */
    struct netdev_flow_key *mask; /* Subtable's mask. */
    struct netdev_flow_key flow;  /* Matching key. */
    /* 'flow' must be the last field, additional space is allocated here. */
};

static void dpcls_init(struct dpcls *);
static void dpcls_destroy(struct dpcls *);
static void dpcls_sort_subtable_vector(struct dpcls *);
static void dpcls_insert(struct dpcls *, struct dpcls_rule *,
                         const struct netdev_flow_key *mask);
static void dpcls_remove(struct dpcls *, struct dpcls_rule *);
static bool dpcls_lookup(struct dpcls *cls,
                         const struct netdev_flow_key keys[],
                         struct dpcls_rule **rules, size_t cnt,
                         int *num_lookups_p);

/* Set of supported meter flags */
#define DP_SUPPORTED_METER_FLAGS_MASK \
    (OFPMF13_STATS | OFPMF13_PKTPS | OFPMF13_KBPS | OFPMF13_BURST)

/* Set of supported meter band types */
#define DP_SUPPORTED_METER_BAND_TYPES           \
    ( 1 << OFPMBT13_DROP )

struct dp_meter_band {
    struct ofputil_meter_band up; /* type, prec_level, pad, rate, burst_size */
    uint32_t bucket; /* In 1/1000 packets (for PKTPS), or in bits (for KBPS) */
    uint64_t packet_count;
    uint64_t byte_count;
};

struct dp_meter {
    uint16_t flags;
    uint16_t n_bands;
    uint32_t max_delta_t;
    uint64_t used;
    uint64_t packet_count;
    uint64_t byte_count;
    struct dp_meter_band bands[];
};

/* Datapath based on the network device interface from netdev.h.
 *
 *
 * Thread-safety
 * =============
 *
 * Some members, marked 'const', are immutable.  Accessing other members
 * requires synchronization, as noted in more detail below.
 *
 * Acquisition order is, from outermost to innermost:
 *
 *    dp_netdev_mutex (global)
 *    port_mutex
 *    non_pmd_mutex
 */
struct dp_netdev {
    const struct dpif_class *const class;
    const char *const name;
    struct dpif *dpif;
    struct ovs_refcount ref_cnt;
    atomic_flag destroyed;

    /* Ports.
     *
     * Any lookup into 'ports' or any access to the dp_netdev_ports found
     * through 'ports' requires taking 'port_mutex'. */
    struct ovs_mutex port_mutex;
    struct hmap ports;
    struct seq *port_seq;       /* Incremented whenever a port changes. */

    /* Meters. */
    struct ovs_mutex meter_locks[N_METER_LOCKS];
    struct dp_meter *meters[MAX_METERS]; /* Meter bands. */

    /* Probability of EMC insertions is a factor of 'emc_insert_min'. */
    OVS_ALIGNED_VAR(CACHE_LINE_SIZE) atomic_uint32_t emc_insert_min;

    /* Protects access to ofproto-dpif-upcall interface during revalidator
     * thread synchronization. */
    struct fat_rwlock upcall_rwlock;
    upcall_callback *upcall_cb;  /* Callback function for executing upcalls. */
    void *upcall_aux;

    /* Callback function for notifying the purging of dp flows (during
     * pmd reset or deletion). */
    dp_purge_callback *dp_purge_cb;
    void *dp_purge_aux;

    /* Stores all 'struct dp_netdev_pmd_thread's. */
    struct cmap poll_threads;
    /* id pool for per thread static_tx_qid. */
    struct id_pool *tx_qid_pool;
    struct ovs_mutex tx_qid_pool_mutex;

    /* Protects the access of the 'struct dp_netdev_pmd_thread'
     * instance for non-pmd thread. */
    struct ovs_mutex non_pmd_mutex;

    /* Each pmd thread will store its pointer to
     * 'struct dp_netdev_pmd_thread' in 'per_pmd_key'. */
    ovsthread_key_t per_pmd_key;

    struct seq *reconfigure_seq;
    uint64_t last_reconfigure_seq;

    /* Cpu mask for pin of pmd threads. */
    char *pmd_cmask;

    uint64_t last_tnl_conf_seq;

    struct conntrack conntrack;
};

static void meter_lock(const struct dp_netdev *dp, uint32_t meter_id)
    OVS_ACQUIRES(dp->meter_locks[meter_id % N_METER_LOCKS])
{
    ovs_mutex_lock(&dp->meter_locks[meter_id % N_METER_LOCKS]);
}

static void meter_unlock(const struct dp_netdev *dp, uint32_t meter_id)
    OVS_RELEASES(dp->meter_locks[meter_id % N_METER_LOCKS])
{
    ovs_mutex_unlock(&dp->meter_locks[meter_id % N_METER_LOCKS]);
}
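
/* Usage sketch (illustrative, not from the original file): any access to
 * dp->meters[id] is bracketed by the striped lock for that meter id:
 *
 *     meter_lock(dp, id);
 *     struct dp_meter *meter = dp->meters[id];
 *     ...read or update 'meter'...
 *     meter_unlock(dp, id);
 *
 * Two meter ids that differ by a multiple of N_METER_LOCKS share a lock,
 * which bounds the lock array size while keeping contention low. */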

static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp,
                                                    odp_port_t)
    OVS_REQUIRES(dp->port_mutex);

enum dp_stat_type {
    DP_STAT_EXACT_HIT,          /* Packets that had an exact match (emc). */
    DP_STAT_MASKED_HIT,         /* Packets that matched in the flow table. */
    DP_STAT_MISS,               /* Packets that did not match. */
    DP_STAT_LOST,               /* Packets not passed up to the client. */
    DP_STAT_LOOKUP_HIT,         /* Number of subtable lookups for flow table
                                   hits */
    DP_N_STATS
};

enum pmd_cycles_counter_type {
    PMD_CYCLES_IDLE,            /* Cycles spent idle or unsuccessful polling */
    PMD_CYCLES_PROCESSING,      /* Cycles spent successfully polling and
                                 * processing polled packets */
    PMD_N_CYCLES
};

enum rxq_cycles_counter_type {
    RXQ_CYCLES_PROC_CURR,       /* Cycles spent successfully polling and
                                   processing packets during the current
                                   interval. */
    RXQ_CYCLES_PROC_HIST,       /* Total cycles of all intervals that are used
                                   during rxq to pmd assignment. */
    RXQ_N_CYCLES
};

#define XPS_TIMEOUT_MS 500LL

/* Contained by struct dp_netdev_port's 'rxqs' member. */
struct dp_netdev_rxq {
    struct dp_netdev_port *port;
    struct netdev_rxq *rx;
    unsigned core_id;                  /* Core to which this queue should be
                                          pinned. OVS_CORE_UNSPEC if the
                                          queue doesn't need to be pinned to a
                                          particular core. */
    unsigned intrvl_idx;               /* Write index for 'cycles_intrvl'. */
    struct dp_netdev_pmd_thread *pmd;  /* pmd thread that polls this queue. */

    /* Counters of cycles spent successfully polling and processing pkts. */
    atomic_ullong cycles[RXQ_N_CYCLES];
    /* We store PMD_RXQ_INTERVAL_MAX intervals of data for an rxq and then
       sum them to yield the cycles used for an rxq. */
    atomic_ullong cycles_intrvl[PMD_RXQ_INTERVAL_MAX];
};
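
/* Worked numbers (illustrative): with PMD_RXQ_INTERVAL_LEN of 10000 ms and
 * PMD_RXQ_INTERVAL_MAX of 6, 'cycles_intrvl' covers the most recent
 * 6 x 10 s = 60 s of measurements, so rxq to pmd assignment is driven by
 * roughly the last minute of observed processing cycles. */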

/* A port in a netdev-based datapath. */
struct dp_netdev_port {
    odp_port_t port_no;
    bool dynamic_txqs;          /* If true XPS will be used. */
    bool need_reconfigure;      /* True if we should reconfigure netdev. */
    struct netdev *netdev;
    struct hmap_node node;      /* Node in dp_netdev's 'ports'. */
    struct netdev_saved_flags *sf;
    struct dp_netdev_rxq *rxqs;
    unsigned n_rxq;             /* Number of elements in 'rxqs' */
    unsigned *txq_used;         /* Number of threads that use each tx queue. */
    struct ovs_mutex txq_used_mutex;
    char *type;                 /* Port type as requested by user. */
    char *rxq_affinity_list;    /* Requested affinity of rx queues. */
};

/* Contained by struct dp_netdev_flow's 'stats' member.  */
struct dp_netdev_flow_stats {
    atomic_llong used;             /* Last used time, in monotonic msecs. */
    atomic_ullong packet_count;    /* Number of packets matched. */
    atomic_ullong byte_count;      /* Number of bytes matched. */
    atomic_uint16_t tcp_flags;     /* Bitwise-OR of seen tcp_flags values. */
};

/* A flow in 'dp_netdev_pmd_thread's 'flow_table'.
 *
 *
 * Thread-safety
 * =============
 *
 * Except near the beginning or ending of its lifespan, rule 'rule' belongs to
 * its pmd thread's classifier.  The text below calls this classifier 'cls'.
 *
 * Motivation
 * ----------
 *
 * The thread safety rules described here for "struct dp_netdev_flow" are
 * motivated by two goals:
 *
 *    - Prevent threads that read members of "struct dp_netdev_flow" from
 *      reading bad data due to changes by some thread concurrently modifying
 *      those members.
 *
 *    - Prevent two threads making changes to members of a given "struct
 *      dp_netdev_flow" from interfering with each other.
 *
 *
 * Rules
 * -----
 *
 * A flow 'flow' may be accessed without a risk of being freed during an RCU
 * grace period.  Code that needs to hold onto a flow for a while
 * should try incrementing 'flow->ref_cnt' with dp_netdev_flow_ref().
 *
 * 'flow->ref_cnt' protects 'flow' from being freed.  It doesn't protect the
 * flow from being deleted from 'cls' and it doesn't protect members of 'flow'
 * from modification.
 *
 * Some members, marked 'const', are immutable.  Accessing other members
 * requires synchronization, as noted in more detail below.
 */
struct dp_netdev_flow {
    const struct flow flow;      /* Unmasked flow that created this entry. */
    /* Hash table index by unmasked flow. */
    const struct cmap_node node; /* In owning dp_netdev_pmd_thread's */
                                 /* 'flow_table'. */
    const ovs_u128 ufid;         /* Unique flow identifier. */
    const unsigned pmd_id;       /* The 'core_id' of pmd thread owning this */
                                 /* flow. */

    /* Number of references.
     * The classifier owns one reference.
     * Any thread trying to keep a rule from being freed should hold its own
     * reference. */
    struct ovs_refcount ref_cnt;

    bool dead;

    /* Statistics. */
    struct dp_netdev_flow_stats stats;

    /* Actions. */
    OVSRCU_TYPE(struct dp_netdev_actions *) actions;

    /* While processing a group of input packets, the datapath uses the next
     * member to store a pointer to the output batch for the flow.  It is
     * reset after the batch has been sent out (See dp_netdev_queue_batches(),
     * packet_batch_per_flow_init() and packet_batch_per_flow_execute()). */
    struct packet_batch_per_flow *batch;

    /* Packet classification. */
    struct dpcls_rule cr;        /* In owning dp_netdev's 'cls'. */
    /* 'cr' must be the last member. */
};

static void dp_netdev_flow_unref(struct dp_netdev_flow *);
static bool dp_netdev_flow_ref(struct dp_netdev_flow *);
static int dpif_netdev_flow_from_nlattrs(const struct nlattr *, uint32_t,
                                         struct flow *, bool);

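/* Reference-counting sketch (illustrative, not from the original file):
 * a thread that must keep a flow beyond the current RCU grace period takes
 * its own reference and releases it when done:
 *
 *     if (dp_netdev_flow_ref(flow)) {
 *         ...safe to use 'flow' even after it leaves the classifier...
 *         dp_netdev_flow_unref(flow);
 *     }
 *
 * The return value must be checked, since taking a reference can fail once
 * 'ref_cnt' has already dropped to zero. */
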
/* A set of datapath actions within a "struct dp_netdev_flow".
 *
 *
 * Thread-safety
 * =============
 *
 * A struct dp_netdev_actions 'actions' is protected with RCU. */
struct dp_netdev_actions {
    /* These members are immutable: they do not change during the struct's
     * lifetime.  */
    unsigned int size;          /* Size of 'actions', in bytes. */
    struct nlattr actions[];    /* Sequence of OVS_ACTION_ATTR_* attributes. */
};

struct dp_netdev_actions *dp_netdev_actions_create(const struct nlattr *,
                                                   size_t);
struct dp_netdev_actions *dp_netdev_flow_get_actions(
    const struct dp_netdev_flow *);
static void dp_netdev_actions_free(struct dp_netdev_actions *);

/* Contained by struct dp_netdev_pmd_thread's 'stats' member.  */
struct dp_netdev_pmd_stats {
    /* Indexed by DP_STAT_*. */
    atomic_ullong n[DP_N_STATS];
};

/* Contained by struct dp_netdev_pmd_thread's 'cycle' member.  */
struct dp_netdev_pmd_cycles {
    /* Indexed by PMD_CYCLES_*. */
    atomic_ullong n[PMD_N_CYCLES];
};

struct polled_queue {
    struct dp_netdev_rxq *rxq;
    odp_port_t port_no;
};

/* Contained by struct dp_netdev_pmd_thread's 'poll_list' member. */
struct rxq_poll {
    struct dp_netdev_rxq *rxq;
    struct hmap_node node;
};

/* Contained by struct dp_netdev_pmd_thread's 'send_port_cache',
 * 'tnl_port_cache' or 'tx_ports'. */
struct tx_port {
    struct dp_netdev_port *port;
    int qid;
    long long last_used;
    struct hmap_node node;
};

/* PMD: Poll mode drivers.  PMD accesses devices via polling to eliminate
 * the performance overhead of interrupt processing.  Therefore netdev
 * cannot implement rx-wait for these devices.  dpif-netdev needs to poll
 * these devices to check for received buffers.  A pmd thread does polling
 * for devices assigned to itself.
 *
 * DPDK uses PMDs for accessing NICs.
 *
 * Note, the instance with cpu core id NON_PMD_CORE_ID is reserved for
 * I/O of all non-pmd threads.  There is no actual thread created
 * for that instance.
 *
 * Each struct has its own flow cache and classifier per managed ingress port.
 * For packets received on an ingress port, a lookup is done in the
 * corresponding PMD thread's flow cache and, in case of a miss, in the
 * corresponding classifier of that port.  Packets are executed with the found
 * actions in either case.
 * */
struct dp_netdev_pmd_thread {
    PADDED_MEMBERS_CACHELINE_MARKER(CACHE_LINE_SIZE, cacheline0,
        struct dp_netdev *dp;
        struct cmap_node node;          /* In 'dp->poll_threads'. */
        pthread_cond_t cond;            /* For synchronizing pmd thread
                                           reload. */
    );

    PADDED_MEMBERS_CACHELINE_MARKER(CACHE_LINE_SIZE, cacheline1,
        struct ovs_mutex cond_mutex;    /* Mutex for condition variable. */
        pthread_t thread;
        unsigned core_id;               /* CPU core id of this pmd thread. */
        int numa_id;                    /* numa node id of this pmd thread. */
    );

    /* Per thread exact-match cache.  Note, the instance for cpu core
     * NON_PMD_CORE_ID can be accessed by multiple threads, and thus
     * needs to be protected by 'non_pmd_mutex'.  Every other instance
     * will only be accessed by its own pmd thread. */
    OVS_ALIGNED_VAR(CACHE_LINE_SIZE) struct emc_cache flow_cache;
    struct ovs_refcount ref_cnt;    /* Every reference must be refcount'ed. */

    /* Queue id used by this pmd thread to send packets on all netdevs if
     * XPS disabled for this netdev. All static_tx_qid's are unique and less
     * than 'cmap_count(dp->poll_threads)'. */
    uint32_t static_tx_qid;

    /* Flow-Table and classifiers
     *
     * Writers of 'flow_table' must take the 'flow_mutex'.  Corresponding
     * changes to 'classifiers' must be made while still holding the
     * 'flow_mutex'.
     */
    struct ovs_mutex flow_mutex;
    PADDED_MEMBERS(CACHE_LINE_SIZE,
        struct cmap flow_table OVS_GUARDED; /* Flow table. */

        /* One classifier per in_port polled by the pmd */
        struct cmap classifiers;
        /* Periodically sort subtable vectors according to hit frequencies */
        long long int next_optimization;
        /* End of the next time interval for which processing cycles
           are stored for each polled rxq. */
        long long int rxq_interval;

        /* Cycles counters */
        struct dp_netdev_pmd_cycles cycles;

        /* Used to count cycles. See 'cycles_counter_end()'. */
        unsigned long long last_cycles;
        struct latch exit_latch;        /* For terminating the pmd thread. */
    );

    PADDED_MEMBERS(CACHE_LINE_SIZE,
        /* Statistics. */
        struct dp_netdev_pmd_stats stats;

        struct seq *reload_seq;
        uint64_t last_reload_seq;
        atomic_bool reload;             /* Do we need to reload ports? */
        bool isolated;

        /* Set to true if the pmd thread needs to be reloaded. */
        bool need_reload;
        /* 5 pad bytes. */
    );

    PADDED_MEMBERS(CACHE_LINE_SIZE,
        struct ovs_mutex port_mutex;    /* Mutex for 'poll_list'
                                           and 'tx_ports'. */
        /* 16 pad bytes. */
    );
    PADDED_MEMBERS(CACHE_LINE_SIZE,
        /* List of rx queues to poll. */
        struct hmap poll_list OVS_GUARDED;
        /* Map of 'tx_port's used for transmission.  Written by the main
         * thread, read by the pmd thread. */
        struct hmap tx_ports OVS_GUARDED;
    );
    PADDED_MEMBERS(CACHE_LINE_SIZE,
        /* These are thread-local copies of 'tx_ports'.  One contains only
         * tunnel ports (that support push_tunnel/pop_tunnel), the other
         * contains ports with at least one txq (that support send).
         * A port can be in both.
         *
         * There are two separate maps to make sure that we don't try to
         * execute OUTPUT on a device which has 0 txqs or PUSH/POP on a
         * non-tunnel device.
         *
         * The instances for cpu core NON_PMD_CORE_ID can be accessed by
         * multiple threads and thus need to be protected by 'non_pmd_mutex'.
         * Every other instance will only be accessed by its own pmd thread. */
        struct hmap tnl_port_cache;
        struct hmap send_port_cache;
    );

    PADDED_MEMBERS(CACHE_LINE_SIZE,
        /* Only a pmd thread can write on its own 'cycles' and 'stats'.
         * The main thread keeps 'stats_zero' and 'cycles_zero' as base
         * values and subtracts them from 'stats' and 'cycles' before
         * reporting to the user */
        unsigned long long stats_zero[DP_N_STATS];
        uint64_t cycles_zero[PMD_N_CYCLES];
        /* 8 pad bytes. */
    );
};

/* Interface to netdev-based datapath. */
struct dpif_netdev {
    struct dpif dpif;
    struct dp_netdev *dp;
    uint64_t last_port_seq;
};

static int get_port_by_number(struct dp_netdev *dp, odp_port_t port_no,
                              struct dp_netdev_port **portp)
    OVS_REQUIRES(dp->port_mutex);
static int get_port_by_name(struct dp_netdev *dp, const char *devname,
                            struct dp_netdev_port **portp)
    OVS_REQUIRES(dp->port_mutex);
static void dp_netdev_free(struct dp_netdev *)
    OVS_REQUIRES(dp_netdev_mutex);
static int do_add_port(struct dp_netdev *dp, const char *devname,
                       const char *type, odp_port_t port_no)
    OVS_REQUIRES(dp->port_mutex);
static void do_del_port(struct dp_netdev *dp, struct dp_netdev_port *)
    OVS_REQUIRES(dp->port_mutex);
static int dpif_netdev_open(const struct dpif_class *, const char *name,
                            bool create, struct dpif **);
static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
                                      struct dp_packet_batch *,
                                      bool may_steal, const struct flow *flow,
                                      const struct nlattr *actions,
                                      size_t actions_len,
                                      long long now);
static void dp_netdev_input(struct dp_netdev_pmd_thread *,
                            struct dp_packet_batch *, odp_port_t port_no);
static void dp_netdev_recirculate(struct dp_netdev_pmd_thread *,
                                  struct dp_packet_batch *);

static void dp_netdev_disable_upcall(struct dp_netdev *);
static void dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd,
                                    struct dp_netdev *dp, unsigned core_id,
                                    int numa_id);
static void dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_set_nonpmd(struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex);

static void *pmd_thread_main(void *);
static struct dp_netdev_pmd_thread *dp_netdev_get_pmd(struct dp_netdev *dp,
                                                      unsigned core_id);
static struct dp_netdev_pmd_thread *
dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos);
static void dp_netdev_del_pmd(struct dp_netdev *dp,
                              struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_destroy_all_pmds(struct dp_netdev *dp, bool non_pmd);
static void dp_netdev_pmd_clear_ports(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread *pmd,
                                         struct dp_netdev_port *port)
    OVS_REQUIRES(pmd->port_mutex);
static void dp_netdev_del_port_tx_from_pmd(struct dp_netdev_pmd_thread *pmd,
                                           struct tx_port *tx)
    OVS_REQUIRES(pmd->port_mutex);
static void dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd,
                                     struct dp_netdev_rxq *rxq)
    OVS_REQUIRES(pmd->port_mutex);
static void dp_netdev_del_rxq_from_pmd(struct dp_netdev_pmd_thread *pmd,
                                       struct rxq_poll *poll)
    OVS_REQUIRES(pmd->port_mutex);
static void reconfigure_datapath(struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex);
static bool dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd);
static void pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd)
    OVS_REQUIRES(pmd->port_mutex);
static inline void
dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
                           struct polled_queue *poll_list, int poll_cnt);
static void
dp_netdev_rxq_set_cycles(struct dp_netdev_rxq *rx,
                         enum rxq_cycles_counter_type type,
                         unsigned long long cycles);
static uint64_t
dp_netdev_rxq_get_cycles(struct dp_netdev_rxq *rx,
                         enum rxq_cycles_counter_type type);
static void
dp_netdev_rxq_set_intrvl_cycles(struct dp_netdev_rxq *rx,
                                unsigned long long cycles);
static uint64_t
dp_netdev_rxq_get_intrvl_cycles(struct dp_netdev_rxq *rx, unsigned idx);
static void
dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
                               long long now, bool purge);
static int dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread *pmd,
                                      struct tx_port *tx, long long now);

static inline bool emc_entry_alive(struct emc_entry *ce);
static void emc_clear_entry(struct emc_entry *ce);

static void dp_netdev_request_reconfigure(struct dp_netdev *dp);

DDP
752static void
753emc_cache_init(struct emc_cache *flow_cache)
754{
755 int i;
756
67ad54cb 757 flow_cache->sweep_idx = 0;
9bbf1c3d
DDP
758 for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
759 flow_cache->entries[i].flow = NULL;
0de8783a 760 flow_cache->entries[i].key.hash = 0;
09b0fa9c 761 flow_cache->entries[i].key.len = sizeof(struct miniflow);
5fcff47b 762 flowmap_init(&flow_cache->entries[i].key.mf.map);
9bbf1c3d
DDP
763 }
764}
765
766static void
767emc_cache_uninit(struct emc_cache *flow_cache)
768{
769 int i;
770
771 for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
772 emc_clear_entry(&flow_cache->entries[i]);
773 }
774}
775
67ad54cb
AW
776/* Check and clear dead flow references slowly (one entry at each
777 * invocation). */
778static void
779emc_cache_slow_sweep(struct emc_cache *flow_cache)
780{
781 struct emc_entry *entry = &flow_cache->entries[flow_cache->sweep_idx];
782
783 if (!emc_entry_alive(entry)) {
784 emc_clear_entry(entry);
785 }
786 flow_cache->sweep_idx = (flow_cache->sweep_idx + 1) & EM_FLOW_HASH_MASK;
787}
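
/* Pacing note (illustrative, not from the original file): each call advances
 * 'sweep_idx' by one, so with EM_FLOW_HASH_ENTRIES (1 << 13) slots it takes
 * 8192 invocations to visit the whole cache.  The pmd main loop elsewhere in
 * this file invokes the sweep only once in a while rather than per packet,
 * keeping the per-iteration cost constant. */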

/* Returns true if 'dpif' is a netdev or dummy dpif, false otherwise. */
bool
dpif_is_netdev(const struct dpif *dpif)
{
    return dpif->dpif_class->open == dpif_netdev_open;
}

static struct dpif_netdev *
dpif_netdev_cast(const struct dpif *dpif)
{
    ovs_assert(dpif_is_netdev(dpif));
    return CONTAINER_OF(dpif, struct dpif_netdev, dpif);
}

static struct dp_netdev *
get_dp_netdev(const struct dpif *dpif)
{
    return dpif_netdev_cast(dpif)->dp;
}

enum pmd_info_type {
    PMD_INFO_SHOW_STATS,  /* Show how cpu cycles are spent. */
    PMD_INFO_CLEAR_STATS, /* Set the cycles count to 0. */
    PMD_INFO_SHOW_RXQ     /* Show poll-lists of pmd threads. */
};

static void
pmd_info_show_stats(struct ds *reply,
                    struct dp_netdev_pmd_thread *pmd,
                    unsigned long long stats[DP_N_STATS],
                    uint64_t cycles[PMD_N_CYCLES])
{
    unsigned long long total_packets;
    uint64_t total_cycles = 0;
    int i;

    /* These loops subtract reference values ('*_zero') from the counters.
     * Since loads and stores are relaxed, it might be possible for a '*_zero'
     * value to be more recent than the current value we're reading from the
     * counter.  This is not a big problem, since these numbers are not
     * supposed to be too accurate, but we should at least make sure that
     * the result is not negative. */
    for (i = 0; i < DP_N_STATS; i++) {
        if (stats[i] > pmd->stats_zero[i]) {
            stats[i] -= pmd->stats_zero[i];
        } else {
            stats[i] = 0;
        }
    }

    /* Sum of all the matched and not matched packets gives the total.  */
    total_packets = stats[DP_STAT_EXACT_HIT] + stats[DP_STAT_MASKED_HIT]
                    + stats[DP_STAT_MISS];

    for (i = 0; i < PMD_N_CYCLES; i++) {
        if (cycles[i] > pmd->cycles_zero[i]) {
            cycles[i] -= pmd->cycles_zero[i];
        } else {
            cycles[i] = 0;
        }

        total_cycles += cycles[i];
    }

    ds_put_cstr(reply, (pmd->core_id == NON_PMD_CORE_ID)
                        ? "main thread" : "pmd thread");

    if (pmd->numa_id != OVS_NUMA_UNSPEC) {
        ds_put_format(reply, " numa_id %d", pmd->numa_id);
    }
    if (pmd->core_id != OVS_CORE_UNSPEC && pmd->core_id != NON_PMD_CORE_ID) {
        ds_put_format(reply, " core_id %u", pmd->core_id);
    }
    ds_put_cstr(reply, ":\n");

    ds_put_format(reply,
                  "\temc hits:%llu\n\tmegaflow hits:%llu\n"
                  "\tavg. subtable lookups per hit:%.2f\n"
                  "\tmiss:%llu\n\tlost:%llu\n",
                  stats[DP_STAT_EXACT_HIT], stats[DP_STAT_MASKED_HIT],
                  stats[DP_STAT_MASKED_HIT] > 0
                  ? (1.0*stats[DP_STAT_LOOKUP_HIT])/stats[DP_STAT_MASKED_HIT]
                  : 0,
                  stats[DP_STAT_MISS], stats[DP_STAT_LOST]);

    if (total_cycles == 0) {
        return;
    }

    ds_put_format(reply,
                  "\tidle cycles:%"PRIu64" (%.02f%%)\n"
                  "\tprocessing cycles:%"PRIu64" (%.02f%%)\n",
                  cycles[PMD_CYCLES_IDLE],
                  cycles[PMD_CYCLES_IDLE] / (double)total_cycles * 100,
                  cycles[PMD_CYCLES_PROCESSING],
                  cycles[PMD_CYCLES_PROCESSING] / (double)total_cycles * 100);

    if (total_packets == 0) {
        return;
    }

    ds_put_format(reply,
                  "\tavg cycles per packet: %.02f (%"PRIu64"/%llu)\n",
                  total_cycles / (double)total_packets,
                  total_cycles, total_packets);

    ds_put_format(reply,
                  "\tavg processing cycles per packet: "
                  "%.02f (%"PRIu64"/%llu)\n",
                  cycles[PMD_CYCLES_PROCESSING] / (double)total_packets,
                  cycles[PMD_CYCLES_PROCESSING], total_packets);
}

static void
pmd_info_clear_stats(struct ds *reply OVS_UNUSED,
                     struct dp_netdev_pmd_thread *pmd,
                     unsigned long long stats[DP_N_STATS],
                     uint64_t cycles[PMD_N_CYCLES])
{
    int i;

    /* We cannot write 'stats' and 'cycles' (because they're written by other
     * threads) and we shouldn't change 'stats' (because they're used to count
     * datapath stats, which must not be cleared here).  Instead, we save the
     * current values and subtract them from the values to be displayed in the
     * future */
    for (i = 0; i < DP_N_STATS; i++) {
        pmd->stats_zero[i] = stats[i];
    }
    for (i = 0; i < PMD_N_CYCLES; i++) {
        pmd->cycles_zero[i] = cycles[i];
    }
}

static int
compare_poll_list(const void *a_, const void *b_)
{
    const struct rxq_poll *a = a_;
    const struct rxq_poll *b = b_;

    const char *namea = netdev_rxq_get_name(a->rxq->rx);
    const char *nameb = netdev_rxq_get_name(b->rxq->rx);

    int cmp = strcmp(namea, nameb);
    if (!cmp) {
        return netdev_rxq_get_queue_id(a->rxq->rx)
               - netdev_rxq_get_queue_id(b->rxq->rx);
    } else {
        return cmp;
    }
}

static void
sorted_poll_list(struct dp_netdev_pmd_thread *pmd, struct rxq_poll **list,
                 size_t *n)
{
    struct rxq_poll *ret, *poll;
    size_t i;

    *n = hmap_count(&pmd->poll_list);
    if (!*n) {
        ret = NULL;
    } else {
        ret = xcalloc(*n, sizeof *ret);
        i = 0;
        HMAP_FOR_EACH (poll, node, &pmd->poll_list) {
            ret[i] = *poll;
            i++;
        }
        ovs_assert(i == *n);
        qsort(ret, *n, sizeof *ret, compare_poll_list);
    }

    *list = ret;
}

static void
pmd_info_show_rxq(struct ds *reply, struct dp_netdev_pmd_thread *pmd)
{
    if (pmd->core_id != NON_PMD_CORE_ID) {
        const char *prev_name = NULL;
        struct rxq_poll *list;
        size_t i, n;

        ds_put_format(reply,
                      "pmd thread numa_id %d core_id %u:\n\tisolated : %s\n",
                      pmd->numa_id, pmd->core_id, (pmd->isolated)
                                                  ? "true" : "false");

        ovs_mutex_lock(&pmd->port_mutex);
        sorted_poll_list(pmd, &list, &n);
        for (i = 0; i < n; i++) {
            const char *name = netdev_rxq_get_name(list[i].rxq->rx);

            if (!prev_name || strcmp(name, prev_name)) {
                if (prev_name) {
                    ds_put_cstr(reply, "\n");
                }
                ds_put_format(reply, "\tport: %s\tqueue-id:", name);
            }
            ds_put_format(reply, " %d",
                          netdev_rxq_get_queue_id(list[i].rxq->rx));
            prev_name = name;
        }
        ovs_mutex_unlock(&pmd->port_mutex);
        ds_put_cstr(reply, "\n");
        free(list);
    }
}

static int
compare_poll_thread_list(const void *a_, const void *b_)
{
    const struct dp_netdev_pmd_thread *a, *b;

    a = *(struct dp_netdev_pmd_thread **)a_;
    b = *(struct dp_netdev_pmd_thread **)b_;

    if (a->core_id < b->core_id) {
        return -1;
    }
    if (a->core_id > b->core_id) {
        return 1;
    }
    return 0;
}

/* Create a sorted list of pmd's from the dp->poll_threads cmap. We can use
 * this list, as long as we do not go to quiescent state. */
static void
sorted_poll_thread_list(struct dp_netdev *dp,
                        struct dp_netdev_pmd_thread ***list,
                        size_t *n)
{
    struct dp_netdev_pmd_thread *pmd;
    struct dp_netdev_pmd_thread **pmd_list;
    size_t k = 0, n_pmds;

    n_pmds = cmap_count(&dp->poll_threads);
    pmd_list = xcalloc(n_pmds, sizeof *pmd_list);

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        if (k >= n_pmds) {
            break;
        }
        pmd_list[k++] = pmd;
    }

    qsort(pmd_list, k, sizeof *pmd_list, compare_poll_thread_list);

    *list = pmd_list;
    *n = k;
}

static void
dpif_netdev_pmd_rebalance(struct unixctl_conn *conn, int argc,
                          const char *argv[], void *aux OVS_UNUSED)
{
    struct ds reply = DS_EMPTY_INITIALIZER;
    struct dp_netdev *dp = NULL;

    ovs_mutex_lock(&dp_netdev_mutex);

    if (argc == 2) {
        dp = shash_find_data(&dp_netdevs, argv[1]);
    } else if (shash_count(&dp_netdevs) == 1) {
        /* There's only one datapath */
        dp = shash_first(&dp_netdevs)->data;
    }

    if (!dp) {
        ovs_mutex_unlock(&dp_netdev_mutex);
        unixctl_command_reply_error(conn,
                                    "please specify an existing datapath");
        return;
    }

    dp_netdev_request_reconfigure(dp);
    ovs_mutex_unlock(&dp_netdev_mutex);
    ds_put_cstr(&reply, "pmd rxq rebalance requested.\n");
    unixctl_command_reply(conn, ds_cstr(&reply));
    ds_destroy(&reply);
}

static void
dpif_netdev_pmd_info(struct unixctl_conn *conn, int argc, const char *argv[],
                     void *aux)
{
    struct ds reply = DS_EMPTY_INITIALIZER;
    struct dp_netdev_pmd_thread **pmd_list;
    struct dp_netdev *dp = NULL;
    size_t n;
    enum pmd_info_type type = *(enum pmd_info_type *) aux;

    ovs_mutex_lock(&dp_netdev_mutex);

    if (argc == 2) {
        dp = shash_find_data(&dp_netdevs, argv[1]);
    } else if (shash_count(&dp_netdevs) == 1) {
        /* There's only one datapath */
        dp = shash_first(&dp_netdevs)->data;
    }

    if (!dp) {
        ovs_mutex_unlock(&dp_netdev_mutex);
        unixctl_command_reply_error(conn,
                                    "please specify an existing datapath");
        return;
    }

    sorted_poll_thread_list(dp, &pmd_list, &n);
    for (size_t i = 0; i < n; i++) {
        struct dp_netdev_pmd_thread *pmd = pmd_list[i];
        if (!pmd) {
            break;
        }

        if (type == PMD_INFO_SHOW_RXQ) {
            pmd_info_show_rxq(&reply, pmd);
        } else {
            unsigned long long stats[DP_N_STATS];
            uint64_t cycles[PMD_N_CYCLES];

            /* Read current stats and cycle counters */
            for (size_t j = 0; j < ARRAY_SIZE(stats); j++) {
                atomic_read_relaxed(&pmd->stats.n[j], &stats[j]);
            }
            for (size_t j = 0; j < ARRAY_SIZE(cycles); j++) {
                atomic_read_relaxed(&pmd->cycles.n[j], &cycles[j]);
            }

            if (type == PMD_INFO_CLEAR_STATS) {
                pmd_info_clear_stats(&reply, pmd, stats, cycles);
            } else if (type == PMD_INFO_SHOW_STATS) {
                pmd_info_show_stats(&reply, pmd, stats, cycles);
            }
        }
    }
    free(pmd_list);

    ovs_mutex_unlock(&dp_netdev_mutex);

    unixctl_command_reply(conn, ds_cstr(&reply));
    ds_destroy(&reply);
}

static int
dpif_netdev_init(void)
{
    static enum pmd_info_type show_aux = PMD_INFO_SHOW_STATS,
                              clear_aux = PMD_INFO_CLEAR_STATS,
                              poll_aux = PMD_INFO_SHOW_RXQ;

    unixctl_command_register("dpif-netdev/pmd-stats-show", "[dp]",
                             0, 1, dpif_netdev_pmd_info,
                             (void *)&show_aux);
    unixctl_command_register("dpif-netdev/pmd-stats-clear", "[dp]",
                             0, 1, dpif_netdev_pmd_info,
                             (void *)&clear_aux);
    unixctl_command_register("dpif-netdev/pmd-rxq-show", "[dp]",
                             0, 1, dpif_netdev_pmd_info,
                             (void *)&poll_aux);
    unixctl_command_register("dpif-netdev/pmd-rxq-rebalance", "[dp]",
                             0, 1, dpif_netdev_pmd_rebalance,
                             NULL);
    return 0;
}
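
/* Usage note (illustrative): the unixctl commands registered above are
 * reachable at runtime through ovs-appctl, e.g.:
 *
 *     ovs-appctl dpif-netdev/pmd-stats-show
 *     ovs-appctl dpif-netdev/pmd-rxq-rebalance
 *
 * The optional "[dp]" argument names a datapath; it may be omitted when
 * only one datapath exists. */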

static int
dpif_netdev_enumerate(struct sset *all_dps,
                      const struct dpif_class *dpif_class)
{
    struct shash_node *node;

    ovs_mutex_lock(&dp_netdev_mutex);
    SHASH_FOR_EACH(node, &dp_netdevs) {
        struct dp_netdev *dp = node->data;
        if (dpif_class != dp->class) {
            /* 'dp_netdevs' contains both "netdev" and "dummy" dpifs.
             * If the class doesn't match, skip this dpif. */
            continue;
        }
        sset_add(all_dps, node->name);
    }
    ovs_mutex_unlock(&dp_netdev_mutex);

    return 0;
}

static bool
dpif_netdev_class_is_dummy(const struct dpif_class *class)
{
    return class != &dpif_netdev_class;
}

static const char *
dpif_netdev_port_open_type(const struct dpif_class *class, const char *type)
{
    return strcmp(type, "internal") ? type
                  : dpif_netdev_class_is_dummy(class) ? "dummy-internal"
                  : "tap";
}

static struct dpif *
create_dpif_netdev(struct dp_netdev *dp)
{
    uint16_t netflow_id = hash_string(dp->name, 0);
    struct dpif_netdev *dpif;

    ovs_refcount_ref(&dp->ref_cnt);

    dpif = xmalloc(sizeof *dpif);
    dpif_init(&dpif->dpif, dp->class, dp->name, netflow_id >> 8, netflow_id);
    dpif->dp = dp;
    dpif->last_port_seq = seq_read(dp->port_seq);

    return &dpif->dpif;
}

/* Choose an unused, non-zero port number and return it on success.
 * Return ODPP_NONE on failure. */
static odp_port_t
choose_port(struct dp_netdev *dp, const char *name)
    OVS_REQUIRES(dp->port_mutex)
{
    uint32_t port_no;

    if (dp->class != &dpif_netdev_class) {
        const char *p;
        int start_no = 0;

        /* If the port name begins with "br", start the number search at
         * 100 to make writing tests easier. */
        if (!strncmp(name, "br", 2)) {
            start_no = 100;
        }

        /* If the port name contains a number, try to assign that port number.
         * This can make writing unit tests easier because port numbers are
         * predictable. */
        for (p = name; *p != '\0'; p++) {
            if (isdigit((unsigned char) *p)) {
                port_no = start_no + strtol(p, NULL, 10);
                if (port_no > 0 && port_no != odp_to_u32(ODPP_NONE)
                    && !dp_netdev_lookup_port(dp, u32_to_odp(port_no))) {
                    return u32_to_odp(port_no);
                }
                break;
            }
        }
    }

    for (port_no = 1; port_no <= UINT16_MAX; port_no++) {
        if (!dp_netdev_lookup_port(dp, u32_to_odp(port_no))) {
            return u32_to_odp(port_no);
        }
    }

    return ODPP_NONE;
}
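
/* Worked examples (illustrative): for a dummy datapath, a port named "eth2"
 * is first tried as port number 2, and a port named "br1" as 100 + 1 = 101;
 * if that number is taken (or the name has no digits at all), the linear
 * scan above picks the lowest free number starting from 1. */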

static int
create_dp_netdev(const char *name, const struct dpif_class *class,
                 struct dp_netdev **dpp)
    OVS_REQUIRES(dp_netdev_mutex)
{
    struct dp_netdev *dp;
    int error;

    dp = xzalloc(sizeof *dp);
    shash_add(&dp_netdevs, name, dp);

    *CONST_CAST(const struct dpif_class **, &dp->class) = class;
    *CONST_CAST(const char **, &dp->name) = xstrdup(name);
    ovs_refcount_init(&dp->ref_cnt);
    atomic_flag_clear(&dp->destroyed);

    ovs_mutex_init(&dp->port_mutex);
    hmap_init(&dp->ports);
    dp->port_seq = seq_create();
    fat_rwlock_init(&dp->upcall_rwlock);

    dp->reconfigure_seq = seq_create();
    dp->last_reconfigure_seq = seq_read(dp->reconfigure_seq);

    for (int i = 0; i < N_METER_LOCKS; ++i) {
        ovs_mutex_init_adaptive(&dp->meter_locks[i]);
    }

    /* Disable upcalls by default. */
    dp_netdev_disable_upcall(dp);
    dp->upcall_aux = NULL;
    dp->upcall_cb = NULL;

    conntrack_init(&dp->conntrack);

    atomic_init(&dp->emc_insert_min, DEFAULT_EM_FLOW_INSERT_MIN);

    cmap_init(&dp->poll_threads);

    ovs_mutex_init(&dp->tx_qid_pool_mutex);
    /* We need 1 Tx queue for each possible core + 1 for non-PMD threads. */
    dp->tx_qid_pool = id_pool_create(0, ovs_numa_get_n_cores() + 1);

    ovs_mutex_init_recursive(&dp->non_pmd_mutex);
    ovsthread_key_create(&dp->per_pmd_key, NULL);

    ovs_mutex_lock(&dp->port_mutex);
    /* non-PMD will be created before all other threads and will
     * allocate static_tx_qid = 0. */
    dp_netdev_set_nonpmd(dp);

    error = do_add_port(dp, name, dpif_netdev_port_open_type(dp->class,
                                                             "internal"),
                        ODPP_LOCAL);
    ovs_mutex_unlock(&dp->port_mutex);
    if (error) {
        dp_netdev_free(dp);
        return error;
    }

    dp->last_tnl_conf_seq = seq_read(tnl_conf_seq);
    *dpp = dp;
    return 0;
}

static void
dp_netdev_request_reconfigure(struct dp_netdev *dp)
{
    seq_change(dp->reconfigure_seq);
}

static bool
dp_netdev_is_reconf_required(struct dp_netdev *dp)
{
    return seq_read(dp->reconfigure_seq) != dp->last_reconfigure_seq;
}

static int
dpif_netdev_open(const struct dpif_class *class, const char *name,
                 bool create, struct dpif **dpifp)
{
    struct dp_netdev *dp;
    int error;

    ovs_mutex_lock(&dp_netdev_mutex);
    dp = shash_find_data(&dp_netdevs, name);
    if (!dp) {
        error = create ? create_dp_netdev(name, class, &dp) : ENODEV;
    } else {
        error = (dp->class != class ? EINVAL
                 : create ? EEXIST
                 : 0);
    }
    if (!error) {
        *dpifp = create_dpif_netdev(dp);
        dp->dpif = *dpifp;
    }
    ovs_mutex_unlock(&dp_netdev_mutex);

    return error;
}

static void
dp_netdev_destroy_upcall_lock(struct dp_netdev *dp)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    /* Check that upcalls are disabled, i.e. that the rwlock is taken */
    ovs_assert(fat_rwlock_tryrdlock(&dp->upcall_rwlock));

    /* Before freeing a lock we should release it */
    fat_rwlock_unlock(&dp->upcall_rwlock);
    fat_rwlock_destroy(&dp->upcall_rwlock);
}

static void
dp_delete_meter(struct dp_netdev *dp, uint32_t meter_id)
    OVS_REQUIRES(dp->meter_locks[meter_id % N_METER_LOCKS])
{
    if (dp->meters[meter_id]) {
        free(dp->meters[meter_id]);
        dp->meters[meter_id] = NULL;
    }
}

/* Requires dp_netdev_mutex so that we can't get a new reference to 'dp'
 * through the 'dp_netdevs' shash while freeing 'dp'. */
static void
dp_netdev_free(struct dp_netdev *dp)
    OVS_REQUIRES(dp_netdev_mutex)
{
    struct dp_netdev_port *port, *next;

    shash_find_and_delete(&dp_netdevs, dp->name);

    ovs_mutex_lock(&dp->port_mutex);
    HMAP_FOR_EACH_SAFE (port, next, node, &dp->ports) {
        do_del_port(dp, port);
    }
    ovs_mutex_unlock(&dp->port_mutex);

    dp_netdev_destroy_all_pmds(dp, true);
    cmap_destroy(&dp->poll_threads);

    ovs_mutex_destroy(&dp->tx_qid_pool_mutex);
    id_pool_destroy(dp->tx_qid_pool);

    ovs_mutex_destroy(&dp->non_pmd_mutex);
    ovsthread_key_delete(dp->per_pmd_key);

    conntrack_destroy(&dp->conntrack);

    seq_destroy(dp->reconfigure_seq);

    seq_destroy(dp->port_seq);
    hmap_destroy(&dp->ports);
    ovs_mutex_destroy(&dp->port_mutex);

    /* Upcalls must be disabled at this point */
    dp_netdev_destroy_upcall_lock(dp);

    int i;

    for (i = 0; i < MAX_METERS; ++i) {
        meter_lock(dp, i);
        dp_delete_meter(dp, i);
        meter_unlock(dp, i);
    }
    for (i = 0; i < N_METER_LOCKS; ++i) {
        ovs_mutex_destroy(&dp->meter_locks[i]);
    }

    free(dp->pmd_cmask);
    free(CONST_CAST(char *, dp->name));
    free(dp);
}

static void
dp_netdev_unref(struct dp_netdev *dp)
{
    if (dp) {
        /* Take dp_netdev_mutex so that, if dp->ref_cnt falls to zero, we can't
         * get a new reference to 'dp' through the 'dp_netdevs' shash. */
        ovs_mutex_lock(&dp_netdev_mutex);
        if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) {
            dp_netdev_free(dp);
        }
        ovs_mutex_unlock(&dp_netdev_mutex);
    }
}

static void
dpif_netdev_close(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    dp_netdev_unref(dp);
    free(dpif);
}

static int
dpif_netdev_destroy(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    if (!atomic_flag_test_and_set(&dp->destroyed)) {
        if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) {
            /* Can't happen: 'dpif' still owns a reference to 'dp'. */
            OVS_NOT_REACHED();
        }
    }

    return 0;
}

/* Add 'n' to the atomic variable 'var' non-atomically and using relaxed
 * load/store semantics.  While the increment is not atomic, the load and
 * store operations are, making it impossible to read inconsistent values.
 *
 * This is used to update thread local stats counters. */
static void
non_atomic_ullong_add(atomic_ullong *var, unsigned long long n)
{
    unsigned long long tmp;

    atomic_read_relaxed(var, &tmp);
    tmp += n;
    atomic_store_relaxed(var, tmp);
}
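
/* Usage sketch (illustrative, not from the original file): a pmd thread is
 * the only writer of its own counters, so it can bump them with this cheap
 * non-atomic add while readers (e.g. pmd-stats-show) still observe
 * consistent individual loads; 'batch_cnt' is a hypothetical count:
 *
 *     non_atomic_ullong_add(&pmd->stats.n[DP_STAT_MASKED_HIT], batch_cnt);
 */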

static int
dpif_netdev_get_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_pmd_thread *pmd;

    stats->n_flows = stats->n_hit = stats->n_missed = stats->n_lost = 0;
    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        unsigned long long n;
        stats->n_flows += cmap_count(&pmd->flow_table);

        atomic_read_relaxed(&pmd->stats.n[DP_STAT_MASKED_HIT], &n);
        stats->n_hit += n;
        atomic_read_relaxed(&pmd->stats.n[DP_STAT_EXACT_HIT], &n);
        stats->n_hit += n;
        atomic_read_relaxed(&pmd->stats.n[DP_STAT_MISS], &n);
        stats->n_missed += n;
        atomic_read_relaxed(&pmd->stats.n[DP_STAT_LOST], &n);
        stats->n_lost += n;
    }
    stats->n_masks = UINT32_MAX;
    stats->n_mask_hit = UINT64_MAX;

    return 0;
}

static void
dp_netdev_reload_pmd__(struct dp_netdev_pmd_thread *pmd)
{
    if (pmd->core_id == NON_PMD_CORE_ID) {
        ovs_mutex_lock(&pmd->dp->non_pmd_mutex);
        ovs_mutex_lock(&pmd->port_mutex);
        pmd_load_cached_ports(pmd);
        ovs_mutex_unlock(&pmd->port_mutex);
        ovs_mutex_unlock(&pmd->dp->non_pmd_mutex);
        return;
    }

    ovs_mutex_lock(&pmd->cond_mutex);
    seq_change(pmd->reload_seq);
    atomic_store_relaxed(&pmd->reload, true);
    ovs_mutex_cond_wait(&pmd->cond, &pmd->cond_mutex);
    ovs_mutex_unlock(&pmd->cond_mutex);
}

static uint32_t
hash_port_no(odp_port_t port_no)
{
    return hash_int(odp_to_u32(port_no), 0);
}

static int
port_create(const char *devname, const char *type,
            odp_port_t port_no, struct dp_netdev_port **portp)
{
    struct netdev_saved_flags *sf;
    struct dp_netdev_port *port;
    enum netdev_flags flags;
    struct netdev *netdev;
    int error;

    *portp = NULL;

    /* Open and validate network device. */
    error = netdev_open(devname, type, &netdev);
    if (error) {
        return error;
    }
    /* XXX reject non-Ethernet devices */

    netdev_get_flags(netdev, &flags);
    if (flags & NETDEV_LOOPBACK) {
        VLOG_ERR("%s: cannot add a loopback device", devname);
        error = EINVAL;
        goto out;
    }

    error = netdev_turn_flags_on(netdev, NETDEV_PROMISC, &sf);
    if (error) {
        VLOG_ERR("%s: cannot set promisc flag", devname);
        goto out;
    }

    port = xzalloc(sizeof *port);
    port->port_no = port_no;
    port->netdev = netdev;
    port->type = xstrdup(type);
    port->sf = sf;
    port->need_reconfigure = true;
    ovs_mutex_init(&port->txq_used_mutex);

    *portp = port;

    return 0;

out:
    netdev_close(netdev);
    return error;
}

static int
do_add_port(struct dp_netdev *dp, const char *devname, const char *type,
            odp_port_t port_no)
    OVS_REQUIRES(dp->port_mutex)
{
    struct dp_netdev_port *port;
    int error;

    /* Reject devices already in 'dp'. */
    if (!get_port_by_name(dp, devname, &port)) {
        return EEXIST;
    }

    error = port_create(devname, type, port_no, &port);
    if (error) {
        return error;
    }

    hmap_insert(&dp->ports, &port->node, hash_port_no(port_no));
    seq_change(dp->port_seq);

    reconfigure_datapath(dp);

    return 0;
}

DDP
1603 return 0;
1604}
1605
247527db
BP
1606static int
1607dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev,
4e022ec0 1608 odp_port_t *port_nop)
247527db
BP
1609{
1610 struct dp_netdev *dp = get_dp_netdev(dpif);
3aa30359
BP
1611 char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
1612 const char *dpif_port;
4e022ec0 1613 odp_port_t port_no;
5279f8fd 1614 int error;
247527db 1615
59e6d833 1616 ovs_mutex_lock(&dp->port_mutex);
3aa30359 1617 dpif_port = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
4e022ec0 1618 if (*port_nop != ODPP_NONE) {
ff073a71
BP
1619 port_no = *port_nop;
1620 error = dp_netdev_lookup_port(dp, *port_nop) ? EBUSY : 0;
232dfa4a 1621 } else {
3aa30359 1622 port_no = choose_port(dp, dpif_port);
5279f8fd 1623 error = port_no == ODPP_NONE ? EFBIG : 0;
232dfa4a 1624 }
5279f8fd 1625 if (!error) {
247527db 1626 *port_nop = port_no;
5279f8fd 1627 error = do_add_port(dp, dpif_port, netdev_get_type(netdev), port_no);
247527db 1628 }
59e6d833 1629 ovs_mutex_unlock(&dp->port_mutex);
5279f8fd
BP
1630
1631 return error;
72865317
BP
1632}
1633
1634static int
4e022ec0 1635dpif_netdev_port_del(struct dpif *dpif, odp_port_t port_no)
72865317
BP
1636{
1637 struct dp_netdev *dp = get_dp_netdev(dpif);
5279f8fd
BP
1638 int error;
1639
59e6d833 1640 ovs_mutex_lock(&dp->port_mutex);
c40b890f
BP
1641 if (port_no == ODPP_LOCAL) {
1642 error = EINVAL;
1643 } else {
1644 struct dp_netdev_port *port;
1645
1646 error = get_port_by_number(dp, port_no, &port);
1647 if (!error) {
1648 do_del_port(dp, port);
1649 }
1650 }
59e6d833 1651 ovs_mutex_unlock(&dp->port_mutex);
5279f8fd
BP
1652
1653 return error;
72865317
BP
1654}
1655
1656static bool
4e022ec0 1657is_valid_port_number(odp_port_t port_no)
72865317 1658{
ff073a71
BP
1659 return port_no != ODPP_NONE;
1660}
1661
1662static struct dp_netdev_port *
1663dp_netdev_lookup_port(const struct dp_netdev *dp, odp_port_t port_no)
e9985d6a 1664 OVS_REQUIRES(dp->port_mutex)
ff073a71
BP
1665{
1666 struct dp_netdev_port *port;
1667
e9985d6a 1668 HMAP_FOR_EACH_WITH_HASH (port, node, hash_port_no(port_no), &dp->ports) {
35303d71 1669 if (port->port_no == port_no) {
ff073a71
BP
1670 return port;
1671 }
1672 }
1673 return NULL;
72865317
BP
1674}
1675
1676static int
1677get_port_by_number(struct dp_netdev *dp,
4e022ec0 1678 odp_port_t port_no, struct dp_netdev_port **portp)
e9985d6a 1679 OVS_REQUIRES(dp->port_mutex)
72865317
BP
1680{
1681 if (!is_valid_port_number(port_no)) {
1682 *portp = NULL;
1683 return EINVAL;
1684 } else {
ff073a71 1685 *portp = dp_netdev_lookup_port(dp, port_no);
0f6a066f 1686 return *portp ? 0 : ENODEV;
72865317
BP
1687 }
1688}
1689
b284085e 1690static void
62453dad 1691port_destroy(struct dp_netdev_port *port)
b284085e 1692{
62453dad
DDP
1693 if (!port) {
1694 return;
b284085e 1695 }
b284085e 1696
62453dad
DDP
1697 netdev_close(port->netdev);
1698 netdev_restore_flags(port->sf);
accf8626 1699
62453dad 1700 for (unsigned i = 0; i < port->n_rxq; i++) {
947dc567 1701 netdev_rxq_close(port->rxqs[i].rx);
b284085e 1702 }
324c8374 1703 ovs_mutex_destroy(&port->txq_used_mutex);
3eb67853 1704 free(port->rxq_affinity_list);
324c8374 1705 free(port->txq_used);
3eb67853 1706 free(port->rxqs);
62453dad
DDP
1707 free(port->type);
1708 free(port);
b284085e
PS
1709}
1710
72865317
BP
1711static int
1712get_port_by_name(struct dp_netdev *dp,
1713 const char *devname, struct dp_netdev_port **portp)
59e6d833 1714 OVS_REQUIRES(dp->port_mutex)
72865317
BP
1715{
1716 struct dp_netdev_port *port;
1717
e9985d6a 1718 HMAP_FOR_EACH (port, node, &dp->ports) {
3efb6063 1719 if (!strcmp(netdev_get_name(port->netdev), devname)) {
72865317
BP
1720 *portp = port;
1721 return 0;
1722 }
1723 }
0f6a066f
DDP
1724
1725 /* Callers of dpif_netdev_port_query_by_name() expect ENODEV for a
1726 * non-existent port. */
1727 return ENODEV;
72865317
BP
1728}
1729
b9584f21 1730/* Returns 'true' if there is a port with pmd netdev. */
65f13b50 1731static bool
b9584f21 1732has_pmd_port(struct dp_netdev *dp)
e9985d6a 1733 OVS_REQUIRES(dp->port_mutex)
65f13b50
AW
1734{
1735 struct dp_netdev_port *port;
1736
e9985d6a 1737 HMAP_FOR_EACH (port, node, &dp->ports) {
5dd57e80 1738 if (netdev_is_pmd(port->netdev)) {
b9584f21 1739 return true;
65f13b50
AW
1740 }
1741 }
1742
1743 return false;
1744}
1745
c40b890f
BP
1746static void
1747do_del_port(struct dp_netdev *dp, struct dp_netdev_port *port)
59e6d833 1748 OVS_REQUIRES(dp->port_mutex)
72865317 1749{
e9985d6a 1750 hmap_remove(&dp->ports, &port->node);
d33ed218 1751 seq_change(dp->port_seq);
d0cca6c3 1752
e32971b8 1753 reconfigure_datapath(dp);
72865317 1754
62453dad 1755 port_destroy(port);
72865317
BP
1756}
1757
1758static void
4c738a8d
BP
1759answer_port_query(const struct dp_netdev_port *port,
1760 struct dpif_port *dpif_port)
72865317 1761{
3efb6063 1762 dpif_port->name = xstrdup(netdev_get_name(port->netdev));
0cbfe35d 1763 dpif_port->type = xstrdup(port->type);
35303d71 1764 dpif_port->port_no = port->port_no;
72865317
BP
1765}
1766
1767static int
4e022ec0 1768dpif_netdev_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
4c738a8d 1769 struct dpif_port *dpif_port)
72865317
BP
1770{
1771 struct dp_netdev *dp = get_dp_netdev(dpif);
1772 struct dp_netdev_port *port;
1773 int error;
1774
e9985d6a 1775 ovs_mutex_lock(&dp->port_mutex);
72865317 1776 error = get_port_by_number(dp, port_no, &port);
4afba28d 1777 if (!error && dpif_port) {
4c738a8d 1778 answer_port_query(port, dpif_port);
72865317 1779 }
e9985d6a 1780 ovs_mutex_unlock(&dp->port_mutex);
5279f8fd 1781
72865317
BP
1782 return error;
1783}
1784
1785static int
1786dpif_netdev_port_query_by_name(const struct dpif *dpif, const char *devname,
4c738a8d 1787 struct dpif_port *dpif_port)
72865317
BP
1788{
1789 struct dp_netdev *dp = get_dp_netdev(dpif);
1790 struct dp_netdev_port *port;
1791 int error;
1792
59e6d833 1793 ovs_mutex_lock(&dp->port_mutex);
72865317 1794 error = get_port_by_name(dp, devname, &port);
4afba28d 1795 if (!error && dpif_port) {
4c738a8d 1796 answer_port_query(port, dpif_port);
72865317 1797 }
59e6d833 1798 ovs_mutex_unlock(&dp->port_mutex);
5279f8fd 1799
72865317
BP
1800 return error;
1801}
1802
61e7deb1
BP
1803static void
1804dp_netdev_flow_free(struct dp_netdev_flow *flow)
1805{
61e7deb1 1806 dp_netdev_actions_free(dp_netdev_flow_get_actions(flow));
61e7deb1
BP
1807 free(flow);
1808}
1809
ed79f89a
DDP
1810static void dp_netdev_flow_unref(struct dp_netdev_flow *flow)
1811{
1812 if (ovs_refcount_unref_relaxed(&flow->ref_cnt) == 1) {
1813 ovsrcu_postpone(dp_netdev_flow_free, flow);
1814 }
1815}
1816
70e5ed6f
JS
1817static uint32_t
1818dp_netdev_flow_hash(const ovs_u128 *ufid)
1819{
1820 return ufid->u32[0];
1821}
1822
3453b4d6
JS
1823static inline struct dpcls *
1824dp_netdev_pmd_lookup_dpcls(struct dp_netdev_pmd_thread *pmd,
1825 odp_port_t in_port)
1826{
1827 struct dpcls *cls;
1828 uint32_t hash = hash_port_no(in_port);
1829 CMAP_FOR_EACH_WITH_HASH (cls, node, hash, &pmd->classifiers) {
1830 if (cls->in_port == in_port) {
1831 /* Port classifier exists already */
1832 return cls;
1833 }
1834 }
1835 return NULL;
1836}
1837
1838static inline struct dpcls *
1839dp_netdev_pmd_find_dpcls(struct dp_netdev_pmd_thread *pmd,
1840 odp_port_t in_port)
1841 OVS_REQUIRES(pmd->flow_mutex)
1842{
1843 struct dpcls *cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port);
1844 uint32_t hash = hash_port_no(in_port);
1845
1846 if (!cls) {
1847 /* Create new classifier for in_port */
1848 cls = xmalloc(sizeof(*cls));
1849 dpcls_init(cls);
1850 cls->in_port = in_port;
1851 cmap_insert(&pmd->classifiers, &cls->node, hash);
1852 VLOG_DBG("Creating dpcls %p for in_port %d", cls, in_port);
1853 }
1854 return cls;
1855}
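/* Usage sketch (illustrative, not part of the original source): the two
 * helpers above form a lookup-or-create pattern keyed on in_port, one dpcls
 * instance per ingress port.  A fast-path lookup resolves the per-port
 * classifier first and only then searches for a rule, e.g.:
 *
 *     struct dpcls_rule *rule = NULL;
 *     struct dpcls *cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port);
 *     if (cls) {
 *         dpcls_lookup(cls, key, &rule, 1, NULL);
 *     }
 */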
1856
72865317 1857static void
1c1e46ed
AW
1858dp_netdev_pmd_remove_flow(struct dp_netdev_pmd_thread *pmd,
1859 struct dp_netdev_flow *flow)
1860 OVS_REQUIRES(pmd->flow_mutex)
72865317 1861{
9f361d6b 1862 struct cmap_node *node = CONST_CAST(struct cmap_node *, &flow->node);
3453b4d6
JS
1863 struct dpcls *cls;
1864 odp_port_t in_port = flow->flow.in_port.odp_port;
2c0ea78f 1865
3453b4d6
JS
1866 cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port);
1867 ovs_assert(cls != NULL);
1868 dpcls_remove(cls, &flow->cr);
1c1e46ed 1869 cmap_remove(&pmd->flow_table, node, dp_netdev_flow_hash(&flow->ufid));
9bbf1c3d 1870 flow->dead = true;
ed79f89a
DDP
1871
1872 dp_netdev_flow_unref(flow);
72865317
BP
1873}
1874
1875static void
1c1e46ed 1876dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd)
72865317 1877{
78c8df12 1878 struct dp_netdev_flow *netdev_flow;
72865317 1879
1c1e46ed
AW
1880 ovs_mutex_lock(&pmd->flow_mutex);
1881 CMAP_FOR_EACH (netdev_flow, node, &pmd->flow_table) {
1882 dp_netdev_pmd_remove_flow(pmd, netdev_flow);
72865317 1883 }
1c1e46ed 1884 ovs_mutex_unlock(&pmd->flow_mutex);
72865317
BP
1885}
1886
1887static int
1888dpif_netdev_flow_flush(struct dpif *dpif)
1889{
1890 struct dp_netdev *dp = get_dp_netdev(dpif);
1c1e46ed
AW
1891 struct dp_netdev_pmd_thread *pmd;
1892
1893 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
1894 dp_netdev_pmd_flow_flush(pmd);
1895 }
5279f8fd 1896
72865317
BP
1897 return 0;
1898}
1899
b0ec0f27 1900struct dp_netdev_port_state {
e9985d6a 1901 struct hmap_position position;
4c738a8d 1902 char *name;
b0ec0f27
BP
1903};
1904
1905static int
1906dpif_netdev_port_dump_start(const struct dpif *dpif OVS_UNUSED, void **statep)
1907{
1908 *statep = xzalloc(sizeof(struct dp_netdev_port_state));
1909 return 0;
1910}
1911
72865317 1912static int
b0ec0f27 1913dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,
4c738a8d 1914 struct dpif_port *dpif_port)
72865317 1915{
b0ec0f27 1916 struct dp_netdev_port_state *state = state_;
72865317 1917 struct dp_netdev *dp = get_dp_netdev(dpif);
e9985d6a 1918 struct hmap_node *node;
ff073a71 1919 int retval;
72865317 1920
e9985d6a
DDP
1921 ovs_mutex_lock(&dp->port_mutex);
1922 node = hmap_at_position(&dp->ports, &state->position);
ff073a71
BP
1923 if (node) {
1924 struct dp_netdev_port *port;
5279f8fd 1925
ff073a71
BP
1926 port = CONTAINER_OF(node, struct dp_netdev_port, node);
1927
1928 free(state->name);
1929 state->name = xstrdup(netdev_get_name(port->netdev));
1930 dpif_port->name = state->name;
1931 dpif_port->type = port->type;
35303d71 1932 dpif_port->port_no = port->port_no;
ff073a71
BP
1933
1934 retval = 0;
1935 } else {
1936 retval = EOF;
72865317 1937 }
e9985d6a 1938 ovs_mutex_unlock(&dp->port_mutex);
5279f8fd 1939
ff073a71 1940 return retval;
b0ec0f27
BP
1941}
1942
1943static int
4c738a8d 1944dpif_netdev_port_dump_done(const struct dpif *dpif OVS_UNUSED, void *state_)
b0ec0f27 1945{
4c738a8d
BP
1946 struct dp_netdev_port_state *state = state_;
1947 free(state->name);
b0ec0f27
BP
1948 free(state);
1949 return 0;
72865317
BP
1950}
1951
1952static int
67a4917b 1953dpif_netdev_port_poll(const struct dpif *dpif_, char **devnamep OVS_UNUSED)
72865317
BP
1954{
1955 struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);
d33ed218 1956 uint64_t new_port_seq;
5279f8fd
BP
1957 int error;
1958
d33ed218
BP
1959 new_port_seq = seq_read(dpif->dp->port_seq);
1960 if (dpif->last_port_seq != new_port_seq) {
1961 dpif->last_port_seq = new_port_seq;
5279f8fd 1962 error = ENOBUFS;
72865317 1963 } else {
5279f8fd 1964 error = EAGAIN;
72865317 1965 }
5279f8fd
BP
1966
1967 return error;
72865317
BP
1968}
1969
1970static void
1971dpif_netdev_port_poll_wait(const struct dpif *dpif_)
1972{
1973 struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);
5279f8fd 1974
d33ed218 1975 seq_wait(dpif->dp->port_seq, dpif->last_port_seq);
8a4e3a85
BP
1976}
1977
1978static struct dp_netdev_flow *
0de8783a 1979dp_netdev_flow_cast(const struct dpcls_rule *cr)
8a4e3a85
BP
1980{
1981 return cr ? CONTAINER_OF(cr, struct dp_netdev_flow, cr) : NULL;
72865317
BP
1982}
1983
9bbf1c3d
DDP
1984static bool dp_netdev_flow_ref(struct dp_netdev_flow *flow)
1985{
1986 return ovs_refcount_try_ref_rcu(&flow->ref_cnt);
1987}
1988
79df317f
DDP
1989/* netdev_flow_key utilities.
1990 *
1991 * netdev_flow_key is basically a miniflow. We use these functions
1992 * (netdev_flow_key_clone, netdev_flow_key_equal, ...) instead of the miniflow
1993 * functions (miniflow_clone_inline, miniflow_equal, ...), because:
1994 *
1995 * - Since we are dealing exclusively with miniflows created by
1996 * miniflow_extract(), if the map is different, the miniflow is different.
1997 * Therefore we can be faster by comparing the map and the miniflow in a
1998 * single memcmp().
5fcff47b 1999 * - These functions can be inlined by the compiler. */
79df317f 2000
361d808d 2001/* Given the number of bits set in miniflow's maps, returns the size of the
caeb4906 2002 * 'netdev_flow_key.mf' */
361d808d
JR
2003static inline size_t
2004netdev_flow_key_size(size_t flow_u64s)
79df317f 2005{
361d808d 2006 return sizeof(struct miniflow) + MINIFLOW_VALUES_SIZE(flow_u64s);
79df317f
DDP
2007}
2008
79df317f
DDP
2009static inline bool
2010netdev_flow_key_equal(const struct netdev_flow_key *a,
0de8783a
JR
2011 const struct netdev_flow_key *b)
2012{
caeb4906
JR
2013 /* 'b->len' may be not set yet. */
2014 return a->hash == b->hash && !memcmp(&a->mf, &b->mf, a->len);
0de8783a
JR
2015}
2016
2017/* Used to compare 'netdev_flow_key' in the exact match cache to a miniflow.
d79a39fe 2018 * The maps are compared bitwise, so both 'key->mf' and 'mf' must have been
0de8783a
JR
2019 * generated by miniflow_extract. */
2020static inline bool
2021netdev_flow_key_equal_mf(const struct netdev_flow_key *key,
2022 const struct miniflow *mf)
79df317f 2023{
caeb4906 2024 return !memcmp(&key->mf, mf, key->len);
79df317f
DDP
2025}
2026
2027static inline void
2028netdev_flow_key_clone(struct netdev_flow_key *dst,
0de8783a
JR
2029 const struct netdev_flow_key *src)
2030{
caeb4906
JR
2031 memcpy(dst, src,
2032 offsetof(struct netdev_flow_key, mf) + src->len);
0de8783a
JR
2033}
2034
0de8783a
JR
2035/* Initialize a netdev_flow_key 'mask' from 'match'. */
2036static inline void
2037netdev_flow_mask_init(struct netdev_flow_key *mask,
2038 const struct match *match)
2039{
09b0fa9c 2040 uint64_t *dst = miniflow_values(&mask->mf);
5fcff47b 2041 struct flowmap fmap;
0de8783a 2042 uint32_t hash = 0;
5fcff47b 2043 size_t idx;
0de8783a
JR
2044
2045 /* Only check masks that make sense for the flow. */
5fcff47b
JR
2046 flow_wc_map(&match->flow, &fmap);
2047 flowmap_init(&mask->mf.map);
0de8783a 2048
5fcff47b
JR
2049 FLOWMAP_FOR_EACH_INDEX(idx, fmap) {
2050 uint64_t mask_u64 = flow_u64_value(&match->wc.masks, idx);
0de8783a 2051
5fcff47b
JR
2052 if (mask_u64) {
2053 flowmap_set(&mask->mf.map, idx, 1);
2054 *dst++ = mask_u64;
2055 hash = hash_add64(hash, mask_u64);
0de8783a 2056 }
0de8783a
JR
2057 }
2058
5fcff47b 2059 map_t map;
0de8783a 2060
5fcff47b
JR
2061 FLOWMAP_FOR_EACH_MAP (map, mask->mf.map) {
2062 hash = hash_add64(hash, map);
2063 }
0de8783a 2064
5fcff47b 2065 size_t n = dst - miniflow_get_values(&mask->mf);
0de8783a 2066
d70e8c28 2067 mask->hash = hash_finish(hash, n * 8);
0de8783a
JR
2068 mask->len = netdev_flow_key_size(n);
2069}
2070
361d808d 2071/* Initializes 'dst' as a copy of 'flow' masked with 'mask'. */
0de8783a
JR
2072static inline void
2073netdev_flow_key_init_masked(struct netdev_flow_key *dst,
2074 const struct flow *flow,
2075 const struct netdev_flow_key *mask)
79df317f 2076{
09b0fa9c
JR
2077 uint64_t *dst_u64 = miniflow_values(&dst->mf);
2078 const uint64_t *mask_u64 = miniflow_get_values(&mask->mf);
0de8783a 2079 uint32_t hash = 0;
d70e8c28 2080 uint64_t value;
0de8783a
JR
2081
2082 dst->len = mask->len;
361d808d 2083 dst->mf = mask->mf; /* Copy maps. */
0de8783a 2084
5fcff47b 2085 FLOW_FOR_EACH_IN_MAPS(value, flow, mask->mf.map) {
d70e8c28
JR
2086 *dst_u64 = value & *mask_u64++;
2087 hash = hash_add64(hash, *dst_u64++);
0de8783a 2088 }
09b0fa9c
JR
2089 dst->hash = hash_finish(hash,
2090 (dst_u64 - miniflow_get_values(&dst->mf)) * 8);
0de8783a
JR
2091}
2092
5fcff47b
JR
2093/* Iterate through netdev_flow_key TNL u64 values specified by 'FLOWMAP'. */
2094#define NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(VALUE, KEY, FLOWMAP) \
2095 MINIFLOW_FOR_EACH_IN_FLOWMAP(VALUE, &(KEY)->mf, FLOWMAP)
0de8783a
JR
2096
2097/* Returns a hash value for the bits of 'key' where there are 1-bits in
2098 * 'mask'. */
2099static inline uint32_t
2100netdev_flow_key_hash_in_mask(const struct netdev_flow_key *key,
2101 const struct netdev_flow_key *mask)
2102{
09b0fa9c 2103 const uint64_t *p = miniflow_get_values(&mask->mf);
0de8783a 2104 uint32_t hash = 0;
5fcff47b 2105 uint64_t value;
0de8783a 2106
5fcff47b
JR
2107 NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, key, mask->mf.map) {
2108 hash = hash_add64(hash, value & *p++);
0de8783a
JR
2109 }
2110
09b0fa9c 2111 return hash_finish(hash, (p - miniflow_get_values(&mask->mf)) * 8);
79df317f
DDP
2112}
2113
9bbf1c3d
DDP
2114static inline bool
2115emc_entry_alive(struct emc_entry *ce)
2116{
2117 return ce->flow && !ce->flow->dead;
2118}
2119
2120static void
2121emc_clear_entry(struct emc_entry *ce)
2122{
2123 if (ce->flow) {
2124 dp_netdev_flow_unref(ce->flow);
2125 ce->flow = NULL;
2126 }
2127}
2128
2129static inline void
2130emc_change_entry(struct emc_entry *ce, struct dp_netdev_flow *flow,
0de8783a 2131 const struct netdev_flow_key *key)
9bbf1c3d
DDP
2132{
2133 if (ce->flow != flow) {
2134 if (ce->flow) {
2135 dp_netdev_flow_unref(ce->flow);
2136 }
2137
2138 if (dp_netdev_flow_ref(flow)) {
2139 ce->flow = flow;
2140 } else {
2141 ce->flow = NULL;
2142 }
2143 }
0de8783a
JR
2144 if (key) {
2145 netdev_flow_key_clone(&ce->key, key);
9bbf1c3d
DDP
2146 }
2147}
2148
2149static inline void
0de8783a 2150emc_insert(struct emc_cache *cache, const struct netdev_flow_key *key,
9bbf1c3d
DDP
2151 struct dp_netdev_flow *flow)
2152{
2153 struct emc_entry *to_be_replaced = NULL;
2154 struct emc_entry *current_entry;
2155
0de8783a
JR
2156 EMC_FOR_EACH_POS_WITH_HASH(cache, current_entry, key->hash) {
2157 if (netdev_flow_key_equal(&current_entry->key, key)) {
9bbf1c3d 2158 /* We found the entry with the 'mf' miniflow */
0de8783a 2159 emc_change_entry(current_entry, flow, NULL);
9bbf1c3d
DDP
2160 return;
2161 }
2162
2163 /* Replacement policy: put the flow in an empty (not alive) entry, or
2164 * in the first entry where it can be */
2165 if (!to_be_replaced
2166 || (emc_entry_alive(to_be_replaced)
2167 && !emc_entry_alive(current_entry))
0de8783a 2168 || current_entry->key.hash < to_be_replaced->key.hash) {
9bbf1c3d
DDP
2169 to_be_replaced = current_entry;
2170 }
2171 }
2172 /* We didn't find the miniflow in the cache.
2173 * The 'to_be_replaced' entry is where the new flow will be stored */
2174
0de8783a 2175 emc_change_entry(to_be_replaced, flow, key);
9bbf1c3d
DDP
2176}
2177
4c30b246
CL
2178static inline void
2179emc_probabilistic_insert(struct dp_netdev_pmd_thread *pmd,
2180 const struct netdev_flow_key *key,
2181 struct dp_netdev_flow *flow)
2182{
2183 /* Insert an entry into the EMC based on the probability value 'min'. By
2184 * default the value is UINT32_MAX / 100, which yields an insertion
2185 * probability of 1/100, i.e. 1%. */
2186
2187 uint32_t min;
2188 atomic_read_relaxed(&pmd->dp->emc_insert_min, &min);
2189
656238ee 2190 if (min && random_uint32() <= min) {
4c30b246
CL
2191 emc_insert(&pmd->flow_cache, key, flow);
2192 }
2193}
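/* Worked example (illustrative sketch, not part of the original source):
 * with the default inverse probability of 100, 'min' is UINT32_MAX / 100,
 * so 'random_uint32() <= min' holds for roughly 1 in every 100 packets.
 * The check reduces to this hypothetical helper: */
static inline bool
emc_should_insert(uint32_t inv_prob)
{
    /* inv_prob == 0 disables EMC insertion entirely. */
    uint32_t min = inv_prob ? UINT32_MAX / inv_prob : 0;

    return min && random_uint32() <= min;
}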
2194
9bbf1c3d 2195static inline struct dp_netdev_flow *
0de8783a 2196emc_lookup(struct emc_cache *cache, const struct netdev_flow_key *key)
9bbf1c3d
DDP
2197{
2198 struct emc_entry *current_entry;
2199
0de8783a
JR
2200 EMC_FOR_EACH_POS_WITH_HASH(cache, current_entry, key->hash) {
2201 if (current_entry->key.hash == key->hash
2202 && emc_entry_alive(current_entry)
2203 && netdev_flow_key_equal_mf(&current_entry->key, &key->mf)) {
9bbf1c3d 2204
0de8783a 2205 /* We found the entry with the 'key->mf' miniflow */
9bbf1c3d
DDP
2206 return current_entry->flow;
2207 }
2208 }
2209
2210 return NULL;
2211}
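/* Usage sketch (illustrative, not part of the original source): together
 * the EMC helpers form the first level of the lookup hierarchy -- consult
 * the exact match cache, fall back to the dpcls, then probabilistically
 * cache the result:
 *
 *     flow = emc_lookup(&pmd->flow_cache, key);
 *     if (!flow) {
 *         flow = dp_netdev_pmd_lookup_flow(pmd, key, NULL);
 *         if (flow) {
 *             emc_probabilistic_insert(pmd, key, flow);
 *         }
 *     }
 */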
2212
72865317 2213static struct dp_netdev_flow *
3453b4d6
JS
2214dp_netdev_pmd_lookup_flow(struct dp_netdev_pmd_thread *pmd,
2215 const struct netdev_flow_key *key,
2216 int *lookup_num_p)
2c0ea78f 2217{
3453b4d6 2218 struct dpcls *cls;
0de8783a 2219 struct dpcls_rule *rule;
3453b4d6
JS
2220 odp_port_t in_port = u32_to_odp(MINIFLOW_GET_U32(&key->mf, in_port));
2221 struct dp_netdev_flow *netdev_flow = NULL;
2c0ea78f 2222
3453b4d6
JS
2223 cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port);
2224 if (OVS_LIKELY(cls)) {
2225 dpcls_lookup(cls, key, &rule, 1, lookup_num_p);
2226 netdev_flow = dp_netdev_flow_cast(rule);
2227 }
8a4e3a85 2228 return netdev_flow;
2c0ea78f
GS
2229}
2230
2231static struct dp_netdev_flow *
1c1e46ed
AW
2232dp_netdev_pmd_find_flow(const struct dp_netdev_pmd_thread *pmd,
2233 const ovs_u128 *ufidp, const struct nlattr *key,
2234 size_t key_len)
72865317 2235{
1763b4b8 2236 struct dp_netdev_flow *netdev_flow;
70e5ed6f
JS
2237 struct flow flow;
2238 ovs_u128 ufid;
2239
2240 /* If a UFID is not provided, determine one based on the key. */
2241 if (!ufidp && key && key_len
f0fb825a 2242 && !dpif_netdev_flow_from_nlattrs(key, key_len, &flow, false)) {
1c1e46ed 2243 dpif_flow_hash(pmd->dp->dpif, &flow, sizeof flow, &ufid);
70e5ed6f
JS
2244 ufidp = &ufid;
2245 }
72865317 2246
70e5ed6f
JS
2247 if (ufidp) {
2248 CMAP_FOR_EACH_WITH_HASH (netdev_flow, node, dp_netdev_flow_hash(ufidp),
1c1e46ed 2249 &pmd->flow_table) {
2ff8484b 2250 if (ovs_u128_equals(netdev_flow->ufid, *ufidp)) {
70e5ed6f
JS
2251 return netdev_flow;
2252 }
72865317
BP
2253 }
2254 }
8a4e3a85 2255
72865317
BP
2256 return NULL;
2257}
2258
2259static void
eb94da30 2260get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow_,
1763b4b8 2261 struct dpif_flow_stats *stats)
feebdea2 2262{
eb94da30
DDP
2263 struct dp_netdev_flow *netdev_flow;
2264 unsigned long long n;
2265 long long used;
2266 uint16_t flags;
2267
2268 netdev_flow = CONST_CAST(struct dp_netdev_flow *, netdev_flow_);
2269
2270 atomic_read_relaxed(&netdev_flow->stats.packet_count, &n);
2271 stats->n_packets = n;
2272 atomic_read_relaxed(&netdev_flow->stats.byte_count, &n);
2273 stats->n_bytes = n;
2274 atomic_read_relaxed(&netdev_flow->stats.used, &used);
2275 stats->used = used;
2276 atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags);
2277 stats->tcp_flags = flags;
72865317
BP
2278}
2279
7af12bd7
JS
2280/* Converts to the dpif_flow format, using 'key_buf' and 'mask_buf' for
2281 * storing the netlink-formatted key/mask. 'key_buf' may be the same as
2282 * 'mask_buf'. Actions will be returned without copying, by relying on RCU to
2283 * protect them. */
6fe09f8c 2284static void
70e5ed6f 2285dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow *netdev_flow,
7af12bd7 2286 struct ofpbuf *key_buf, struct ofpbuf *mask_buf,
64bb477f 2287 struct dpif_flow *flow, bool terse)
6fe09f8c 2288{
64bb477f
JS
2289 if (terse) {
2290 memset(flow, 0, sizeof *flow);
2291 } else {
2292 struct flow_wildcards wc;
2293 struct dp_netdev_actions *actions;
2294 size_t offset;
5262eea1
JG
2295 struct odp_flow_key_parms odp_parms = {
2296 .flow = &netdev_flow->flow,
2297 .mask = &wc.masks,
2494ccd7 2298 .support = dp_netdev_support,
5262eea1 2299 };
64bb477f
JS
2300
2301 miniflow_expand(&netdev_flow->cr.mask->mf, &wc.masks);
f4b835bb
JR
2302 /* in_port is exact-matched, but we have left it out of the mask for
2303 * optimization reasons. Add in_port back to the mask. */
2304 wc.masks.in_port.odp_port = ODPP_NONE;
64bb477f
JS
2305
2306 /* Key */
6fd6ed71 2307 offset = key_buf->size;
64bb477f 2308 flow->key = ofpbuf_tail(key_buf);
5262eea1 2309 odp_flow_key_from_flow(&odp_parms, key_buf);
6fd6ed71 2310 flow->key_len = key_buf->size - offset;
64bb477f
JS
2311
2312 /* Mask */
6fd6ed71 2313 offset = mask_buf->size;
64bb477f 2314 flow->mask = ofpbuf_tail(mask_buf);
ec1f6f32 2315 odp_parms.key_buf = key_buf;
5262eea1 2316 odp_flow_key_from_mask(&odp_parms, mask_buf);
6fd6ed71 2317 flow->mask_len = mask_buf->size - offset;
64bb477f
JS
2318
2319 /* Actions */
2320 actions = dp_netdev_flow_get_actions(netdev_flow);
2321 flow->actions = actions->actions;
2322 flow->actions_len = actions->size;
2323 }
6fe09f8c 2324
70e5ed6f
JS
2325 flow->ufid = netdev_flow->ufid;
2326 flow->ufid_present = true;
1c1e46ed 2327 flow->pmd_id = netdev_flow->pmd_id;
6fe09f8c
JS
2328 get_dpif_flow_stats(netdev_flow, &flow->stats);
2329}
2330
36956a7d 2331static int
8c301900
JR
2332dpif_netdev_mask_from_nlattrs(const struct nlattr *key, uint32_t key_len,
2333 const struct nlattr *mask_key,
2334 uint32_t mask_key_len, const struct flow *flow,
f0fb825a 2335 struct flow_wildcards *wc, bool probe)
8c301900 2336{
ca8d3442
DDP
2337 enum odp_key_fitness fitness;
2338
8d8ab6c2 2339 fitness = odp_flow_key_to_mask(mask_key, mask_key_len, wc, flow);
ca8d3442 2340 if (fitness) {
f0fb825a
EG
2341 if (!probe) {
2342 /* This should not happen: it indicates that
2343 * odp_flow_key_from_mask() and odp_flow_key_to_mask()
2344 * disagree on the acceptable form of a mask. Log the problem
2345 * as an error, with enough details to enable debugging. */
2346 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2347
2348 if (!VLOG_DROP_ERR(&rl)) {
2349 struct ds s;
2350
2351 ds_init(&s);
2352 odp_flow_format(key, key_len, mask_key, mask_key_len, NULL, &s,
2353 true);
2354 VLOG_ERR("internal error parsing flow mask %s (%s)",
2355 ds_cstr(&s), odp_key_fitness_to_string(fitness));
2356 ds_destroy(&s);
2357 }
8c301900 2358 }
ca8d3442
DDP
2359
2360 return EINVAL;
8c301900
JR
2361 }
2362
2363 return 0;
2364}
2365
2366static int
2367dpif_netdev_flow_from_nlattrs(const struct nlattr *key, uint32_t key_len,
f0fb825a 2368 struct flow *flow, bool probe)
36956a7d 2369{
8d8ab6c2 2370 if (odp_flow_key_to_flow(key, key_len, flow)) {
f0fb825a
EG
2371 if (!probe) {
2372 /* This should not happen: it indicates that
2373 * odp_flow_key_from_flow() and odp_flow_key_to_flow() disagree on
2374 * the acceptable form of a flow. Log the problem as an error,
2375 * with enough details to enable debugging. */
2376 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2377
2378 if (!VLOG_DROP_ERR(&rl)) {
2379 struct ds s;
2380
2381 ds_init(&s);
2382 odp_flow_format(key, key_len, NULL, 0, NULL, &s, true);
2383 VLOG_ERR("internal error parsing flow key %s", ds_cstr(&s));
2384 ds_destroy(&s);
2385 }
36956a7d
BP
2386 }
2387
2388 return EINVAL;
2389 }
2390
5cf3edb3 2391 if (flow->ct_state & DP_NETDEV_CS_UNSUPPORTED_MASK) {
07659514
JS
2392 return EINVAL;
2393 }
2394
36956a7d
BP
2395 return 0;
2396}
2397
72865317 2398static int
6fe09f8c 2399dpif_netdev_flow_get(const struct dpif *dpif, const struct dpif_flow_get *get)
72865317
BP
2400{
2401 struct dp_netdev *dp = get_dp_netdev(dpif);
1763b4b8 2402 struct dp_netdev_flow *netdev_flow;
1c1e46ed 2403 struct dp_netdev_pmd_thread *pmd;
c673049c
IM
2404 struct hmapx to_find = HMAPX_INITIALIZER(&to_find);
2405 struct hmapx_node *node;
2406 int error = EINVAL;
2407
2408 if (get->pmd_id == PMD_ID_NULL) {
2409 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
2410 if (dp_netdev_pmd_try_ref(pmd) && !hmapx_add(&to_find, pmd)) {
2411 dp_netdev_pmd_unref(pmd);
2412 }
2413 }
2414 } else {
2415 pmd = dp_netdev_get_pmd(dp, get->pmd_id);
2416 if (!pmd) {
2417 goto out;
2418 }
2419 hmapx_add(&to_find, pmd);
1c1e46ed
AW
2420 }
2421
c673049c
IM
2422 if (!hmapx_count(&to_find)) {
2423 goto out;
72865317 2424 }
1c1e46ed 2425
c673049c
IM
2426 HMAPX_FOR_EACH (node, &to_find) {
2427 pmd = (struct dp_netdev_pmd_thread *) node->data;
2428 netdev_flow = dp_netdev_pmd_find_flow(pmd, get->ufid, get->key,
2429 get->key_len);
2430 if (netdev_flow) {
2431 dp_netdev_flow_to_dpif_flow(netdev_flow, get->buffer, get->buffer,
2432 get->flow, false);
2433 error = 0;
2434 break;
2435 } else {
2436 error = ENOENT;
2437 }
2438 }
bc4a05c6 2439
c673049c
IM
2440 HMAPX_FOR_EACH (node, &to_find) {
2441 pmd = (struct dp_netdev_pmd_thread *) node->data;
2442 dp_netdev_pmd_unref(pmd);
2443 }
2444out:
2445 hmapx_destroy(&to_find);
5279f8fd 2446 return error;
72865317
BP
2447}
2448
0de8783a 2449static struct dp_netdev_flow *
1c1e46ed
AW
2450dp_netdev_flow_add(struct dp_netdev_pmd_thread *pmd,
2451 struct match *match, const ovs_u128 *ufid,
ae2ceebd 2452 const struct nlattr *actions, size_t actions_len)
1c1e46ed 2453 OVS_REQUIRES(pmd->flow_mutex)
72865317 2454{
0de8783a
JR
2455 struct dp_netdev_flow *flow;
2456 struct netdev_flow_key mask;
3453b4d6 2457 struct dpcls *cls;
f4b835bb
JR
2458
2459 /* Make sure in_port is exact matched before we read it. */
2460 ovs_assert(match->wc.masks.in_port.odp_port == ODPP_NONE);
3453b4d6 2461 odp_port_t in_port = match->flow.in_port.odp_port;
ed79f89a 2462
f4b835bb
JR
2463 /* As we select the dpcls based on the port number, each netdev flow
2464 * belonging to the same dpcls will have the same odp_port value.
2465 * For performance reasons we wildcard odp_port here in the mask. In the
2466 * typical case dp_hash is also wildcarded, and the resulting 8-byte
2467 * chunk {dp_hash, in_port} will be ignored by netdev_flow_mask_init() and
2468 * will not be part of the subtable mask.
2469 * This will speed up the hash computation during dpcls_lookup() because
2470 * there is one less call to hash_add64() in this case. */
2471 match->wc.masks.in_port.odp_port = 0;
0de8783a 2472 netdev_flow_mask_init(&mask, match);
f4b835bb
JR
2473 match->wc.masks.in_port.odp_port = ODPP_NONE;
2474
0de8783a 2475 /* Make sure wc does not have metadata. */
5fcff47b
JR
2476 ovs_assert(!FLOWMAP_HAS_FIELD(&mask.mf.map, metadata)
2477 && !FLOWMAP_HAS_FIELD(&mask.mf.map, regs));
679ba04c 2478
0de8783a 2479 /* Do not allocate extra space. */
caeb4906 2480 flow = xmalloc(sizeof *flow - sizeof flow->cr.flow.mf + mask.len);
1c1e46ed 2481 memset(&flow->stats, 0, sizeof flow->stats);
0de8783a 2482 flow->dead = false;
11e5cf1f 2483 flow->batch = NULL;
bd5131ba 2484 *CONST_CAST(unsigned *, &flow->pmd_id) = pmd->core_id;
0de8783a 2485 *CONST_CAST(struct flow *, &flow->flow) = match->flow;
70e5ed6f 2486 *CONST_CAST(ovs_u128 *, &flow->ufid) = *ufid;
0de8783a 2487 ovs_refcount_init(&flow->ref_cnt);
0de8783a 2488 ovsrcu_set(&flow->actions, dp_netdev_actions_create(actions, actions_len));
2c0ea78f 2489
0de8783a 2490 netdev_flow_key_init_masked(&flow->cr.flow, &match->flow, &mask);
3453b4d6 2491
f4b835bb 2492 /* Select dpcls for in_port. Relies on in_port to be exact match. */
3453b4d6
JS
2493 cls = dp_netdev_pmd_find_dpcls(pmd, in_port);
2494 dpcls_insert(cls, &flow->cr, &mask);
72865317 2495
4c75aaab
EJ
2496 cmap_insert(&pmd->flow_table, CONST_CAST(struct cmap_node *, &flow->node),
2497 dp_netdev_flow_hash(&flow->ufid));
2498
beb75a40 2499 if (OVS_UNLIKELY(!VLOG_DROP_DBG((&upcall_rl)))) {
623540e4 2500 struct ds ds = DS_EMPTY_INITIALIZER;
9044f2c1
JG
2501 struct ofpbuf key_buf, mask_buf;
2502 struct odp_flow_key_parms odp_parms = {
2503 .flow = &match->flow,
2504 .mask = &match->wc.masks,
2505 .support = dp_netdev_support,
2506 };
2507
2508 ofpbuf_init(&key_buf, 0);
2509 ofpbuf_init(&mask_buf, 0);
623540e4 2510
9044f2c1
JG
2511 odp_flow_key_from_flow(&odp_parms, &key_buf);
2512 odp_parms.key_buf = &key_buf;
2513 odp_flow_key_from_mask(&odp_parms, &mask_buf);
0de8783a 2514
623540e4 2515 ds_put_cstr(&ds, "flow_add: ");
70e5ed6f
JS
2516 odp_format_ufid(ufid, &ds);
2517 ds_put_cstr(&ds, " ");
9044f2c1
JG
2518 odp_flow_format(key_buf.data, key_buf.size,
2519 mask_buf.data, mask_buf.size,
2520 NULL, &ds, false);
623540e4 2521 ds_put_cstr(&ds, ", actions:");
0722f341 2522 format_odp_actions(&ds, actions, actions_len, NULL);
623540e4 2523
beb75a40 2524 VLOG_DBG("%s", ds_cstr(&ds));
623540e4 2525
9044f2c1
JG
2526 ofpbuf_uninit(&key_buf);
2527 ofpbuf_uninit(&mask_buf);
beb75a40
JS
2528
2529 /* Add a printout of the actual match installed. */
2530 struct match m;
2531 ds_clear(&ds);
2532 ds_put_cstr(&ds, "flow match: ");
2533 miniflow_expand(&flow->cr.flow.mf, &m.flow);
2534 miniflow_expand(&flow->cr.mask->mf, &m.wc.masks);
b2f4b622 2535 memset(&m.tun_md, 0, sizeof m.tun_md);
beb75a40
JS
2536 match_format(&m, NULL, &ds, OFP_DEFAULT_PRIORITY);
2537
2538 VLOG_DBG("%s", ds_cstr(&ds));
2539
623540e4
EJ
2540 ds_destroy(&ds);
2541 }
2542
0de8783a 2543 return flow;
72865317
BP
2544}
2545
72865317 2546static int
f5d317a1
DDP
2547flow_put_on_pmd(struct dp_netdev_pmd_thread *pmd,
2548 struct netdev_flow_key *key,
2549 struct match *match,
2550 ovs_u128 *ufid,
2551 const struct dpif_flow_put *put,
2552 struct dpif_flow_stats *stats)
72865317 2553{
1763b4b8 2554 struct dp_netdev_flow *netdev_flow;
f5d317a1 2555 int error = 0;
72865317 2556
f5d317a1
DDP
2557 if (stats) {
2558 memset(stats, 0, sizeof *stats);
70e5ed6f
JS
2559 }
2560
1c1e46ed 2561 ovs_mutex_lock(&pmd->flow_mutex);
f5d317a1 2562 netdev_flow = dp_netdev_pmd_lookup_flow(pmd, key, NULL);
1763b4b8 2563 if (!netdev_flow) {
89625d1e 2564 if (put->flags & DPIF_FP_CREATE) {
1c1e46ed 2565 if (cmap_count(&pmd->flow_table) < MAX_FLOWS) {
f5d317a1 2566 dp_netdev_flow_add(pmd, match, ufid, put->actions,
70e5ed6f 2567 put->actions_len);
0de8783a 2568 error = 0;
72865317 2569 } else {
5279f8fd 2570 error = EFBIG;
72865317
BP
2571 }
2572 } else {
5279f8fd 2573 error = ENOENT;
72865317
BP
2574 }
2575 } else {
beb75a40 2576 if (put->flags & DPIF_FP_MODIFY) {
8a4e3a85
BP
2577 struct dp_netdev_actions *new_actions;
2578 struct dp_netdev_actions *old_actions;
2579
2580 new_actions = dp_netdev_actions_create(put->actions,
2581 put->actions_len);
2582
61e7deb1
BP
2583 old_actions = dp_netdev_flow_get_actions(netdev_flow);
2584 ovsrcu_set(&netdev_flow->actions, new_actions);
679ba04c 2585
f5d317a1
DDP
2586 if (stats) {
2587 get_dpif_flow_stats(netdev_flow, stats);
a84cb64a
BP
2588 }
2589 if (put->flags & DPIF_FP_ZERO_STATS) {
97447f55
DDP
2590 /* XXX: The userspace datapath uses thread-local statistics
2591 * (for flows), which should be updated only by the owning
2592 * thread. Since we cannot write to the stats memory here,
2593 * we choose not to support this flag. Please note:
2594 * - This feature is currently used only by dpctl commands with
2595 * option --clear.
2596 * - Should the need arise, this operation can be implemented
2597 * by keeping a base value (to be updated here) for each
2598 * counter, and subtracting it before outputting the stats. */
2599 error = EOPNOTSUPP;
72865317 2600 }
8a4e3a85 2601
61e7deb1 2602 ovsrcu_postpone(dp_netdev_actions_free, old_actions);
2c0ea78f 2603 } else if (put->flags & DPIF_FP_CREATE) {
5279f8fd 2604 error = EEXIST;
2c0ea78f
GS
2605 } else {
2606 /* Overlapping flow. */
2607 error = EINVAL;
72865317
BP
2608 }
2609 }
1c1e46ed 2610 ovs_mutex_unlock(&pmd->flow_mutex);
5279f8fd 2611 return error;
72865317
BP
2612}
2613
72865317 2614static int
f5d317a1 2615dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put)
72865317
BP
2616{
2617 struct dp_netdev *dp = get_dp_netdev(dpif);
beb75a40 2618 struct netdev_flow_key key, mask;
1c1e46ed 2619 struct dp_netdev_pmd_thread *pmd;
f5d317a1
DDP
2620 struct match match;
2621 ovs_u128 ufid;
2622 int error;
f0fb825a 2623 bool probe = put->flags & DPIF_FP_PROBE;
72865317 2624
f5d317a1
DDP
2625 if (put->stats) {
2626 memset(put->stats, 0, sizeof *put->stats);
2627 }
f0fb825a
EG
2628 error = dpif_netdev_flow_from_nlattrs(put->key, put->key_len, &match.flow,
2629 probe);
f5d317a1
DDP
2630 if (error) {
2631 return error;
2632 }
2633 error = dpif_netdev_mask_from_nlattrs(put->key, put->key_len,
2634 put->mask, put->mask_len,
f0fb825a 2635 &match.flow, &match.wc, probe);
f5d317a1
DDP
2636 if (error) {
2637 return error;
1c1e46ed
AW
2638 }
2639
f5d317a1
DDP
2640 if (put->ufid) {
2641 ufid = *put->ufid;
2642 } else {
2643 dpif_flow_hash(dpif, &match.flow, sizeof match.flow, &ufid);
2644 }
2645
2646 /* Must produce a netdev_flow_key for lookup.
beb75a40
JS
2647 * Use the same method as employed to create the key when adding
2648 * the flow to the dpcls, to make sure they match. */
2649 netdev_flow_mask_init(&mask, &match);
2650 netdev_flow_key_init_masked(&key, &match.flow, &mask);
f5d317a1
DDP
2651
2652 if (put->pmd_id == PMD_ID_NULL) {
2653 if (cmap_count(&dp->poll_threads) == 0) {
2654 return EINVAL;
2655 }
2656 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
2657 struct dpif_flow_stats pmd_stats;
2658 int pmd_error;
2659
2660 pmd_error = flow_put_on_pmd(pmd, &key, &match, &ufid, put,
2661 &pmd_stats);
2662 if (pmd_error) {
2663 error = pmd_error;
2664 } else if (put->stats) {
2665 put->stats->n_packets += pmd_stats.n_packets;
2666 put->stats->n_bytes += pmd_stats.n_bytes;
2667 put->stats->used = MAX(put->stats->used, pmd_stats.used);
2668 put->stats->tcp_flags |= pmd_stats.tcp_flags;
2669 }
2670 }
2671 } else {
2672 pmd = dp_netdev_get_pmd(dp, put->pmd_id);
2673 if (!pmd) {
2674 return EINVAL;
2675 }
2676 error = flow_put_on_pmd(pmd, &key, &match, &ufid, put, put->stats);
2677 dp_netdev_pmd_unref(pmd);
2678 }
2679
2680 return error;
2681}
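/* Illustrative sketch (not part of the original source): when 'pmd_id' is
 * PMD_ID_NULL the operation above is broadcast to every PMD thread and the
 * per-thread statistics are merged; packet and byte counts add up, 'used'
 * keeps the newest timestamp, and the TCP flags accumulate.  The merge
 * step, factored into a hypothetical helper: */
static void
merge_flow_stats(struct dpif_flow_stats *dst,
                 const struct dpif_flow_stats *src)
{
    dst->n_packets += src->n_packets;
    dst->n_bytes += src->n_bytes;
    dst->used = MAX(dst->used, src->used);
    dst->tcp_flags |= src->tcp_flags;
}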
2682
2683static int
2684flow_del_on_pmd(struct dp_netdev_pmd_thread *pmd,
2685 struct dpif_flow_stats *stats,
2686 const struct dpif_flow_del *del)
2687{
2688 struct dp_netdev_flow *netdev_flow;
2689 int error = 0;
2690
1c1e46ed
AW
2691 ovs_mutex_lock(&pmd->flow_mutex);
2692 netdev_flow = dp_netdev_pmd_find_flow(pmd, del->ufid, del->key,
2693 del->key_len);
1763b4b8 2694 if (netdev_flow) {
f5d317a1
DDP
2695 if (stats) {
2696 get_dpif_flow_stats(netdev_flow, stats);
feebdea2 2697 }
1c1e46ed 2698 dp_netdev_pmd_remove_flow(pmd, netdev_flow);
72865317 2699 } else {
5279f8fd 2700 error = ENOENT;
72865317 2701 }
1c1e46ed 2702 ovs_mutex_unlock(&pmd->flow_mutex);
f5d317a1
DDP
2703
2704 return error;
2705}
2706
2707static int
2708dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del)
2709{
2710 struct dp_netdev *dp = get_dp_netdev(dpif);
2711 struct dp_netdev_pmd_thread *pmd;
2712 int error = 0;
2713
2714 if (del->stats) {
2715 memset(del->stats, 0, sizeof *del->stats);
2716 }
2717
2718 if (del->pmd_id == PMD_ID_NULL) {
2719 if (cmap_count(&dp->poll_threads) == 0) {
2720 return EINVAL;
2721 }
2722 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
2723 struct dpif_flow_stats pmd_stats;
2724 int pmd_error;
2725
2726 pmd_error = flow_del_on_pmd(pmd, &pmd_stats, del);
2727 if (pmd_error) {
2728 error = pmd_error;
2729 } else if (del->stats) {
2730 del->stats->n_packets += pmd_stats.n_packets;
2731 del->stats->n_bytes += pmd_stats.n_bytes;
2732 del->stats->used = MAX(del->stats->used, pmd_stats.used);
2733 del->stats->tcp_flags |= pmd_stats.tcp_flags;
2734 }
2735 }
2736 } else {
2737 pmd = dp_netdev_get_pmd(dp, del->pmd_id);
2738 if (!pmd) {
2739 return EINVAL;
2740 }
2741 error = flow_del_on_pmd(pmd, del->stats, del);
2742 dp_netdev_pmd_unref(pmd);
2743 }
2744
5279f8fd
BP
2745
2746 return error;
72865317
BP
2747}
2748
ac64794a
BP
2749struct dpif_netdev_flow_dump {
2750 struct dpif_flow_dump up;
1c1e46ed
AW
2751 struct cmap_position poll_thread_pos;
2752 struct cmap_position flow_pos;
2753 struct dp_netdev_pmd_thread *cur_pmd;
d2ad7ef1
JS
2754 int status;
2755 struct ovs_mutex mutex;
e723fd32
JS
2756};
2757
ac64794a
BP
2758static struct dpif_netdev_flow_dump *
2759dpif_netdev_flow_dump_cast(struct dpif_flow_dump *dump)
72865317 2760{
ac64794a 2761 return CONTAINER_OF(dump, struct dpif_netdev_flow_dump, up);
e723fd32
JS
2762}
2763
ac64794a 2764static struct dpif_flow_dump *
7e8b7199
PB
2765dpif_netdev_flow_dump_create(const struct dpif *dpif_, bool terse,
2766 char *type OVS_UNUSED)
e723fd32 2767{
ac64794a 2768 struct dpif_netdev_flow_dump *dump;
e723fd32 2769
1c1e46ed 2770 dump = xzalloc(sizeof *dump);
ac64794a 2771 dpif_flow_dump_init(&dump->up, dpif_);
64bb477f 2772 dump->up.terse = terse;
ac64794a
BP
2773 ovs_mutex_init(&dump->mutex);
2774
2775 return &dump->up;
e723fd32
JS
2776}
2777
2778static int
ac64794a 2779dpif_netdev_flow_dump_destroy(struct dpif_flow_dump *dump_)
e723fd32 2780{
ac64794a 2781 struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_);
e723fd32 2782
ac64794a
BP
2783 ovs_mutex_destroy(&dump->mutex);
2784 free(dump);
704a1e09
BP
2785 return 0;
2786}
2787
ac64794a
BP
2788struct dpif_netdev_flow_dump_thread {
2789 struct dpif_flow_dump_thread up;
2790 struct dpif_netdev_flow_dump *dump;
8bb113da
RW
2791 struct odputil_keybuf keybuf[FLOW_DUMP_MAX_BATCH];
2792 struct odputil_keybuf maskbuf[FLOW_DUMP_MAX_BATCH];
ac64794a
BP
2793};
2794
2795static struct dpif_netdev_flow_dump_thread *
2796dpif_netdev_flow_dump_thread_cast(struct dpif_flow_dump_thread *thread)
2797{
2798 return CONTAINER_OF(thread, struct dpif_netdev_flow_dump_thread, up);
2799}
2800
2801static struct dpif_flow_dump_thread *
2802dpif_netdev_flow_dump_thread_create(struct dpif_flow_dump *dump_)
2803{
2804 struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_);
2805 struct dpif_netdev_flow_dump_thread *thread;
2806
2807 thread = xmalloc(sizeof *thread);
2808 dpif_flow_dump_thread_init(&thread->up, &dump->up);
2809 thread->dump = dump;
2810 return &thread->up;
2811}
2812
2813static void
2814dpif_netdev_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_)
2815{
2816 struct dpif_netdev_flow_dump_thread *thread
2817 = dpif_netdev_flow_dump_thread_cast(thread_);
2818
2819 free(thread);
2820}
2821
704a1e09 2822static int
ac64794a 2823dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
8bb113da 2824 struct dpif_flow *flows, int max_flows)
ac64794a
BP
2825{
2826 struct dpif_netdev_flow_dump_thread *thread
2827 = dpif_netdev_flow_dump_thread_cast(thread_);
2828 struct dpif_netdev_flow_dump *dump = thread->dump;
8bb113da 2829 struct dp_netdev_flow *netdev_flows[FLOW_DUMP_MAX_BATCH];
8bb113da
RW
2830 int n_flows = 0;
2831 int i;
14608a15 2832
ac64794a 2833 ovs_mutex_lock(&dump->mutex);
8bb113da 2834 if (!dump->status) {
1c1e46ed
AW
2835 struct dpif_netdev *dpif = dpif_netdev_cast(thread->up.dpif);
2836 struct dp_netdev *dp = get_dp_netdev(&dpif->dpif);
2837 struct dp_netdev_pmd_thread *pmd = dump->cur_pmd;
2838 int flow_limit = MIN(max_flows, FLOW_DUMP_MAX_BATCH);
2839
2840 /* The first call to dump_next() extracts the first pmd thread.
2841 * If there is no pmd thread, returns immediately. */
2842 if (!pmd) {
2843 pmd = dp_netdev_pmd_get_next(dp, &dump->poll_thread_pos);
2844 if (!pmd) {
2845 ovs_mutex_unlock(&dump->mutex);
2846 return n_flows;
8bb113da 2847
8bb113da 2848 }
d2ad7ef1 2849 }
1c1e46ed
AW
2850
2851 do {
2852 for (n_flows = 0; n_flows < flow_limit; n_flows++) {
2853 struct cmap_node *node;
2854
2855 node = cmap_next_position(&pmd->flow_table, &dump->flow_pos);
2856 if (!node) {
2857 break;
2858 }
2859 netdev_flows[n_flows] = CONTAINER_OF(node,
2860 struct dp_netdev_flow,
2861 node);
2862 }
2863 /* When the current pmd thread is finished being dumped, moves
2864 * on to the next one. */
2865 if (n_flows < flow_limit) {
2866 memset(&dump->flow_pos, 0, sizeof dump->flow_pos);
2867 dp_netdev_pmd_unref(pmd);
2868 pmd = dp_netdev_pmd_get_next(dp, &dump->poll_thread_pos);
2869 if (!pmd) {
2870 dump->status = EOF;
2871 break;
2872 }
2873 }
2874 /* Keeps the reference for the next call. */
2875 dump->cur_pmd = pmd;
2876
2877 /* If the current dump is empty, do not exit the loop, since the
2878 * remaining pmds could have flows to be dumped. Just dump again
2879 * on the new 'pmd'. */
2880 } while (!n_flows);
8a4e3a85 2881 }
ac64794a 2882 ovs_mutex_unlock(&dump->mutex);
ac64794a 2883
8bb113da
RW
2884 for (i = 0; i < n_flows; i++) {
2885 struct odputil_keybuf *maskbuf = &thread->maskbuf[i];
2886 struct odputil_keybuf *keybuf = &thread->keybuf[i];
2887 struct dp_netdev_flow *netdev_flow = netdev_flows[i];
2888 struct dpif_flow *f = &flows[i];
7af12bd7 2889 struct ofpbuf key, mask;
8bb113da 2890
7af12bd7
JS
2891 ofpbuf_use_stack(&key, keybuf, sizeof *keybuf);
2892 ofpbuf_use_stack(&mask, maskbuf, sizeof *maskbuf);
64bb477f
JS
2893 dp_netdev_flow_to_dpif_flow(netdev_flow, &key, &mask, f,
2894 dump->up.terse);
8bb113da 2895 }
feebdea2 2896
8bb113da 2897 return n_flows;
72865317
BP
2898}
2899
2900static int
758c456d 2901dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute)
65f13b50 2902 OVS_NO_THREAD_SAFETY_ANALYSIS
72865317
BP
2903{
2904 struct dp_netdev *dp = get_dp_netdev(dpif);
65f13b50 2905 struct dp_netdev_pmd_thread *pmd;
1895cc8d 2906 struct dp_packet_batch pp;
72865317 2907
cf62fa4c
PS
2908 if (dp_packet_size(execute->packet) < ETH_HEADER_LEN ||
2909 dp_packet_size(execute->packet) > UINT16_MAX) {
72865317
BP
2910 return EINVAL;
2911 }
2912
65f13b50
AW
2913 /* Tries to find the 'pmd'. If NULL is returned, that means
2914 * the current thread is a non-pmd thread and should use
b19befae 2915 * dp_netdev_get_pmd(dp, NON_PMD_CORE_ID). */
65f13b50
AW
2916 pmd = ovsthread_getspecific(dp->per_pmd_key);
2917 if (!pmd) {
b19befae 2918 pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID);
546e57d4
DDP
2919 if (!pmd) {
2920 return EBUSY;
2921 }
65f13b50
AW
2922 }
2923
05267613
AZ
2924 if (execute->probe) {
2925 /* If this is part of a probe, Drop the packet, since executing
2926 * the action may actually cause spurious packets be sent into
2927 * the network. */
2928 return 0;
2929 }
2930
65f13b50
AW
2931 /* If the current thread is non-pmd thread, acquires
2932 * the 'non_pmd_mutex'. */
2933 if (pmd->core_id == NON_PMD_CORE_ID) {
2934 ovs_mutex_lock(&dp->non_pmd_mutex);
2935 }
1c1e46ed 2936
36d8de17
DDP
2937 /* The action processing expects the RSS hash to be valid, because
2938 * it's always initialized at the beginning of datapath processing.
2939 * In this case, though, 'execute->packet' may not have gone through
2940 * the datapath at all; it may have been generated by the upper layer
2941 * (OpenFlow packet-out, BFD frame, ...). */
2942 if (!dp_packet_rss_valid(execute->packet)) {
2943 dp_packet_set_rss_hash(execute->packet,
2944 flow_hash_5tuple(execute->flow, 0));
2945 }
2946
72c84bc2 2947 dp_packet_batch_init_packet(&pp, execute->packet);
66e4ad8a
DDP
2948 dp_netdev_execute_actions(pmd, &pp, false, execute->flow,
2949 execute->actions, execute->actions_len,
2950 time_msec());
36d8de17 2951
65f13b50
AW
2952 if (pmd->core_id == NON_PMD_CORE_ID) {
2953 ovs_mutex_unlock(&dp->non_pmd_mutex);
e9985d6a 2954 dp_netdev_pmd_unref(pmd);
65f13b50 2955 }
8a4e3a85 2956
758c456d 2957 return 0;
72865317
BP
2958}
2959
1a0c894a
BP
2960static void
2961dpif_netdev_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops)
2962{
2963 size_t i;
2964
2965 for (i = 0; i < n_ops; i++) {
2966 struct dpif_op *op = ops[i];
2967
2968 switch (op->type) {
2969 case DPIF_OP_FLOW_PUT:
2970 op->error = dpif_netdev_flow_put(dpif, &op->u.flow_put);
2971 break;
2972
2973 case DPIF_OP_FLOW_DEL:
2974 op->error = dpif_netdev_flow_del(dpif, &op->u.flow_del);
2975 break;
2976
2977 case DPIF_OP_EXECUTE:
2978 op->error = dpif_netdev_execute(dpif, &op->u.execute);
2979 break;
6fe09f8c
JS
2980
2981 case DPIF_OP_FLOW_GET:
2982 op->error = dpif_netdev_flow_get(dpif, &op->u.flow_get);
2983 break;
1a0c894a
BP
2984 }
2985 }
2986}
2987
d4f6865c
DDP
2988/* Applies datapath configuration from the database. Some of the changes are
2989 * actually applied in dpif_netdev_run(). */
f2eee189 2990static int
d4f6865c 2991dpif_netdev_set_config(struct dpif *dpif, const struct smap *other_config)
f2eee189
AW
2992{
2993 struct dp_netdev *dp = get_dp_netdev(dpif);
d4f6865c 2994 const char *cmask = smap_get(other_config, "pmd-cpu-mask");
4c30b246
CL
2995 unsigned long long insert_prob =
2996 smap_get_ullong(other_config, "emc-insert-inv-prob",
2997 DEFAULT_EM_FLOW_INSERT_INV_PROB);
2998 uint32_t insert_min, cur_min;
f2eee189 2999
a6a426d6
IM
3000 if (!nullable_string_is_equal(dp->pmd_cmask, cmask)) {
3001 free(dp->pmd_cmask);
3002 dp->pmd_cmask = nullable_xstrdup(cmask);
3003 dp_netdev_request_reconfigure(dp);
f2eee189
AW
3004 }
3005
4c30b246
CL
3006 atomic_read_relaxed(&dp->emc_insert_min, &cur_min);
3007 if (insert_prob <= UINT32_MAX) {
3008 insert_min = insert_prob == 0 ? 0 : UINT32_MAX / insert_prob;
3009 } else {
3010 insert_min = DEFAULT_EM_FLOW_INSERT_MIN;
3011 insert_prob = DEFAULT_EM_FLOW_INSERT_INV_PROB;
3012 }
3013
3014 if (insert_min != cur_min) {
3015 atomic_store_relaxed(&dp->emc_insert_min, insert_min);
3016 if (insert_min == 0) {
3017 VLOG_INFO("EMC has been disabled");
3018 } else {
3019 VLOG_INFO("EMC insertion probability changed to 1/%llu (~%.2f%%)",
3020 insert_prob, (100 / (float)insert_prob));
3021 }
3022 }
3023
f2eee189
AW
3024 return 0;
3025}
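/* Worked example (illustrative, not part of the original source): setting
 * other_config:emc-insert-inv-prob=20 yields
 *
 *     insert_min = UINT32_MAX / 20
 *
 * so each EMC insertion happens with probability ~1/20 (5%); a value of 1
 * inserts on every hit, and 0 disables EMC insertion. */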
3026
3eb67853
IM
3027/* Parses affinity list and returns result in 'core_ids'. */
3028static int
3029parse_affinity_list(const char *affinity_list, unsigned *core_ids, int n_rxq)
3030{
3031 unsigned i;
3032 char *list, *copy, *key, *value;
3033 int error = 0;
3034
3035 for (i = 0; i < n_rxq; i++) {
51c37a56 3036 core_ids[i] = OVS_CORE_UNSPEC;
3eb67853
IM
3037 }
3038
3039 if (!affinity_list) {
3040 return 0;
3041 }
3042
3043 list = copy = xstrdup(affinity_list);
3044
3045 while (ofputil_parse_key_value(&list, &key, &value)) {
3046 int rxq_id, core_id;
3047
3048 if (!str_to_int(key, 0, &rxq_id) || rxq_id < 0
3049 || !str_to_int(value, 0, &core_id) || core_id < 0) {
3050 error = EINVAL;
3051 break;
3052 }
3053
3054 if (rxq_id < n_rxq) {
3055 core_ids[rxq_id] = core_id;
3056 }
3057 }
3058
3059 free(copy);
3060 return error;
3061}
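/* Usage sketch (illustrative, not part of the original source): the
 * affinity list uses the usual OVS key:value syntax, e.g. "0:3,1:7" pins
 * rxq 0 to core 3 and rxq 1 to core 7.  For a hypothetical port with two
 * queues: */
static void
affinity_list_example(void)
{
    unsigned core_ids[2];

    if (!parse_affinity_list("0:3,1:7", core_ids, 2)) {
        /* core_ids[0] == 3 and core_ids[1] == 7; any queue not named in
         * the list would have been left as OVS_CORE_UNSPEC. */
    }
}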
3062
3063/* Parses 'affinity_list' and applies configuration if it is valid. */
3064static int
3065dpif_netdev_port_set_rxq_affinity(struct dp_netdev_port *port,
3066 const char *affinity_list)
3067{
3068 unsigned *core_ids, i;
3069 int error = 0;
3070
3071 core_ids = xmalloc(port->n_rxq * sizeof *core_ids);
3072 if (parse_affinity_list(affinity_list, core_ids, port->n_rxq)) {
3073 error = EINVAL;
3074 goto exit;
3075 }
3076
3077 for (i = 0; i < port->n_rxq; i++) {
3078 port->rxqs[i].core_id = core_ids[i];
3079 }
3080
3081exit:
3082 free(core_ids);
3083 return error;
3084}
3085
3086/* Changes the affinity of port's rx queues. The changes are actually applied
3087 * in dpif_netdev_run(). */
3088static int
3089dpif_netdev_port_set_config(struct dpif *dpif, odp_port_t port_no,
3090 const struct smap *cfg)
3091{
3092 struct dp_netdev *dp = get_dp_netdev(dpif);
3093 struct dp_netdev_port *port;
3094 int error = 0;
3095 const char *affinity_list = smap_get(cfg, "pmd-rxq-affinity");
3096
3097 ovs_mutex_lock(&dp->port_mutex);
3098 error = get_port_by_number(dp, port_no, &port);
3099 if (error || !netdev_is_pmd(port->netdev)
3100 || nullable_string_is_equal(affinity_list, port->rxq_affinity_list)) {
3101 goto unlock;
3102 }
3103
3104 error = dpif_netdev_port_set_rxq_affinity(port, affinity_list);
3105 if (error) {
3106 goto unlock;
3107 }
3108 free(port->rxq_affinity_list);
3109 port->rxq_affinity_list = nullable_xstrdup(affinity_list);
3110
3111 dp_netdev_request_reconfigure(dp);
3112unlock:
3113 ovs_mutex_unlock(&dp->port_mutex);
3114 return error;
3115}
3116
5bf93d67
EJ
3117static int
3118dpif_netdev_queue_to_priority(const struct dpif *dpif OVS_UNUSED,
3119 uint32_t queue_id, uint32_t *priority)
3120{
3121 *priority = queue_id;
3122 return 0;
3123}
3124
72865317 3125\f
9ff55ae2 3126/* Creates and returns a new 'struct dp_netdev_actions', whose actions are
1401f6de 3127 * a copy of the 'size' bytes of the 'actions' input parameter. */
a84cb64a
BP
3128struct dp_netdev_actions *
3129dp_netdev_actions_create(const struct nlattr *actions, size_t size)
3130{
3131 struct dp_netdev_actions *netdev_actions;
3132
9ff55ae2
DDP
3133 netdev_actions = xmalloc(sizeof *netdev_actions + size);
3134 memcpy(netdev_actions->actions, actions, size);
a84cb64a
BP
3135 netdev_actions->size = size;
3136
3137 return netdev_actions;
3138}
3139
a84cb64a 3140struct dp_netdev_actions *
61e7deb1 3141dp_netdev_flow_get_actions(const struct dp_netdev_flow *flow)
a84cb64a 3142{
61e7deb1 3143 return ovsrcu_get(struct dp_netdev_actions *, &flow->actions);
a84cb64a
BP
3144}
3145
61e7deb1
BP
3146static void
3147dp_netdev_actions_free(struct dp_netdev_actions *actions)
a84cb64a 3148{
61e7deb1 3149 free(actions);
a84cb64a
BP
3150}
3151\f
55e3ca97
DDP
3152static inline unsigned long long
3153cycles_counter(void)
3154{
3155#ifdef DPDK_NETDEV
3156 return rte_get_tsc_cycles();
3157#else
3158 return 0;
3159#endif
3160}
3161
3162/* Fake mutex to make sure that the calls to cycles_count_* are balanced */
3163extern struct ovs_mutex cycles_counter_fake_mutex;
3164
3165/* Start counting cycles. Must be followed by 'cycles_count_end()' */
3166static inline void
3167cycles_count_start(struct dp_netdev_pmd_thread *pmd)
3168 OVS_ACQUIRES(&cycles_counter_fake_mutex)
3169 OVS_NO_THREAD_SAFETY_ANALYSIS
3170{
3171 pmd->last_cycles = cycles_counter();
3172}
3173
3174/* Stop counting cycles and add them to the counter 'type' */
3175static inline void
3176cycles_count_end(struct dp_netdev_pmd_thread *pmd,
3177 enum pmd_cycles_counter_type type)
3178 OVS_RELEASES(&cycles_counter_fake_mutex)
3179 OVS_NO_THREAD_SAFETY_ANALYSIS
3180{
3181 unsigned long long interval = cycles_counter() - pmd->last_cycles;
3182
3183 non_atomic_ullong_add(&pmd->cycles.n[type], interval);
3184}
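/* Usage sketch (illustrative, not part of the original source): the pair
 * above is meant to bracket a PMD's work so that every cycle lands in one
 * counter or another:
 *
 *     cycles_count_start(pmd);
 *     ... poll and process rx queues ...
 *     cycles_count_end(pmd, PMD_CYCLES_PROCESSING);
 *
 * The fake mutex acquired by start and released by end lets Clang's
 * thread-safety analysis reject an unbalanced pairing at compile time. */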
e4cfed38 3185
a2ac666d
CL
3186/* Calculate the intermediate cycle result and add to the counter 'type' */
3187static inline void
3188cycles_count_intermediate(struct dp_netdev_pmd_thread *pmd,
c59e759f 3189 struct dp_netdev_rxq *rxq,
a2ac666d
CL
3190 enum pmd_cycles_counter_type type)
3191 OVS_NO_THREAD_SAFETY_ANALYSIS
3192{
3193 unsigned long long new_cycles = cycles_counter();
3194 unsigned long long interval = new_cycles - pmd->last_cycles;
3195 pmd->last_cycles = new_cycles;
3196
3197 non_atomic_ullong_add(&pmd->cycles.n[type], interval);
c59e759f
KT
3198 if (rxq && (type == PMD_CYCLES_PROCESSING)) {
3199 /* Add to the amount of current processing cycles. */
3200 non_atomic_ullong_add(&rxq->cycles[RXQ_CYCLES_PROC_CURR], interval);
3201 }
a2ac666d
CL
3202}
3203
4809891b
KT
3204static void
3205dp_netdev_rxq_set_cycles(struct dp_netdev_rxq *rx,
3206 enum rxq_cycles_counter_type type,
3207 unsigned long long cycles)
3208{
3209 atomic_store_relaxed(&rx->cycles[type], cycles);
3210}
3211
3212static uint64_t
3213dp_netdev_rxq_get_cycles(struct dp_netdev_rxq *rx,
3214 enum rxq_cycles_counter_type type)
3215{
3216 unsigned long long processing_cycles;
3217 atomic_read_relaxed(&rx->cycles[type], &processing_cycles);
3218 return processing_cycles;
3219}
3220
3221static void
3222dp_netdev_rxq_set_intrvl_cycles(struct dp_netdev_rxq *rx,
3223 unsigned long long cycles)
3224{
4ee87ad3
BP
3225 unsigned int idx = rx->intrvl_idx++ % PMD_RXQ_INTERVAL_MAX;
3226 atomic_store_relaxed(&rx->cycles_intrvl[idx], cycles);
4809891b
KT
3227}
3228
655856ef
KT
3229static uint64_t
3230dp_netdev_rxq_get_intrvl_cycles(struct dp_netdev_rxq *rx, unsigned idx)
3231{
3232 unsigned long long processing_cycles;
3233 atomic_read_relaxed(&rx->cycles_intrvl[idx], &processing_cycles);
3234 return processing_cycles;
3235}
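/* Illustrative sketch (not part of the original source): 'cycles_intrvl[]'
 * is written as a ring via 'intrvl_idx++ % PMD_RXQ_INTERVAL_MAX' above, so
 * it always holds the most recent PMD_RXQ_INTERVAL_MAX interval samples.
 * Summing the ring gives a queue's recent processing load: */
static uint64_t
dp_netdev_rxq_sum_intrvl_cycles(struct dp_netdev_rxq *rx)
{
    uint64_t total = 0;

    for (unsigned i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) {
        total += dp_netdev_rxq_get_intrvl_cycles(rx, i);
    }
    return total;
}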
3236
a2ac666d 3237static int
65f13b50 3238dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
947dc567
DDP
3239 struct netdev_rxq *rx,
3240 odp_port_t port_no)
e4cfed38 3241{
1895cc8d
PS
3242 struct dp_packet_batch batch;
3243 int error;
a2ac666d 3244 int batch_cnt = 0;
e4cfed38 3245
1895cc8d 3246 dp_packet_batch_init(&batch);
947dc567 3247 error = netdev_rxq_recv(rx, &batch);
e4cfed38 3248 if (!error) {
3c33f0ff 3249 *recirc_depth_get() = 0;
41ccaa24 3250
a2ac666d 3251 batch_cnt = batch.count;
947dc567 3252 dp_netdev_input(pmd, &batch, port_no);
e4cfed38 3253 } else if (error != EAGAIN && error != EOPNOTSUPP) {
3c33f0ff 3254 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
e4cfed38
PS
3255
3256 VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
947dc567 3257 netdev_rxq_get_name(rx), ovs_strerror(error));
e4cfed38 3258 }
a2ac666d
CL
3259
3260 return batch_cnt;
e4cfed38
PS
3261}
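/* Usage sketch (illustrative, not part of the original source): a PMD main
 * loop drives this receive-and-input step once per polled queue, charging
 * the cycles to the processing counter only when packets were received:
 *
 *     int cnt = dp_netdev_process_rxq_port(pmd, rxq->rx, port_no);
 *     if (cnt) {
 *         cycles_count_intermediate(pmd, rxq, PMD_CYCLES_PROCESSING);
 *     }
 *
 * ('rxq' here stands for a hypothetical polled-queue entry holding a
 * struct dp_netdev_rxq.) */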
3262
e32971b8
DDP
3263static struct tx_port *
3264tx_port_lookup(const struct hmap *hmap, odp_port_t port_no)
3265{
3266 struct tx_port *tx;
3267
3268 HMAP_FOR_EACH_IN_BUCKET (tx, node, hash_port_no(port_no), hmap) {
3269 if (tx->port->port_no == port_no) {
3270 return tx;
3271 }
3272 }
3273
3274 return NULL;
3275}
3276
dc36593c
DDP
3277static int
3278port_reconfigure(struct dp_netdev_port *port)
3279{
3280 struct netdev *netdev = port->netdev;
dc36593c
DDP
3281 int i, err;
3282
e32971b8 3283 port->need_reconfigure = false;
dc36593c
DDP
3284
3285 /* Closes the existing 'rxq's. */
3286 for (i = 0; i < port->n_rxq; i++) {
947dc567
DDP
3287 netdev_rxq_close(port->rxqs[i].rx);
3288 port->rxqs[i].rx = NULL;
dc36593c 3289 }
4809891b 3290 unsigned last_nrxq = port->n_rxq;
dc36593c
DDP
3291 port->n_rxq = 0;
3292
050c60bf 3293 /* Allows 'netdev' to apply the pending configuration changes. */
e32971b8
DDP
3294 if (netdev_is_reconf_required(netdev)) {
3295 err = netdev_reconfigure(netdev);
3296 if (err && (err != EOPNOTSUPP)) {
3297 VLOG_ERR("Failed to apply new configuration to interface %s",
3298 netdev_get_name(netdev));
3299 return err;
3300 }
dc36593c 3301 }
050c60bf 3302 /* If netdev_reconfigure() above succeeded, reopen the 'rxq's. */
3eb67853
IM
3303 port->rxqs = xrealloc(port->rxqs,
3304 sizeof *port->rxqs * netdev_n_rxq(netdev));
324c8374
IM
3305 /* Realloc 'used' counters for tx queues. */
3306 free(port->txq_used);
3307 port->txq_used = xcalloc(netdev_n_txq(netdev), sizeof *port->txq_used);
3308
dc36593c 3309 for (i = 0; i < netdev_n_rxq(netdev); i++) {
38259bd7
BP
3310 bool new_queue = i >= last_nrxq;
3311 if (new_queue) {
3312 memset(&port->rxqs[i], 0, sizeof port->rxqs[i]);
3313 }
3314
947dc567 3315 port->rxqs[i].port = port;
38259bd7
BP
3316
3317 if (new_queue) {
4809891b
KT
3318 dp_netdev_rxq_set_cycles(&port->rxqs[i], RXQ_CYCLES_PROC_CURR, 0);
3319 dp_netdev_rxq_set_cycles(&port->rxqs[i], RXQ_CYCLES_PROC_HIST, 0);
3320 for (unsigned j = 0; j < PMD_RXQ_INTERVAL_MAX; j++) {
3321 dp_netdev_rxq_set_intrvl_cycles(&port->rxqs[i], 0);
3322 }
3323 }
947dc567 3324 err = netdev_rxq_open(netdev, &port->rxqs[i].rx, i);
dc36593c
DDP
3325 if (err) {
3326 return err;
3327 }
3328 port->n_rxq++;
3329 }
3330
3eb67853
IM
3331 /* Parse affinity list to apply configuration for new queues. */
3332 dpif_netdev_port_set_rxq_affinity(port, port->rxq_affinity_list);
3333
dc36593c
DDP
3334 return 0;
3335}
3336
e32971b8
DDP
3337struct rr_numa_list {
3338 struct hmap numas; /* Contains 'struct rr_numa' */
3339};
3340
3341struct rr_numa {
3342 struct hmap_node node;
3343
3344 int numa_id;
3345
3346 /* Non-isolated pmds on numa node 'numa_id'. */
3347 struct dp_netdev_pmd_thread **pmds;
3348 int n_pmds;
3349
3350 int cur_index;
79da1e41 3351 bool idx_inc;
e32971b8
DDP
3352};
3353
3354static struct rr_numa *
3355rr_numa_list_lookup(struct rr_numa_list *rr, int numa_id)
3356{
3357 struct rr_numa *numa;
3358
3359 HMAP_FOR_EACH_WITH_HASH (numa, node, hash_int(numa_id, 0), &rr->numas) {
3360 if (numa->numa_id == numa_id) {
3361 return numa;
3362 }
3363 }
3364
3365 return NULL;
3366}
3367
c37813fd
BM
3368/* Returns the next node in numa list following 'numa' in round-robin fashion.
3369 * Returns first node if 'numa' is a null pointer or the last node in 'rr'.
3370 * Returns NULL if 'rr' numa list is empty. */
3371static struct rr_numa *
3372rr_numa_list_next(struct rr_numa_list *rr, const struct rr_numa *numa)
3373{
3374 struct hmap_node *node = NULL;
3375
3376 if (numa) {
3377 node = hmap_next(&rr->numas, &numa->node);
3378 }
3379 if (!node) {
3380 node = hmap_first(&rr->numas);
3381 }
3382
3383 return (node) ? CONTAINER_OF(node, struct rr_numa, node) : NULL;
3384}
3385
e32971b8
DDP
3386static void
3387rr_numa_list_populate(struct dp_netdev *dp, struct rr_numa_list *rr)
3388{
3389 struct dp_netdev_pmd_thread *pmd;
3390 struct rr_numa *numa;
3391
3392 hmap_init(&rr->numas);
3393
3394 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
3395 if (pmd->core_id == NON_PMD_CORE_ID || pmd->isolated) {
3396 continue;
3397 }
3398
3399 numa = rr_numa_list_lookup(rr, pmd->numa_id);
3400 if (!numa) {
3401 numa = xzalloc(sizeof *numa);
3402 numa->numa_id = pmd->numa_id;
3403 hmap_insert(&rr->numas, &numa->node, hash_int(pmd->numa_id, 0));
3404 }
3405 numa->n_pmds++;
3406 numa->pmds = xrealloc(numa->pmds, numa->n_pmds * sizeof *numa->pmds);
3407 numa->pmds[numa->n_pmds - 1] = pmd;
79da1e41
KT
3408 /* At least one pmd exists, so initialise 'cur_index' and 'idx_inc'. */
3409 numa->cur_index = 0;
3410 numa->idx_inc = true;
e32971b8
DDP
3411 }
3412}
3413
79da1e41
KT
3414/* Returns the next pmd from the numa node in
3415 * incrementing or decrementing order. */
e32971b8
DDP
3416static struct dp_netdev_pmd_thread *
3417rr_numa_get_pmd(struct rr_numa *numa)
3418{
79da1e41
KT
3419 int numa_idx = numa->cur_index;
3420
3421 if (numa->idx_inc == true) {
3422 /* Incrementing through list of pmds. */
3423 if (numa->cur_index == numa->n_pmds-1) {
3424 /* Reached the last pmd. */
3425 numa->idx_inc = false;
3426 } else {
3427 numa->cur_index++;
3428 }
3429 } else {
3430 /* Decrementing through list of pmds. */
3431 if (numa->cur_index == 0) {
3432 /* Reached the first pmd. */
3433 numa->idx_inc = true;
3434 } else {
3435 numa->cur_index--;
3436 }
3437 }
3438 return numa->pmds[numa_idx];
e32971b8
DDP
3439}
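/* Editorial note on the traversal above: with n_pmds == 3 the successive
 * return values are pmds[0], [1], [2], [2], [1], [0], [0], [1], ... -- the
 * walk reverses direction at each end, so after a descending-cycles sort the
 * pmd given the heaviest queue of one pass gets the lightest of the next. */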
3440
3441static void
3442rr_numa_list_destroy(struct rr_numa_list *rr)
3443{
3444 struct rr_numa *numa;
3445
3446 HMAP_FOR_EACH_POP (numa, node, &rr->numas) {
3447 free(numa->pmds);
3448 free(numa);
3449 }
3450 hmap_destroy(&rr->numas);
3451}
3452
655856ef
KT
3453/* Sorts rx queues in descending order of the processing cycles they consumed. */
3454static int
3455rxq_cycle_sort(const void *a, const void *b)
3456{
28080276
KT
3457 struct dp_netdev_rxq *qa;
3458 struct dp_netdev_rxq *qb;
655856ef
KT
3459 uint64_t total_qa, total_qb;
3460 unsigned i;
3461
3462 qa = *(struct dp_netdev_rxq **) a;
3463 qb = *(struct dp_netdev_rxq **) b;
3464
3465 total_qa = total_qb = 0;
3466 for (i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) {
3467 total_qa += dp_netdev_rxq_get_intrvl_cycles(qa, i);
3468 total_qb += dp_netdev_rxq_get_intrvl_cycles(qb, i);
3469 }
3470 dp_netdev_rxq_set_cycles(qa, RXQ_CYCLES_PROC_HIST, total_qa);
3471 dp_netdev_rxq_set_cycles(qb, RXQ_CYCLES_PROC_HIST, total_qb);
3472
3473 if (total_qa >= total_qb) {
3474 return -1;
3475 }
3476 return 1;
3477}
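/* Editorial note: the comparator above is not a pure comparison. It also
 * refreshes RXQ_CYCLES_PROC_HIST for both queues from their interval rings,
 * and equal totals deliberately return -1 rather than 0, so ties keep 'a'
 * first instead of comparing equal. */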
3478
e32971b8
DDP
3479/* Assign pmds to queues. If 'pinned' is true, assign pmds to pinned
3480 * queues and marks the pmds as isolated. Otherwise, assign non isolated
3481 * pmds to unpinned queues.
3482 *
655856ef
KT
3483 * If 'pinned' is false, queues will be sorted by the processing cycles they
3484 * consumed and then assigned to pmds in round-robin order.
3485 *
e32971b8
DDP
3486 * The function doesn't touch the pmd threads; it just stores the assignment
3487 * in the 'pmd' member of each rxq. */
3488static void
3489rxq_scheduling(struct dp_netdev *dp, bool pinned) OVS_REQUIRES(dp->port_mutex)
3490{
3491 struct dp_netdev_port *port;
3492 struct rr_numa_list rr;
c37813fd 3493 struct rr_numa *non_local_numa = NULL;
655856ef
KT
3494 struct dp_netdev_rxq ** rxqs = NULL;
3495 int i, n_rxqs = 0;
3496 struct rr_numa *numa = NULL;
3497 int numa_id;
e32971b8
DDP
3498
3499 HMAP_FOR_EACH (port, node, &dp->ports) {
e32971b8
DDP
3500 if (!netdev_is_pmd(port->netdev)) {
3501 continue;
3502 }
3503
e32971b8
DDP
3504 for (int qid = 0; qid < port->n_rxq; qid++) {
3505 struct dp_netdev_rxq *q = &port->rxqs[qid];
3506
3507 if (pinned && q->core_id != OVS_CORE_UNSPEC) {
3508 struct dp_netdev_pmd_thread *pmd;
3509
3510 pmd = dp_netdev_get_pmd(dp, q->core_id);
3511 if (!pmd) {
3512 VLOG_WARN("There is no PMD thread on core %d. Queue "
3513 "%d on port \'%s\' will not be polled.",
3514 q->core_id, qid, netdev_get_name(port->netdev));
3515 } else {
3516 q->pmd = pmd;
3517 pmd->isolated = true;
3518 dp_netdev_pmd_unref(pmd);
3519 }
3520 } else if (!pinned && q->core_id == OVS_CORE_UNSPEC) {
655856ef
KT
3521 if (n_rxqs == 0) {
3522 rxqs = xmalloc(sizeof *rxqs);
e32971b8 3523 } else {
655856ef 3524 rxqs = xrealloc(rxqs, sizeof *rxqs * (n_rxqs + 1));
e32971b8 3525 }
655856ef
KT
3526 /* Store the queue. */
3527 rxqs[n_rxqs++] = q;
e32971b8
DDP
3528 }
3529 }
3530 }
3531
655856ef
KT
3532 if (n_rxqs > 1) {
3533 /* Sort the queues in order of the processing cycles
3534 * they consumed during their last pmd interval. */
3535 qsort(rxqs, n_rxqs, sizeof *rxqs, rxq_cycle_sort);
3536 }
3537
3538 rr_numa_list_populate(dp, &rr);
3539 /* Assign the sorted queues to pmds in round robin. */
3540 for (i = 0; i < n_rxqs; i++) {
3541 numa_id = netdev_get_numa_id(rxqs[i]->port->netdev);
3542 numa = rr_numa_list_lookup(&rr, numa_id);
3543 if (!numa) {
3544 /* There are no pmds on the queue's local NUMA node.
3545 Round-robin on the NUMA nodes that do have pmds. */
3546 non_local_numa = rr_numa_list_next(&rr, non_local_numa);
3547 if (!non_local_numa) {
3548 VLOG_ERR("There is no available (non-isolated) pmd "
3549 "thread for port \'%s\' queue %d. This queue "
3550 "will not be polled. Is pmd-cpu-mask set to "
3551 "zero? Or are all PMDs isolated to other "
3552 "queues?", netdev_rxq_get_name(rxqs[i]->rx),
3553 netdev_rxq_get_queue_id(rxqs[i]->rx));
3554 continue;
3555 }
3556 rxqs[i]->pmd = rr_numa_get_pmd(non_local_numa);
3557 VLOG_WARN("There's no available (non-isolated) pmd thread "
3558 "on numa node %d. Queue %d on port \'%s\' will "
3559 "be assigned to the pmd on core %d "
3560 "(numa node %d). Expect reduced performance.",
3561 numa_id, netdev_rxq_get_queue_id(rxqs[i]->rx),
3562 netdev_rxq_get_name(rxqs[i]->rx),
3563 rxqs[i]->pmd->core_id, rxqs[i]->pmd->numa_id);
3564 } else {
3565 rxqs[i]->pmd = rr_numa_get_pmd(numa);
3566 VLOG_INFO("Core %d on numa node %d assigned port \'%s\' "
3567 "rx queue %d (measured processing cycles %"PRIu64").",
3568 rxqs[i]->pmd->core_id, numa_id,
3569 netdev_rxq_get_name(rxqs[i]->rx),
3570 netdev_rxq_get_queue_id(rxqs[i]->rx),
3571 dp_netdev_rxq_get_cycles(rxqs[i], RXQ_CYCLES_PROC_HIST));
3572 }
3573 }
3574
e32971b8 3575 rr_numa_list_destroy(&rr);
655856ef 3576 free(rxqs);
e32971b8
DDP
3577}
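/* Worked example (illustrative, not from the original file): one NUMA node
 * with non-isolated pmds {A, B} and four unpinned queues whose measured
 * cycles sort to q3 > q1 > q0 > q2. The round-robin above assigns q3->A,
 * q1->B, q0->B, q2->A, leaving each pmd one heavy and one light queue. */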
3578
140dd699
IM
3579static void
3580reload_affected_pmds(struct dp_netdev *dp)
3581{
3582 struct dp_netdev_pmd_thread *pmd;
3583
3584 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
3585 if (pmd->need_reload) {
3586 dp_netdev_reload_pmd__(pmd);
3587 pmd->need_reload = false;
3588 }
3589 }
3590}
3591
6e3c6fa4
DDP
3592static void
3593reconfigure_pmd_threads(struct dp_netdev *dp)
3594 OVS_REQUIRES(dp->port_mutex)
3595{
e32971b8
DDP
3596 struct dp_netdev_pmd_thread *pmd;
3597 struct ovs_numa_dump *pmd_cores;
140dd699
IM
3598 struct ovs_numa_info_core *core;
3599 struct hmapx to_delete = HMAPX_INITIALIZER(&to_delete);
3600 struct hmapx_node *node;
e32971b8 3601 bool changed = false;
140dd699 3602 bool need_to_adjust_static_tx_qids = false;
e32971b8
DDP
3603
3604 /* The pmd threads should be started only if there's a pmd port in the
3605 * datapath. If the user didn't provide any "pmd-cpu-mask", we start
3606 * NR_PMD_THREADS per numa node. */
3607 if (!has_pmd_port(dp)) {
3608 pmd_cores = ovs_numa_dump_n_cores_per_numa(0);
3609 } else if (dp->pmd_cmask && dp->pmd_cmask[0]) {
3610 pmd_cores = ovs_numa_dump_cores_with_cmask(dp->pmd_cmask);
3611 } else {
3612 pmd_cores = ovs_numa_dump_n_cores_per_numa(NR_PMD_THREADS);
3613 }
3614
140dd699
IM
3615 /* We need to adjust 'static_tx_qid's only if we're reducing the number of
3616 * PMD threads. Otherwise, new threads will allocate all the freed ids. */
3617 if (ovs_numa_dump_count(pmd_cores) < cmap_count(&dp->poll_threads) - 1) {
3618 /* Adjustment is required to keep 'static_tx_qid's sequential and
3619 * avoid possible issues, for example, imbalanced tx queue usage
3620 * and unnecessary locking caused by remapping on netdev level. */
3621 need_to_adjust_static_tx_qids = true;
3622 }
3623
3624 /* Check for unwanted pmd threads */
3625 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
3626 if (pmd->core_id == NON_PMD_CORE_ID) {
3627 continue;
3628 }
3629 if (!ovs_numa_dump_contains_core(pmd_cores, pmd->numa_id,
3630 pmd->core_id)) {
3631 hmapx_add(&to_delete, pmd);
3632 } else if (need_to_adjust_static_tx_qids) {
3633 pmd->need_reload = true;
e32971b8
DDP
3634 }
3635 }
3636
140dd699
IM
3637 HMAPX_FOR_EACH (node, &to_delete) {
3638 pmd = (struct dp_netdev_pmd_thread *) node->data;
3639 VLOG_INFO("PMD thread on numa_id: %d, core id: %2d destroyed.",
3640 pmd->numa_id, pmd->core_id);
3641 dp_netdev_del_pmd(dp, pmd);
3642 }
3643 changed = !hmapx_is_empty(&to_delete);
3644 hmapx_destroy(&to_delete);
e32971b8 3645
140dd699
IM
3646 if (need_to_adjust_static_tx_qids) {
3647 /* 'static_tx_qid's are not sequential now.
3648 * Reload remaining threads to fix this. */
3649 reload_affected_pmds(dp);
3650 }
e32971b8 3651
140dd699
IM
3652 /* Check for required new pmd threads */
3653 FOR_EACH_CORE_ON_DUMP(core, pmd_cores) {
3654 pmd = dp_netdev_get_pmd(dp, core->core_id);
3655 if (!pmd) {
3656 pmd = xzalloc(sizeof *pmd);
e32971b8 3657 dp_netdev_configure_pmd(pmd, dp, core->core_id, core->numa_id);
e32971b8 3658 pmd->thread = ovs_thread_create("pmd", pmd_thread_main, pmd);
140dd699
IM
3659 VLOG_INFO("PMD thread on numa_id: %d, core id: %2d created.",
3660 pmd->numa_id, pmd->core_id);
3661 changed = true;
3662 } else {
3663 dp_netdev_pmd_unref(pmd);
e32971b8 3664 }
140dd699
IM
3665 }
3666
3667 if (changed) {
3668 struct ovs_numa_info_numa *numa;
e32971b8
DDP
3669
3670 /* Log the number of pmd threads per numa node. */
3671 FOR_EACH_NUMA_ON_DUMP (numa, pmd_cores) {
140dd699 3672 VLOG_INFO("There are %"PRIuSIZE" pmd threads on numa node %d",
e32971b8
DDP
3673 numa->n_cores, numa->numa_id);
3674 }
3675 }
3676
3677 ovs_numa_dump_destroy(pmd_cores);
3678}
3679
e32971b8
DDP
3680static void
3681pmd_remove_stale_ports(struct dp_netdev *dp,
3682 struct dp_netdev_pmd_thread *pmd)
3683 OVS_EXCLUDED(pmd->port_mutex)
3684 OVS_REQUIRES(dp->port_mutex)
3685{
3686 struct rxq_poll *poll, *poll_next;
3687 struct tx_port *tx, *tx_next;
3688
3689 ovs_mutex_lock(&pmd->port_mutex);
3690 HMAP_FOR_EACH_SAFE (poll, poll_next, node, &pmd->poll_list) {
3691 struct dp_netdev_port *port = poll->rxq->port;
3692
3693 if (port->need_reconfigure
3694 || !hmap_contains(&dp->ports, &port->node)) {
3695 dp_netdev_del_rxq_from_pmd(pmd, poll);
3696 }
3697 }
3698 HMAP_FOR_EACH_SAFE (tx, tx_next, node, &pmd->tx_ports) {
3699 struct dp_netdev_port *port = tx->port;
3700
3701 if (port->need_reconfigure
3702 || !hmap_contains(&dp->ports, &port->node)) {
3703 dp_netdev_del_port_tx_from_pmd(pmd, tx);
3704 }
3705 }
3706 ovs_mutex_unlock(&pmd->port_mutex);
3707}
3708
3709/* Must be called each time a port is added/removed or the cmask changes.
3710 * This creates and destroys pmd threads, reconfigures ports, opens their
3711 * rxqs and assigns all rxqs/txqs to pmd threads. */
3712static void
3713reconfigure_datapath(struct dp_netdev *dp)
3714 OVS_REQUIRES(dp->port_mutex)
3715{
3716 struct dp_netdev_pmd_thread *pmd;
3717 struct dp_netdev_port *port;
3718 int wanted_txqs;
6e3c6fa4 3719
a6a426d6
IM
3720 dp->last_reconfigure_seq = seq_read(dp->reconfigure_seq);
3721
e32971b8
DDP
3722 /* Step 1: Adjust the pmd threads based on the datapath ports, the cores
3723 * on the system and the user configuration. */
3724 reconfigure_pmd_threads(dp);
6e3c6fa4 3725
e32971b8 3726 wanted_txqs = cmap_count(&dp->poll_threads);
324c8374 3727
e32971b8
DDP
3728 /* The number of pmd threads might have changed, or a port can be new:
3729 * adjust the txqs. */
3730 HMAP_FOR_EACH (port, node, &dp->ports) {
3731 netdev_set_tx_multiq(port->netdev, wanted_txqs);
324c8374
IM
3732 }
3733
e32971b8
DDP
3734 /* Step 2: Remove from the pmd threads ports that have been removed or
3735 * need reconfiguration. */
3736
3737 /* Check for all the ports that need reconfiguration. We cache this in
85a4f238
IM
3738 * 'port->need_reconfigure', because netdev_is_reconf_required() can
3739 * change at any time. */
e32971b8
DDP
3740 HMAP_FOR_EACH (port, node, &dp->ports) {
3741 if (netdev_is_reconf_required(port->netdev)) {
3742 port->need_reconfigure = true;
3743 }
3744 }
3745
3746 /* Remove from the pmd threads all the ports that have been deleted or
3747 * need reconfiguration. */
3748 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
3749 pmd_remove_stale_ports(dp, pmd);
3750 }
3751
3752 /* Reload affected pmd threads. We must wait for the pmd threads before
3753 * reconfiguring the ports, because a port cannot be reconfigured while
3754 * it's being used. */
3755 reload_affected_pmds(dp);
3756
3757 /* Step 3: Reconfigure ports. */
3758
3759 /* We only reconfigure the ports that we determined above, because they're
3760 * not being used by any pmd thread at the moment. If a port fails to
3761 * reconfigure we remove it from the datapath. */
f582b6df
BP
3762 struct dp_netdev_port *next_port;
3763 HMAP_FOR_EACH_SAFE (port, next_port, node, &dp->ports) {
dc36593c 3764 int err;
6e3c6fa4 3765
e32971b8
DDP
3766 if (!port->need_reconfigure) {
3767 continue;
3768 }
3769
dc36593c
DDP
3770 err = port_reconfigure(port);
3771 if (err) {
3772 hmap_remove(&dp->ports, &port->node);
3773 seq_change(dp->port_seq);
3774 port_destroy(port);
324c8374 3775 } else {
e32971b8 3776 port->dynamic_txqs = netdev_n_txq(port->netdev) < wanted_txqs;
6e3c6fa4
DDP
3777 }
3778 }
e32971b8
DDP
3779
3780 /* Step 4: Compute new rxq scheduling. We don't touch the pmd threads
3781 * for now, we just update the 'pmd' pointer in each rxq to point to the
3782 * wanted thread according to the scheduling policy. */
3783
3784 /* Reset all the pmd threads to non isolated. */
3785 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
3786 pmd->isolated = false;
3787 }
3788
3789 /* Reset all the queues to unassigned */
3790 HMAP_FOR_EACH (port, node, &dp->ports) {
3791 for (int i = 0; i < port->n_rxq; i++) {
3792 port->rxqs[i].pmd = NULL;
3793 }
3794 }
3795
3796 /* Add pinned queues and mark pmd threads isolated. */
3797 rxq_scheduling(dp, true);
3798
3799 /* Add non-pinned queues. */
3800 rxq_scheduling(dp, false);
3801
3802 /* Step 5: Remove queues not compliant with new scheduling. */
3803 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
3804 struct rxq_poll *poll, *poll_next;
3805
3806 ovs_mutex_lock(&pmd->port_mutex);
3807 HMAP_FOR_EACH_SAFE (poll, poll_next, node, &pmd->poll_list) {
3808 if (poll->rxq->pmd != pmd) {
3809 dp_netdev_del_rxq_from_pmd(pmd, poll);
3810 }
3811 }
3812 ovs_mutex_unlock(&pmd->port_mutex);
3813 }
3814
3815 /* Reload affected pmd threads. We must wait for the pmd threads to remove
3816 * the old queues before readding them, otherwise a queue can be polled by
3817 * two threads at the same time. */
3818 reload_affected_pmds(dp);
3819
3820 /* Step 6: Add queues from scheduling, if they're not there already. */
3821 HMAP_FOR_EACH (port, node, &dp->ports) {
3822 if (!netdev_is_pmd(port->netdev)) {
3823 continue;
3824 }
3825
3826 for (int qid = 0; qid < port->n_rxq; qid++) {
3827 struct dp_netdev_rxq *q = &port->rxqs[qid];
3828
3829 if (q->pmd) {
3830 ovs_mutex_lock(&q->pmd->port_mutex);
3831 dp_netdev_add_rxq_to_pmd(q->pmd, q);
3832 ovs_mutex_unlock(&q->pmd->port_mutex);
3833 }
3834 }
3835 }
3836
3837 /* Add every port to the tx cache of every pmd thread, if it's not
3838 * there already and if this pmd has at least one rxq to poll. */
3839 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
3840 ovs_mutex_lock(&pmd->port_mutex);
3841 if (hmap_count(&pmd->poll_list) || pmd->core_id == NON_PMD_CORE_ID) {
3842 HMAP_FOR_EACH (port, node, &dp->ports) {
3843 dp_netdev_add_port_tx_to_pmd(pmd, port);
3844 }
3845 }
3846 ovs_mutex_unlock(&pmd->port_mutex);
3847 }
3848
3849 /* Reload affected pmd threads. */
3850 reload_affected_pmds(dp);
6e3c6fa4
DDP
3851}
3852
050c60bf
DDP
3853/* Returns true if one of the netdevs in 'dp' requires a reconfiguration */
3854static bool
3855ports_require_restart(const struct dp_netdev *dp)
3856 OVS_REQUIRES(dp->port_mutex)
3857{
3858 struct dp_netdev_port *port;
3859
3860 HMAP_FOR_EACH (port, node, &dp->ports) {
3861 if (netdev_is_reconf_required(port->netdev)) {
3862 return true;
3863 }
3864 }
3865
3866 return false;
3867}
3868
a36de779
PS
3869/* Returns true if the datapath flows need to be revalidated. */
3870static bool
e4cfed38
PS
3871dpif_netdev_run(struct dpif *dpif)
3872{
3873 struct dp_netdev_port *port;
3874 struct dp_netdev *dp = get_dp_netdev(dpif);
546e57d4 3875 struct dp_netdev_pmd_thread *non_pmd;
a36de779 3876 uint64_t new_tnl_seq;
a2ac666d 3877 int process_packets = 0;
e4cfed38 3878
e9985d6a 3879 ovs_mutex_lock(&dp->port_mutex);
546e57d4
DDP
3880 non_pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID);
3881 if (non_pmd) {
3882 ovs_mutex_lock(&dp->non_pmd_mutex);
a2ac666d 3883 cycles_count_start(non_pmd);
546e57d4
DDP
3884 HMAP_FOR_EACH (port, node, &dp->ports) {
3885 if (!netdev_is_pmd(port->netdev)) {
3886 int i;
55c955bd 3887
546e57d4 3888 for (i = 0; i < port->n_rxq; i++) {
a2ac666d
CL
3889 process_packets =
3890 dp_netdev_process_rxq_port(non_pmd,
3891 port->rxqs[i].rx,
3892 port->port_no);
28080276
KT
3893 cycles_count_intermediate(non_pmd, NULL,
3894 process_packets
3895 ? PMD_CYCLES_PROCESSING
3896 : PMD_CYCLES_IDLE);
546e57d4 3897 }
55c955bd 3898 }
e4cfed38 3899 }
a2ac666d 3900 cycles_count_end(non_pmd, PMD_CYCLES_IDLE);
546e57d4
DDP
3901 dpif_netdev_xps_revalidate_pmd(non_pmd, time_msec(), false);
3902 ovs_mutex_unlock(&dp->non_pmd_mutex);
6e3c6fa4 3903
546e57d4
DDP
3904 dp_netdev_pmd_unref(non_pmd);
3905 }
1c1e46ed 3906
a6a426d6 3907 if (dp_netdev_is_reconf_required(dp) || ports_require_restart(dp)) {
e32971b8 3908 reconfigure_datapath(dp);
6e3c6fa4
DDP
3909 }
3910 ovs_mutex_unlock(&dp->port_mutex);
3911
53902038 3912 tnl_neigh_cache_run();
7f9b8504 3913 tnl_port_map_run();
a36de779
PS
3914 new_tnl_seq = seq_read(tnl_conf_seq);
3915
3916 if (dp->last_tnl_conf_seq != new_tnl_seq) {
3917 dp->last_tnl_conf_seq = new_tnl_seq;
3918 return true;
3919 }
3920 return false;
e4cfed38
PS
3921}
3922
3923static void
3924dpif_netdev_wait(struct dpif *dpif)
3925{
3926 struct dp_netdev_port *port;
3927 struct dp_netdev *dp = get_dp_netdev(dpif);
3928
59e6d833 3929 ovs_mutex_lock(&dp_netdev_mutex);
e9985d6a
DDP
3930 ovs_mutex_lock(&dp->port_mutex);
3931 HMAP_FOR_EACH (port, node, &dp->ports) {
050c60bf 3932 netdev_wait_reconf_required(port->netdev);
55c955bd
PS
3933 if (!netdev_is_pmd(port->netdev)) {
3934 int i;
3935
490e82af 3936 for (i = 0; i < port->n_rxq; i++) {
947dc567 3937 netdev_rxq_wait(port->rxqs[i].rx);
55c955bd 3938 }
e4cfed38
PS
3939 }
3940 }
e9985d6a 3941 ovs_mutex_unlock(&dp->port_mutex);
59e6d833 3942 ovs_mutex_unlock(&dp_netdev_mutex);
a36de779 3943 seq_wait(tnl_conf_seq, dp->last_tnl_conf_seq);
e4cfed38
PS
3944}
3945
d0cca6c3
DDP
3946static void
3947pmd_free_cached_ports(struct dp_netdev_pmd_thread *pmd)
3948{
3949 struct tx_port *tx_port_cached;
3950
324c8374
IM
3951 /* Free all used tx queue ids. */
3952 dpif_netdev_xps_revalidate_pmd(pmd, 0, true);
3953
57eebbb4
DDP
3954 HMAP_FOR_EACH_POP (tx_port_cached, node, &pmd->tnl_port_cache) {
3955 free(tx_port_cached);
3956 }
3957 HMAP_FOR_EACH_POP (tx_port_cached, node, &pmd->send_port_cache) {
d0cca6c3
DDP
3958 free(tx_port_cached);
3959 }
3960}
3961
3962/* Copies ports from 'pmd->tx_ports' (shared with the main thread) to
899363ed
BB
3963 * thread-local copies: to 'pmd->tnl_port_cache' if the port is a tunnel
3964 * device, and to 'pmd->send_port_cache' if it has at least one txq
3965 * (a tunnel device with txqs ends up in both caches). */
d0cca6c3
DDP
3966static void
3967pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd)
3968 OVS_REQUIRES(pmd->port_mutex)
3969{
3970 struct tx_port *tx_port, *tx_port_cached;
3971
3972 pmd_free_cached_ports(pmd);
57eebbb4
DDP
3973 hmap_shrink(&pmd->send_port_cache);
3974 hmap_shrink(&pmd->tnl_port_cache);
d0cca6c3
DDP
3975
3976 HMAP_FOR_EACH (tx_port, node, &pmd->tx_ports) {
57eebbb4
DDP
3977 if (netdev_has_tunnel_push_pop(tx_port->port->netdev)) {
3978 tx_port_cached = xmemdup(tx_port, sizeof *tx_port_cached);
3979 hmap_insert(&pmd->tnl_port_cache, &tx_port_cached->node,
3980 hash_port_no(tx_port_cached->port->port_no));
3981 }
3982
3983 if (netdev_n_txq(tx_port->port->netdev)) {
3984 tx_port_cached = xmemdup(tx_port, sizeof *tx_port_cached);
3985 hmap_insert(&pmd->send_port_cache, &tx_port_cached->node,
3986 hash_port_no(tx_port_cached->port->port_no));
3987 }
d0cca6c3
DDP
3988 }
3989}
3990
140dd699
IM
3991static void
3992pmd_alloc_static_tx_qid(struct dp_netdev_pmd_thread *pmd)
3993{
3994 ovs_mutex_lock(&pmd->dp->tx_qid_pool_mutex);
3995 if (!id_pool_alloc_id(pmd->dp->tx_qid_pool, &pmd->static_tx_qid)) {
3996 VLOG_ABORT("static_tx_qid allocation failed for PMD on core %2d"
3997 ", numa_id %d.", pmd->core_id, pmd->numa_id);
3998 }
3999 ovs_mutex_unlock(&pmd->dp->tx_qid_pool_mutex);
4000
4001 VLOG_DBG("static_tx_qid = %d allocated for PMD thread on core %2d"
4002 ", numa_id %d.", pmd->static_tx_qid, pmd->core_id, pmd->numa_id);
4003}
4004
4005static void
4006pmd_free_static_tx_qid(struct dp_netdev_pmd_thread *pmd)
4007{
4008 ovs_mutex_lock(&pmd->dp->tx_qid_pool_mutex);
4009 id_pool_free_id(pmd->dp->tx_qid_pool, pmd->static_tx_qid);
4010 ovs_mutex_unlock(&pmd->dp->tx_qid_pool_mutex);
4011}
4012
e4cfed38 4013static int
d0cca6c3 4014pmd_load_queues_and_ports(struct dp_netdev_pmd_thread *pmd,
947dc567 4015 struct polled_queue **ppoll_list)
e4cfed38 4016{
947dc567 4017 struct polled_queue *poll_list = *ppoll_list;
ae7ad0a1
IM
4018 struct rxq_poll *poll;
4019 int i;
e4cfed38 4020
d0cca6c3 4021 ovs_mutex_lock(&pmd->port_mutex);
947dc567
DDP
4022 poll_list = xrealloc(poll_list, hmap_count(&pmd->poll_list)
4023 * sizeof *poll_list);
a1fdee13 4024
ae7ad0a1 4025 i = 0;
947dc567 4026 HMAP_FOR_EACH (poll, node, &pmd->poll_list) {
922b28d4 4027 poll_list[i].rxq = poll->rxq;
947dc567
DDP
4028 poll_list[i].port_no = poll->rxq->port->port_no;
4029 i++;
e4cfed38 4030 }
d0cca6c3
DDP
4031
4032 pmd_load_cached_ports(pmd);
4033
4034 ovs_mutex_unlock(&pmd->port_mutex);
e4cfed38 4035
e4cfed38 4036 *ppoll_list = poll_list;
d42f9307 4037 return i;
e4cfed38
PS
4038}
4039
6c3eee82 4040static void *
e4cfed38 4041pmd_thread_main(void *f_)
6c3eee82 4042{
65f13b50 4043 struct dp_netdev_pmd_thread *pmd = f_;
e4cfed38 4044 unsigned int lc = 0;
947dc567 4045 struct polled_queue *poll_list;
d42f9307 4046 bool exiting;
e4cfed38
PS
4047 int poll_cnt;
4048 int i;
a2ac666d 4049 int process_packets = 0;
6c3eee82 4050
e4cfed38
PS
4051 poll_list = NULL;
4052
65f13b50
AW
4053 /* Stores the pmd thread's 'pmd' to 'per_pmd_key'. */
4054 ovsthread_setspecific(pmd->dp->per_pmd_key, pmd);
6930c7e0
DDP
4055 ovs_numa_thread_setaffinity_core(pmd->core_id);
4056 dpdk_set_lcore_id(pmd->core_id);
d0cca6c3 4057 poll_cnt = pmd_load_queues_and_ports(pmd, &poll_list);
e215018b 4058 emc_cache_init(&pmd->flow_cache);
e4cfed38 4059reload:
140dd699 4060 pmd_alloc_static_tx_qid(pmd);
ae7ad0a1 4061
7dd671f0
MK
4062 /* List port/core affinity */
4063 for (i = 0; i < poll_cnt; i++) {
ce179f11 4064 VLOG_DBG("Core %d processing port \'%s\' with queue-id %d\n",
922b28d4
KT
4065 pmd->core_id, netdev_rxq_get_name(poll_list[i].rxq->rx),
4066 netdev_rxq_get_queue_id(poll_list[i].rxq->rx));
7dd671f0
MK
4067 }
4068
2788a1b1
DDP
4069 if (!poll_cnt) {
4070 while (seq_read(pmd->reload_seq) == pmd->last_reload_seq) {
4071 seq_wait(pmd->reload_seq, pmd->last_reload_seq);
4072 poll_block();
4073 }
4074 lc = UINT_MAX;
4075 }
4076
a2ac666d 4077 cycles_count_start(pmd);
e4cfed38 4078 for (;;) {
e4cfed38 4079 for (i = 0; i < poll_cnt; i++) {
a2ac666d 4080 process_packets =
922b28d4 4081 dp_netdev_process_rxq_port(pmd, poll_list[i].rxq->rx,
a2ac666d 4082 poll_list[i].port_no);
4809891b 4083 cycles_count_intermediate(pmd, poll_list[i].rxq,
a2ac666d
CL
4084 process_packets ? PMD_CYCLES_PROCESSING
4085 : PMD_CYCLES_IDLE);
e4cfed38
PS
4086 }
4087
4088 if (lc++ > 1024) {
14e3e12a 4089 bool reload;
6c3eee82 4090
e4cfed38 4091 lc = 0;
84067a4c 4092
fbe0962b 4093 coverage_try_clear();
4809891b 4094 dp_netdev_pmd_try_optimize(pmd, poll_list, poll_cnt);
9dede5cf
FL
4095 if (!ovsrcu_try_quiesce()) {
4096 emc_cache_slow_sweep(&pmd->flow_cache);
4097 }
84067a4c 4098
14e3e12a
DDP
4099 atomic_read_relaxed(&pmd->reload, &reload);
4100 if (reload) {
6c3eee82
BP
4101 break;
4102 }
4103 }
e4cfed38 4104 }
6c3eee82 4105
a2ac666d
CL
4106 cycles_count_end(pmd, PMD_CYCLES_IDLE);
4107
d0cca6c3 4108 poll_cnt = pmd_load_queues_and_ports(pmd, &poll_list);
d42f9307
DDP
4109 exiting = latch_is_set(&pmd->exit_latch);
4110 /* Signal here to make sure the pmd finishes
4111 * reloading the updated configuration. */
4112 dp_netdev_pmd_reload_done(pmd);
4113
140dd699 4114 pmd_free_static_tx_qid(pmd);
9bbf1c3d 4115
d42f9307 4116 if (!exiting) {
e4cfed38
PS
4117 goto reload;
4118 }
6c3eee82 4119
e215018b 4120 emc_cache_uninit(&pmd->flow_cache);
e4cfed38 4121 free(poll_list);
d0cca6c3 4122 pmd_free_cached_ports(pmd);
6c3eee82
BP
4123 return NULL;
4124}
4125
6b31e073
RW
4126static void
4127dp_netdev_disable_upcall(struct dp_netdev *dp)
4128 OVS_ACQUIRES(dp->upcall_rwlock)
4129{
4130 fat_rwlock_wrlock(&dp->upcall_rwlock);
4131}
4132
5dddf960
JR
4133\f
4134/* Meters */
4135static void
4136dpif_netdev_meter_get_features(const struct dpif * dpif OVS_UNUSED,
4137 struct ofputil_meter_features *features)
4138{
4b27db64
JR
4139 features->max_meters = MAX_METERS;
4140 features->band_types = DP_SUPPORTED_METER_BAND_TYPES;
4141 features->capabilities = DP_SUPPORTED_METER_FLAGS_MASK;
4142 features->max_bands = MAX_BANDS;
5dddf960
JR
4143 features->max_color = 0;
4144}
4145
4b27db64
JR
4146/* Applies the meter identified by 'meter_id' to 'packets_', dropping packets that exceed a band. */
4147static void
4148dp_netdev_run_meter(struct dp_netdev *dp, struct dp_packet_batch *packets_,
4149 uint32_t meter_id, long long int now)
4150{
4151 struct dp_meter *meter;
4152 struct dp_meter_band *band;
79c81260 4153 struct dp_packet *packet;
4b27db64
JR
4154 long long int long_delta_t; /* msec */
4155 uint32_t delta_t; /* msec */
4156 int i;
79c81260 4157 const size_t cnt = dp_packet_batch_size(packets_);
4b27db64
JR
4158 uint32_t bytes, volume;
4159 int exceeded_band[NETDEV_MAX_BURST];
4160 uint32_t exceeded_rate[NETDEV_MAX_BURST];
4161 int exceeded_pkt = cnt; /* First packet that exceeded a band rate. */
4162
4163 if (meter_id >= MAX_METERS) {
4164 return;
4165 }
4166
4167 meter_lock(dp, meter_id);
4168 meter = dp->meters[meter_id];
4169 if (!meter) {
4170 goto out;
4171 }
4172
4173 /* Initialize as negative values. */
4174 memset(exceeded_band, 0xff, cnt * sizeof *exceeded_band);
4175 /* Initialize as zeroes. */
4176 memset(exceeded_rate, 0, cnt * sizeof *exceeded_rate);
4177
4178 /* All packets will hit the meter at the same time. */
4179 long_delta_t = (now - meter->used); /* msec */
4180
4181 /* Make sure delta_t will not be too large, so that bucket will not
4182 * wrap around below. */
4183 delta_t = (long_delta_t > (long long int)meter->max_delta_t)
4184 ? meter->max_delta_t : (uint32_t)long_delta_t;
4185
4186 /* Update meter stats. */
4187 meter->used = now;
4188 meter->packet_count += cnt;
4189 bytes = 0;
79c81260
BB
4190 DP_PACKET_BATCH_FOR_EACH (packet, packets_) {
4191 bytes += dp_packet_size(packet);
4b27db64
JR
4192 }
4193 meter->byte_count += bytes;
4194
4195 /* Meters can operate in terms of packets per second or kilobits per
4196 * second. */
4197 if (meter->flags & OFPMF13_PKTPS) {
4198 /* Rate in packets/second, bucket 1/1000 packets. */
4199 /* msec * packets/sec = 1/1000 packets. */
4200 volume = cnt * 1000; /* Take 'cnt' packets from the bucket. */
4201 } else {
4202 /* Rate in kbps, bucket in bits. */
4203 /* msec * kbps = bits */
4204 volume = bytes * 8;
4205 }
4206
4207 /* Update all bands and find the one hit with the highest rate for each
4208 * packet (if any). */
4209 for (int m = 0; m < meter->n_bands; ++m) {
4210 band = &meter->bands[m];
4211
4212 /* Update band's bucket. */
4213 band->bucket += delta_t * band->up.rate;
4214 if (band->bucket > band->up.burst_size) {
4215 band->bucket = band->up.burst_size;
4216 }
4217
4218 /* Drain the bucket for all the packets, if possible. */
4219 if (band->bucket >= volume) {
4220 band->bucket -= volume;
4221 } else {
4222 int band_exceeded_pkt;
4223
4224 /* Band limit hit, must process packet-by-packet. */
4225 if (meter->flags & OFPMF13_PKTPS) {
4226 band_exceeded_pkt = band->bucket / 1000;
4227 band->bucket %= 1000; /* Remainder stays in bucket. */
4228
4229 /* Update the exceeding band for each exceeding packet.
4230 * (Only one band will be fired by a packet, and that
4231 * can be different for each packet.) */
4232 for (i = band_exceeded_pkt; i < cnt; i++) {
4233 if (band->up.rate > exceeded_rate[i]) {
4234 exceeded_rate[i] = band->up.rate;
4235 exceeded_band[i] = m;
4236 }
4237 }
4238 } else {
4239 /* Packet sizes differ, must process one-by-one. */
4240 band_exceeded_pkt = cnt;
79c81260
BB
4241 DP_PACKET_BATCH_FOR_EACH (packet, packets_) {
4242 uint32_t bits = dp_packet_size(packet) * 8;
4b27db64
JR
4243
4244 if (band->bucket >= bits) {
4245 band->bucket -= bits;
4246 } else {
4247 if (i < band_exceeded_pkt) {
4248 band_exceeded_pkt = i;
4249 }
4250 /* Update the exceeding band for the exceeding packet.
4251 * (Only one band will be fired by a packet, and that
4252 * can be different for each packet.) */
4253 if (band->up.rate > exceeded_rate[i]) {
4254 exceeded_rate[i] = band->up.rate;
4255 exceeded_band[i] = m;
4256 }
4257 }
4258 }
4259 }
4260 /* Remember the first exceeding packet. */
4261 if (exceeded_pkt > band_exceeded_pkt) {
4262 exceeded_pkt = band_exceeded_pkt;
4263 }
4264 }
4265 }
4266
4267 /* Fire the highest rate band exceeded by each packet.
4268 * Drop packets as needed: accepted packets are refilled into the batch,
4269 * dropped ones are deleted. */
4b27db64 4270 size_t j;
79c81260 4271 DP_PACKET_BATCH_REFILL_FOR_EACH (j, cnt, packet, packets_) {
4b27db64
JR
4272 if (exceeded_band[j] >= 0) {
4273 /* Meter drop packet. */
4274 band = &meter->bands[exceeded_band[j]];
4275 band->packet_count += 1;
4276 band->byte_count += dp_packet_size(packet);
4277
4278 dp_packet_delete(packet);
4279 } else {
4280 /* Meter accepts packet. */
4281 dp_packet_batch_refill(packets_, packet, j);
4282 }
4283 }
4284 out:
4285 meter_unlock(dp, meter_id);
4286}
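/* Worked example of the kbps bookkeeping above (illustrative numbers): for a
 * band with up.rate = 1000 (kbps) and delta_t = 10 (msec), the bucket gains
 * 10 * 1000 = 10000 bits. A batch of two 750-byte packets needs
 * volume = 2 * 750 * 8 = 12000 bits, so with an otherwise empty bucket the
 * band is exceeded and the per-packet loop decides which packets to drop. */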
4287
4288/* Meter set/get/del processing is still single-threaded. */
5dddf960 4289static int
4b27db64
JR
4290dpif_netdev_meter_set(struct dpif *dpif, ofproto_meter_id *meter_id,
4291 struct ofputil_meter_config *config)
5dddf960 4292{
4b27db64
JR
4293 struct dp_netdev *dp = get_dp_netdev(dpif);
4294 uint32_t mid = meter_id->uint32;
4295 struct dp_meter *meter;
4296 int i;
4297
4b27db64
JR
4298 if (mid >= MAX_METERS) {
4299 return EFBIG; /* Meter_id out of range. */
4300 }
4301
4302 if (config->flags & ~DP_SUPPORTED_METER_FLAGS_MASK ||
4303 !(config->flags & (OFPMF13_KBPS | OFPMF13_PKTPS))) {
4304 return EBADF; /* Unsupported flags set */
4305 }
2029ce9a 4306
4b27db64
JR
4307 /* Validate bands */
4308 if (config->n_bands == 0 || config->n_bands > MAX_BANDS) {
4309 return EINVAL; /* Must have between 1 and MAX_BANDS bands. */
4310 }
2029ce9a
AVA
4311
4312 /* Validate rates */
4313 for (i = 0; i < config->n_bands; i++) {
4314 if (config->bands[i].rate == 0) {
66a396d4 4315 return EDOM; /* rate must be non-zero */
2029ce9a
AVA
4316 }
4317 }
4318
4b27db64
JR
4319 for (i = 0; i < config->n_bands; ++i) {
4320 switch (config->bands[i].type) {
4321 case OFPMBT13_DROP:
4322 break;
4323 default:
4324 return ENODEV; /* Unsupported band type */
4325 }
4326 }
4327
4328 /* Allocate meter */
4329 meter = xzalloc(sizeof *meter
4330 + config->n_bands * sizeof(struct dp_meter_band));
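    /* Editorial note: OVS's xzalloc() aborts on allocation failure, so
     * 'meter' is never NULL here and the ENOMEM fallthrough below is
     * effectively unreachable. */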
4331 if (meter) {
4332 meter->flags = config->flags;
4333 meter->n_bands = config->n_bands;
4334 meter->max_delta_t = 0;
4335 meter->used = time_msec();
4336
4337 /* set up bands */
4338 for (i = 0; i < config->n_bands; ++i) {
4339 uint32_t band_max_delta_t;
4340
4341 /* Set burst size to a workable value if none specified. */
4342 if (config->bands[i].burst_size == 0) {
4343 config->bands[i].burst_size = config->bands[i].rate;
4344 }
4345
4346 meter->bands[i].up = config->bands[i];
4347 /* Convert burst size to the bucket units: */
4348 /* pkts => 1/1000 packets, kilobits => bits. */
4349 meter->bands[i].up.burst_size *= 1000;
4350 /* Initialize bucket to empty. */
4351 meter->bands[i].bucket = 0;
4352
4353 /* Figure out max delta_t that is enough to fill any bucket. */
4354 band_max_delta_t
4355 = meter->bands[i].up.burst_size / meter->bands[i].up.rate;
4356 if (band_max_delta_t > meter->max_delta_t) {
4357 meter->max_delta_t = band_max_delta_t;
4358 }
4359 }
4360
4361 meter_lock(dp, mid);
4362 dp_delete_meter(dp, mid); /* Free existing meter, if any */
4363 dp->meters[mid] = meter;
4364 meter_unlock(dp, mid);
4365
4b27db64
JR
4366 return 0;
4367 }
4368 return ENOMEM;
5dddf960
JR
4369}
4370
4371static int
4b27db64
JR
4372dpif_netdev_meter_get(const struct dpif *dpif,
4373 ofproto_meter_id meter_id_,
4374 struct ofputil_meter_stats *stats, uint16_t n_bands)
5dddf960 4375{
4b27db64
JR
4376 const struct dp_netdev *dp = get_dp_netdev(dpif);
4377 const struct dp_meter *meter;
4378 uint32_t meter_id = meter_id_.uint32;
4379
4380 if (meter_id >= MAX_METERS) {
4381 return EFBIG;
4382 }
4383 meter = dp->meters[meter_id];
4384 if (!meter) {
4385 return ENOENT;
4386 }
4387 if (stats) {
4388 int i = 0;
4389
4390 meter_lock(dp, meter_id);
4391 stats->packet_in_count = meter->packet_count;
4392 stats->byte_in_count = meter->byte_count;
4393
4394 for (i = 0; i < n_bands && i < meter->n_bands; ++i) {
4395 stats->bands[i].packet_count = meter->bands[i].packet_count;
4396 stats->bands[i].byte_count = meter->bands[i].byte_count;
4397 }
4398 meter_unlock(dp, meter_id);
4399
4400 stats->n_bands = i;
4401 }
4402 return 0;
5dddf960
JR
4403}
4404
4405static int
4b27db64
JR
4406dpif_netdev_meter_del(struct dpif *dpif,
4407 ofproto_meter_id meter_id_,
4408 struct ofputil_meter_stats *stats, uint16_t n_bands)
5dddf960 4409{
4b27db64
JR
4410 struct dp_netdev *dp = get_dp_netdev(dpif);
4411 int error;
4412
4413 error = dpif_netdev_meter_get(dpif, meter_id_, stats, n_bands);
4414 if (!error) {
4415 uint32_t meter_id = meter_id_.uint32;
4416
4417 meter_lock(dp, meter_id);
4418 dp_delete_meter(dp, meter_id);
4419 meter_unlock(dp, meter_id);
4b27db64
JR
4420 }
4421 return error;
5dddf960
JR
4422}
4423
4424\f
6b31e073
RW
4425static void
4426dpif_netdev_disable_upcall(struct dpif *dpif)
4427 OVS_NO_THREAD_SAFETY_ANALYSIS
4428{
4429 struct dp_netdev *dp = get_dp_netdev(dpif);
4430 dp_netdev_disable_upcall(dp);
4431}
4432
4433static void
4434dp_netdev_enable_upcall(struct dp_netdev *dp)
4435 OVS_RELEASES(dp->upcall_rwlock)
4436{
4437 fat_rwlock_unlock(&dp->upcall_rwlock);
4438}
4439
4440static void
4441dpif_netdev_enable_upcall(struct dpif *dpif)
4442 OVS_NO_THREAD_SAFETY_ANALYSIS
4443{
4444 struct dp_netdev *dp = get_dp_netdev(dpif);
4445 dp_netdev_enable_upcall(dp);
4446}
4447
ae7ad0a1 4448static void
accf8626
AW
4449dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd)
4450{
4451 ovs_mutex_lock(&pmd->cond_mutex);
14e3e12a 4452 atomic_store_relaxed(&pmd->reload, false);
2788a1b1 4453 pmd->last_reload_seq = seq_read(pmd->reload_seq);
accf8626
AW
4454 xpthread_cond_signal(&pmd->cond);
4455 ovs_mutex_unlock(&pmd->cond_mutex);
4456}
4457
1c1e46ed 4458/* Finds and refs the dp_netdev_pmd_thread on core 'core_id'. Returns
546e57d4
DDP
4459 * the pointer if it succeeds, otherwise NULL (it can return NULL even if
4460 * 'core_id' is NON_PMD_CORE_ID).
1c1e46ed
AW
4461 *
4462 * Caller must unref the returned reference. */
65f13b50 4463static struct dp_netdev_pmd_thread *
bd5131ba 4464dp_netdev_get_pmd(struct dp_netdev *dp, unsigned core_id)
65f13b50
AW
4465{
4466 struct dp_netdev_pmd_thread *pmd;
55847abe 4467 const struct cmap_node *pnode;
65f13b50 4468
b19befae 4469 pnode = cmap_find(&dp->poll_threads, hash_int(core_id, 0));
1c1e46ed
AW
4470 if (!pnode) {
4471 return NULL;
4472 }
65f13b50
AW
4473 pmd = CONTAINER_OF(pnode, struct dp_netdev_pmd_thread, node);
4474
1c1e46ed 4475 return dp_netdev_pmd_try_ref(pmd) ? pmd : NULL;
65f13b50
AW
4476}
4477
f2eee189
AW
4478/* Sets the 'struct dp_netdev_pmd_thread' for non-pmd threads. */
4479static void
4480dp_netdev_set_nonpmd(struct dp_netdev *dp)
e9985d6a 4481 OVS_REQUIRES(dp->port_mutex)
f2eee189
AW
4482{
4483 struct dp_netdev_pmd_thread *non_pmd;
4484
4485 non_pmd = xzalloc(sizeof *non_pmd);
00873463 4486 dp_netdev_configure_pmd(non_pmd, dp, NON_PMD_CORE_ID, OVS_NUMA_UNSPEC);
f2eee189
AW
4487}
4488
1c1e46ed
AW
4489/* Caller must have valid pointer to 'pmd'. */
4490static bool
4491dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd)
4492{
4493 return ovs_refcount_try_ref_rcu(&pmd->ref_cnt);
4494}
4495
4496static void
4497dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd)
4498{
4499 if (pmd && ovs_refcount_unref(&pmd->ref_cnt) == 1) {
4500 ovsrcu_postpone(dp_netdev_destroy_pmd, pmd);
4501 }
4502}
4503
4504/* Given cmap position 'pos', tries to ref the next node. If try_ref()
4505 * fails, keeps checking the next node until reaching the end of the cmap.
4506 *
4507 * Caller must unref the returned reference. */
4508static struct dp_netdev_pmd_thread *
4509dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos)
4510{
4511 struct dp_netdev_pmd_thread *next;
4512
4513 do {
4514 struct cmap_node *node;
4515
4516 node = cmap_next_position(&dp->poll_threads, pos);
4517 next = node ? CONTAINER_OF(node, struct dp_netdev_pmd_thread, node)
4518 : NULL;
4519 } while (next && !dp_netdev_pmd_try_ref(next));
4520
4521 return next;
4522}
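/* Sketch of the intended usage of the helper above (editorial; a
 * zero-initialized 'pos' is how a cmap iteration starts):
 *
 *     struct cmap_position pos = {0, 0, 0};
 *     struct dp_netdev_pmd_thread *pmd;
 *
 *     while ((pmd = dp_netdev_pmd_get_next(dp, &pos))) {
 *         ...use 'pmd'...
 *         dp_netdev_pmd_unref(pmd);
 *     }
 */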
4523
65f13b50 4524/* Configures the 'pmd' based on the input argument. */
6c3eee82 4525static void
65f13b50 4526dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
00873463 4527 unsigned core_id, int numa_id)
65f13b50
AW
4528{
4529 pmd->dp = dp;
65f13b50
AW
4530 pmd->core_id = core_id;
4531 pmd->numa_id = numa_id;
e32971b8 4532 pmd->need_reload = false;
1c1e46ed
AW
4533
4534 ovs_refcount_init(&pmd->ref_cnt);
65f13b50 4535 latch_init(&pmd->exit_latch);
2788a1b1
DDP
4536 pmd->reload_seq = seq_create();
4537 pmd->last_reload_seq = seq_read(pmd->reload_seq);
14e3e12a 4538 atomic_init(&pmd->reload, false);
accf8626
AW
4539 xpthread_cond_init(&pmd->cond, NULL);
4540 ovs_mutex_init(&pmd->cond_mutex);
1c1e46ed 4541 ovs_mutex_init(&pmd->flow_mutex);
d0cca6c3 4542 ovs_mutex_init(&pmd->port_mutex);
1c1e46ed 4543 cmap_init(&pmd->flow_table);
3453b4d6
JS
4544 cmap_init(&pmd->classifiers);
4545 pmd->next_optimization = time_msec() + DPCLS_OPTIMIZATION_INTERVAL;
4809891b 4546 pmd->rxq_interval = time_msec() + PMD_RXQ_INTERVAL_LEN;
947dc567 4547 hmap_init(&pmd->poll_list);
d0cca6c3 4548 hmap_init(&pmd->tx_ports);
57eebbb4
DDP
4549 hmap_init(&pmd->tnl_port_cache);
4550 hmap_init(&pmd->send_port_cache);
65f13b50
AW
4551 /* Initialize the 'flow_cache' since there is no
4552 * actual thread created for NON_PMD_CORE_ID. */
4553 if (core_id == NON_PMD_CORE_ID) {
4554 emc_cache_init(&pmd->flow_cache);
140dd699 4555 pmd_alloc_static_tx_qid(pmd);
65f13b50
AW
4556 }
4557 cmap_insert(&dp->poll_threads, CONST_CAST(struct cmap_node *, &pmd->node),
4558 hash_int(core_id, 0));
4559}
4560
1c1e46ed
AW
4561static void
4562dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd)
4563{
3453b4d6
JS
4564 struct dpcls *cls;
4565
1c1e46ed 4566 dp_netdev_pmd_flow_flush(pmd);
57eebbb4
DDP
4567 hmap_destroy(&pmd->send_port_cache);
4568 hmap_destroy(&pmd->tnl_port_cache);
d0cca6c3 4569 hmap_destroy(&pmd->tx_ports);
947dc567 4570 hmap_destroy(&pmd->poll_list);
3453b4d6
JS
4571 /* All flows (including their dpcls_rules) have been deleted already */
4572 CMAP_FOR_EACH (cls, node, &pmd->classifiers) {
4573 dpcls_destroy(cls);
7c269972 4574 ovsrcu_postpone(free, cls);
3453b4d6
JS
4575 }
4576 cmap_destroy(&pmd->classifiers);
1c1e46ed
AW
4577 cmap_destroy(&pmd->flow_table);
4578 ovs_mutex_destroy(&pmd->flow_mutex);
4579 latch_destroy(&pmd->exit_latch);
2788a1b1 4580 seq_destroy(pmd->reload_seq);
1c1e46ed
AW
4581 xpthread_cond_destroy(&pmd->cond);
4582 ovs_mutex_destroy(&pmd->cond_mutex);
d0cca6c3 4583 ovs_mutex_destroy(&pmd->port_mutex);
1c1e46ed
AW
4584 free(pmd);
4585}
4586
4587/* Stops the pmd thread, removes it from the 'dp->poll_threads',
4588 * and unrefs the struct. */
65f13b50 4589static void
e4e74c3a 4590dp_netdev_del_pmd(struct dp_netdev *dp, struct dp_netdev_pmd_thread *pmd)
6c3eee82 4591{
d0cca6c3
DDP
4592 /* NON_PMD_CORE_ID doesn't have a thread, so we don't have to synchronize,
4593 * but extra cleanup is necessary */
65f13b50 4594 if (pmd->core_id == NON_PMD_CORE_ID) {
febf4a7a 4595 ovs_mutex_lock(&dp->non_pmd_mutex);
65f13b50 4596 emc_cache_uninit(&pmd->flow_cache);
d0cca6c3 4597 pmd_free_cached_ports(pmd);
140dd699 4598 pmd_free_static_tx_qid(pmd);
febf4a7a 4599 ovs_mutex_unlock(&dp->non_pmd_mutex);
65f13b50
AW
4600 } else {
4601 latch_set(&pmd->exit_latch);
4602 dp_netdev_reload_pmd__(pmd);
65f13b50
AW
4603 xpthread_join(pmd->thread, NULL);
4604 }
ae7ad0a1 4605
d0cca6c3 4606 dp_netdev_pmd_clear_ports(pmd);
ae7ad0a1 4607
e4e74c3a
AW
4608 /* Purges the 'pmd''s flows after stopping the thread, but before
4609 * destroying the flows, so that the flow stats can be collected. */
4610 if (dp->dp_purge_cb) {
4611 dp->dp_purge_cb(dp->dp_purge_aux, pmd->core_id);
4612 }
65f13b50 4613 cmap_remove(&pmd->dp->poll_threads, &pmd->node, hash_int(pmd->core_id, 0));
1c1e46ed 4614 dp_netdev_pmd_unref(pmd);
65f13b50 4615}
6c3eee82 4616
e32971b8
DDP
4617/* Destroys all pmd threads. If 'non_pmd' is true, it also destroys the
4618 * non-pmd thread. */
65f13b50 4619static void
e32971b8 4620dp_netdev_destroy_all_pmds(struct dp_netdev *dp, bool non_pmd)
65f13b50
AW
4621{
4622 struct dp_netdev_pmd_thread *pmd;
d916785c
DDP
4623 struct dp_netdev_pmd_thread **pmd_list;
4624 size_t k = 0, n_pmds;
4625
e32971b8 4626 n_pmds = cmap_count(&dp->poll_threads);
d916785c 4627 pmd_list = xcalloc(n_pmds, sizeof *pmd_list);
65f13b50
AW
4628
4629 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
e32971b8 4630 if (!non_pmd && pmd->core_id == NON_PMD_CORE_ID) {
b9584f21
DDP
4631 continue;
4632 }
d916785c
DDP
4633 /* We cannot call dp_netdev_del_pmd(), since it alters
4634 * 'dp->poll_threads' (while we're iterating it) and it
4635 * might quiesce. */
4636 ovs_assert(k < n_pmds);
4637 pmd_list[k++] = pmd;
6c3eee82 4638 }
d916785c
DDP
4639
4640 for (size_t i = 0; i < k; i++) {
4641 dp_netdev_del_pmd(dp, pmd_list[i]);
4642 }
4643 free(pmd_list);
65f13b50 4644}
6c3eee82 4645
d0cca6c3
DDP
4646/* Deletes all rx queues from pmd->poll_list and all the ports from
4647 * pmd->tx_ports. */
cc245ce8 4648static void
d0cca6c3 4649dp_netdev_pmd_clear_ports(struct dp_netdev_pmd_thread *pmd)
cc245ce8
IM
4650{
4651 struct rxq_poll *poll;
d0cca6c3 4652 struct tx_port *port;
cc245ce8 4653
d0cca6c3 4654 ovs_mutex_lock(&pmd->port_mutex);
947dc567 4655 HMAP_FOR_EACH_POP (poll, node, &pmd->poll_list) {
cc245ce8
IM
4656 free(poll);
4657 }
d0cca6c3
DDP
4658 HMAP_FOR_EACH_POP (port, node, &pmd->tx_ports) {
4659 free(port);
4660 }
4661 ovs_mutex_unlock(&pmd->port_mutex);
cc245ce8
IM
4662}
4663
e32971b8 4664/* Adds rx queue to poll_list of PMD thread, if it's not there already. */
b68872d8 4665static void
e32971b8
DDP
4666dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd,
4667 struct dp_netdev_rxq *rxq)
4668 OVS_REQUIRES(pmd->port_mutex)
b68872d8 4669{
e32971b8
DDP
4670 int qid = netdev_rxq_get_queue_id(rxq->rx);
4671 uint32_t hash = hash_2words(odp_to_u32(rxq->port->port_no), qid);
4672 struct rxq_poll *poll;
b68872d8 4673
e32971b8
DDP
4674 HMAP_FOR_EACH_WITH_HASH (poll, node, hash, &pmd->poll_list) {
4675 if (poll->rxq == rxq) {
4676 /* 'rxq' is already polled by this thread. Do nothing. */
4677 return;
d0cca6c3 4678 }
cc245ce8 4679 }
cc245ce8 4680
e32971b8
DDP
4681 poll = xmalloc(sizeof *poll);
4682 poll->rxq = rxq;
4683 hmap_insert(&pmd->poll_list, &poll->node, hash);
b68872d8 4684
e32971b8 4685 pmd->need_reload = true;
ae7ad0a1
IM
4686}
4687
e32971b8 4688/* Delete 'poll' from poll_list of PMD thread. */
ae7ad0a1 4689static void
e32971b8
DDP
4690dp_netdev_del_rxq_from_pmd(struct dp_netdev_pmd_thread *pmd,
4691 struct rxq_poll *poll)
d0cca6c3 4692 OVS_REQUIRES(pmd->port_mutex)
ae7ad0a1 4693{
e32971b8
DDP
4694 hmap_remove(&pmd->poll_list, &poll->node);
4695 free(poll);
ae7ad0a1 4696
e32971b8 4697 pmd->need_reload = true;
ae7ad0a1
IM
4698}
4699
d0cca6c3
DDP
4700/* Add 'port' to the tx port cache of 'pmd', which must be reloaded for the
4701 * changes to take effect. */
cc245ce8 4702static void
d0cca6c3
DDP
4703dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread *pmd,
4704 struct dp_netdev_port *port)
e32971b8 4705 OVS_REQUIRES(pmd->port_mutex)
d0cca6c3 4706{
57eebbb4
DDP
4707 struct tx_port *tx;
4708
e32971b8
DDP
4709 tx = tx_port_lookup(&pmd->tx_ports, port->port_no);
4710 if (tx) {
4711 /* 'port' is already in this thread's tx cache. Do nothing. */
4712 return;
4713 }
4714
57eebbb4 4715 tx = xzalloc(sizeof *tx);
d0cca6c3 4716
324c8374
IM
4717 tx->port = port;
4718 tx->qid = -1;
d0cca6c3 4719
324c8374 4720 hmap_insert(&pmd->tx_ports, &tx->node, hash_port_no(tx->port->port_no));
e32971b8 4721 pmd->need_reload = true;
d0cca6c3
DDP
4722}
4723
e32971b8
DDP
4724/* Del 'tx' from the tx port cache of 'pmd', which must be reloaded for the
4725 * changes to take effect. */
b9584f21 4726static void
e32971b8
DDP
4727dp_netdev_del_port_tx_from_pmd(struct dp_netdev_pmd_thread *pmd,
4728 struct tx_port *tx)
4729 OVS_REQUIRES(pmd->port_mutex)
b9584f21 4730{
e32971b8
DDP
4731 hmap_remove(&pmd->tx_ports, &tx->node);
4732 free(tx);
4733 pmd->need_reload = true;
6c3eee82
BP
4734}
4735\f
b5cbbcf6
AZ
4736static char *
4737dpif_netdev_get_datapath_version(void)
4738{
4739 return xstrdup("<built-in>");
4740}
4741
72865317 4742static void
1c1e46ed 4743dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow, int cnt, int size,
11bfdadd 4744 uint16_t tcp_flags, long long now)
72865317 4745{
eb94da30 4746 uint16_t flags;
72865317 4747
eb94da30
DDP
4748 atomic_store_relaxed(&netdev_flow->stats.used, now);
4749 non_atomic_ullong_add(&netdev_flow->stats.packet_count, cnt);
4750 non_atomic_ullong_add(&netdev_flow->stats.byte_count, size);
4751 atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags);
4752 flags |= tcp_flags;
4753 atomic_store_relaxed(&netdev_flow->stats.tcp_flags, flags);
51852a57
BP
4754}
4755
4756static void
1c1e46ed
AW
4757dp_netdev_count_packet(struct dp_netdev_pmd_thread *pmd,
4758 enum dp_stat_type type, int cnt)
51852a57 4759{
eb94da30 4760 non_atomic_ullong_add(&pmd->stats.n[type], cnt);
51852a57
BP
4761}
4762
623540e4 4763static int
e14deea0 4764dp_netdev_upcall(struct dp_netdev_pmd_thread *pmd, struct dp_packet *packet_,
7af12bd7 4765 struct flow *flow, struct flow_wildcards *wc, ovs_u128 *ufid,
623540e4
EJ
4766 enum dpif_upcall_type type, const struct nlattr *userdata,
4767 struct ofpbuf *actions, struct ofpbuf *put_actions)
4768{
1c1e46ed 4769 struct dp_netdev *dp = pmd->dp;
623540e4 4770
623540e4
EJ
4771 if (OVS_UNLIKELY(!dp->upcall_cb)) {
4772 return ENODEV;
4773 }
4774
4775 if (OVS_UNLIKELY(!VLOG_DROP_DBG(&upcall_rl))) {
4776 struct ds ds = DS_EMPTY_INITIALIZER;
623540e4 4777 char *packet_str;
cf62fa4c 4778 struct ofpbuf key;
5262eea1
JG
4779 struct odp_flow_key_parms odp_parms = {
4780 .flow = flow,
1dea1435 4781 .mask = wc ? &wc->masks : NULL,
2494ccd7 4782 .support = dp_netdev_support,
5262eea1 4783 };
623540e4
EJ
4784
4785 ofpbuf_init(&key, 0);
5262eea1 4786 odp_flow_key_from_flow(&odp_parms, &key);
2482b0b0 4787 packet_str = ofp_dp_packet_to_string(packet_);
623540e4 4788
6fd6ed71 4789 odp_flow_key_format(key.data, key.size, &ds);
623540e4
EJ
4790
4791 VLOG_DBG("%s: %s upcall:\n%s\n%s", dp->name,
4792 dpif_upcall_type_to_string(type), ds_cstr(&ds), packet_str);
4793
4794 ofpbuf_uninit(&key);
4795 free(packet_str);
6fd6ed71 4796
623540e4
EJ
4797 ds_destroy(&ds);
4798 }
4799
8d8ab6c2
JG
4800 return dp->upcall_cb(packet_, flow, ufid, pmd->core_id, type, userdata,
4801 actions, wc, put_actions, dp->upcall_aux);
623540e4
EJ
4802}
4803
bde94613
FA
4804static inline uint32_t
4805dpif_netdev_packet_get_rss_hash_orig_pkt(struct dp_packet *packet,
4806 const struct miniflow *mf)
4807{
4808 uint32_t hash;
4809
4810 if (OVS_LIKELY(dp_packet_rss_valid(packet))) {
4811 hash = dp_packet_get_rss_hash(packet);
4812 } else {
4813 hash = miniflow_hash_5tuple(mf, 0);
4814 dp_packet_set_rss_hash(packet, hash);
4815 }
4816
4817 return hash;
4818}
4819
9bbf1c3d 4820static inline uint32_t
048963aa
DDP
4821dpif_netdev_packet_get_rss_hash(struct dp_packet *packet,
4822 const struct miniflow *mf)
9bbf1c3d 4823{
048963aa 4824 uint32_t hash, recirc_depth;
9bbf1c3d 4825
f2f44f5d
DDP
4826 if (OVS_LIKELY(dp_packet_rss_valid(packet))) {
4827 hash = dp_packet_get_rss_hash(packet);
4828 } else {
9bbf1c3d 4829 hash = miniflow_hash_5tuple(mf, 0);
2bc1bbd2 4830 dp_packet_set_rss_hash(packet, hash);
9bbf1c3d 4831 }
048963aa
DDP
4832
4833 /* The RSS hash must account for the recirculation depth to avoid
4834 * collisions in the exact match cache */
4835 recirc_depth = *recirc_depth_get_unsafe();
4836 if (OVS_UNLIKELY(recirc_depth)) {
4837 hash = hash_finish(hash, recirc_depth);
4838 dp_packet_set_rss_hash(packet, hash);
4839 }
9bbf1c3d
DDP
4840 return hash;
4841}
4842
f7ce4811 4843struct packet_batch_per_flow {
8cbf4f47
DDP
4844 unsigned int byte_count;
4845 uint16_t tcp_flags;
8cbf4f47
DDP
4846 struct dp_netdev_flow *flow;
4847
1895cc8d 4848 struct dp_packet_batch array;
8cbf4f47
DDP
4849};
4850
4851static inline void
f7ce4811
PS
4852packet_batch_per_flow_update(struct packet_batch_per_flow *batch,
4853 struct dp_packet *packet,
4854 const struct miniflow *mf)
8cbf4f47 4855{
cf62fa4c 4856 batch->byte_count += dp_packet_size(packet);
1895cc8d
PS
4857 batch->tcp_flags |= miniflow_get_tcp_flags(mf);
4858 batch->array.packets[batch->array.count++] = packet;
8cbf4f47
DDP
4859}
4860
4861static inline void
f7ce4811
PS
4862packet_batch_per_flow_init(struct packet_batch_per_flow *batch,
4863 struct dp_netdev_flow *flow)
8cbf4f47 4864{
11e5cf1f 4865 flow->batch = batch;
8cbf4f47 4866
11e5cf1f 4867 batch->flow = flow;
1895cc8d 4868 dp_packet_batch_init(&batch->array);
8cbf4f47
DDP
4869 batch->byte_count = 0;
4870 batch->tcp_flags = 0;
8cbf4f47
DDP
4871}
4872
4873static inline void
f7ce4811
PS
4874packet_batch_per_flow_execute(struct packet_batch_per_flow *batch,
4875 struct dp_netdev_pmd_thread *pmd,
4876 long long now)
8cbf4f47
DDP
4877{
4878 struct dp_netdev_actions *actions;
4879 struct dp_netdev_flow *flow = batch->flow;
4880
1895cc8d 4881 dp_netdev_flow_used(flow, batch->array.count, batch->byte_count,
11bfdadd 4882 batch->tcp_flags, now);
8cbf4f47
DDP
4883
4884 actions = dp_netdev_flow_get_actions(flow);
4885
66e4ad8a 4886 dp_netdev_execute_actions(pmd, &batch->array, true, &flow->flow,
324c8374 4887 actions->actions, actions->size, now);
8cbf4f47
DDP
4888}
4889
8aaa125d 4890static inline void
e14deea0 4891dp_netdev_queue_batches(struct dp_packet *pkt,
9bbf1c3d 4892 struct dp_netdev_flow *flow, const struct miniflow *mf,
47a45d86
KT
4893 struct packet_batch_per_flow *batches,
4894 size_t *n_batches)
9bbf1c3d 4895{
f7ce4811 4896 struct packet_batch_per_flow *batch = flow->batch;
11e5cf1f 4897
f9fe365b
AZ
4898 if (OVS_UNLIKELY(!batch)) {
4899 batch = &batches[(*n_batches)++];
f7ce4811 4900 packet_batch_per_flow_init(batch, flow);
9bbf1c3d
DDP
4901 }
4902
f7ce4811 4903 packet_batch_per_flow_update(batch, pkt, mf);
9bbf1c3d
DDP
4904}
4905
9bbf1c3d 4906/* Try to process all 'cnt' packets using only the exact match cache
a90ed026 4907 * 'pmd->flow_cache'. If a flow is not found for a packet 'packets[i]', the
8aaa125d
DDP
4908 * miniflow is copied into 'keys' and the packet pointer is moved to the
4909 * beginning of the 'packets' array.
9bbf1c3d
DDP
4910 *
4911 * The function returns the number of packets that need to be processed in the
4912 * 'packets' array (they have been moved to the beginning of the vector).
a90ed026 4913 *
02305520
FA
4914 * For performance reasons a caller may choose not to initialize the metadata
4915 * in 'packets_'. If 'md_is_valid' is false, the metadata in 'packets'
4916 * is not valid and must be initialized by this function using 'port_no'.
4917 * If 'md_is_valid' is true, the metadata is already valid and 'port_no'
4918 * will be ignored.
9bbf1c3d
DDP
4919 */
4920static inline size_t
72c84bc2
AZ
4921emc_processing(struct dp_netdev_pmd_thread *pmd,
4922 struct dp_packet_batch *packets_,
1895cc8d 4923 struct netdev_flow_key *keys,
f7ce4811 4924 struct packet_batch_per_flow batches[], size_t *n_batches,
a90ed026 4925 bool md_is_valid, odp_port_t port_no)
72865317 4926{
65f13b50 4927 struct emc_cache *flow_cache = &pmd->flow_cache;
b89c678b 4928 struct netdev_flow_key *key = &keys[0];
72c84bc2
AZ
4929 size_t n_missed = 0, n_dropped = 0;
4930 struct dp_packet *packet;
45df9fef 4931 const size_t cnt = dp_packet_batch_size(packets_);
f79b1ddb 4932 uint32_t cur_min;
72c84bc2 4933 int i;
8cbf4f47 4934
f79b1ddb
BB
4935 atomic_read_relaxed(&pmd->dp->emc_insert_min, &cur_min);
4936
45df9fef 4937 DP_PACKET_BATCH_REFILL_FOR_EACH (i, cnt, packet, packets_) {
9bbf1c3d 4938 struct dp_netdev_flow *flow;
9bbf1c3d 4939
5a2fed48
AZ
4940 if (OVS_UNLIKELY(dp_packet_size(packet) < ETH_HEADER_LEN)) {
4941 dp_packet_delete(packet);
3d88a620 4942 n_dropped++;
84d6d5eb
EJ
4943 continue;
4944 }
8cbf4f47 4945
45df9fef 4946 if (i != cnt - 1) {
72c84bc2 4947 struct dp_packet **packets = packets_->packets;
a90ed026 4948 /* Prefetch next packet data and metadata. */
72a5e2b8 4949 OVS_PREFETCH(dp_packet_data(packets[i+1]));
a90ed026 4950 pkt_metadata_prefetch_init(&packets[i+1]->md);
72a5e2b8
DDP
4951 }
4952
a90ed026
DDP
4953 if (!md_is_valid) {
4954 pkt_metadata_init(&packet->md, port_no);
4955 }
5a2fed48 4956 miniflow_extract(packet, &key->mf);
d262ac2c 4957 key->len = 0; /* Not computed yet. */
bde94613
FA
4958 /* If EMC is disabled, skip hash computation and emc_lookup(). */
4959 if (cur_min) {
4960 if (!md_is_valid) {
4961 key->hash = dpif_netdev_packet_get_rss_hash_orig_pkt(packet,
4962 &key->mf);
4963 } else {
4964 key->hash = dpif_netdev_packet_get_rss_hash(packet, &key->mf);
4965 }
4966 flow = emc_lookup(flow_cache, key);
4967 } else {
4968 flow = NULL;
4969 }
8aaa125d 4970 if (OVS_LIKELY(flow)) {
5a2fed48 4971 dp_netdev_queue_batches(packet, flow, &key->mf, batches,
8aaa125d
DDP
4972 n_batches);
4973 } else {
d1aa0b94 4974 /* Exact match cache missed. Group missed packets together at
72c84bc2
AZ
4975 * the beginning of the 'packets' array. */
4976 dp_packet_batch_refill(packets_, packet, i);
400486f7
DDP
4977 /* 'keys[n_missed]' contains the key of the current packet and it
4978 * must be returned to the caller. The next key should be extracted
4979 * to 'keys[n_missed + 1]'. */
4980 key = &keys[++n_missed];
9bbf1c3d
DDP
4981 }
4982 }
4983
47a45d86 4984 dp_netdev_count_packet(pmd, DP_STAT_EXACT_HIT,
45df9fef 4985 cnt - n_dropped - n_missed);
4f150744 4986
72c84bc2 4987 return dp_packet_batch_size(packets_);
9bbf1c3d
DDP
4988}
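/* Illustrative sketch of the refill compaction above (not upstream code):
 * assuming a 4-packet batch P0..P3 in which only P1 and P3 miss the EMC,
 * the state after the loop is
 *
 *     packets_->packets = { P1, P3, ... }        (n_missed == 2)
 *     keys[0] == miniflow(P1), keys[1] == miniflow(P3)
 *
 * so fast_path_processing() can walk 'packets' and 'keys' in lockstep, and
 * the returned dp_packet_batch_size() counts only the misses left to
 * classify. */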
4989
a260d966 4990static inline void
47a45d86
KT
4991handle_packet_upcall(struct dp_netdev_pmd_thread *pmd,
4992 struct dp_packet *packet,
a260d966
PS
4993 const struct netdev_flow_key *key,
4994 struct ofpbuf *actions, struct ofpbuf *put_actions,
324c8374 4995 int *lost_cnt, long long now)
a260d966
PS
4996{
4997 struct ofpbuf *add_actions;
4998 struct dp_packet_batch b;
4999 struct match match;
5000 ovs_u128 ufid;
5001 int error;
5002
5003 match.tun_md.valid = false;
5004 miniflow_expand(&key->mf, &match.flow);
5005
5006 ofpbuf_clear(actions);
5007 ofpbuf_clear(put_actions);
5008
5009 dpif_flow_hash(pmd->dp->dpif, &match.flow, sizeof match.flow, &ufid);
5010 error = dp_netdev_upcall(pmd, packet, &match.flow, &match.wc,
5011 &ufid, DPIF_UC_MISS, NULL, actions,
5012 put_actions);
5013 if (OVS_UNLIKELY(error && error != ENOSPC)) {
5014 dp_packet_delete(packet);
5015 (*lost_cnt)++;
5016 return;
5017 }
5018
5019 /* The Netlink encoding of datapath flow keys cannot express
5020 * wildcarding the presence of a VLAN tag. Instead, a missing VLAN
5021 * tag is interpreted as exact match on the fact that there is no
5022 * VLAN. Unless we refactor a lot of code that translates between
5023 * Netlink and struct flow representations, we have to do the same
5024 * here. */
f0fb825a
EG
5025 if (!match.wc.masks.vlans[0].tci) {
5026 match.wc.masks.vlans[0].tci = htons(0xffff);
a260d966
PS
5027 }
5028
5029 /* We can't allow the packet batching in the next loop to execute
5030 * the actions. Otherwise, if there are any slow path actions,
5031 * we'll send the packet up twice. */
72c84bc2 5032 dp_packet_batch_init_packet(&b, packet);
66e4ad8a 5033 dp_netdev_execute_actions(pmd, &b, true, &match.flow,
324c8374 5034 actions->data, actions->size, now);
a260d966
PS
5035
5036 add_actions = put_actions->size ? put_actions : actions;
5037 if (OVS_LIKELY(error != ENOSPC)) {
5038 struct dp_netdev_flow *netdev_flow;
5039
5040 /* XXX: There's a race window where a flow covering this packet
5041 * could have already been installed since we last did the flow
5042 * lookup before upcall. This could be solved by moving the
5043 * mutex lock outside the loop, but that's an awful long time
5044 * to be locking everyone out of making flow installs. If we
5045 * move to a per-core classifier, it would be reasonable. */
5046 ovs_mutex_lock(&pmd->flow_mutex);
3453b4d6 5047 netdev_flow = dp_netdev_pmd_lookup_flow(pmd, key, NULL);
a260d966
PS
5048 if (OVS_LIKELY(!netdev_flow)) {
5049 netdev_flow = dp_netdev_flow_add(pmd, &match, &ufid,
5050 add_actions->data,
5051 add_actions->size);
5052 }
5053 ovs_mutex_unlock(&pmd->flow_mutex);
4c30b246 5054 emc_probabilistic_insert(pmd, key, netdev_flow);
a260d966
PS
5055 }
5056}
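/* A note on the 'add_actions' choice above: when the upcall returns
 * separate slow-path actions in 'actions' (already executed once, just
 * above) and datapath actions in 'put_actions', the flow is installed with
 * 'put_actions'; when 'put_actions' comes back empty, the same 'actions'
 * are both executed and installed. */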
5057
9bbf1c3d 5058static inline void
65f13b50 5059fast_path_processing(struct dp_netdev_pmd_thread *pmd,
1895cc8d 5060 struct dp_packet_batch *packets_,
8aaa125d 5061 struct netdev_flow_key *keys,
324c8374 5062 struct packet_batch_per_flow batches[], size_t *n_batches,
3453b4d6 5063 odp_port_t in_port,
324c8374 5064 long long now)
9bbf1c3d 5065{
31c82130 5066 const size_t cnt = dp_packet_batch_size(packets_);
1a0d5831 5067#if !defined(__CHECKER__) && !defined(_WIN32)
9bbf1c3d
DDP
5068 const size_t PKT_ARRAY_SIZE = cnt;
5069#else
1a0d5831 5070 /* Sparse or MSVC doesn't like variable length array. */
cd159f1a 5071 enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
9bbf1c3d 5072#endif
31c82130 5073 struct dp_packet *packet;
3453b4d6 5074 struct dpcls *cls;
0de8783a 5075 struct dpcls_rule *rules[PKT_ARRAY_SIZE];
65f13b50 5076 struct dp_netdev *dp = pmd->dp;
8aaa125d 5077 int miss_cnt = 0, lost_cnt = 0;
3453b4d6 5078 int lookup_cnt = 0, add_lookup_cnt;
9bbf1c3d 5079 bool any_miss;
8aaa125d 5080 size_t i;
9bbf1c3d
DDP
5081
5082 for (i = 0; i < cnt; i++) {
0de8783a 5083 /* Key length is needed in all the cases, hash computed on demand. */
361d808d 5084 keys[i].len = netdev_flow_key_size(miniflow_n_values(&keys[i].mf));
9bbf1c3d 5085 }
3453b4d6
JS
5086 /* Get the classifier for the in_port. */
5087 cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port);
5088 if (OVS_LIKELY(cls)) {
5089 any_miss = !dpcls_lookup(cls, keys, rules, cnt, &lookup_cnt);
5090 } else {
5091 any_miss = true;
5092 memset(rules, 0, sizeof(rules));
5093 }
623540e4
EJ
5094 if (OVS_UNLIKELY(any_miss) && !fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
5095 uint64_t actions_stub[512 / 8], slow_stub[512 / 8];
5096 struct ofpbuf actions, put_actions;
623540e4
EJ
5097
5098 ofpbuf_use_stub(&actions, actions_stub, sizeof actions_stub);
5099 ofpbuf_use_stub(&put_actions, slow_stub, sizeof slow_stub);
5100
31c82130 5101 DP_PACKET_BATCH_FOR_EACH (packet, packets_) {
0de8783a 5102 struct dp_netdev_flow *netdev_flow;
623540e4 5103
0de8783a 5104 if (OVS_LIKELY(rules[i])) {
623540e4
EJ
5105 continue;
5106 }
5107
5108 /* It's possible that an earlier slow path execution installed
0de8783a 5109 * a rule covering this flow. In this case, it's a lot cheaper
623540e4 5110 * to catch it here than execute a miss. */
3453b4d6
JS
5111 netdev_flow = dp_netdev_pmd_lookup_flow(pmd, &keys[i],
5112 &add_lookup_cnt);
623540e4 5113 if (netdev_flow) {
3453b4d6 5114 lookup_cnt += add_lookup_cnt;
0de8783a 5115 rules[i] = &netdev_flow->cr;
623540e4
EJ
5116 continue;
5117 }
5118
60fc3b7b 5119 miss_cnt++;
31c82130 5120 handle_packet_upcall(pmd, packet, &keys[i], &actions,
324c8374 5121 &put_actions, &lost_cnt, now);
623540e4
EJ
5122 }
5123
5124 ofpbuf_uninit(&actions);
5125 ofpbuf_uninit(&put_actions);
5126 fat_rwlock_unlock(&dp->upcall_rwlock);
ac8c2081 5127 } else if (OVS_UNLIKELY(any_miss)) {
31c82130 5128 DP_PACKET_BATCH_FOR_EACH (packet, packets_) {
0de8783a 5129 if (OVS_UNLIKELY(!rules[i])) {
31c82130 5130 dp_packet_delete(packet);
8aaa125d
DDP
5131 lost_cnt++;
5132 miss_cnt++;
ac8c2081
DDP
5133 }
5134 }
623540e4 5135 }
84d6d5eb 5136
31c82130 5137 DP_PACKET_BATCH_FOR_EACH (packet, packets_) {
84d6d5eb 5138 struct dp_netdev_flow *flow;
8cbf4f47 5139
0de8783a 5140 if (OVS_UNLIKELY(!rules[i])) {
84d6d5eb
EJ
5141 continue;
5142 }
5143
84d6d5eb 5144 flow = dp_netdev_flow_cast(rules[i]);
0de8783a 5145
4c30b246 5146 emc_probabilistic_insert(pmd, &keys[i], flow);
8aaa125d 5147 dp_netdev_queue_batches(packet, flow, &keys[i].mf, batches, n_batches);
8cbf4f47
DDP
5148 }
5149
8aaa125d 5150 dp_netdev_count_packet(pmd, DP_STAT_MASKED_HIT, cnt - miss_cnt);
3453b4d6 5151 dp_netdev_count_packet(pmd, DP_STAT_LOOKUP_HIT, lookup_cnt);
8aaa125d
DDP
5152 dp_netdev_count_packet(pmd, DP_STAT_MISS, miss_cnt);
5153 dp_netdev_count_packet(pmd, DP_STAT_LOST, lost_cnt);
72865317
BP
5154}
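/* Worked example of the counter updates above (illustrative numbers): for
 * a 32-packet batch in which 3 packets missed the megaflow cache and 1 of
 * them was dropped because its upcall failed, the counters advance by
 * DP_STAT_MASKED_HIT += 29, DP_STAT_MISS += 3 and DP_STAT_LOST += 1, while
 * DP_STAT_LOOKUP_HIT accumulates the subtable positions reported by
 * dpcls_lookup(). */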
5155
a90ed026
DDP
5156/* Packets enter the datapath from a port (or from recirculation) here.
5157 *
02305520
FA
5158 * When 'md_is_valid' is true, the metadata in 'packets' is already valid.
5159 * When false, the metadata in 'packets' needs to be initialized. */
adcf00ba 5160static void
a90ed026 5161dp_netdev_input__(struct dp_netdev_pmd_thread *pmd,
1895cc8d 5162 struct dp_packet_batch *packets,
a90ed026 5163 bool md_is_valid, odp_port_t port_no)
9bbf1c3d 5164{
1a0d5831 5165#if !defined(__CHECKER__) && !defined(_WIN32)
37eabc70 5166 const size_t PKT_ARRAY_SIZE = dp_packet_batch_size(packets);
9bbf1c3d 5167#else
1a0d5831 5168 /* Sparse or MSVC doesn't like variable length array. */
cd159f1a 5169 enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
9bbf1c3d 5170#endif
47a45d86
KT
5171 OVS_ALIGNED_VAR(CACHE_LINE_SIZE)
5172 struct netdev_flow_key keys[PKT_ARRAY_SIZE];
f7ce4811 5173 struct packet_batch_per_flow batches[PKT_ARRAY_SIZE];
11bfdadd 5174 long long now = time_msec();
72c84bc2 5175 size_t n_batches;
3453b4d6 5176 odp_port_t in_port;
9bbf1c3d 5177
8aaa125d 5178 n_batches = 0;
72c84bc2 5179 emc_processing(pmd, packets, keys, batches, &n_batches,
a90ed026 5180 md_is_valid, port_no);
72c84bc2 5181 if (!dp_packet_batch_is_empty(packets)) {
3453b4d6
JS
5182 /* Get ingress port from first packet's metadata. */
5183 in_port = packets->packets[0]->md.in_port.odp_port;
47a45d86
KT
5184 fast_path_processing(pmd, packets, keys, batches, &n_batches,
5185 in_port, now);
8aaa125d
DDP
5186 }
5187
ad9f0581
BB
5188 /* All the flow batches need to be reset before any call to
5189 * packet_batch_per_flow_execute() as it could potentially trigger
5190 * recirculation. When a packet matching flow 'j' happens to be
5191 * recirculated, the nested call to dp_netdev_input__() could potentially
5192 * classify the packet as matching another flow - say 'k'. It could happen
5193 * that in the previous call to dp_netdev_input__() that same flow 'k'
5194 * already had its own batches[k] still waiting to be served. So if its
5195 * 'batch' member is not reset, the recirculated packet would be wrongly
5196 * appended to batches[k] of the 1st call to dp_netdev_input__(). */
72c84bc2 5197 size_t i;
603f2ce0
EJ
5198 for (i = 0; i < n_batches; i++) {
5199 batches[i].flow->batch = NULL;
5200 }
5201
8aaa125d 5202 for (i = 0; i < n_batches; i++) {
f7ce4811 5203 packet_batch_per_flow_execute(&batches[i], pmd, now);
9bbf1c3d
DDP
5204 }
5205}
5206
a90ed026
DDP
5207static void
5208dp_netdev_input(struct dp_netdev_pmd_thread *pmd,
1895cc8d 5209 struct dp_packet_batch *packets,
a90ed026
DDP
5210 odp_port_t port_no)
5211{
3453b4d6 5212 dp_netdev_input__(pmd, packets, false, port_no);
a90ed026
DDP
5213}
5214
5215static void
5216dp_netdev_recirculate(struct dp_netdev_pmd_thread *pmd,
1895cc8d 5217 struct dp_packet_batch *packets)
a90ed026 5218{
3453b4d6 5219 dp_netdev_input__(pmd, packets, true, 0);
a90ed026
DDP
5220}
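/* The two wrappers above differ only in metadata handling:
 * dp_netdev_input() lets dp_netdev_input__() initialize each packet's
 * metadata from 'port_no', while dp_netdev_recirculate() passes
 * 'md_is_valid' as true, so the dummy port number 0 is ignored and the
 * metadata set earlier in the pipeline (in_port, recirc_id, ...) is
 * preserved. */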
5221
9080a111 5222struct dp_netdev_execute_aux {
65f13b50 5223 struct dp_netdev_pmd_thread *pmd;
324c8374 5224 long long now;
66e4ad8a 5225 const struct flow *flow;
9080a111
JR
5226};
5227
e4e74c3a
AW
5228static void
5229dpif_netdev_register_dp_purge_cb(struct dpif *dpif, dp_purge_callback *cb,
5230 void *aux)
5231{
5232 struct dp_netdev *dp = get_dp_netdev(dpif);
5233 dp->dp_purge_aux = aux;
5234 dp->dp_purge_cb = cb;
5235}
5236
6b31e073 5237static void
623540e4
EJ
5238dpif_netdev_register_upcall_cb(struct dpif *dpif, upcall_callback *cb,
5239 void *aux)
6b31e073
RW
5240{
5241 struct dp_netdev *dp = get_dp_netdev(dpif);
623540e4 5242 dp->upcall_aux = aux;
6b31e073
RW
5243 dp->upcall_cb = cb;
5244}
5245
324c8374
IM
5246static void
5247dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
5248 long long now, bool purge)
5249{
5250 struct tx_port *tx;
5251 struct dp_netdev_port *port;
5252 long long interval;
5253
57eebbb4 5254 HMAP_FOR_EACH (tx, node, &pmd->send_port_cache) {
9f7a3035 5255 if (!tx->port->dynamic_txqs) {
324c8374
IM
5256 continue;
5257 }
5258 interval = now - tx->last_used;
5259 if (tx->qid >= 0 && (purge || interval >= XPS_TIMEOUT_MS)) {
5260 port = tx->port;
5261 ovs_mutex_lock(&port->txq_used_mutex);
5262 port->txq_used[tx->qid]--;
5263 ovs_mutex_unlock(&port->txq_used_mutex);
5264 tx->qid = -1;
5265 }
5266 }
5267}
5268
5269static int
5270dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread *pmd,
5271 struct tx_port *tx, long long now)
5272{
5273 struct dp_netdev_port *port;
5274 long long interval;
5275 int i, min_cnt, min_qid;
5276
5277 if (OVS_UNLIKELY(!now)) {
5278 now = time_msec();
5279 }
5280
5281 interval = now - tx->last_used;
5282 tx->last_used = now;
5283
5284 if (OVS_LIKELY(tx->qid >= 0 && interval < XPS_TIMEOUT_MS)) {
5285 return tx->qid;
5286 }
5287
5288 port = tx->port;
5289
5290 ovs_mutex_lock(&port->txq_used_mutex);
5291 if (tx->qid >= 0) {
5292 port->txq_used[tx->qid]--;
5293 tx->qid = -1;
5294 }
5295
5296 min_cnt = -1;
5297 min_qid = 0;
5298 for (i = 0; i < netdev_n_txq(port->netdev); i++) {
5299 if (port->txq_used[i] < min_cnt || min_cnt == -1) {
5300 min_cnt = port->txq_used[i];
5301 min_qid = i;
5302 }
5303 }
5304
5305 port->txq_used[min_qid]++;
5306 tx->qid = min_qid;
5307
5308 ovs_mutex_unlock(&port->txq_used_mutex);
5309
5310 dpif_netdev_xps_revalidate_pmd(pmd, now, false);
5311
5312 VLOG_DBG("Core %d: New TX queue ID %d for port \'%s\'.",
5313 pmd->core_id, tx->qid, netdev_get_name(tx->port->netdev));
5314 return min_qid;
5315}
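/* A minimal usage sketch, not upstream code: 'xps_txq_sketch' is a
 * hypothetical helper that only illustrates the call pattern, assuming 'p'
 * was obtained via pmd_send_port_cache_lookup().  Ports configured with
 * dynamic tx queues go through the XPS selection above; all others use the
 * PMD's statically assigned queue, mirroring the OVS_ACTION_ATTR_OUTPUT
 * case in dp_execute_cb(). */
static int OVS_UNUSED
xps_txq_sketch(struct dp_netdev_pmd_thread *pmd, struct tx_port *p,
               long long now)
{
    return p->port->dynamic_txqs
           ? dpif_netdev_xps_get_tx_qid(pmd, p, now)
           : pmd->static_tx_qid;
}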
5316
d0cca6c3 5317static struct tx_port *
57eebbb4
DDP
5318pmd_tnl_port_cache_lookup(const struct dp_netdev_pmd_thread *pmd,
5319 odp_port_t port_no)
5320{
5321 return tx_port_lookup(&pmd->tnl_port_cache, port_no);
5322}
5323
5324static struct tx_port *
5325pmd_send_port_cache_lookup(const struct dp_netdev_pmd_thread *pmd,
5326 odp_port_t port_no)
d0cca6c3 5327{
57eebbb4 5328 return tx_port_lookup(&pmd->send_port_cache, port_no);
d0cca6c3
DDP
5329}
5330
a36de779 5331static int
d0cca6c3 5332push_tnl_action(const struct dp_netdev_pmd_thread *pmd,
1895cc8d
PS
5333 const struct nlattr *attr,
5334 struct dp_packet_batch *batch)
a36de779 5335{
d0cca6c3 5336 struct tx_port *tun_port;
a36de779 5337 const struct ovs_action_push_tnl *data;
4c742796 5338 int err;
a36de779
PS
5339
5340 data = nl_attr_get(attr);
5341
81765c00 5342 tun_port = pmd_tnl_port_cache_lookup(pmd, data->tnl_port);
a36de779 5343 if (!tun_port) {
4c742796
PS
5344 err = -EINVAL;
5345 goto error;
a36de779 5346 }
324c8374 5347 err = netdev_push_header(tun_port->port->netdev, batch, data);
4c742796
PS
5348 if (!err) {
5349 return 0;
5350 }
5351error:
5352 dp_packet_delete_batch(batch, true);
5353 return err;
a36de779
PS
5354}
5355
66525ef3
PS
5356static void
5357dp_execute_userspace_action(struct dp_netdev_pmd_thread *pmd,
5358 struct dp_packet *packet, bool may_steal,
5359 struct flow *flow, ovs_u128 *ufid,
5360 struct ofpbuf *actions,
324c8374 5361 const struct nlattr *userdata, long long now)
66525ef3
PS
5362{
5363 struct dp_packet_batch b;
5364 int error;
5365
5366 ofpbuf_clear(actions);
5367
5368 error = dp_netdev_upcall(pmd, packet, flow, NULL, ufid,
5369 DPIF_UC_ACTION, userdata, actions,
5370 NULL);
5371 if (!error || error == ENOSPC) {
72c84bc2 5372 dp_packet_batch_init_packet(&b, packet);
66e4ad8a 5373 dp_netdev_execute_actions(pmd, &b, may_steal, flow,
324c8374 5374 actions->data, actions->size, now);
66525ef3
PS
5375 } else if (may_steal) {
5376 dp_packet_delete(packet);
5377 }
5378}
5379
a36de779 5380static void
1895cc8d 5381dp_execute_cb(void *aux_, struct dp_packet_batch *packets_,
09f9da0b 5382 const struct nlattr *a, bool may_steal)
4b27db64 5383 OVS_NO_THREAD_SAFETY_ANALYSIS
9080a111
JR
5384{
5385 struct dp_netdev_execute_aux *aux = aux_;
623540e4 5386 uint32_t *depth = recirc_depth_get();
28e2fa02
DDP
5387 struct dp_netdev_pmd_thread *pmd = aux->pmd;
5388 struct dp_netdev *dp = pmd->dp;
09f9da0b 5389 int type = nl_attr_type(a);
324c8374 5390 long long now = aux->now;
d0cca6c3 5391 struct tx_port *p;
9080a111 5392
09f9da0b
JR
5393 switch ((enum ovs_action_attr)type) {
5394 case OVS_ACTION_ATTR_OUTPUT:
57eebbb4 5395 p = pmd_send_port_cache_lookup(pmd, nl_attr_get_odp_port(a));
26a5075b 5396 if (OVS_LIKELY(p)) {
347ba9bb 5397 int tx_qid;
324c8374 5398 bool dynamic_txqs;
347ba9bb 5399
324c8374
IM
5400 dynamic_txqs = p->port->dynamic_txqs;
5401 if (dynamic_txqs) {
5402 tx_qid = dpif_netdev_xps_get_tx_qid(pmd, p, now);
5403 } else {
82d765f6 5404 tx_qid = pmd->static_tx_qid;
324c8374 5405 }
347ba9bb 5406
324c8374
IM
5407 netdev_send(p->port->netdev, tx_qid, packets_, may_steal,
5408 dynamic_txqs);
ac8c2081 5409 return;
8a4e3a85 5410 }
09f9da0b
JR
5411 break;
5412
a36de779
PS
5413 case OVS_ACTION_ATTR_TUNNEL_PUSH:
5414 if (*depth < MAX_RECIRC_DEPTH) {
aaca4fe0 5415 dp_packet_batch_apply_cutlen(packets_);
7c12dfc5 5416 push_tnl_action(pmd, a, packets_);
a36de779
PS
5417 return;
5418 }
5419 break;
5420
5421 case OVS_ACTION_ATTR_TUNNEL_POP:
5422 if (*depth < MAX_RECIRC_DEPTH) {
aaca4fe0 5423 struct dp_packet_batch *orig_packets_ = packets_;
8611f9a4 5424 odp_port_t portno = nl_attr_get_odp_port(a);
a36de779 5425
57eebbb4 5426 p = pmd_tnl_port_cache_lookup(pmd, portno);
a36de779 5427 if (p) {
1895cc8d 5428 struct dp_packet_batch tnl_pkt;
a36de779
PS
5429
5430 if (!may_steal) {
aaca4fe0
WT
5431 dp_packet_batch_clone(&tnl_pkt, packets_);
5432 packets_ = &tnl_pkt;
5433 dp_packet_batch_reset_cutlen(orig_packets_);
a36de779
PS
5434 }
5435
aaca4fe0
WT
5436 dp_packet_batch_apply_cutlen(packets_);
5437
324c8374 5438 netdev_pop_header(p->port->netdev, packets_);
72c84bc2 5439 if (dp_packet_batch_is_empty(packets_)) {
1c8f98d9
PS
5440 return;
5441 }
9235b479 5442
72c84bc2
AZ
5443 struct dp_packet *packet;
5444 DP_PACKET_BATCH_FOR_EACH (packet, packets_) {
5445 packet->md.in_port.odp_port = portno;
a36de779 5446 }
9235b479
PS
5447
5448 (*depth)++;
5449 dp_netdev_recirculate(pmd, packets_);
5450 (*depth)--;
a36de779
PS
5451 return;
5452 }
5453 }
5454 break;
5455
623540e4
EJ
5456 case OVS_ACTION_ATTR_USERSPACE:
5457 if (!fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
aaca4fe0 5458 struct dp_packet_batch *orig_packets_ = packets_;
623540e4 5459 const struct nlattr *userdata;
aaca4fe0 5460 struct dp_packet_batch usr_pkt;
623540e4
EJ
5461 struct ofpbuf actions;
5462 struct flow flow;
7af12bd7 5463 ovs_u128 ufid;
aaca4fe0 5464 bool clone = false;
4fc65926 5465
623540e4
EJ
5466 userdata = nl_attr_find_nested(a, OVS_USERSPACE_ATTR_USERDATA);
5467 ofpbuf_init(&actions, 0);
8cbf4f47 5468
aaca4fe0
WT
5469 if (packets_->trunc) {
5470 if (!may_steal) {
5471 dp_packet_batch_clone(&usr_pkt, packets_);
5472 packets_ = &usr_pkt;
aaca4fe0
WT
5473 clone = true;
5474 dp_packet_batch_reset_cutlen(orig_packets_);
5475 }
5476
5477 dp_packet_batch_apply_cutlen(packets_);
5478 }
5479
72c84bc2
AZ
5480 struct dp_packet *packet;
5481 DP_PACKET_BATCH_FOR_EACH (packet, packets_) {
5482 flow_extract(packet, &flow);
7af12bd7 5483 dpif_flow_hash(dp->dpif, &flow, sizeof flow, &ufid);
72c84bc2 5484 dp_execute_userspace_action(pmd, packet, may_steal, &flow,
324c8374 5485 &ufid, &actions, userdata, now);
db73f716 5486 }
aaca4fe0
WT
5487
5488 if (clone) {
5489 dp_packet_delete_batch(packets_, true);
5490 }
5491
623540e4
EJ
5492 ofpbuf_uninit(&actions);
5493 fat_rwlock_unlock(&dp->upcall_rwlock);
6b31e073 5494
ac8c2081
DDP
5495 return;
5496 }
09f9da0b 5497 break;
572f732a 5498
adcf00ba
AZ
5499 case OVS_ACTION_ATTR_RECIRC:
5500 if (*depth < MAX_RECIRC_DEPTH) {
1895cc8d 5501 struct dp_packet_batch recirc_pkts;
572f732a 5502
28e2fa02 5503 if (!may_steal) {
1895cc8d
PS
5504 dp_packet_batch_clone(&recirc_pkts, packets_);
5505 packets_ = &recirc_pkts;
28e2fa02 5506 }
8cbf4f47 5507
72c84bc2
AZ
5508 struct dp_packet *packet;
5509 DP_PACKET_BATCH_FOR_EACH (packet, packets_) {
5510 packet->md.recirc_id = nl_attr_get_u32(a);
8cbf4f47 5511 }
28e2fa02
DDP
5512
5513 (*depth)++;
1895cc8d 5514 dp_netdev_recirculate(pmd, packets_);
adcf00ba
AZ
5515 (*depth)--;
5516
ac8c2081 5517 return;
adcf00ba 5518 }
ac8c2081
DDP
5519
5520 VLOG_WARN("Packet dropped. Max recirculation depth exceeded.");
572f732a 5521 break;
572f732a 5522
5cf3edb3
DDP
5523 case OVS_ACTION_ATTR_CT: {
5524 const struct nlattr *b;
a76a37ef 5525 bool force = false;
5cf3edb3
DDP
5526 bool commit = false;
5527 unsigned int left;
5528 uint16_t zone = 0;
5529 const char *helper = NULL;
5530 const uint32_t *setmark = NULL;
5531 const struct ovs_key_ct_labels *setlabel = NULL;
4cddb1f0
DB
5532 struct nat_action_info_t nat_action_info;
5533 struct nat_action_info_t *nat_action_info_ref = NULL;
5534 bool nat_config = false;
5cf3edb3
DDP
5535
5536 NL_ATTR_FOR_EACH_UNSAFE (b, left, nl_attr_get(a),
5537 nl_attr_get_size(a)) {
5538 enum ovs_ct_attr sub_type = nl_attr_type(b);
5539
5540 switch(sub_type) {
b80e259f 5541 case OVS_CT_ATTR_FORCE_COMMIT:
a76a37ef
JR
5542 force = true;
5543 /* fall through. */
5cf3edb3
DDP
5544 case OVS_CT_ATTR_COMMIT:
5545 commit = true;
5546 break;
5547 case OVS_CT_ATTR_ZONE:
5548 zone = nl_attr_get_u16(b);
5549 break;
5550 case OVS_CT_ATTR_HELPER:
5551 helper = nl_attr_get_string(b);
5552 break;
5553 case OVS_CT_ATTR_MARK:
5554 setmark = nl_attr_get(b);
5555 break;
5556 case OVS_CT_ATTR_LABELS:
5557 setlabel = nl_attr_get(b);
5558 break;
8e83854c
JR
5559 case OVS_CT_ATTR_EVENTMASK:
5560 /* Silently ignored, as userspace datapath does not generate
5561 * netlink events. */
5562 break;
4cddb1f0
DB
5563 case OVS_CT_ATTR_NAT: {
5564 const struct nlattr *b_nest;
5565 unsigned int left_nest;
5566 bool ip_min_specified = false;
5567 bool proto_num_min_specified = false;
5568 bool ip_max_specified = false;
5569 bool proto_num_max_specified = false;
5570 memset(&nat_action_info, 0, sizeof nat_action_info);
5571 nat_action_info_ref = &nat_action_info;
5572
5573 NL_NESTED_FOR_EACH_UNSAFE (b_nest, left_nest, b) {
5574 enum ovs_nat_attr sub_type_nest = nl_attr_type(b_nest);
5575
5576 switch (sub_type_nest) {
5577 case OVS_NAT_ATTR_SRC:
5578 case OVS_NAT_ATTR_DST:
5579 nat_config = true;
5580 nat_action_info.nat_action |=
5581 ((sub_type_nest == OVS_NAT_ATTR_SRC)
5582 ? NAT_ACTION_SRC : NAT_ACTION_DST);
5583 break;
5584 case OVS_NAT_ATTR_IP_MIN:
5585 memcpy(&nat_action_info.min_addr,
5586 nl_attr_get(b_nest),
5587 nl_attr_get_size(b_nest));
5588 ip_min_specified = true;
5589 break;
5590 case OVS_NAT_ATTR_IP_MAX:
5591 memcpy(&nat_action_info.max_addr,
5592 nl_attr_get(b_nest),
5593 nl_attr_get_size(b_nest));
5594 ip_max_specified = true;
5595 break;
5596 case OVS_NAT_ATTR_PROTO_MIN:
5597 nat_action_info.min_port =
5598 nl_attr_get_u16(b_nest);
5599 proto_num_min_specified = true;
5600 break;
5601 case OVS_NAT_ATTR_PROTO_MAX:
5602 nat_action_info.max_port =
5603 nl_attr_get_u16(b_nest);
5604 proto_num_max_specified = true;
5605 break;
5606 case OVS_NAT_ATTR_PERSISTENT:
5607 case OVS_NAT_ATTR_PROTO_HASH:
5608 case OVS_NAT_ATTR_PROTO_RANDOM:
5609 break;
5610 case OVS_NAT_ATTR_UNSPEC:
5611 case __OVS_NAT_ATTR_MAX:
5612 OVS_NOT_REACHED();
5613 }
5614 }
5615
5616 if (ip_min_specified && !ip_max_specified) {
5617 nat_action_info.max_addr = nat_action_info.min_addr;
5618 }
5619 if (proto_num_min_specified && !proto_num_max_specified) {
5620 nat_action_info.max_port = nat_action_info.min_port;
5621 }
5622 if (proto_num_min_specified || proto_num_max_specified) {
5623 if (nat_action_info.nat_action & NAT_ACTION_SRC) {
5624 nat_action_info.nat_action |= NAT_ACTION_SRC_PORT;
5625 } else if (nat_action_info.nat_action & NAT_ACTION_DST) {
5626 nat_action_info.nat_action |= NAT_ACTION_DST_PORT;
5627 }
5628 }
5629 break;
5630 }
5cf3edb3
DDP
5631 case OVS_CT_ATTR_UNSPEC:
5632 case __OVS_CT_ATTR_MAX:
5633 OVS_NOT_REACHED();
5634 }
5635 }
5636
4cddb1f0
DB
5637 /* We won't be able to function properly in this case, hence
5638 * complain loudly. */
5639 if (nat_config && !commit) {
5640 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
5641 VLOG_WARN_RL(&rl, "NAT specified without commit.");
5642 }
5643
a76a37ef 5644 conntrack_execute(&dp->conntrack, packets_, aux->flow->dl_type, force,
4cddb1f0 5645 commit, zone, setmark, setlabel, helper,
94053e66 5646 nat_action_info_ref, now);
07659514 5647 break;
5cf3edb3 5648 }
07659514 5649
5dddf960 5650 case OVS_ACTION_ATTR_METER:
4b27db64
JR
5651 dp_netdev_run_meter(pmd->dp, packets_, nl_attr_get_u32(a),
5652 time_msec());
5653 break;
5654
09f9da0b
JR
5655 case OVS_ACTION_ATTR_PUSH_VLAN:
5656 case OVS_ACTION_ATTR_POP_VLAN:
5657 case OVS_ACTION_ATTR_PUSH_MPLS:
5658 case OVS_ACTION_ATTR_POP_MPLS:
5659 case OVS_ACTION_ATTR_SET:
6d670e7f 5660 case OVS_ACTION_ATTR_SET_MASKED:
09f9da0b 5661 case OVS_ACTION_ATTR_SAMPLE:
53e1d6f1 5662 case OVS_ACTION_ATTR_HASH:
09f9da0b 5663 case OVS_ACTION_ATTR_UNSPEC:
aaca4fe0 5664 case OVS_ACTION_ATTR_TRUNC:
6fcecb85
YY
5665 case OVS_ACTION_ATTR_PUSH_ETH:
5666 case OVS_ACTION_ATTR_POP_ETH:
535e3acf 5667 case OVS_ACTION_ATTR_CLONE:
1fc11c59
JS
5668 case OVS_ACTION_ATTR_ENCAP_NSH:
5669 case OVS_ACTION_ATTR_DECAP_NSH:
09f9da0b
JR
5670 case __OVS_ACTION_ATTR_MAX:
5671 OVS_NOT_REACHED();
da546e07 5672 }
ac8c2081 5673
1895cc8d 5674 dp_packet_delete_batch(packets_, may_steal);
98403001
BP
5675}
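/* A note on the depth guard used by the TUNNEL_PUSH/POP and RECIRC cases
 * above: each nested dp_netdev_recirculate() call runs with '*depth'
 * incremented, and MAX_RECIRC_DEPTH (5) bounds the nesting.  Once the
 * limit is reached the action no longer re-injects the packets; control
 * falls through to the dp_packet_delete_batch() call at the end of
 * dp_execute_cb(), which frees them only when 'may_steal' is true. */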
5676
4edb9ae9 5677static void
65f13b50 5678dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
1895cc8d 5679 struct dp_packet_batch *packets,
66e4ad8a 5680 bool may_steal, const struct flow *flow,
324c8374
IM
5681 const struct nlattr *actions, size_t actions_len,
5682 long long now)
72865317 5683{
66e4ad8a 5684 struct dp_netdev_execute_aux aux = { pmd, now, flow };
9080a111 5685
1895cc8d 5686 odp_execute_actions(&aux, packets, may_steal, actions,
8cbf4f47 5687 actions_len, dp_execute_cb);
72865317
BP
5688}
5689
4d4e68ed
DDP
5690struct dp_netdev_ct_dump {
5691 struct ct_dpif_dump_state up;
5692 struct conntrack_dump dump;
5693 struct conntrack *ct;
5694 struct dp_netdev *dp;
5695};
5696
5697static int
5698dpif_netdev_ct_dump_start(struct dpif *dpif, struct ct_dpif_dump_state **dump_,
ded30c74 5699 const uint16_t *pzone, int *ptot_bkts)
4d4e68ed
DDP
5700{
5701 struct dp_netdev *dp = get_dp_netdev(dpif);
5702 struct dp_netdev_ct_dump *dump;
5703
5704 dump = xzalloc(sizeof *dump);
5705 dump->dp = dp;
5706 dump->ct = &dp->conntrack;
5707
ded30c74 5708 conntrack_dump_start(&dp->conntrack, &dump->dump, pzone, ptot_bkts);
4d4e68ed
DDP
5709
5710 *dump_ = &dump->up;
5711
5712 return 0;
5713}
5714
5715static int
5716dpif_netdev_ct_dump_next(struct dpif *dpif OVS_UNUSED,
5717 struct ct_dpif_dump_state *dump_,
5718 struct ct_dpif_entry *entry)
5719{
5720 struct dp_netdev_ct_dump *dump;
5721
5722 INIT_CONTAINER(dump, dump_, up);
5723
5724 return conntrack_dump_next(&dump->dump, entry);
5725}
5726
5727static int
5728dpif_netdev_ct_dump_done(struct dpif *dpif OVS_UNUSED,
5729 struct ct_dpif_dump_state *dump_)
5730{
5731 struct dp_netdev_ct_dump *dump;
5732 int err;
5733
5734 INIT_CONTAINER(dump, dump_, up);
5735
5736 err = conntrack_dump_done(&dump->dump);
5737
5738 free(dump);
5739
5740 return err;
5741}
5742
5d9cbb4c
DDP
5743static int
5744dpif_netdev_ct_flush(struct dpif *dpif, const uint16_t *zone)
5745{
5746 struct dp_netdev *dp = get_dp_netdev(dpif);
5747
5748 return conntrack_flush(&dp->conntrack, zone);
5749}
5750
72865317 5751const struct dpif_class dpif_netdev_class = {
72865317 5752 "netdev",
6553d06b 5753 dpif_netdev_init,
2197d7ab 5754 dpif_netdev_enumerate,
0aeaabc8 5755 dpif_netdev_port_open_type,
72865317
BP
5756 dpif_netdev_open,
5757 dpif_netdev_close,
7dab847a 5758 dpif_netdev_destroy,
e4cfed38
PS
5759 dpif_netdev_run,
5760 dpif_netdev_wait,
72865317 5761 dpif_netdev_get_stats,
72865317
BP
5762 dpif_netdev_port_add,
5763 dpif_netdev_port_del,
3eb67853 5764 dpif_netdev_port_set_config,
72865317
BP
5765 dpif_netdev_port_query_by_number,
5766 dpif_netdev_port_query_by_name,
98403001 5767 NULL, /* port_get_pid */
b0ec0f27
BP
5768 dpif_netdev_port_dump_start,
5769 dpif_netdev_port_dump_next,
5770 dpif_netdev_port_dump_done,
72865317
BP
5771 dpif_netdev_port_poll,
5772 dpif_netdev_port_poll_wait,
72865317 5773 dpif_netdev_flow_flush,
ac64794a
BP
5774 dpif_netdev_flow_dump_create,
5775 dpif_netdev_flow_dump_destroy,
5776 dpif_netdev_flow_dump_thread_create,
5777 dpif_netdev_flow_dump_thread_destroy,
704a1e09 5778 dpif_netdev_flow_dump_next,
1a0c894a 5779 dpif_netdev_operate,
6b31e073
RW
5780 NULL, /* recv_set */
5781 NULL, /* handlers_set */
d4f6865c 5782 dpif_netdev_set_config,
5bf93d67 5783 dpif_netdev_queue_to_priority,
6b31e073
RW
5784 NULL, /* recv */
5785 NULL, /* recv_wait */
5786 NULL, /* recv_purge */
e4e74c3a 5787 dpif_netdev_register_dp_purge_cb,
6b31e073
RW
5788 dpif_netdev_register_upcall_cb,
5789 dpif_netdev_enable_upcall,
5790 dpif_netdev_disable_upcall,
b5cbbcf6 5791 dpif_netdev_get_datapath_version,
4d4e68ed
DDP
5792 dpif_netdev_ct_dump_start,
5793 dpif_netdev_ct_dump_next,
5794 dpif_netdev_ct_dump_done,
5d9cbb4c 5795 dpif_netdev_ct_flush,
5dddf960
JR
5796 dpif_netdev_meter_get_features,
5797 dpif_netdev_meter_set,
5798 dpif_netdev_meter_get,
5799 dpif_netdev_meter_del,
72865317 5800};
614c4892 5801
74cc3969
BP
5802static void
5803dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED,
5804 const char *argv[], void *aux OVS_UNUSED)
5805{
e9985d6a 5806 struct dp_netdev_port *port;
74cc3969 5807 struct dp_netdev *dp;
ff073a71 5808 odp_port_t port_no;
74cc3969 5809
8a4e3a85 5810 ovs_mutex_lock(&dp_netdev_mutex);
74cc3969
BP
5811 dp = shash_find_data(&dp_netdevs, argv[1]);
5812 if (!dp || !dpif_netdev_class_is_dummy(dp->class)) {
8a4e3a85 5813 ovs_mutex_unlock(&dp_netdev_mutex);
74cc3969
BP
5814 unixctl_command_reply_error(conn, "unknown datapath or not a dummy");
5815 return;
5816 }
8a4e3a85
BP
5817 ovs_refcount_ref(&dp->ref_cnt);
5818 ovs_mutex_unlock(&dp_netdev_mutex);
74cc3969 5819
59e6d833 5820 ovs_mutex_lock(&dp->port_mutex);
e9985d6a 5821 if (get_port_by_name(dp, argv[2], &port)) {
74cc3969 5822 unixctl_command_reply_error(conn, "unknown port");
8a4e3a85 5823 goto exit;
74cc3969
BP
5824 }
5825
ff073a71
BP
5826 port_no = u32_to_odp(atoi(argv[3]));
5827 if (!port_no || port_no == ODPP_NONE) {
74cc3969 5828 unixctl_command_reply_error(conn, "bad port number");
8a4e3a85 5829 goto exit;
74cc3969 5830 }
ff073a71 5831 if (dp_netdev_lookup_port(dp, port_no)) {
74cc3969 5832 unixctl_command_reply_error(conn, "port number already in use");
8a4e3a85 5833 goto exit;
74cc3969 5834 }
59e6d833 5835
e9985d6a
DDP
5836 /* Remove port. */
5837 hmap_remove(&dp->ports, &port->node);
e32971b8 5838 reconfigure_datapath(dp);
59e6d833 5839
e9985d6a
DDP
5840 /* Reinsert with new port number. */
5841 port->port_no = port_no;
5842 hmap_insert(&dp->ports, &port->node, hash_port_no(port_no));
e32971b8 5843 reconfigure_datapath(dp);
59e6d833 5844
d33ed218 5845 seq_change(dp->port_seq);
74cc3969 5846 unixctl_command_reply(conn, NULL);
8a4e3a85
BP
5847
5848exit:
59e6d833 5849 ovs_mutex_unlock(&dp->port_mutex);
8a4e3a85 5850 dp_netdev_unref(dp);
74cc3969
BP
5851}
5852
0cbfe35d
BP
5853static void
5854dpif_dummy_register__(const char *type)
5855{
5856 struct dpif_class *class;
5857
5858 class = xmalloc(sizeof *class);
5859 *class = dpif_netdev_class;
5860 class->type = xstrdup(type);
5861 dp_register_provider(class);
5862}
5863
8420c7ad
BP
5864static void
5865dpif_dummy_override(const char *type)
5866{
65d43fdc
YT
5867 int error;
5868
5869 /*
5870 * Ignore EAFNOSUPPORT to allow --enable-dummy=system with
5871 * a userland-only build. It's useful for the testsuite.
5872 */
5873 error = dp_unregister_provider(type);
5874 if (error == 0 || error == EAFNOSUPPORT) {
8420c7ad
BP
5875 dpif_dummy_register__(type);
5876 }
5877}
5878
614c4892 5879void
8420c7ad 5880dpif_dummy_register(enum dummy_level level)
614c4892 5881{
8420c7ad 5882 if (level == DUMMY_OVERRIDE_ALL) {
0cbfe35d
BP
5883 struct sset types;
5884 const char *type;
5885
5886 sset_init(&types);
5887 dp_enumerate_types(&types);
5888 SSET_FOR_EACH (type, &types) {
8420c7ad 5889 dpif_dummy_override(type);
0cbfe35d
BP
5890 }
5891 sset_destroy(&types);
8420c7ad
BP
5892 } else if (level == DUMMY_OVERRIDE_SYSTEM) {
5893 dpif_dummy_override("system");
614c4892 5894 }
0cbfe35d
BP
5895
5896 dpif_dummy_register__("dummy");
74cc3969
BP
5897
5898 unixctl_command_register("dpif-dummy/change-port-number",
74467d5c 5899 "dp port new-number",
74cc3969 5900 3, 3, dpif_dummy_change_port_number, NULL);
614c4892 5901}
0de8783a
JR
5902\f
5903/* Datapath Classifier. */
5904
5905/* A set of rules that all have the same fields wildcarded. */
5906struct dpcls_subtable {
5907 /* The fields are only used by writers. */
5908 struct cmap_node cmap_node OVS_GUARDED; /* Within dpcls 'subtables_map'. */
5909
5910 /* These fields are accessed by readers. */
5911 struct cmap rules; /* Contains "struct dpcls_rule"s. */
3453b4d6
JS
5912 uint32_t hit_cnt; /* Number of match hits in subtable in current
5913 optimization interval. */
0de8783a
JR
5914 struct netdev_flow_key mask; /* Wildcards for fields (const). */
5915 /* 'mask' must be the last field, additional space is allocated here. */
5916};
5917
5918/* Initializes 'cls' as a classifier that initially contains no classification
5919 * rules. */
5920static void
5921dpcls_init(struct dpcls *cls)
5922{
5923 cmap_init(&cls->subtables_map);
da9cfca6 5924 pvector_init(&cls->subtables);
0de8783a
JR
5925}
5926
5927static void
5928dpcls_destroy_subtable(struct dpcls *cls, struct dpcls_subtable *subtable)
5929{
3453b4d6 5930 VLOG_DBG("Destroying subtable %p for in_port %d", subtable, cls->in_port);
da9cfca6 5931 pvector_remove(&cls->subtables, subtable);
0de8783a
JR
5932 cmap_remove(&cls->subtables_map, &subtable->cmap_node,
5933 subtable->mask.hash);
5934 cmap_destroy(&subtable->rules);
5935 ovsrcu_postpone(free, subtable);
5936}
5937
5938/* Destroys 'cls'. Rules within 'cls', if any, are not freed; this is the
5939 * caller's responsibility.
5940 * May only be called after all the readers have been terminated. */
5941static void
5942dpcls_destroy(struct dpcls *cls)
5943{
5944 if (cls) {
5945 struct dpcls_subtable *subtable;
5946
5947 CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
361d808d 5948 ovs_assert(cmap_count(&subtable->rules) == 0);
0de8783a
JR
5949 dpcls_destroy_subtable(cls, subtable);
5950 }
5951 cmap_destroy(&cls->subtables_map);
da9cfca6 5952 pvector_destroy(&cls->subtables);
0de8783a
JR
5953 }
5954}
5955
5956static struct dpcls_subtable *
5957dpcls_create_subtable(struct dpcls *cls, const struct netdev_flow_key *mask)
5958{
5959 struct dpcls_subtable *subtable;
5960
5961 /* Need to add one. */
caeb4906
JR
5962 subtable = xmalloc(sizeof *subtable
5963 - sizeof subtable->mask.mf + mask->len);
0de8783a 5964 cmap_init(&subtable->rules);
3453b4d6 5965 subtable->hit_cnt = 0;
0de8783a
JR
5966 netdev_flow_key_clone(&subtable->mask, mask);
5967 cmap_insert(&cls->subtables_map, &subtable->cmap_node, mask->hash);
3453b4d6 5968 /* Add the new subtable at the end of the pvector (with no hits yet) */
da9cfca6 5969 pvector_insert(&cls->subtables, subtable, 0);
84dbfb2b 5970 VLOG_DBG("Creating %"PRIuSIZE". subtable %p for in_port %d",
3453b4d6 5971 cmap_count(&cls->subtables_map), subtable, cls->in_port);
da9cfca6 5972 pvector_publish(&cls->subtables);
0de8783a
JR
5973
5974 return subtable;
5975}
5976
5977static inline struct dpcls_subtable *
5978dpcls_find_subtable(struct dpcls *cls, const struct netdev_flow_key *mask)
5979{
5980 struct dpcls_subtable *subtable;
5981
5982 CMAP_FOR_EACH_WITH_HASH (subtable, cmap_node, mask->hash,
5983 &cls->subtables_map) {
5984 if (netdev_flow_key_equal(&subtable->mask, mask)) {
5985 return subtable;
5986 }
5987 }
5988 return dpcls_create_subtable(cls, mask);
5989}
5990
3453b4d6
JS
5991
5992/* Periodically sort the dpcls subtable vectors according to hit counts. */
5993static void
5994dpcls_sort_subtable_vector(struct dpcls *cls)
5995{
5996 struct pvector *pvec = &cls->subtables;
5997 struct dpcls_subtable *subtable;
5998
5999 PVECTOR_FOR_EACH (subtable, pvec) {
6000 pvector_change_priority(pvec, subtable, subtable->hit_cnt);
6001 subtable->hit_cnt = 0;
6002 }
6003 pvector_publish(pvec);
6004}
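/* Worked example (hypothetical hit counts): if three subtables A, B and C
 * scored hit_cnt 10, 500 and 40 in the last interval, the loop above sets
 * their pvector priorities to those counts and zeroes the counters, so the
 * next published iteration order is B, C, A.  Keeping the busiest
 * subtables first is what makes the 'subtable_pos' early-exit accounting
 * in dpcls_lookup() pay off. */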
6005
6006static inline void
4809891b
KT
6007dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
6008 struct polled_queue *poll_list, int poll_cnt)
3453b4d6
JS
6009{
6010 struct dpcls *cls;
6011 long long int now = time_msec();
6012
4809891b
KT
6013 if (now > pmd->rxq_interval) {
6014 /* Get the cycles that were used to process each queue and store. */
6015 for (unsigned i = 0; i < poll_cnt; i++) {
6016 uint64_t rxq_cyc_curr = dp_netdev_rxq_get_cycles(poll_list[i].rxq,
6017 RXQ_CYCLES_PROC_CURR);
6018 dp_netdev_rxq_set_intrvl_cycles(poll_list[i].rxq, rxq_cyc_curr);
6019 dp_netdev_rxq_set_cycles(poll_list[i].rxq, RXQ_CYCLES_PROC_CURR,
6020 0);
6021 }
6022 /* Start new measuring interval */
6023 pmd->rxq_interval = now + PMD_RXQ_INTERVAL_LEN;
6024 }
6025
3453b4d6
JS
6026 if (now > pmd->next_optimization) {
6027 /* Try to obtain the flow lock to block out revalidator threads.
6028 * If not possible, just try next time. */
6029 if (!ovs_mutex_trylock(&pmd->flow_mutex)) {
6030 /* Optimize each classifier */
6031 CMAP_FOR_EACH (cls, node, &pmd->classifiers) {
6032 dpcls_sort_subtable_vector(cls);
6033 }
6034 ovs_mutex_unlock(&pmd->flow_mutex);
6035 /* Start new measuring interval */
6036 pmd->next_optimization = now + DPCLS_OPTIMIZATION_INTERVAL;
6037 }
6038 }
6039}
6040
0de8783a
JR
6041/* Insert 'rule' into 'cls'. */
6042static void
6043dpcls_insert(struct dpcls *cls, struct dpcls_rule *rule,
6044 const struct netdev_flow_key *mask)
6045{
6046 struct dpcls_subtable *subtable = dpcls_find_subtable(cls, mask);
6047
3453b4d6 6048 /* Refer to subtable's mask, also for later removal. */
0de8783a
JR
6049 rule->mask = &subtable->mask;
6050 cmap_insert(&subtable->rules, &rule->cmap_node, rule->flow.hash);
6051}
6052
6053/* Removes 'rule' from 'cls', also destructing the 'rule'. */
6054static void
6055dpcls_remove(struct dpcls *cls, struct dpcls_rule *rule)
6056{
6057 struct dpcls_subtable *subtable;
6058
6059 ovs_assert(rule->mask);
6060
3453b4d6 6061 /* Get subtable from reference in rule->mask. */
0de8783a 6062 INIT_CONTAINER(subtable, rule->mask, mask);
0de8783a
JR
6063 if (cmap_remove(&subtable->rules, &rule->cmap_node, rule->flow.hash)
6064 == 0) {
3453b4d6 6065 /* Delete empty subtable. */
0de8783a 6066 dpcls_destroy_subtable(cls, subtable);
da9cfca6 6067 pvector_publish(&cls->subtables);
0de8783a
JR
6068 }
6069}
6070
361d808d
JR
6071/* Returns true if 'target' satisfies 'rule': for each 1-bit in the rule's
6072 * mask, the corresponding bits in the rule's flow and in 'target' match. */
0de8783a
JR
6073static inline bool
6074dpcls_rule_matches_key(const struct dpcls_rule *rule,
6075 const struct netdev_flow_key *target)
6076{
09b0fa9c
JR
6077 const uint64_t *keyp = miniflow_get_values(&rule->flow.mf);
6078 const uint64_t *maskp = miniflow_get_values(&rule->mask->mf);
5fcff47b 6079 uint64_t value;
0de8783a 6080
5fcff47b
JR
6081 NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, target, rule->flow.mf.map) {
6082 if (OVS_UNLIKELY((value & *maskp++) != *keyp++)) {
0de8783a
JR
6083 return false;
6084 }
6085 }
6086 return true;
6087}
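/* Worked example of the masked comparison above, on a single 64-bit unit
 * with illustrative values: for a rule whose mask unit is 0xff00 and whose
 * (pre-masked) flow unit is 0x1200, a target unit of 0x12ab matches
 * because (0x12ab & 0xff00) == 0x1200, while 0x13ab fails because
 * (0x13ab & 0xff00) == 0x1300. */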
6088
5b1c9c78
FA
6089/* For each miniflow in 'keys', performs a classifier lookup, writing the
6090 * result into the corresponding slot in 'rules'. If a particular entry in
0de8783a
JR
6091 * 'keys' is NULL, it is skipped.
6092 *
6093 * This function is optimized for use in the userspace datapath and therefore
6094 * does not implement a lot of features available in the standard
6095 * classifier_lookup() function. Specifically, it does not implement
6096 * priorities, instead returning any rule which matches the flow.
6097 *
5b1c9c78 6098 * Returns true if all miniflows found a corresponding rule. */
0de8783a 6099static bool
3453b4d6
JS
6100dpcls_lookup(struct dpcls *cls, const struct netdev_flow_key keys[],
6101 struct dpcls_rule **rules, const size_t cnt,
6102 int *num_lookups_p)
0de8783a 6103{
5b1c9c78 6104 /* The received 'cnt' miniflows are the search-keys that will be processed
63906f18
BB
6105 * to find a matching entry into the available subtables.
6106 * The number of bits in map_type is equal to NETDEV_MAX_BURST. */
6107 typedef uint32_t map_type;
0de8783a 6108#define MAP_BITS (sizeof(map_type) * CHAR_BIT)
63906f18 6109 BUILD_ASSERT_DECL(MAP_BITS >= NETDEV_MAX_BURST);
0de8783a 6110
0de8783a
JR
6111 struct dpcls_subtable *subtable;
6112
63906f18
BB
6113 map_type keys_map = TYPE_MAXIMUM(map_type); /* Set all bits. */
6114 map_type found_map;
6115 uint32_t hashes[MAP_BITS];
6116 const struct cmap_node *nodes[MAP_BITS];
6117
6118 if (cnt != MAP_BITS) {
6119 keys_map >>= MAP_BITS - cnt; /* Clear extra bits. */
0de8783a
JR
6120 }
6121 memset(rules, 0, cnt * sizeof *rules);
6122
3453b4d6
JS
6123 int lookups_match = 0, subtable_pos = 1;
6124
5b1c9c78
FA
6125 /* The Datapath classifier - aka dpcls - is composed of subtables.
6126 * Subtables are dynamically created as needed when new rules are inserted.
6127 * Each subtable collects rules with matches on a specific subset of packet
6128 * fields as defined by the subtable's mask. We proceed to process every
6129 * search-key against each subtable, but when a match is found for a
6130 * search-key, the search for that key can stop because the rules are
6131 * non-overlapping. */
da9cfca6 6132 PVECTOR_FOR_EACH (subtable, &cls->subtables) {
63906f18
BB
6133 int i;
6134
6135 /* Compute hashes for the remaining keys. Each search-key is
6136 * masked with the subtable's mask to avoid hashing the wildcarded
6137 * bits. */
6138 ULLONG_FOR_EACH_1(i, keys_map) {
6139 hashes[i] = netdev_flow_key_hash_in_mask(&keys[i],
6140 &subtable->mask);
6141 }
6142 /* Lookup. */
6143 found_map = cmap_find_batch(&subtable->rules, keys_map, hashes, nodes);
6144 /* Check results. When the i-th bit of found_map is set, it means
6145 * that a set of nodes with a matching hash value was found for the
6146 * i-th search-key. Due to possible hash collisions we need to check
6147 * which of the found rules, if any, really matches our masked
6148 * search-key. */
6149 ULLONG_FOR_EACH_1(i, found_map) {
6150 struct dpcls_rule *rule;
6151
6152 CMAP_NODE_FOR_EACH (rule, cmap_node, nodes[i]) {
6153 if (OVS_LIKELY(dpcls_rule_matches_key(rule, &keys[i]))) {
6154 rules[i] = rule;
6155 /* Even at 20 Mpps the 32-bit hit_cnt cannot wrap
6156 * within one second optimization interval. */
6157 subtable->hit_cnt++;
6158 lookups_match += subtable_pos;
6159 goto next;
0de8783a 6160 }
0de8783a 6161 }
63906f18
BB
6162 /* None of the found rules was a match. Reset the i-th bit to
6163 * keep searching this key in the next subtable. */
6164 ULLONG_SET0(found_map, i); /* Did not match. */
6165 next:
6166 ; /* Keep Sparse happy. */
0de8783a 6167 }
63906f18
BB
6168 keys_map &= ~found_map; /* Clear the found rules. */
6169 if (!keys_map) {
3453b4d6
JS
6170 if (num_lookups_p) {
6171 *num_lookups_p = lookups_match;
6172 }
0de8783a
JR
6173 return true; /* All found. */
6174 }
3453b4d6
JS
6175 subtable_pos++;
6176 }
6177 if (num_lookups_p) {
6178 *num_lookups_p = lookups_match;
0de8783a
JR
6179 }
6180 return false; /* Some misses. */
6181}
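/* Illustrative walk-through of the bitmap bookkeeping above, assuming a
 * 4-key batch (cnt == 4) and two subtables S1 and S2:
 *
 *     keys_map = 0b1111                 all four keys unresolved
 *     S1: found_map = 0b0101            keys 0 and 2 survived the
 *                                       hash-collision check
 *     keys_map &= ~found_map -> 0b1010
 *     S2: found_map = 0b1010            keys 1 and 3 matched
 *     keys_map == 0 -> return true      (all found)
 *
 * A key whose candidate rules all fail dpcls_rule_matches_key() has its
 * 'found_map' bit cleared with ULLONG_SET0() and is retried against the
 * next subtable. */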