/* lib/dpif-netdev.c */
/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2016, 2017 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "dpif-netdev.h"

#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <unistd.h>

#ifdef DPDK_NETDEV
#include <rte_cycles.h>
#endif

#include "bitmap.h"
#include "cmap.h"
#include "conntrack.h"
#include "coverage.h"
#include "ct-dpif.h"
#include "csum.h"
#include "dp-packet.h"
#include "dpif.h"
#include "dpif-provider.h"
#include "dummy.h"
#include "fat-rwlock.h"
#include "flow.h"
#include "hmapx.h"
#include "id-pool.h"
#include "latch.h"
#include "netdev.h"
#include "netdev-vport.h"
#include "netlink.h"
#include "odp-execute.h"
#include "odp-util.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/match.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/ofp-util.h"
#include "openvswitch/ofpbuf.h"
#include "openvswitch/shash.h"
#include "openvswitch/vlog.h"
#include "ovs-numa.h"
#include "ovs-rcu.h"
#include "packets.h"
#include "openvswitch/poll-loop.h"
#include "pvector.h"
#include "random.h"
#include "seq.h"
#include "smap.h"
#include "sset.h"
#include "timeval.h"
#include "tnl-neigh-cache.h"
#include "tnl-ports.h"
#include "unixctl.h"
#include "util.h"

VLOG_DEFINE_THIS_MODULE(dpif_netdev);

#define FLOW_DUMP_MAX_BATCH 50
/* Use per thread recirc_depth to prevent recirculation loop. */
#define MAX_RECIRC_DEPTH 6
DEFINE_STATIC_PER_THREAD_DATA(uint32_t, recirc_depth, 0)

/* Configuration parameters. */
enum { MAX_FLOWS = 65536 };     /* Maximum number of flows in flow table. */
enum { MAX_METERS = 65536 };    /* Maximum number of meters. */
enum { MAX_BANDS = 8 };         /* Maximum number of bands / meter. */
enum { N_METER_LOCKS = 64 };    /* Number of meter locks. */

/* Protects against changes to 'dp_netdevs'. */
static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dp_netdev's. */
static struct shash dp_netdevs OVS_GUARDED_BY(dp_netdev_mutex)
    = SHASH_INITIALIZER(&dp_netdevs);

static struct vlog_rate_limit upcall_rl = VLOG_RATE_LIMIT_INIT(600, 600);

#define DP_NETDEV_CS_SUPPORTED_MASK (CS_NEW | CS_ESTABLISHED | CS_RELATED \
                                     | CS_INVALID | CS_REPLY_DIR | CS_TRACKED \
                                     | CS_SRC_NAT | CS_DST_NAT)
#define DP_NETDEV_CS_UNSUPPORTED_MASK (~(uint32_t)DP_NETDEV_CS_SUPPORTED_MASK)

static struct odp_support dp_netdev_support = {
    .max_vlan_headers = SIZE_MAX,
    .max_mpls_depth = SIZE_MAX,
    .recirc = true,
    .ct_state = true,
    .ct_zone = true,
    .ct_mark = true,
    .ct_label = true,
    .ct_state_nat = true,
    .ct_orig_tuple = true,
    .ct_orig_tuple6 = true,
};

/* Stores a miniflow with inline values */

struct netdev_flow_key {
    uint32_t hash;       /* Hash function differs for different users. */
    uint32_t len;        /* Length of the following miniflow (incl. map). */
    struct miniflow mf;
    uint64_t buf[FLOW_MAX_PACKET_U64S];
};

/* Exact match cache for frequently used flows
 *
 * The cache uses a 32-bit hash of the packet (which can be the RSS hash) to
 * search its entries for a miniflow that matches exactly the miniflow of the
 * packet.  It stores the 'dpcls_rule' (rule) that matches the miniflow.
 *
 * A cache entry holds a reference to its 'dp_netdev_flow'.
 *
 * A miniflow with a given hash can be in one of EM_FLOW_HASH_SEGS different
 * entries.  The 32-bit hash is split into EM_FLOW_HASH_SEGS values (each of
 * them is EM_FLOW_HASH_SHIFT bits wide and the remainder is thrown away).
 * Each value is the index of a cache entry where the miniflow could be.
 *
 *
 * Thread-safety
 * =============
 *
 * Each pmd_thread has its own private exact match cache.
 * If dp_netdev_input is not called from a pmd thread, a mutex is used.
 */

#define EM_FLOW_HASH_SHIFT 13
#define EM_FLOW_HASH_ENTRIES (1u << EM_FLOW_HASH_SHIFT)
#define EM_FLOW_HASH_MASK (EM_FLOW_HASH_ENTRIES - 1)
#define EM_FLOW_HASH_SEGS 2

/* Default EMC insert probability is 1 / DEFAULT_EM_FLOW_INSERT_INV_PROB */
#define DEFAULT_EM_FLOW_INSERT_INV_PROB 100
#define DEFAULT_EM_FLOW_INSERT_MIN (UINT32_MAX /                     \
                                    DEFAULT_EM_FLOW_INSERT_INV_PROB)

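/* Illustrative sketch (not part of the upstream file): the probabilistic
 * insertion check used later in this file compares a random 32-bit value
 * against 'emc_insert_min', so the default inverse probability of 100 means
 * roughly 1 in 100 eligible packets triggers an EMC insertion:
 *
 *     uint32_t min;
 *
 *     atomic_read_relaxed(&pmd->dp->emc_insert_min, &min);
 *     if (min && random_uint32() <= min) {
 *         emc_insert(&pmd->flow_cache, key, flow);
 *     }
 */
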
struct emc_entry {
    struct dp_netdev_flow *flow;
    struct netdev_flow_key key;   /* key.hash used for emc hash value. */
};

struct emc_cache {
    struct emc_entry entries[EM_FLOW_HASH_ENTRIES];
    int sweep_idx;                /* For emc_cache_slow_sweep(). */
};

/* Iterate in the exact match cache through every entry that might contain a
 * miniflow with hash 'HASH'. */
#define EMC_FOR_EACH_POS_WITH_HASH(EMC, CURRENT_ENTRY, HASH)                 \
    for (uint32_t i__ = 0, srch_hash__ = (HASH);                             \
         (CURRENT_ENTRY) = &(EMC)->entries[srch_hash__ & EM_FLOW_HASH_MASK], \
         i__ < EM_FLOW_HASH_SEGS;                                            \
         i__++, srch_hash__ >>= EM_FLOW_HASH_SHIFT)
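
/* Example use of the macro above (a simplified sketch; the real lookup
 * helper later in this file also compares the stored miniflow, which is
 * omitted here for brevity):
 *
 *     struct emc_entry *entry;
 *
 *     EMC_FOR_EACH_POS_WITH_HASH (cache, entry, key->hash) {
 *         if (entry->key.hash == key->hash && emc_entry_alive(entry)) {
 *             return entry->flow;
 *         }
 *     }
 */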
\f
/* Simple non-wildcarding single-priority classifier. */

/* Time in ms between successive optimizations of the dpcls subtable vector */
#define DPCLS_OPTIMIZATION_INTERVAL 1000

/* Time in ms of the interval in which rxq processing cycles used in
 * rxq to pmd assignments is measured and stored. */
#define PMD_RXQ_INTERVAL_LEN 10000

/* Number of intervals for which cycles are stored
 * and used during rxq to pmd assignment. */
#define PMD_RXQ_INTERVAL_MAX 6

struct dpcls {
    struct cmap_node node;      /* Within dp_netdev_pmd_thread.classifiers */
    odp_port_t in_port;
    struct cmap subtables_map;
    struct pvector subtables;
};

/* A rule to be inserted to the classifier. */
struct dpcls_rule {
    struct cmap_node cmap_node;   /* Within struct dpcls_subtable 'rules'. */
    struct netdev_flow_key *mask; /* Subtable's mask. */
    struct netdev_flow_key flow;  /* Matching key. */
    /* 'flow' must be the last field, additional space is allocated here. */
};

static void dpcls_init(struct dpcls *);
static void dpcls_destroy(struct dpcls *);
static void dpcls_sort_subtable_vector(struct dpcls *);
static void dpcls_insert(struct dpcls *, struct dpcls_rule *,
                         const struct netdev_flow_key *mask);
static void dpcls_remove(struct dpcls *, struct dpcls_rule *);
static bool dpcls_lookup(struct dpcls *cls,
                         const struct netdev_flow_key keys[],
                         struct dpcls_rule **rules, size_t cnt,
                         int *num_lookups_p);
\f
/* Set of supported meter flags */
#define DP_SUPPORTED_METER_FLAGS_MASK \
    (OFPMF13_STATS | OFPMF13_PKTPS | OFPMF13_KBPS | OFPMF13_BURST)

/* Set of supported meter band types */
#define DP_SUPPORTED_METER_BAND_TYPES \
    ( 1 << OFPMBT13_DROP )

struct dp_meter_band {
    struct ofputil_meter_band up; /* type, prec_level, pad, rate, burst_size */
    uint32_t bucket; /* In 1/1000 packets (for PKTPS), or in bits (for KBPS) */
    uint64_t packet_count;
    uint64_t byte_count;
};

struct dp_meter {
    uint16_t flags;
    uint16_t n_bands;
    uint32_t max_delta_t;
    uint64_t used;
    uint64_t packet_count;
    uint64_t byte_count;
    struct dp_meter_band bands[];
};

/* Datapath based on the network device interface from netdev.h.
 *
 *
 * Thread-safety
 * =============
 *
 * Some members, marked 'const', are immutable.  Accessing other members
 * requires synchronization, as noted in more detail below.
 *
 * Acquisition order is, from outermost to innermost:
 *
 *    dp_netdev_mutex (global)
 *    port_mutex
 *    non_pmd_mutex
 */
struct dp_netdev {
    const struct dpif_class *const class;
    const char *const name;
    struct dpif *dpif;
    struct ovs_refcount ref_cnt;
    atomic_flag destroyed;

    /* Ports.
     *
     * Any lookup into 'ports' or any access to the dp_netdev_ports found
     * through 'ports' requires taking 'port_mutex'. */
    struct ovs_mutex port_mutex;
    struct hmap ports;
    struct seq *port_seq;       /* Incremented whenever a port changes. */

    /* Meters. */
    struct ovs_mutex meter_locks[N_METER_LOCKS];
    struct dp_meter *meters[MAX_METERS]; /* Meter bands. */

    /* Probability of EMC insertions is a factor of 'emc_insert_min'. */
    OVS_ALIGNED_VAR(CACHE_LINE_SIZE) atomic_uint32_t emc_insert_min;

    /* Protects access to ofproto-dpif-upcall interface during revalidator
     * thread synchronization. */
    struct fat_rwlock upcall_rwlock;
    upcall_callback *upcall_cb;  /* Callback function for executing upcalls. */
    void *upcall_aux;

    /* Callback function for notifying the purging of dp flows (during
     * resetting pmd deletion). */
    dp_purge_callback *dp_purge_cb;
    void *dp_purge_aux;

    /* Stores all 'struct dp_netdev_pmd_thread's. */
    struct cmap poll_threads;
    /* id pool for per thread static_tx_qid. */
    struct id_pool *tx_qid_pool;
    struct ovs_mutex tx_qid_pool_mutex;

    /* Protects the access of the 'struct dp_netdev_pmd_thread'
     * instance for non-pmd thread. */
    struct ovs_mutex non_pmd_mutex;

    /* Each pmd thread will store its pointer to
     * 'struct dp_netdev_pmd_thread' in 'per_pmd_key'. */
    ovsthread_key_t per_pmd_key;

    struct seq *reconfigure_seq;
    uint64_t last_reconfigure_seq;

    /* Cpu mask for pinning of pmd threads. */
    char *pmd_cmask;

    uint64_t last_tnl_conf_seq;

    struct conntrack conntrack;
};

static void meter_lock(const struct dp_netdev *dp, uint32_t meter_id)
    OVS_ACQUIRES(dp->meter_locks[meter_id % N_METER_LOCKS])
{
    ovs_mutex_lock(&dp->meter_locks[meter_id % N_METER_LOCKS]);
}

static void meter_unlock(const struct dp_netdev *dp, uint32_t meter_id)
    OVS_RELEASES(dp->meter_locks[meter_id % N_METER_LOCKS])
{
    ovs_mutex_unlock(&dp->meter_locks[meter_id % N_METER_LOCKS]);
}

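/* Usage sketch (illustrative): accesses to 'dp->meters[meter_id]' are
 * bracketed by the striped lock that covers that id, e.g.:
 *
 *     meter_lock(dp, meter_id);
 *     struct dp_meter *meter = dp->meters[meter_id];
 *     ...read or update 'meter'...
 *     meter_unlock(dp, meter_id);
 *
 * Because N_METER_LOCKS (64) is much smaller than MAX_METERS (65536), each
 * lock covers many meter ids, trading a little contention for memory. */
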
static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp,
                                                    odp_port_t)
    OVS_REQUIRES(dp->port_mutex);

enum dp_stat_type {
    DP_STAT_EXACT_HIT,          /* Packets that had an exact match (emc). */
    DP_STAT_MASKED_HIT,         /* Packets that matched in the flow table. */
    DP_STAT_MISS,               /* Packets that did not match. */
    DP_STAT_LOST,               /* Packets not passed up to the client. */
    DP_STAT_LOOKUP_HIT,         /* Number of subtable lookups for flow table
                                   hits */
    DP_N_STATS
};

enum pmd_cycles_counter_type {
    PMD_CYCLES_IDLE,            /* Cycles spent idle or unsuccessful polling */
    PMD_CYCLES_PROCESSING,      /* Cycles spent successfully polling and
                                 * processing polled packets */
    PMD_N_CYCLES
};

enum rxq_cycles_counter_type {
    RXQ_CYCLES_PROC_CURR,       /* Cycles spent successfully polling and
                                   processing packets during the current
                                   interval. */
    RXQ_CYCLES_PROC_HIST,       /* Total cycles of all intervals that are used
                                   during rxq to pmd assignment. */
    RXQ_N_CYCLES
};

#define XPS_TIMEOUT_MS 500LL

/* Contained by struct dp_netdev_port's 'rxqs' member. */
struct dp_netdev_rxq {
    struct dp_netdev_port *port;
    struct netdev_rxq *rx;
    unsigned core_id;                  /* Core to which this queue should be
                                          pinned.  OVS_CORE_UNSPEC if the
                                          queue doesn't need to be pinned to a
                                          particular core. */
    unsigned intrvl_idx;               /* Write index for 'cycles_intrvl'. */
    struct dp_netdev_pmd_thread *pmd;  /* pmd thread that polls this queue. */

    /* Counters of cycles spent successfully polling and processing pkts. */
    atomic_ullong cycles[RXQ_N_CYCLES];
    /* We store PMD_RXQ_INTERVAL_MAX intervals of data for an rxq and then
       sum them to yield the cycles used for an rxq. */
    atomic_ullong cycles_intrvl[PMD_RXQ_INTERVAL_MAX];
};

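/* Illustrative sketch: the figure used for rxq-to-pmd assignment is the sum
 * of the stored intervals, so with PMD_RXQ_INTERVAL_LEN (10000 ms) and
 * PMD_RXQ_INTERVAL_MAX (6) it reflects roughly the last minute of activity:
 *
 *     uint64_t total = 0;
 *
 *     for (unsigned i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) {
 *         total += dp_netdev_rxq_get_intrvl_cycles(rxq, i);
 *     }
 */
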
/* A port in a netdev-based datapath. */
struct dp_netdev_port {
    odp_port_t port_no;
    bool dynamic_txqs;          /* If true XPS will be used. */
    bool need_reconfigure;      /* True if we should reconfigure netdev. */
    struct netdev *netdev;
    struct hmap_node node;      /* Node in dp_netdev's 'ports'. */
    struct netdev_saved_flags *sf;
    struct dp_netdev_rxq *rxqs;
    unsigned n_rxq;             /* Number of elements in 'rxqs' */
    unsigned *txq_used;         /* Number of threads that use each tx queue. */
    struct ovs_mutex txq_used_mutex;
    char *type;                 /* Port type as requested by user. */
    char *rxq_affinity_list;    /* Requested affinity of rx queues. */
};

/* Contained by struct dp_netdev_flow's 'stats' member.  */
struct dp_netdev_flow_stats {
    atomic_llong used;             /* Last used time, in monotonic msecs. */
    atomic_ullong packet_count;    /* Number of packets matched. */
    atomic_ullong byte_count;      /* Number of bytes matched. */
    atomic_uint16_t tcp_flags;     /* Bitwise-OR of seen tcp_flags values. */
};

/* A flow in 'dp_netdev_pmd_thread's 'flow_table'.
 *
 *
 * Thread-safety
 * =============
 *
 * Except near the beginning or ending of its lifespan, rule 'rule' belongs to
 * its pmd thread's classifier.  The text below calls this classifier 'cls'.
 *
 * Motivation
 * ----------
 *
 * The thread safety rules described here for "struct dp_netdev_flow" are
 * motivated by two goals:
 *
 *    - Prevent threads that read members of "struct dp_netdev_flow" from
 *      reading bad data due to changes by some thread concurrently modifying
 *      those members.
 *
 *    - Prevent two threads making changes to members of a given "struct
 *      dp_netdev_flow" from interfering with each other.
 *
 *
 * Rules
 * -----
 *
 * A flow 'flow' may be accessed without a risk of being freed during an RCU
 * grace period.  Code that needs to hold onto a flow for a while
 * should try incrementing 'flow->ref_cnt' with dp_netdev_flow_ref().
 *
 * 'flow->ref_cnt' protects 'flow' from being freed.  It doesn't protect the
 * flow from being deleted from 'cls' and it doesn't protect members of 'flow'
 * from modification.
 *
 * Some members, marked 'const', are immutable.  Accessing other members
 * requires synchronization, as noted in more detail below.
 */
struct dp_netdev_flow {
    const struct flow flow;      /* Unmasked flow that created this entry. */
    /* Hash table index by unmasked flow. */
    const struct cmap_node node; /* In owning dp_netdev_pmd_thread's */
                                 /* 'flow_table'. */
    const ovs_u128 ufid;         /* Unique flow identifier. */
    const unsigned pmd_id;       /* The 'core_id' of pmd thread owning this */
                                 /* flow. */

    /* Number of references.
     * The classifier owns one reference.
     * Any thread trying to keep a rule from being freed should hold its own
     * reference. */
    struct ovs_refcount ref_cnt;

    bool dead;

    /* Statistics. */
    struct dp_netdev_flow_stats stats;

    /* Actions. */
    OVSRCU_TYPE(struct dp_netdev_actions *) actions;

    /* While processing a group of input packets, the datapath uses the next
     * member to store a pointer to the output batch for the flow.  It is
     * reset after the batch has been sent out (See dp_netdev_queue_batches(),
     * packet_batch_per_flow_init() and packet_batch_per_flow_execute()). */
    struct packet_batch_per_flow *batch;

    /* Packet classification. */
    struct dpcls_rule cr;        /* In owning dp_netdev's 'cls'. */
    /* 'cr' must be the last member. */
};

static void dp_netdev_flow_unref(struct dp_netdev_flow *);
static bool dp_netdev_flow_ref(struct dp_netdev_flow *);
static int dpif_netdev_flow_from_nlattrs(const struct nlattr *, uint32_t,
                                         struct flow *, bool);

/* A set of datapath actions within a "struct dp_netdev_flow".
 *
 *
 * Thread-safety
 * =============
 *
 * A struct dp_netdev_actions 'actions' is protected with RCU. */
struct dp_netdev_actions {
    /* These members are immutable: they do not change during the struct's
     * lifetime. */
    unsigned int size;          /* Size of 'actions', in bytes. */
    struct nlattr actions[];    /* Sequence of OVS_ACTION_ATTR_* attributes. */
};

struct dp_netdev_actions *dp_netdev_actions_create(const struct nlattr *,
                                                   size_t);
struct dp_netdev_actions *dp_netdev_flow_get_actions(
    const struct dp_netdev_flow *);
static void dp_netdev_actions_free(struct dp_netdev_actions *);

/* Contained by struct dp_netdev_pmd_thread's 'stats' member.  */
struct dp_netdev_pmd_stats {
    /* Indexed by DP_STAT_*. */
    atomic_ullong n[DP_N_STATS];
};

/* Contained by struct dp_netdev_pmd_thread's 'cycle' member.  */
struct dp_netdev_pmd_cycles {
    /* Indexed by PMD_CYCLES_*. */
    atomic_ullong n[PMD_N_CYCLES];
};

struct polled_queue {
    struct dp_netdev_rxq *rxq;
    odp_port_t port_no;
};

/* Contained by struct dp_netdev_pmd_thread's 'poll_list' member. */
struct rxq_poll {
    struct dp_netdev_rxq *rxq;
    struct hmap_node node;
};

/* Contained by struct dp_netdev_pmd_thread's 'send_port_cache',
 * 'tnl_port_cache' or 'tx_ports'. */
struct tx_port {
    struct dp_netdev_port *port;
    int qid;
    long long last_used;
    struct hmap_node node;
};

/* PMD: Poll mode drivers.  A PMD accesses devices via polling to eliminate
 * the performance overhead of interrupt processing.  Therefore netdev can
 * not implement rx-wait for these devices.  dpif-netdev needs to poll
 * these devices to check for received packets.  A pmd thread does the
 * polling for devices assigned to itself.
 *
 * DPDK uses PMDs for accessing NICs.
 *
 * Note, the instance with cpu core id NON_PMD_CORE_ID will be reserved for
 * I/O of all non-pmd threads.  There will be no actual thread created
 * for the instance.
 *
 * Each struct has its own flow cache and classifier per managed ingress port.
 * For packets received on an ingress port, a lookup is done in the
 * corresponding PMD thread's flow cache and, in case of a miss, in the
 * corresponding classifier of the port.  Packets are executed with the found
 * actions in either case.
 */
struct dp_netdev_pmd_thread {
    PADDED_MEMBERS_CACHELINE_MARKER(CACHE_LINE_SIZE, cacheline0,
        struct dp_netdev *dp;
        struct cmap_node node;          /* In 'dp->poll_threads'. */
        pthread_cond_t cond;            /* For synchronizing pmd thread
                                           reload. */
    );

    PADDED_MEMBERS_CACHELINE_MARKER(CACHE_LINE_SIZE, cacheline1,
        struct ovs_mutex cond_mutex;    /* Mutex for condition variable. */
        pthread_t thread;
        unsigned core_id;               /* CPU core id of this pmd thread. */
        int numa_id;                    /* numa node id of this pmd thread. */
    );

    /* Per thread exact-match cache.  Note, the instance for cpu core
     * NON_PMD_CORE_ID can be accessed by multiple threads, and thus needs
     * to be protected by 'non_pmd_mutex'.  Every other instance
     * will only be accessed by its own pmd thread. */
    OVS_ALIGNED_VAR(CACHE_LINE_SIZE) struct emc_cache flow_cache;
    struct ovs_refcount ref_cnt;    /* Every reference must be refcount'ed. */

    /* Queue id used by this pmd thread to send packets on all netdevs if
     * XPS is disabled for this netdev.  All static_tx_qid's are unique and
     * less than 'cmap_count(dp->poll_threads)'. */
    uint32_t static_tx_qid;

    /* Flow-Table and classifiers
     *
     * Writers of 'flow_table' must take the 'flow_mutex'.  Corresponding
     * changes to 'classifiers' must be made while still holding the
     * 'flow_mutex'.
     */
    struct ovs_mutex flow_mutex;
    PADDED_MEMBERS(CACHE_LINE_SIZE,
        struct cmap flow_table OVS_GUARDED; /* Flow table. */

        /* One classifier per in_port polled by the pmd */
        struct cmap classifiers;
        /* Periodically sort subtable vectors according to hit frequencies */
        long long int next_optimization;
        /* End of the next time interval for which processing cycles
           are stored for each polled rxq. */
        long long int rxq_next_cycle_store;

        /* Cycles counters */
        struct dp_netdev_pmd_cycles cycles;

        /* Used to count cycles.  See 'cycles_counter_end()'. */
        unsigned long long last_cycles;
        struct latch exit_latch;        /* For terminating the pmd thread. */
    );

    PADDED_MEMBERS(CACHE_LINE_SIZE,
        /* Statistics. */
        struct dp_netdev_pmd_stats stats;

        struct seq *reload_seq;
        uint64_t last_reload_seq;
        atomic_bool reload;             /* Do we need to reload ports? */
        bool isolated;

        /* Set to true if the pmd thread needs to be reloaded. */
        bool need_reload;
        /* 5 pad bytes. */
    );

    PADDED_MEMBERS(CACHE_LINE_SIZE,
        struct ovs_mutex port_mutex;    /* Mutex for 'poll_list'
                                           and 'tx_ports'. */
        /* 16 pad bytes. */
    );
    PADDED_MEMBERS(CACHE_LINE_SIZE,
        /* List of rx queues to poll. */
        struct hmap poll_list OVS_GUARDED;
        /* Map of 'tx_port's used for transmission.  Written by the main
         * thread, read by the pmd thread. */
        struct hmap tx_ports OVS_GUARDED;
    );
    PADDED_MEMBERS(CACHE_LINE_SIZE,
        /* These are thread-local copies of 'tx_ports'.  One contains only
         * tunnel ports (that support push_tunnel/pop_tunnel), the other
         * contains ports with at least one txq (that support send).
         * A port can be in both.
         *
         * There are two separate maps to make sure that we don't try to
         * execute OUTPUT on a device which has 0 txqs or PUSH/POP on a
         * non-tunnel device.
         *
         * The instances for cpu core NON_PMD_CORE_ID can be accessed by
         * multiple threads and thus need to be protected by 'non_pmd_mutex'.
         * Every other instance will only be accessed by its own pmd thread. */
        struct hmap tnl_port_cache;
        struct hmap send_port_cache;
    );

    PADDED_MEMBERS(CACHE_LINE_SIZE,
        /* Only a pmd thread can write on its own 'cycles' and 'stats'.
         * The main thread keeps 'stats_zero' and 'cycles_zero' as base
         * values and subtracts them from 'stats' and 'cycles' before
         * reporting to the user */
        unsigned long long stats_zero[DP_N_STATS];
        uint64_t cycles_zero[PMD_N_CYCLES];
        /* 8 pad bytes. */
    );
};

/* Interface to netdev-based datapath. */
struct dpif_netdev {
    struct dpif dpif;
    struct dp_netdev *dp;
    uint64_t last_port_seq;
};

static int get_port_by_number(struct dp_netdev *dp, odp_port_t port_no,
                              struct dp_netdev_port **portp)
    OVS_REQUIRES(dp->port_mutex);
static int get_port_by_name(struct dp_netdev *dp, const char *devname,
                            struct dp_netdev_port **portp)
    OVS_REQUIRES(dp->port_mutex);
static void dp_netdev_free(struct dp_netdev *)
    OVS_REQUIRES(dp_netdev_mutex);
static int do_add_port(struct dp_netdev *dp, const char *devname,
                       const char *type, odp_port_t port_no)
    OVS_REQUIRES(dp->port_mutex);
static void do_del_port(struct dp_netdev *dp, struct dp_netdev_port *)
    OVS_REQUIRES(dp->port_mutex);
static int dpif_netdev_open(const struct dpif_class *, const char *name,
                            bool create, struct dpif **);
static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
                                      struct dp_packet_batch *,
                                      bool may_steal, const struct flow *flow,
                                      const struct nlattr *actions,
                                      size_t actions_len,
                                      long long now);
static void dp_netdev_input(struct dp_netdev_pmd_thread *,
                            struct dp_packet_batch *, odp_port_t port_no);
static void dp_netdev_recirculate(struct dp_netdev_pmd_thread *,
                                  struct dp_packet_batch *);

static void dp_netdev_disable_upcall(struct dp_netdev *);
static void dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd,
                                    struct dp_netdev *dp, unsigned core_id,
                                    int numa_id);
static void dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_set_nonpmd(struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex);

static void *pmd_thread_main(void *);
static struct dp_netdev_pmd_thread *dp_netdev_get_pmd(struct dp_netdev *dp,
                                                      unsigned core_id);
static struct dp_netdev_pmd_thread *
dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos);
static void dp_netdev_del_pmd(struct dp_netdev *dp,
                              struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_destroy_all_pmds(struct dp_netdev *dp, bool non_pmd);
static void dp_netdev_pmd_clear_ports(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread *pmd,
                                         struct dp_netdev_port *port)
    OVS_REQUIRES(pmd->port_mutex);
static void dp_netdev_del_port_tx_from_pmd(struct dp_netdev_pmd_thread *pmd,
                                           struct tx_port *tx)
    OVS_REQUIRES(pmd->port_mutex);
static void dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd,
                                     struct dp_netdev_rxq *rxq)
    OVS_REQUIRES(pmd->port_mutex);
static void dp_netdev_del_rxq_from_pmd(struct dp_netdev_pmd_thread *pmd,
                                       struct rxq_poll *poll)
    OVS_REQUIRES(pmd->port_mutex);
static void reconfigure_datapath(struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex);
static bool dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd);
static void pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd)
    OVS_REQUIRES(pmd->port_mutex);
static inline void
dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
                           struct polled_queue *poll_list, int poll_cnt);
static void
dp_netdev_rxq_set_cycles(struct dp_netdev_rxq *rx,
                         enum rxq_cycles_counter_type type,
                         unsigned long long cycles);
static uint64_t
dp_netdev_rxq_get_cycles(struct dp_netdev_rxq *rx,
                         enum rxq_cycles_counter_type type);
static void
dp_netdev_rxq_set_intrvl_cycles(struct dp_netdev_rxq *rx,
                                unsigned long long cycles);
static uint64_t
dp_netdev_rxq_get_intrvl_cycles(struct dp_netdev_rxq *rx, unsigned idx);
static void
dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
                               long long now, bool purge);
static int dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread *pmd,
                                      struct tx_port *tx, long long now);

static inline bool emc_entry_alive(struct emc_entry *ce);
static void emc_clear_entry(struct emc_entry *ce);

static void dp_netdev_request_reconfigure(struct dp_netdev *dp);

static void
emc_cache_init(struct emc_cache *flow_cache)
{
    int i;

    flow_cache->sweep_idx = 0;
    for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
        flow_cache->entries[i].flow = NULL;
        flow_cache->entries[i].key.hash = 0;
        flow_cache->entries[i].key.len = sizeof(struct miniflow);
        flowmap_init(&flow_cache->entries[i].key.mf.map);
    }
}

static void
emc_cache_uninit(struct emc_cache *flow_cache)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
        emc_clear_entry(&flow_cache->entries[i]);
    }
}

/* Check and clear dead flow references slowly (one entry at each
 * invocation). */
static void
emc_cache_slow_sweep(struct emc_cache *flow_cache)
{
    struct emc_entry *entry = &flow_cache->entries[flow_cache->sweep_idx];

    if (!emc_entry_alive(entry)) {
        emc_clear_entry(entry);
    }
    flow_cache->sweep_idx = (flow_cache->sweep_idx + 1) & EM_FLOW_HASH_MASK;
}

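/* A note on cost (added for clarity): with EM_FLOW_HASH_ENTRIES = 1 << 13,
 * a complete pass over the cache takes 8192 invocations, so calling this
 * once per pmd main-loop iteration amortizes the sweep to a negligible
 * per-iteration overhead. */
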
/* Returns true if 'dpif' is a netdev or dummy dpif, false otherwise. */
bool
dpif_is_netdev(const struct dpif *dpif)
{
    return dpif->dpif_class->open == dpif_netdev_open;
}

static struct dpif_netdev *
dpif_netdev_cast(const struct dpif *dpif)
{
    ovs_assert(dpif_is_netdev(dpif));
    return CONTAINER_OF(dpif, struct dpif_netdev, dpif);
}

static struct dp_netdev *
get_dp_netdev(const struct dpif *dpif)
{
    return dpif_netdev_cast(dpif)->dp;
}
\f
enum pmd_info_type {
    PMD_INFO_SHOW_STATS,  /* Show how cpu cycles are spent. */
    PMD_INFO_CLEAR_STATS, /* Set the cycles count to 0. */
    PMD_INFO_SHOW_RXQ     /* Show poll-lists of pmd threads. */
};

static void
pmd_info_show_stats(struct ds *reply,
                    struct dp_netdev_pmd_thread *pmd,
                    unsigned long long stats[DP_N_STATS],
                    uint64_t cycles[PMD_N_CYCLES])
{
    unsigned long long total_packets;
    uint64_t total_cycles = 0;
    int i;

    /* These loops subtract reference values ('*_zero') from the counters.
     * Since loads and stores are relaxed, it might be possible for a '*_zero'
     * value to be more recent than the current value we're reading from the
     * counter.  This is not a big problem, since these numbers are not
     * supposed to be too accurate, but we should at least make sure that
     * the result is not negative. */
    for (i = 0; i < DP_N_STATS; i++) {
        if (stats[i] > pmd->stats_zero[i]) {
            stats[i] -= pmd->stats_zero[i];
        } else {
            stats[i] = 0;
        }
    }

    /* Sum of all the matched and not matched packets gives the total. */
    total_packets = stats[DP_STAT_EXACT_HIT] + stats[DP_STAT_MASKED_HIT]
                    + stats[DP_STAT_MISS];

    for (i = 0; i < PMD_N_CYCLES; i++) {
        if (cycles[i] > pmd->cycles_zero[i]) {
            cycles[i] -= pmd->cycles_zero[i];
        } else {
            cycles[i] = 0;
        }

        total_cycles += cycles[i];
    }

    ds_put_cstr(reply, (pmd->core_id == NON_PMD_CORE_ID)
                        ? "main thread" : "pmd thread");

    if (pmd->numa_id != OVS_NUMA_UNSPEC) {
        ds_put_format(reply, " numa_id %d", pmd->numa_id);
    }
    if (pmd->core_id != OVS_CORE_UNSPEC && pmd->core_id != NON_PMD_CORE_ID) {
        ds_put_format(reply, " core_id %u", pmd->core_id);
    }
    ds_put_cstr(reply, ":\n");

    ds_put_format(reply,
                  "\temc hits:%llu\n\tmegaflow hits:%llu\n"
                  "\tavg. subtable lookups per hit:%.2f\n"
                  "\tmiss:%llu\n\tlost:%llu\n",
                  stats[DP_STAT_EXACT_HIT], stats[DP_STAT_MASKED_HIT],
                  stats[DP_STAT_MASKED_HIT] > 0
                  ? (1.0 * stats[DP_STAT_LOOKUP_HIT]) / stats[DP_STAT_MASKED_HIT]
                  : 0,
                  stats[DP_STAT_MISS], stats[DP_STAT_LOST]);

    if (total_cycles == 0) {
        return;
    }

    ds_put_format(reply,
                  "\tidle cycles:%"PRIu64" (%.02f%%)\n"
                  "\tprocessing cycles:%"PRIu64" (%.02f%%)\n",
                  cycles[PMD_CYCLES_IDLE],
                  cycles[PMD_CYCLES_IDLE] / (double)total_cycles * 100,
                  cycles[PMD_CYCLES_PROCESSING],
                  cycles[PMD_CYCLES_PROCESSING] / (double)total_cycles * 100);

    if (total_packets == 0) {
        return;
    }

    ds_put_format(reply,
                  "\tavg cycles per packet: %.02f (%"PRIu64"/%llu)\n",
                  total_cycles / (double)total_packets,
                  total_cycles, total_packets);

    ds_put_format(reply,
                  "\tavg processing cycles per packet: "
                  "%.02f (%"PRIu64"/%llu)\n",
                  cycles[PMD_CYCLES_PROCESSING] / (double)total_packets,
                  cycles[PMD_CYCLES_PROCESSING], total_packets);
}

static void
pmd_info_clear_stats(struct ds *reply OVS_UNUSED,
                     struct dp_netdev_pmd_thread *pmd,
                     unsigned long long stats[DP_N_STATS],
                     uint64_t cycles[PMD_N_CYCLES])
{
    int i;

    /* We cannot write 'stats' and 'cycles' (because they're written by other
     * threads) and we shouldn't change 'stats' (because they're used to count
     * datapath stats, which must not be cleared here).  Instead, we save the
     * current values and subtract them from the values to be displayed in the
     * future */
    for (i = 0; i < DP_N_STATS; i++) {
        pmd->stats_zero[i] = stats[i];
    }
    for (i = 0; i < PMD_N_CYCLES; i++) {
        pmd->cycles_zero[i] = cycles[i];
    }
}

static int
compare_poll_list(const void *a_, const void *b_)
{
    const struct rxq_poll *a = a_;
    const struct rxq_poll *b = b_;

    const char *namea = netdev_rxq_get_name(a->rxq->rx);
    const char *nameb = netdev_rxq_get_name(b->rxq->rx);

    int cmp = strcmp(namea, nameb);
    if (!cmp) {
        return netdev_rxq_get_queue_id(a->rxq->rx)
               - netdev_rxq_get_queue_id(b->rxq->rx);
    } else {
        return cmp;
    }
}

static void
sorted_poll_list(struct dp_netdev_pmd_thread *pmd, struct rxq_poll **list,
                 size_t *n)
{
    struct rxq_poll *ret, *poll;
    size_t i;

    *n = hmap_count(&pmd->poll_list);
    if (!*n) {
        ret = NULL;
    } else {
        ret = xcalloc(*n, sizeof *ret);
        i = 0;
        HMAP_FOR_EACH (poll, node, &pmd->poll_list) {
            ret[i] = *poll;
            i++;
        }
        ovs_assert(i == *n);
        qsort(ret, *n, sizeof *ret, compare_poll_list);
    }

    *list = ret;
}

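/* Note (added for clarity): callers of sorted_poll_list() are expected to
 * hold 'pmd->port_mutex' while the snapshot is taken and to free() the
 * returned array, as pmd_info_show_rxq() below does. */
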
static void
pmd_info_show_rxq(struct ds *reply, struct dp_netdev_pmd_thread *pmd)
{
    if (pmd->core_id != NON_PMD_CORE_ID) {
        const char *prev_name = NULL;
        struct rxq_poll *list;
        size_t i, n;

        ds_put_format(reply,
                      "pmd thread numa_id %d core_id %u:\n\tisolated : %s\n",
                      pmd->numa_id, pmd->core_id, (pmd->isolated)
                                                  ? "true" : "false");

        ovs_mutex_lock(&pmd->port_mutex);
        sorted_poll_list(pmd, &list, &n);
        for (i = 0; i < n; i++) {
            const char *name = netdev_rxq_get_name(list[i].rxq->rx);

            if (!prev_name || strcmp(name, prev_name)) {
                if (prev_name) {
                    ds_put_cstr(reply, "\n");
                }
                ds_put_format(reply, "\tport: %s\tqueue-id:", name);
            }
            ds_put_format(reply, " %d",
                          netdev_rxq_get_queue_id(list[i].rxq->rx));
            prev_name = name;
        }
        ovs_mutex_unlock(&pmd->port_mutex);
        ds_put_cstr(reply, "\n");
        free(list);
    }
}

static int
compare_poll_thread_list(const void *a_, const void *b_)
{
    const struct dp_netdev_pmd_thread *a, *b;

    a = *(struct dp_netdev_pmd_thread **)a_;
    b = *(struct dp_netdev_pmd_thread **)b_;

    if (a->core_id < b->core_id) {
        return -1;
    }
    if (a->core_id > b->core_id) {
        return 1;
    }
    return 0;
}

/* Create a sorted list of pmd's from the dp->poll_threads cmap.  We can use
 * this list, as long as we do not go to quiescent state. */
static void
sorted_poll_thread_list(struct dp_netdev *dp,
                        struct dp_netdev_pmd_thread ***list,
                        size_t *n)
{
    struct dp_netdev_pmd_thread *pmd;
    struct dp_netdev_pmd_thread **pmd_list;
    size_t k = 0, n_pmds;

    n_pmds = cmap_count(&dp->poll_threads);
    pmd_list = xcalloc(n_pmds, sizeof *pmd_list);

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        if (k >= n_pmds) {
            break;
        }
        pmd_list[k++] = pmd;
    }

    qsort(pmd_list, k, sizeof *pmd_list, compare_poll_thread_list);

    *list = pmd_list;
    *n = k;
}

static void
dpif_netdev_pmd_rebalance(struct unixctl_conn *conn, int argc,
                          const char *argv[], void *aux OVS_UNUSED)
{
    struct ds reply = DS_EMPTY_INITIALIZER;
    struct dp_netdev *dp = NULL;

    ovs_mutex_lock(&dp_netdev_mutex);

    if (argc == 2) {
        dp = shash_find_data(&dp_netdevs, argv[1]);
    } else if (shash_count(&dp_netdevs) == 1) {
        /* There's only one datapath */
        dp = shash_first(&dp_netdevs)->data;
    }

    if (!dp) {
        ovs_mutex_unlock(&dp_netdev_mutex);
        unixctl_command_reply_error(conn,
                                    "please specify an existing datapath");
        return;
    }

    dp_netdev_request_reconfigure(dp);
    ovs_mutex_unlock(&dp_netdev_mutex);
    ds_put_cstr(&reply, "pmd rxq rebalance requested.\n");
    unixctl_command_reply(conn, ds_cstr(&reply));
    ds_destroy(&reply);
}

static void
dpif_netdev_pmd_info(struct unixctl_conn *conn, int argc, const char *argv[],
                     void *aux)
{
    struct ds reply = DS_EMPTY_INITIALIZER;
    struct dp_netdev_pmd_thread **pmd_list;
    struct dp_netdev *dp = NULL;
    size_t n;
    enum pmd_info_type type = *(enum pmd_info_type *) aux;

    ovs_mutex_lock(&dp_netdev_mutex);

    if (argc == 2) {
        dp = shash_find_data(&dp_netdevs, argv[1]);
    } else if (shash_count(&dp_netdevs) == 1) {
        /* There's only one datapath */
        dp = shash_first(&dp_netdevs)->data;
    }

    if (!dp) {
        ovs_mutex_unlock(&dp_netdev_mutex);
        unixctl_command_reply_error(conn,
                                    "please specify an existing datapath");
        return;
    }

    sorted_poll_thread_list(dp, &pmd_list, &n);
    for (size_t i = 0; i < n; i++) {
        struct dp_netdev_pmd_thread *pmd = pmd_list[i];
        if (!pmd) {
            break;
        }

        if (type == PMD_INFO_SHOW_RXQ) {
            pmd_info_show_rxq(&reply, pmd);
        } else {
            unsigned long long stats[DP_N_STATS];
            uint64_t cycles[PMD_N_CYCLES];

            /* Read current stats and cycle counters */
            for (size_t j = 0; j < ARRAY_SIZE(stats); j++) {
                atomic_read_relaxed(&pmd->stats.n[j], &stats[j]);
            }
            for (size_t j = 0; j < ARRAY_SIZE(cycles); j++) {
                atomic_read_relaxed(&pmd->cycles.n[j], &cycles[j]);
            }

            if (type == PMD_INFO_CLEAR_STATS) {
                pmd_info_clear_stats(&reply, pmd, stats, cycles);
            } else if (type == PMD_INFO_SHOW_STATS) {
                pmd_info_show_stats(&reply, pmd, stats, cycles);
            }
        }
    }
    free(pmd_list);

    ovs_mutex_unlock(&dp_netdev_mutex);

    unixctl_command_reply(conn, ds_cstr(&reply));
    ds_destroy(&reply);
}
\f
static int
dpif_netdev_init(void)
{
    static enum pmd_info_type show_aux = PMD_INFO_SHOW_STATS,
                              clear_aux = PMD_INFO_CLEAR_STATS,
                              poll_aux = PMD_INFO_SHOW_RXQ;

    unixctl_command_register("dpif-netdev/pmd-stats-show", "[dp]",
                             0, 1, dpif_netdev_pmd_info,
                             (void *)&show_aux);
    unixctl_command_register("dpif-netdev/pmd-stats-clear", "[dp]",
                             0, 1, dpif_netdev_pmd_info,
                             (void *)&clear_aux);
    unixctl_command_register("dpif-netdev/pmd-rxq-show", "[dp]",
                             0, 1, dpif_netdev_pmd_info,
                             (void *)&poll_aux);
    unixctl_command_register("dpif-netdev/pmd-rxq-rebalance", "[dp]",
                             0, 1, dpif_netdev_pmd_rebalance,
                             NULL);
    return 0;
}

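/* Example (illustrative): the commands registered above are driven through
 * ovs-appctl, optionally naming a datapath when more than one exists
 * ("ovs-netdev" below is just a typical datapath name, not a fixed one):
 *
 *     ovs-appctl dpif-netdev/pmd-stats-show
 *     ovs-appctl dpif-netdev/pmd-rxq-show ovs-netdev
 *     ovs-appctl dpif-netdev/pmd-stats-clear
 *     ovs-appctl dpif-netdev/pmd-rxq-rebalance
 */
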
static int
dpif_netdev_enumerate(struct sset *all_dps,
                      const struct dpif_class *dpif_class)
{
    struct shash_node *node;

    ovs_mutex_lock(&dp_netdev_mutex);
    SHASH_FOR_EACH(node, &dp_netdevs) {
        struct dp_netdev *dp = node->data;
        if (dpif_class != dp->class) {
            /* 'dp_netdevs' contains both "netdev" and "dummy" dpifs.
             * If the class doesn't match, skip this dpif. */
             continue;
        }
        sset_add(all_dps, node->name);
    }
    ovs_mutex_unlock(&dp_netdev_mutex);

    return 0;
}

static bool
dpif_netdev_class_is_dummy(const struct dpif_class *class)
{
    return class != &dpif_netdev_class;
}

static const char *
dpif_netdev_port_open_type(const struct dpif_class *class, const char *type)
{
    return strcmp(type, "internal") ? type
           : dpif_netdev_class_is_dummy(class) ? "dummy-internal"
           : "tap";
}

static struct dpif *
create_dpif_netdev(struct dp_netdev *dp)
{
    uint16_t netflow_id = hash_string(dp->name, 0);
    struct dpif_netdev *dpif;

    ovs_refcount_ref(&dp->ref_cnt);

    dpif = xmalloc(sizeof *dpif);
    dpif_init(&dpif->dpif, dp->class, dp->name, netflow_id >> 8, netflow_id);
    dpif->dp = dp;
    dpif->last_port_seq = seq_read(dp->port_seq);

    return &dpif->dpif;
}

/* Choose an unused, non-zero port number and return it on success.
 * Return ODPP_NONE on failure. */
static odp_port_t
choose_port(struct dp_netdev *dp, const char *name)
    OVS_REQUIRES(dp->port_mutex)
{
    uint32_t port_no;

    if (dp->class != &dpif_netdev_class) {
        const char *p;
        int start_no = 0;

        /* If the port name begins with "br", start the number search at
         * 100 to make writing tests easier. */
        if (!strncmp(name, "br", 2)) {
            start_no = 100;
        }

        /* If the port name contains a number, try to assign that port number.
         * This can make writing unit tests easier because port numbers are
         * predictable. */
        for (p = name; *p != '\0'; p++) {
            if (isdigit((unsigned char) *p)) {
                port_no = start_no + strtol(p, NULL, 10);
                if (port_no > 0 && port_no != odp_to_u32(ODPP_NONE)
                    && !dp_netdev_lookup_port(dp, u32_to_odp(port_no))) {
                    return u32_to_odp(port_no);
                }
                break;
            }
        }
    }

    for (port_no = 1; port_no <= UINT16_MAX; port_no++) {
        if (!dp_netdev_lookup_port(dp, u32_to_odp(port_no))) {
            return u32_to_odp(port_no);
        }
    }

    return ODPP_NONE;
}

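/* Worked example (illustrative): under a dummy datapath class, a port named
 * "br17" starts the search at 100 and its first digit run "17" yields
 * 100 + 17 = 117, so port 117 is tried first if it is free.  A name without
 * digits falls through to the lowest free port number, starting from 1. */
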
static int
create_dp_netdev(const char *name, const struct dpif_class *class,
                 struct dp_netdev **dpp)
    OVS_REQUIRES(dp_netdev_mutex)
{
    struct dp_netdev *dp;
    int error;

    dp = xzalloc(sizeof *dp);
    shash_add(&dp_netdevs, name, dp);

    *CONST_CAST(const struct dpif_class **, &dp->class) = class;
    *CONST_CAST(const char **, &dp->name) = xstrdup(name);
    ovs_refcount_init(&dp->ref_cnt);
    atomic_flag_clear(&dp->destroyed);

    ovs_mutex_init(&dp->port_mutex);
    hmap_init(&dp->ports);
    dp->port_seq = seq_create();
    fat_rwlock_init(&dp->upcall_rwlock);

    dp->reconfigure_seq = seq_create();
    dp->last_reconfigure_seq = seq_read(dp->reconfigure_seq);

    for (int i = 0; i < N_METER_LOCKS; ++i) {
        ovs_mutex_init_adaptive(&dp->meter_locks[i]);
    }

    /* Disable upcalls by default. */
    dp_netdev_disable_upcall(dp);
    dp->upcall_aux = NULL;
    dp->upcall_cb = NULL;

    conntrack_init(&dp->conntrack);

    atomic_init(&dp->emc_insert_min, DEFAULT_EM_FLOW_INSERT_MIN);

    cmap_init(&dp->poll_threads);

    ovs_mutex_init(&dp->tx_qid_pool_mutex);
    /* We need 1 Tx queue for each possible core + 1 for non-PMD threads. */
    dp->tx_qid_pool = id_pool_create(0, ovs_numa_get_n_cores() + 1);

    ovs_mutex_init_recursive(&dp->non_pmd_mutex);
    ovsthread_key_create(&dp->per_pmd_key, NULL);

    ovs_mutex_lock(&dp->port_mutex);
    /* non-PMD will be created before all other threads and will
     * allocate static_tx_qid = 0. */
    dp_netdev_set_nonpmd(dp);

    error = do_add_port(dp, name, dpif_netdev_port_open_type(dp->class,
                                                             "internal"),
                        ODPP_LOCAL);
    ovs_mutex_unlock(&dp->port_mutex);
    if (error) {
        dp_netdev_free(dp);
        return error;
    }

    dp->last_tnl_conf_seq = seq_read(tnl_conf_seq);
    *dpp = dp;
    return 0;
}

static void
dp_netdev_request_reconfigure(struct dp_netdev *dp)
{
    seq_change(dp->reconfigure_seq);
}

static bool
dp_netdev_is_reconf_required(struct dp_netdev *dp)
{
    return seq_read(dp->reconfigure_seq) != dp->last_reconfigure_seq;
}

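/* Illustrative sketch: the pair of functions above lets the main loop detect
 * and apply pending changes, along the lines of:
 *
 *     if (dp_netdev_is_reconf_required(dp)) {
 *         ovs_mutex_lock(&dp->port_mutex);
 *         reconfigure_datapath(dp);
 *         ovs_mutex_unlock(&dp->port_mutex);
 *     }
 */
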
static int
dpif_netdev_open(const struct dpif_class *class, const char *name,
                 bool create, struct dpif **dpifp)
{
    struct dp_netdev *dp;
    int error;

    ovs_mutex_lock(&dp_netdev_mutex);
    dp = shash_find_data(&dp_netdevs, name);
    if (!dp) {
        error = create ? create_dp_netdev(name, class, &dp) : ENODEV;
    } else {
        error = (dp->class != class ? EINVAL
                 : create ? EEXIST
                 : 0);
    }
    if (!error) {
        *dpifp = create_dpif_netdev(dp);
        dp->dpif = *dpifp;
    }
    ovs_mutex_unlock(&dp_netdev_mutex);

    return error;
}

static void
dp_netdev_destroy_upcall_lock(struct dp_netdev *dp)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    /* Check that upcalls are disabled, i.e. that the rwlock is taken */
    ovs_assert(fat_rwlock_tryrdlock(&dp->upcall_rwlock));

    /* Before freeing a lock we should release it */
    fat_rwlock_unlock(&dp->upcall_rwlock);
    fat_rwlock_destroy(&dp->upcall_rwlock);
}

static void
dp_delete_meter(struct dp_netdev *dp, uint32_t meter_id)
    OVS_REQUIRES(dp->meter_locks[meter_id % N_METER_LOCKS])
{
    if (dp->meters[meter_id]) {
        free(dp->meters[meter_id]);
        dp->meters[meter_id] = NULL;
    }
}

/* Requires dp_netdev_mutex so that we can't get a new reference to 'dp'
 * through the 'dp_netdevs' shash while freeing 'dp'. */
static void
dp_netdev_free(struct dp_netdev *dp)
    OVS_REQUIRES(dp_netdev_mutex)
{
    struct dp_netdev_port *port, *next;

    shash_find_and_delete(&dp_netdevs, dp->name);

    ovs_mutex_lock(&dp->port_mutex);
    HMAP_FOR_EACH_SAFE (port, next, node, &dp->ports) {
        do_del_port(dp, port);
    }
    ovs_mutex_unlock(&dp->port_mutex);

    dp_netdev_destroy_all_pmds(dp, true);
    cmap_destroy(&dp->poll_threads);

    ovs_mutex_destroy(&dp->tx_qid_pool_mutex);
    id_pool_destroy(dp->tx_qid_pool);

    ovs_mutex_destroy(&dp->non_pmd_mutex);
    ovsthread_key_delete(dp->per_pmd_key);

    conntrack_destroy(&dp->conntrack);

    seq_destroy(dp->reconfigure_seq);

    seq_destroy(dp->port_seq);
    hmap_destroy(&dp->ports);
    ovs_mutex_destroy(&dp->port_mutex);

    /* Upcalls must be disabled at this point */
    dp_netdev_destroy_upcall_lock(dp);

    int i;

    for (i = 0; i < MAX_METERS; ++i) {
        meter_lock(dp, i);
        dp_delete_meter(dp, i);
        meter_unlock(dp, i);
    }
    for (i = 0; i < N_METER_LOCKS; ++i) {
        ovs_mutex_destroy(&dp->meter_locks[i]);
    }

    free(dp->pmd_cmask);
    free(CONST_CAST(char *, dp->name));
    free(dp);
}

static void
dp_netdev_unref(struct dp_netdev *dp)
{
    if (dp) {
        /* Take dp_netdev_mutex so that, if dp->ref_cnt falls to zero, we can't
         * get a new reference to 'dp' through the 'dp_netdevs' shash. */
        ovs_mutex_lock(&dp_netdev_mutex);
        if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) {
            dp_netdev_free(dp);
        }
        ovs_mutex_unlock(&dp_netdev_mutex);
    }
}

static void
dpif_netdev_close(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    dp_netdev_unref(dp);
    free(dpif);
}

static int
dpif_netdev_destroy(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    if (!atomic_flag_test_and_set(&dp->destroyed)) {
        if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) {
            /* Can't happen: 'dpif' still owns a reference to 'dp'. */
            OVS_NOT_REACHED();
        }
    }

    return 0;
}

/* Add 'n' to the atomic variable 'var' non-atomically and using relaxed
 * load/store semantics.  While the increment is not atomic, the load and
 * store operations are, making it impossible to read inconsistent values.
 *
 * This is used to update thread local stats counters. */
static void
non_atomic_ullong_add(atomic_ullong *var, unsigned long long n)
{
    unsigned long long tmp;

    atomic_read_relaxed(var, &tmp);
    tmp += n;
    atomic_store_relaxed(var, tmp);
}

static int
dpif_netdev_get_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_pmd_thread *pmd;

    stats->n_flows = stats->n_hit = stats->n_missed = stats->n_lost = 0;
    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        unsigned long long n;
        stats->n_flows += cmap_count(&pmd->flow_table);

        atomic_read_relaxed(&pmd->stats.n[DP_STAT_MASKED_HIT], &n);
        stats->n_hit += n;
        atomic_read_relaxed(&pmd->stats.n[DP_STAT_EXACT_HIT], &n);
        stats->n_hit += n;
        atomic_read_relaxed(&pmd->stats.n[DP_STAT_MISS], &n);
        stats->n_missed += n;
        atomic_read_relaxed(&pmd->stats.n[DP_STAT_LOST], &n);
        stats->n_lost += n;
    }
    stats->n_masks = UINT32_MAX;
    stats->n_mask_hit = UINT64_MAX;

    return 0;
}

static void
dp_netdev_reload_pmd__(struct dp_netdev_pmd_thread *pmd)
{
    if (pmd->core_id == NON_PMD_CORE_ID) {
        ovs_mutex_lock(&pmd->dp->non_pmd_mutex);
        ovs_mutex_lock(&pmd->port_mutex);
        pmd_load_cached_ports(pmd);
        ovs_mutex_unlock(&pmd->port_mutex);
        ovs_mutex_unlock(&pmd->dp->non_pmd_mutex);
        return;
    }

    ovs_mutex_lock(&pmd->cond_mutex);
    seq_change(pmd->reload_seq);
    atomic_store_relaxed(&pmd->reload, true);
    ovs_mutex_cond_wait(&pmd->cond, &pmd->cond_mutex);
    ovs_mutex_unlock(&pmd->cond_mutex);
}

static uint32_t
hash_port_no(odp_port_t port_no)
{
    return hash_int(odp_to_u32(port_no), 0);
}

static int
port_create(const char *devname, const char *type,
            odp_port_t port_no, struct dp_netdev_port **portp)
{
    struct netdev_saved_flags *sf;
    struct dp_netdev_port *port;
    enum netdev_flags flags;
    struct netdev *netdev;
    int error;

    *portp = NULL;

    /* Open and validate network device. */
    error = netdev_open(devname, type, &netdev);
    if (error) {
        return error;
    }
    /* XXX reject non-Ethernet devices */

    netdev_get_flags(netdev, &flags);
    if (flags & NETDEV_LOOPBACK) {
        VLOG_ERR("%s: cannot add a loopback device", devname);
        error = EINVAL;
        goto out;
    }

    error = netdev_turn_flags_on(netdev, NETDEV_PROMISC, &sf);
    if (error) {
        VLOG_ERR("%s: cannot set promisc flag", devname);
        goto out;
    }

    port = xzalloc(sizeof *port);
    port->port_no = port_no;
    port->netdev = netdev;
    port->type = xstrdup(type);
    port->sf = sf;
    port->need_reconfigure = true;
    ovs_mutex_init(&port->txq_used_mutex);

    *portp = port;

    return 0;

out:
    netdev_close(netdev);
    return error;
}

static int
do_add_port(struct dp_netdev *dp, const char *devname, const char *type,
            odp_port_t port_no)
    OVS_REQUIRES(dp->port_mutex)
{
    struct dp_netdev_port *port;
    int error;

    /* Reject devices already in 'dp'. */
    if (!get_port_by_name(dp, devname, &port)) {
        return EEXIST;
    }

    error = port_create(devname, type, port_no, &port);
    if (error) {
        return error;
    }

    hmap_insert(&dp->ports, &port->node, hash_port_no(port_no));
    seq_change(dp->port_seq);

    reconfigure_datapath(dp);

    return 0;
}

247527db
BP
1606static int
1607dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev,
4e022ec0 1608 odp_port_t *port_nop)
247527db
BP
1609{
1610 struct dp_netdev *dp = get_dp_netdev(dpif);
3aa30359
BP
1611 char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
1612 const char *dpif_port;
4e022ec0 1613 odp_port_t port_no;
5279f8fd 1614 int error;
247527db 1615
59e6d833 1616 ovs_mutex_lock(&dp->port_mutex);
3aa30359 1617 dpif_port = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
4e022ec0 1618 if (*port_nop != ODPP_NONE) {
ff073a71
BP
1619 port_no = *port_nop;
1620 error = dp_netdev_lookup_port(dp, *port_nop) ? EBUSY : 0;
232dfa4a 1621 } else {
3aa30359 1622 port_no = choose_port(dp, dpif_port);
5279f8fd 1623 error = port_no == ODPP_NONE ? EFBIG : 0;
232dfa4a 1624 }
5279f8fd 1625 if (!error) {
247527db 1626 *port_nop = port_no;
5279f8fd 1627 error = do_add_port(dp, dpif_port, netdev_get_type(netdev), port_no);
247527db 1628 }
59e6d833 1629 ovs_mutex_unlock(&dp->port_mutex);
5279f8fd
BP
1630
1631 return error;
72865317
BP
1632}
1633
1634static int
4e022ec0 1635dpif_netdev_port_del(struct dpif *dpif, odp_port_t port_no)
72865317
BP
1636{
1637 struct dp_netdev *dp = get_dp_netdev(dpif);
5279f8fd
BP
1638 int error;
1639
59e6d833 1640 ovs_mutex_lock(&dp->port_mutex);
c40b890f
BP
1641 if (port_no == ODPP_LOCAL) {
1642 error = EINVAL;
1643 } else {
1644 struct dp_netdev_port *port;
1645
1646 error = get_port_by_number(dp, port_no, &port);
1647 if (!error) {
1648 do_del_port(dp, port);
1649 }
1650 }
59e6d833 1651 ovs_mutex_unlock(&dp->port_mutex);
5279f8fd
BP
1652
1653 return error;
72865317
BP
1654}
1655
1656static bool
4e022ec0 1657is_valid_port_number(odp_port_t port_no)
72865317 1658{
ff073a71
BP
1659 return port_no != ODPP_NONE;
1660}
1661
1662static struct dp_netdev_port *
1663dp_netdev_lookup_port(const struct dp_netdev *dp, odp_port_t port_no)
e9985d6a 1664 OVS_REQUIRES(dp->port_mutex)
ff073a71
BP
1665{
1666 struct dp_netdev_port *port;
1667
e9985d6a 1668 HMAP_FOR_EACH_WITH_HASH (port, node, hash_port_no(port_no), &dp->ports) {
35303d71 1669 if (port->port_no == port_no) {
ff073a71
BP
1670 return port;
1671 }
1672 }
1673 return NULL;
72865317
BP
1674}
1675
1676static int
1677get_port_by_number(struct dp_netdev *dp,
4e022ec0 1678 odp_port_t port_no, struct dp_netdev_port **portp)
e9985d6a 1679 OVS_REQUIRES(dp->port_mutex)
72865317
BP
1680{
1681 if (!is_valid_port_number(port_no)) {
1682 *portp = NULL;
1683 return EINVAL;
1684 } else {
ff073a71 1685 *portp = dp_netdev_lookup_port(dp, port_no);
0f6a066f 1686 return *portp ? 0 : ENODEV;
72865317
BP
1687 }
1688}
1689
b284085e 1690static void
62453dad 1691port_destroy(struct dp_netdev_port *port)
b284085e 1692{
62453dad
DDP
1693 if (!port) {
1694 return;
b284085e 1695 }
b284085e 1696
62453dad
DDP
1697 netdev_close(port->netdev);
1698 netdev_restore_flags(port->sf);
accf8626 1699
62453dad 1700 for (unsigned i = 0; i < port->n_rxq; i++) {
947dc567 1701 netdev_rxq_close(port->rxqs[i].rx);
b284085e 1702 }
324c8374 1703 ovs_mutex_destroy(&port->txq_used_mutex);
3eb67853 1704 free(port->rxq_affinity_list);
324c8374 1705 free(port->txq_used);
3eb67853 1706 free(port->rxqs);
62453dad
DDP
1707 free(port->type);
1708 free(port);
b284085e
PS
1709}
1710
72865317
BP
1711static int
1712get_port_by_name(struct dp_netdev *dp,
1713 const char *devname, struct dp_netdev_port **portp)
59e6d833 1714 OVS_REQUIRES(dp->port_mutex)
72865317
BP
1715{
1716 struct dp_netdev_port *port;
1717
e9985d6a 1718 HMAP_FOR_EACH (port, node, &dp->ports) {
3efb6063 1719 if (!strcmp(netdev_get_name(port->netdev), devname)) {
72865317
BP
1720 *portp = port;
1721 return 0;
1722 }
1723 }
0f6a066f
DDP
1724
1725 /* Callers of dpif_netdev_port_query_by_name() expect ENODEV for a
1726 * nonexistent port. */
1727 return ENODEV;
72865317
BP
1728}
1729
b9584f21 1730/* Returns 'true' if there is a port with pmd netdev. */
65f13b50 1731static bool
b9584f21 1732has_pmd_port(struct dp_netdev *dp)
e9985d6a 1733 OVS_REQUIRES(dp->port_mutex)
65f13b50
AW
1734{
1735 struct dp_netdev_port *port;
1736
e9985d6a 1737 HMAP_FOR_EACH (port, node, &dp->ports) {
5dd57e80 1738 if (netdev_is_pmd(port->netdev)) {
b9584f21 1739 return true;
65f13b50
AW
1740 }
1741 }
1742
1743 return false;
1744}
1745
c40b890f
BP
1746static void
1747do_del_port(struct dp_netdev *dp, struct dp_netdev_port *port)
59e6d833 1748 OVS_REQUIRES(dp->port_mutex)
72865317 1749{
e9985d6a 1750 hmap_remove(&dp->ports, &port->node);
d33ed218 1751 seq_change(dp->port_seq);
d0cca6c3 1752
e32971b8 1753 reconfigure_datapath(dp);
72865317 1754
62453dad 1755 port_destroy(port);
72865317
BP
1756}
1757
1758static void
4c738a8d
BP
1759answer_port_query(const struct dp_netdev_port *port,
1760 struct dpif_port *dpif_port)
72865317 1761{
3efb6063 1762 dpif_port->name = xstrdup(netdev_get_name(port->netdev));
0cbfe35d 1763 dpif_port->type = xstrdup(port->type);
35303d71 1764 dpif_port->port_no = port->port_no;
72865317
BP
1765}
1766
1767static int
4e022ec0 1768dpif_netdev_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
4c738a8d 1769 struct dpif_port *dpif_port)
72865317
BP
1770{
1771 struct dp_netdev *dp = get_dp_netdev(dpif);
1772 struct dp_netdev_port *port;
1773 int error;
1774
e9985d6a 1775 ovs_mutex_lock(&dp->port_mutex);
72865317 1776 error = get_port_by_number(dp, port_no, &port);
4afba28d 1777 if (!error && dpif_port) {
4c738a8d 1778 answer_port_query(port, dpif_port);
72865317 1779 }
e9985d6a 1780 ovs_mutex_unlock(&dp->port_mutex);
5279f8fd 1781
72865317
BP
1782 return error;
1783}
1784
1785static int
1786dpif_netdev_port_query_by_name(const struct dpif *dpif, const char *devname,
4c738a8d 1787 struct dpif_port *dpif_port)
72865317
BP
1788{
1789 struct dp_netdev *dp = get_dp_netdev(dpif);
1790 struct dp_netdev_port *port;
1791 int error;
1792
59e6d833 1793 ovs_mutex_lock(&dp->port_mutex);
72865317 1794 error = get_port_by_name(dp, devname, &port);
4afba28d 1795 if (!error && dpif_port) {
4c738a8d 1796 answer_port_query(port, dpif_port);
72865317 1797 }
59e6d833 1798 ovs_mutex_unlock(&dp->port_mutex);
5279f8fd 1799
72865317
BP
1800 return error;
1801}
1802
61e7deb1
BP
1803static void
1804dp_netdev_flow_free(struct dp_netdev_flow *flow)
1805{
61e7deb1 1806 dp_netdev_actions_free(dp_netdev_flow_get_actions(flow));
61e7deb1
BP
1807 free(flow);
1808}
1809
ed79f89a
DDP
1810static void dp_netdev_flow_unref(struct dp_netdev_flow *flow)
1811{
1812 if (ovs_refcount_unref_relaxed(&flow->ref_cnt) == 1) {
1813 ovsrcu_postpone(dp_netdev_flow_free, flow);
1814 }
1815}
1816
70e5ed6f
JS
1817static uint32_t
1818dp_netdev_flow_hash(const ovs_u128 *ufid)
1819{
1820 return ufid->u32[0];
1821}
1822
3453b4d6
JS
1823static inline struct dpcls *
1824dp_netdev_pmd_lookup_dpcls(struct dp_netdev_pmd_thread *pmd,
1825 odp_port_t in_port)
1826{
1827 struct dpcls *cls;
1828 uint32_t hash = hash_port_no(in_port);
1829 CMAP_FOR_EACH_WITH_HASH (cls, node, hash, &pmd->classifiers) {
1830 if (cls->in_port == in_port) {
1831 /* Port classifier exists already */
1832 return cls;
1833 }
1834 }
1835 return NULL;
1836}
1837
1838static inline struct dpcls *
1839dp_netdev_pmd_find_dpcls(struct dp_netdev_pmd_thread *pmd,
1840 odp_port_t in_port)
1841 OVS_REQUIRES(pmd->flow_mutex)
1842{
1843 struct dpcls *cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port);
1844 uint32_t hash = hash_port_no(in_port);
1845
1846 if (!cls) {
1847 /* Create new classifier for in_port */
1848 cls = xmalloc(sizeof(*cls));
1849 dpcls_init(cls);
1850 cls->in_port = in_port;
1851 cmap_insert(&pmd->classifiers, &cls->node, hash);
1852 VLOG_DBG("Creating dpcls %p for in_port %d", cls, in_port);
1853 }
1854 return cls;
1855}
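/* A note on the per-port classifiers above: keeping one dpcls per input
 * port bounds the cost of a lookup, since a packet received on 'in_port'
 * only ever consults subtables created by flows installed for that same
 * port.  It also allows in_port to be left out of the subtable masks
 * entirely (see the wildcarding notes in dp_netdev_flow_add()). */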
1856
72865317 1857static void
1c1e46ed
AW
1858dp_netdev_pmd_remove_flow(struct dp_netdev_pmd_thread *pmd,
1859 struct dp_netdev_flow *flow)
1860 OVS_REQUIRES(pmd->flow_mutex)
72865317 1861{
9f361d6b 1862 struct cmap_node *node = CONST_CAST(struct cmap_node *, &flow->node);
3453b4d6
JS
1863 struct dpcls *cls;
1864 odp_port_t in_port = flow->flow.in_port.odp_port;
2c0ea78f 1865
3453b4d6
JS
1866 cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port);
1867 ovs_assert(cls != NULL);
1868 dpcls_remove(cls, &flow->cr);
1c1e46ed 1869 cmap_remove(&pmd->flow_table, node, dp_netdev_flow_hash(&flow->ufid));
9bbf1c3d 1870 flow->dead = true;
ed79f89a
DDP
1871
1872 dp_netdev_flow_unref(flow);
72865317
BP
1873}
1874
1875static void
1c1e46ed 1876dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd)
72865317 1877{
78c8df12 1878 struct dp_netdev_flow *netdev_flow;
72865317 1879
1c1e46ed
AW
1880 ovs_mutex_lock(&pmd->flow_mutex);
1881 CMAP_FOR_EACH (netdev_flow, node, &pmd->flow_table) {
1882 dp_netdev_pmd_remove_flow(pmd, netdev_flow);
72865317 1883 }
1c1e46ed 1884 ovs_mutex_unlock(&pmd->flow_mutex);
72865317
BP
1885}
1886
1887static int
1888dpif_netdev_flow_flush(struct dpif *dpif)
1889{
1890 struct dp_netdev *dp = get_dp_netdev(dpif);
1c1e46ed
AW
1891 struct dp_netdev_pmd_thread *pmd;
1892
1893 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
1894 dp_netdev_pmd_flow_flush(pmd);
1895 }
5279f8fd 1896
72865317
BP
1897 return 0;
1898}
1899
b0ec0f27 1900struct dp_netdev_port_state {
e9985d6a 1901 struct hmap_position position;
4c738a8d 1902 char *name;
b0ec0f27
BP
1903};
1904
1905static int
1906dpif_netdev_port_dump_start(const struct dpif *dpif OVS_UNUSED, void **statep)
1907{
1908 *statep = xzalloc(sizeof(struct dp_netdev_port_state));
1909 return 0;
1910}
1911
72865317 1912static int
b0ec0f27 1913dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,
4c738a8d 1914 struct dpif_port *dpif_port)
72865317 1915{
b0ec0f27 1916 struct dp_netdev_port_state *state = state_;
72865317 1917 struct dp_netdev *dp = get_dp_netdev(dpif);
e9985d6a 1918 struct hmap_node *node;
ff073a71 1919 int retval;
72865317 1920
e9985d6a
DDP
1921 ovs_mutex_lock(&dp->port_mutex);
1922 node = hmap_at_position(&dp->ports, &state->position);
ff073a71
BP
1923 if (node) {
1924 struct dp_netdev_port *port;
5279f8fd 1925
ff073a71
BP
1926 port = CONTAINER_OF(node, struct dp_netdev_port, node);
1927
1928 free(state->name);
1929 state->name = xstrdup(netdev_get_name(port->netdev));
1930 dpif_port->name = state->name;
1931 dpif_port->type = port->type;
35303d71 1932 dpif_port->port_no = port->port_no;
ff073a71
BP
1933
1934 retval = 0;
1935 } else {
1936 retval = EOF;
72865317 1937 }
e9985d6a 1938 ovs_mutex_unlock(&dp->port_mutex);
5279f8fd 1939
ff073a71 1940 return retval;
b0ec0f27
BP
1941}
1942
1943static int
4c738a8d 1944dpif_netdev_port_dump_done(const struct dpif *dpif OVS_UNUSED, void *state_)
b0ec0f27 1945{
4c738a8d
BP
1946 struct dp_netdev_port_state *state = state_;
1947 free(state->name);
b0ec0f27
BP
1948 free(state);
1949 return 0;
72865317
BP
1950}
1951
1952static int
67a4917b 1953dpif_netdev_port_poll(const struct dpif *dpif_, char **devnamep OVS_UNUSED)
72865317
BP
1954{
1955 struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);
d33ed218 1956 uint64_t new_port_seq;
5279f8fd
BP
1957 int error;
1958
d33ed218
BP
1959 new_port_seq = seq_read(dpif->dp->port_seq);
1960 if (dpif->last_port_seq != new_port_seq) {
1961 dpif->last_port_seq = new_port_seq;
5279f8fd 1962 error = ENOBUFS;
72865317 1963 } else {
5279f8fd 1964 error = EAGAIN;
72865317 1965 }
5279f8fd
BP
1966
1967 return error;
72865317
BP
1968}
1969
1970static void
1971dpif_netdev_port_poll_wait(const struct dpif *dpif_)
1972{
1973 struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);
5279f8fd 1974
d33ed218 1975 seq_wait(dpif->dp->port_seq, dpif->last_port_seq);
8a4e3a85
BP
1976}
1977
1978static struct dp_netdev_flow *
0de8783a 1979dp_netdev_flow_cast(const struct dpcls_rule *cr)
8a4e3a85
BP
1980{
1981 return cr ? CONTAINER_OF(cr, struct dp_netdev_flow, cr) : NULL;
72865317
BP
1982}
1983
9bbf1c3d
DDP
1984static bool dp_netdev_flow_ref(struct dp_netdev_flow *flow)
1985{
1986 return ovs_refcount_try_ref_rcu(&flow->ref_cnt);
1987}
1988
79df317f
DDP
1989/* netdev_flow_key utilities.
1990 *
1991 * netdev_flow_key is basically a miniflow. We use these functions
1992 * (netdev_flow_key_clone, netdev_flow_key_equal, ...) instead of the miniflow
1993 * functions (miniflow_clone_inline, miniflow_equal, ...), because:
1994 *
1995 * - Since we are dealing exclusively with miniflows created by
1996 * miniflow_extract(), if the map is different the miniflow is different.
1997 * Therefore we can be faster by comparing the map and the miniflow in a
1998 * single memcmp().
5fcff47b 1999 * - These functions can be inlined by the compiler. */
79df317f 2000
361d808d 2001/* Given the number of bits set in miniflow's maps, returns the size of the
caeb4906 2002 * 'netdev_flow_key.mf'. */
361d808d
JR
2003static inline size_t
2004netdev_flow_key_size(size_t flow_u64s)
79df317f 2005{
361d808d 2006 return sizeof(struct miniflow) + MINIFLOW_VALUES_SIZE(flow_u64s);
79df317f
DDP
2007}
2008
79df317f
DDP
2009static inline bool
2010netdev_flow_key_equal(const struct netdev_flow_key *a,
0de8783a
JR
2011 const struct netdev_flow_key *b)
2012{
caeb4906
JR
2013 /* 'b->len' may not be set yet. */
2014 return a->hash == b->hash && !memcmp(&a->mf, &b->mf, a->len);
0de8783a
JR
2015}
2016
2017/* Used to compare 'netdev_flow_key' in the exact match cache to a miniflow.
d79a39fe 2018 * The maps are compared bitwise, so both 'key->mf' and 'mf' must have been
0de8783a
JR
2019 * generated by miniflow_extract. */
2020static inline bool
2021netdev_flow_key_equal_mf(const struct netdev_flow_key *key,
2022 const struct miniflow *mf)
79df317f 2023{
caeb4906 2024 return !memcmp(&key->mf, mf, key->len);
79df317f
DDP
2025}
2026
2027static inline void
2028netdev_flow_key_clone(struct netdev_flow_key *dst,
0de8783a
JR
2029 const struct netdev_flow_key *src)
2030{
caeb4906
JR
2031 memcpy(dst, src,
2032 offsetof(struct netdev_flow_key, mf) + src->len);
0de8783a
JR
2033}
2034
0de8783a
JR
2035/* Initialize a netdev_flow_key 'mask' from 'match'. */
2036static inline void
2037netdev_flow_mask_init(struct netdev_flow_key *mask,
2038 const struct match *match)
2039{
09b0fa9c 2040 uint64_t *dst = miniflow_values(&mask->mf);
5fcff47b 2041 struct flowmap fmap;
0de8783a 2042 uint32_t hash = 0;
5fcff47b 2043 size_t idx;
0de8783a
JR
2044
2045 /* Only check masks that make sense for the flow. */
5fcff47b
JR
2046 flow_wc_map(&match->flow, &fmap);
2047 flowmap_init(&mask->mf.map);
0de8783a 2048
5fcff47b
JR
2049 FLOWMAP_FOR_EACH_INDEX(idx, fmap) {
2050 uint64_t mask_u64 = flow_u64_value(&match->wc.masks, idx);
0de8783a 2051
5fcff47b
JR
2052 if (mask_u64) {
2053 flowmap_set(&mask->mf.map, idx, 1);
2054 *dst++ = mask_u64;
2055 hash = hash_add64(hash, mask_u64);
0de8783a 2056 }
0de8783a
JR
2057 }
2058
5fcff47b 2059 map_t map;
0de8783a 2060
5fcff47b
JR
2061 FLOWMAP_FOR_EACH_MAP (map, mask->mf.map) {
2062 hash = hash_add64(hash, map);
2063 }
0de8783a 2064
5fcff47b 2065 size_t n = dst - miniflow_get_values(&mask->mf);
0de8783a 2066
d70e8c28 2067 mask->hash = hash_finish(hash, n * 8);
0de8783a
JR
2068 mask->len = netdev_flow_key_size(n);
2069}
2070
361d808d 2071/* Initializes 'dst' as a copy of 'flow' masked with 'mask'. */
0de8783a
JR
2072static inline void
2073netdev_flow_key_init_masked(struct netdev_flow_key *dst,
2074 const struct flow *flow,
2075 const struct netdev_flow_key *mask)
79df317f 2076{
09b0fa9c
JR
2077 uint64_t *dst_u64 = miniflow_values(&dst->mf);
2078 const uint64_t *mask_u64 = miniflow_get_values(&mask->mf);
0de8783a 2079 uint32_t hash = 0;
d70e8c28 2080 uint64_t value;
0de8783a
JR
2081
2082 dst->len = mask->len;
361d808d 2083 dst->mf = mask->mf; /* Copy maps. */
0de8783a 2084
5fcff47b 2085 FLOW_FOR_EACH_IN_MAPS(value, flow, mask->mf.map) {
d70e8c28
JR
2086 *dst_u64 = value & *mask_u64++;
2087 hash = hash_add64(hash, *dst_u64++);
0de8783a 2088 }
09b0fa9c
JR
2089 dst->hash = hash_finish(hash,
2090 (dst_u64 - miniflow_get_values(&dst->mf)) * 8);
0de8783a
JR
2091}
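/* Illustrative example for the function above (not executed anywhere): if
 * 'mask' covers only the u64 holding the IPv4 addresses, the masked key
 * contains that single value AND-ed with the mask, and 'dst->hash' covers
 * just those bytes -- so two packets that differ only in wildcarded fields
 * produce byte-identical keys and equal hashes. */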
2092
5fcff47b
JR
2093/* Iterate through netdev_flow_key TNL u64 values specified by 'FLOWMAP'. */
2094#define NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(VALUE, KEY, FLOWMAP) \
2095 MINIFLOW_FOR_EACH_IN_FLOWMAP(VALUE, &(KEY)->mf, FLOWMAP)
0de8783a
JR
2096
2097/* Returns a hash value for the bits of 'key' where there are 1-bits in
2098 * 'mask'. */
2099static inline uint32_t
2100netdev_flow_key_hash_in_mask(const struct netdev_flow_key *key,
2101 const struct netdev_flow_key *mask)
2102{
09b0fa9c 2103 const uint64_t *p = miniflow_get_values(&mask->mf);
0de8783a 2104 uint32_t hash = 0;
5fcff47b 2105 uint64_t value;
0de8783a 2106
5fcff47b
JR
2107 NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, key, mask->mf.map) {
2108 hash = hash_add64(hash, value & *p++);
0de8783a
JR
2109 }
2110
09b0fa9c 2111 return hash_finish(hash, (p - miniflow_get_values(&mask->mf)) * 8);
79df317f
DDP
2112}
2113
9bbf1c3d
DDP
2114static inline bool
2115emc_entry_alive(struct emc_entry *ce)
2116{
2117 return ce->flow && !ce->flow->dead;
2118}
2119
2120static void
2121emc_clear_entry(struct emc_entry *ce)
2122{
2123 if (ce->flow) {
2124 dp_netdev_flow_unref(ce->flow);
2125 ce->flow = NULL;
2126 }
2127}
2128
2129static inline void
2130emc_change_entry(struct emc_entry *ce, struct dp_netdev_flow *flow,
0de8783a 2131 const struct netdev_flow_key *key)
9bbf1c3d
DDP
2132{
2133 if (ce->flow != flow) {
2134 if (ce->flow) {
2135 dp_netdev_flow_unref(ce->flow);
2136 }
2137
2138 if (dp_netdev_flow_ref(flow)) {
2139 ce->flow = flow;
2140 } else {
2141 ce->flow = NULL;
2142 }
2143 }
0de8783a
JR
2144 if (key) {
2145 netdev_flow_key_clone(&ce->key, key);
9bbf1c3d
DDP
2146 }
2147}
2148
2149static inline void
0de8783a 2150emc_insert(struct emc_cache *cache, const struct netdev_flow_key *key,
9bbf1c3d
DDP
2151 struct dp_netdev_flow *flow)
2152{
2153 struct emc_entry *to_be_replaced = NULL;
2154 struct emc_entry *current_entry;
2155
0de8783a
JR
2156 EMC_FOR_EACH_POS_WITH_HASH(cache, current_entry, key->hash) {
2157 if (netdev_flow_key_equal(&current_entry->key, key)) {
9bbf1c3d 2158 /* We found the entry with the 'mf' miniflow */
0de8783a 2159 emc_change_entry(current_entry, flow, NULL);
9bbf1c3d
DDP
2160 return;
2161 }
2162
2163 /* Replacement policy: put the flow in an empty (not alive) entry, or
2164 * in the first entry where it can be stored. */
2165 if (!to_be_replaced
2166 || (emc_entry_alive(to_be_replaced)
2167 && !emc_entry_alive(current_entry))
0de8783a 2168 || current_entry->key.hash < to_be_replaced->key.hash) {
9bbf1c3d
DDP
2169 to_be_replaced = current_entry;
2170 }
2171 }
2172 /* We didn't find the miniflow in the cache.
2173 * The 'to_be_replaced' entry is where the new flow will be stored. */
2174
0de8783a 2175 emc_change_entry(to_be_replaced, flow, key);
9bbf1c3d
DDP
2176}
2177
4c30b246
CL
2178static inline void
2179emc_probabilistic_insert(struct dp_netdev_pmd_thread *pmd,
2180 const struct netdev_flow_key *key,
2181 struct dp_netdev_flow *flow)
2182{
2183 /* Insert an entry into the EMC based on probability value 'min'. By
2184 * default the value is UINT32_MAX / 100, which yields an insertion
2185 * probability of 1/100, i.e. 1%. */
2186
2187 uint32_t min;
2188 atomic_read_relaxed(&pmd->dp->emc_insert_min, &min);
2189
656238ee 2190 if (min && random_uint32() <= min) {
4c30b246
CL
2191 emc_insert(&pmd->flow_cache, key, flow);
2192 }
2193}
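/* Worked example for the sampling above: with the default
 * min == UINT32_MAX / 100, random_uint32() lands at or below 'min' roughly
 * once per hundred calls, so about 1% of megaflow hits promote their flow
 * into the EMC.  Setting emc-insert-inv-prob=1 makes min == UINT32_MAX and
 * every hit inserts; emc-insert-inv-prob=0 yields min == 0, which disables
 * EMC insertion altogether (see dpif_netdev_set_config()). */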
2194
9bbf1c3d 2195static inline struct dp_netdev_flow *
0de8783a 2196emc_lookup(struct emc_cache *cache, const struct netdev_flow_key *key)
9bbf1c3d
DDP
2197{
2198 struct emc_entry *current_entry;
2199
0de8783a
JR
2200 EMC_FOR_EACH_POS_WITH_HASH(cache, current_entry, key->hash) {
2201 if (current_entry->key.hash == key->hash
2202 && emc_entry_alive(current_entry)
2203 && netdev_flow_key_equal_mf(&current_entry->key, &key->mf)) {
9bbf1c3d 2204
0de8783a 2205 /* We found the entry with the 'key->mf' miniflow */
9bbf1c3d
DDP
2206 return current_entry->flow;
2207 }
2208 }
2209
2210 return NULL;
2211}
2212
72865317 2213static struct dp_netdev_flow *
3453b4d6
JS
2214dp_netdev_pmd_lookup_flow(struct dp_netdev_pmd_thread *pmd,
2215 const struct netdev_flow_key *key,
2216 int *lookup_num_p)
2c0ea78f 2217{
3453b4d6 2218 struct dpcls *cls;
0de8783a 2219 struct dpcls_rule *rule;
3453b4d6
JS
2220 odp_port_t in_port = u32_to_odp(MINIFLOW_GET_U32(&key->mf, in_port));
2221 struct dp_netdev_flow *netdev_flow = NULL;
2c0ea78f 2222
3453b4d6
JS
2223 cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port);
2224 if (OVS_LIKELY(cls)) {
2225 dpcls_lookup(cls, key, &rule, 1, lookup_num_p);
2226 netdev_flow = dp_netdev_flow_cast(rule);
2227 }
8a4e3a85 2228 return netdev_flow;
2c0ea78f
GS
2229}
2230
2231static struct dp_netdev_flow *
1c1e46ed
AW
2232dp_netdev_pmd_find_flow(const struct dp_netdev_pmd_thread *pmd,
2233 const ovs_u128 *ufidp, const struct nlattr *key,
2234 size_t key_len)
72865317 2235{
1763b4b8 2236 struct dp_netdev_flow *netdev_flow;
70e5ed6f
JS
2237 struct flow flow;
2238 ovs_u128 ufid;
2239
2240 /* If a UFID is not provided, determine one based on the key. */
2241 if (!ufidp && key && key_len
f0fb825a 2242 && !dpif_netdev_flow_from_nlattrs(key, key_len, &flow, false)) {
1c1e46ed 2243 dpif_flow_hash(pmd->dp->dpif, &flow, sizeof flow, &ufid);
70e5ed6f
JS
2244 ufidp = &ufid;
2245 }
72865317 2246
70e5ed6f
JS
2247 if (ufidp) {
2248 CMAP_FOR_EACH_WITH_HASH (netdev_flow, node, dp_netdev_flow_hash(ufidp),
1c1e46ed 2249 &pmd->flow_table) {
2ff8484b 2250 if (ovs_u128_equals(netdev_flow->ufid, *ufidp)) {
70e5ed6f
JS
2251 return netdev_flow;
2252 }
72865317
BP
2253 }
2254 }
8a4e3a85 2255
72865317
BP
2256 return NULL;
2257}
2258
2259static void
eb94da30 2260get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow_,
1763b4b8 2261 struct dpif_flow_stats *stats)
feebdea2 2262{
eb94da30
DDP
2263 struct dp_netdev_flow *netdev_flow;
2264 unsigned long long n;
2265 long long used;
2266 uint16_t flags;
2267
2268 netdev_flow = CONST_CAST(struct dp_netdev_flow *, netdev_flow_);
2269
2270 atomic_read_relaxed(&netdev_flow->stats.packet_count, &n);
2271 stats->n_packets = n;
2272 atomic_read_relaxed(&netdev_flow->stats.byte_count, &n);
2273 stats->n_bytes = n;
2274 atomic_read_relaxed(&netdev_flow->stats.used, &used);
2275 stats->used = used;
2276 atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags);
2277 stats->tcp_flags = flags;
72865317
BP
2278}
2279
7af12bd7
JS
2280/* Converts to the dpif_flow format, using 'key_buf' and 'mask_buf' for
2281 * storing the netlink-formatted key/mask. 'key_buf' may be the same as
2282 * 'mask_buf'. Actions will be returned without copying, by relying on RCU to
2283 * protect them. */
6fe09f8c 2284static void
70e5ed6f 2285dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow *netdev_flow,
7af12bd7 2286 struct ofpbuf *key_buf, struct ofpbuf *mask_buf,
64bb477f 2287 struct dpif_flow *flow, bool terse)
6fe09f8c 2288{
64bb477f
JS
2289 if (terse) {
2290 memset(flow, 0, sizeof *flow);
2291 } else {
2292 struct flow_wildcards wc;
2293 struct dp_netdev_actions *actions;
2294 size_t offset;
5262eea1
JG
2295 struct odp_flow_key_parms odp_parms = {
2296 .flow = &netdev_flow->flow,
2297 .mask = &wc.masks,
2494ccd7 2298 .support = dp_netdev_support,
5262eea1 2299 };
64bb477f
JS
2300
2301 miniflow_expand(&netdev_flow->cr.mask->mf, &wc.masks);
f4b835bb
JR
2302 /* in_port is exact-matched, but we have left it out of the mask for
2303 * optimization reasons. Add in_port back to the mask. */
2304 wc.masks.in_port.odp_port = ODPP_NONE;
64bb477f
JS
2305
2306 /* Key */
6fd6ed71 2307 offset = key_buf->size;
64bb477f 2308 flow->key = ofpbuf_tail(key_buf);
5262eea1 2309 odp_flow_key_from_flow(&odp_parms, key_buf);
6fd6ed71 2310 flow->key_len = key_buf->size - offset;
64bb477f
JS
2311
2312 /* Mask */
6fd6ed71 2313 offset = mask_buf->size;
64bb477f 2314 flow->mask = ofpbuf_tail(mask_buf);
ec1f6f32 2315 odp_parms.key_buf = key_buf;
5262eea1 2316 odp_flow_key_from_mask(&odp_parms, mask_buf);
6fd6ed71 2317 flow->mask_len = mask_buf->size - offset;
64bb477f
JS
2318
2319 /* Actions */
2320 actions = dp_netdev_flow_get_actions(netdev_flow);
2321 flow->actions = actions->actions;
2322 flow->actions_len = actions->size;
2323 }
6fe09f8c 2324
70e5ed6f
JS
2325 flow->ufid = netdev_flow->ufid;
2326 flow->ufid_present = true;
1c1e46ed 2327 flow->pmd_id = netdev_flow->pmd_id;
6fe09f8c
JS
2328 get_dpif_flow_stats(netdev_flow, &flow->stats);
2329}
2330
36956a7d 2331static int
8c301900
JR
2332dpif_netdev_mask_from_nlattrs(const struct nlattr *key, uint32_t key_len,
2333 const struct nlattr *mask_key,
2334 uint32_t mask_key_len, const struct flow *flow,
f0fb825a 2335 struct flow_wildcards *wc, bool probe)
8c301900 2336{
ca8d3442
DDP
2337 enum odp_key_fitness fitness;
2338
8d8ab6c2 2339 fitness = odp_flow_key_to_mask(mask_key, mask_key_len, wc, flow);
ca8d3442 2340 if (fitness) {
f0fb825a
EG
2341 if (!probe) {
2342 /* This should not happen: it indicates that
2343 * odp_flow_key_from_mask() and odp_flow_key_to_mask()
2344 * disagree on the acceptable form of a mask. Log the problem
2345 * as an error, with enough details to enable debugging. */
2346 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2347
2348 if (!VLOG_DROP_ERR(&rl)) {
2349 struct ds s;
2350
2351 ds_init(&s);
2352 odp_flow_format(key, key_len, mask_key, mask_key_len, NULL, &s,
2353 true);
2354 VLOG_ERR("internal error parsing flow mask %s (%s)",
2355 ds_cstr(&s), odp_key_fitness_to_string(fitness));
2356 ds_destroy(&s);
2357 }
8c301900 2358 }
ca8d3442
DDP
2359
2360 return EINVAL;
8c301900
JR
2361 }
2362
2363 return 0;
2364}
2365
2366static int
2367dpif_netdev_flow_from_nlattrs(const struct nlattr *key, uint32_t key_len,
f0fb825a 2368 struct flow *flow, bool probe)
36956a7d 2369{
8d8ab6c2 2370 if (odp_flow_key_to_flow(key, key_len, flow)) {
f0fb825a
EG
2371 if (!probe) {
2372 /* This should not happen: it indicates that
2373 * odp_flow_key_from_flow() and odp_flow_key_to_flow() disagree on
2374 * the acceptable form of a flow. Log the problem as an error,
2375 * with enough details to enable debugging. */
2376 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2377
2378 if (!VLOG_DROP_ERR(&rl)) {
2379 struct ds s;
2380
2381 ds_init(&s);
2382 odp_flow_format(key, key_len, NULL, 0, NULL, &s, true);
2383 VLOG_ERR("internal error parsing flow key %s", ds_cstr(&s));
2384 ds_destroy(&s);
2385 }
36956a7d
BP
2386 }
2387
2388 return EINVAL;
2389 }
2390
5cf3edb3 2391 if (flow->ct_state & DP_NETDEV_CS_UNSUPPORTED_MASK) {
07659514
JS
2392 return EINVAL;
2393 }
2394
36956a7d
BP
2395 return 0;
2396}
2397
72865317 2398static int
6fe09f8c 2399dpif_netdev_flow_get(const struct dpif *dpif, const struct dpif_flow_get *get)
72865317
BP
2400{
2401 struct dp_netdev *dp = get_dp_netdev(dpif);
1763b4b8 2402 struct dp_netdev_flow *netdev_flow;
1c1e46ed 2403 struct dp_netdev_pmd_thread *pmd;
c673049c
IM
2404 struct hmapx to_find = HMAPX_INITIALIZER(&to_find);
2405 struct hmapx_node *node;
2406 int error = EINVAL;
2407
2408 if (get->pmd_id == PMD_ID_NULL) {
2409 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
2410 if (dp_netdev_pmd_try_ref(pmd) && !hmapx_add(&to_find, pmd)) {
2411 dp_netdev_pmd_unref(pmd);
2412 }
2413 }
2414 } else {
2415 pmd = dp_netdev_get_pmd(dp, get->pmd_id);
2416 if (!pmd) {
2417 goto out;
2418 }
2419 hmapx_add(&to_find, pmd);
1c1e46ed
AW
2420 }
2421
c673049c
IM
2422 if (!hmapx_count(&to_find)) {
2423 goto out;
72865317 2424 }
1c1e46ed 2425
c673049c
IM
2426 HMAPX_FOR_EACH (node, &to_find) {
2427 pmd = (struct dp_netdev_pmd_thread *) node->data;
2428 netdev_flow = dp_netdev_pmd_find_flow(pmd, get->ufid, get->key,
2429 get->key_len);
2430 if (netdev_flow) {
2431 dp_netdev_flow_to_dpif_flow(netdev_flow, get->buffer, get->buffer,
2432 get->flow, false);
2433 error = 0;
2434 break;
2435 } else {
2436 error = ENOENT;
2437 }
2438 }
bc4a05c6 2439
c673049c
IM
2440 HMAPX_FOR_EACH (node, &to_find) {
2441 pmd = (struct dp_netdev_pmd_thread *) node->data;
2442 dp_netdev_pmd_unref(pmd);
2443 }
2444out:
2445 hmapx_destroy(&to_find);
5279f8fd 2446 return error;
72865317
BP
2447}
2448
0de8783a 2449static struct dp_netdev_flow *
1c1e46ed
AW
2450dp_netdev_flow_add(struct dp_netdev_pmd_thread *pmd,
2451 struct match *match, const ovs_u128 *ufid,
ae2ceebd 2452 const struct nlattr *actions, size_t actions_len)
1c1e46ed 2453 OVS_REQUIRES(pmd->flow_mutex)
72865317 2454{
0de8783a
JR
2455 struct dp_netdev_flow *flow;
2456 struct netdev_flow_key mask;
3453b4d6 2457 struct dpcls *cls;
f4b835bb
JR
2458
2459 /* Make sure in_port is exact matched before we read it. */
2460 ovs_assert(match->wc.masks.in_port.odp_port == ODPP_NONE);
3453b4d6 2461 odp_port_t in_port = match->flow.in_port.odp_port;
ed79f89a 2462
f4b835bb
JR
2463 /* As we select the dpcls based on the port number, each netdev flow
2464 * belonging to the same dpcls will have the same odp_port value.
2465 * For performance reasons we wildcard odp_port here in the mask. In the
2466 * typical case dp_hash is also wildcarded, and the resulting 8-byte
2467 * chunk {dp_hash, in_port} will be ignored by netdev_flow_mask_init() and
2468 * will not be part of the subtable mask.
2469 * This will speed up the hash computation during dpcls_lookup() because
2470 * there is one less call to hash_add64() in this case. */
2471 match->wc.masks.in_port.odp_port = 0;
0de8783a 2472 netdev_flow_mask_init(&mask, match);
f4b835bb
JR
2473 match->wc.masks.in_port.odp_port = ODPP_NONE;
2474
0de8783a 2475 /* Make sure wc does not have metadata. */
5fcff47b
JR
2476 ovs_assert(!FLOWMAP_HAS_FIELD(&mask.mf.map, metadata)
2477 && !FLOWMAP_HAS_FIELD(&mask.mf.map, regs));
679ba04c 2478
0de8783a 2479 /* Do not allocate extra space. */
caeb4906 2480 flow = xmalloc(sizeof *flow - sizeof flow->cr.flow.mf + mask.len);
1c1e46ed 2481 memset(&flow->stats, 0, sizeof flow->stats);
0de8783a 2482 flow->dead = false;
11e5cf1f 2483 flow->batch = NULL;
bd5131ba 2484 *CONST_CAST(unsigned *, &flow->pmd_id) = pmd->core_id;
0de8783a 2485 *CONST_CAST(struct flow *, &flow->flow) = match->flow;
70e5ed6f 2486 *CONST_CAST(ovs_u128 *, &flow->ufid) = *ufid;
0de8783a 2487 ovs_refcount_init(&flow->ref_cnt);
0de8783a 2488 ovsrcu_set(&flow->actions, dp_netdev_actions_create(actions, actions_len));
2c0ea78f 2489
0de8783a 2490 netdev_flow_key_init_masked(&flow->cr.flow, &match->flow, &mask);
3453b4d6 2491
f4b835bb 2492 /* Select dpcls for in_port. Relies on in_port to be exact match. */
3453b4d6
JS
2493 cls = dp_netdev_pmd_find_dpcls(pmd, in_port);
2494 dpcls_insert(cls, &flow->cr, &mask);
72865317 2495
4c75aaab
EJ
2496 cmap_insert(&pmd->flow_table, CONST_CAST(struct cmap_node *, &flow->node),
2497 dp_netdev_flow_hash(&flow->ufid));
2498
beb75a40 2499 if (OVS_UNLIKELY(!VLOG_DROP_DBG((&upcall_rl)))) {
623540e4 2500 struct ds ds = DS_EMPTY_INITIALIZER;
9044f2c1
JG
2501 struct ofpbuf key_buf, mask_buf;
2502 struct odp_flow_key_parms odp_parms = {
2503 .flow = &match->flow,
2504 .mask = &match->wc.masks,
2505 .support = dp_netdev_support,
2506 };
2507
2508 ofpbuf_init(&key_buf, 0);
2509 ofpbuf_init(&mask_buf, 0);
623540e4 2510
9044f2c1
JG
2511 odp_flow_key_from_flow(&odp_parms, &key_buf);
2512 odp_parms.key_buf = &key_buf;
2513 odp_flow_key_from_mask(&odp_parms, &mask_buf);
0de8783a 2514
623540e4 2515 ds_put_cstr(&ds, "flow_add: ");
70e5ed6f
JS
2516 odp_format_ufid(ufid, &ds);
2517 ds_put_cstr(&ds, " ");
9044f2c1
JG
2518 odp_flow_format(key_buf.data, key_buf.size,
2519 mask_buf.data, mask_buf.size,
2520 NULL, &ds, false);
623540e4 2521 ds_put_cstr(&ds, ", actions:");
0722f341 2522 format_odp_actions(&ds, actions, actions_len, NULL);
623540e4 2523
beb75a40 2524 VLOG_DBG("%s", ds_cstr(&ds));
623540e4 2525
9044f2c1
JG
2526 ofpbuf_uninit(&key_buf);
2527 ofpbuf_uninit(&mask_buf);
beb75a40
JS
2528
2529 /* Add a printout of the actual match installed. */
2530 struct match m;
2531 ds_clear(&ds);
2532 ds_put_cstr(&ds, "flow match: ");
2533 miniflow_expand(&flow->cr.flow.mf, &m.flow);
2534 miniflow_expand(&flow->cr.mask->mf, &m.wc.masks);
b2f4b622 2535 memset(&m.tun_md, 0, sizeof m.tun_md);
beb75a40
JS
2536 match_format(&m, NULL, &ds, OFP_DEFAULT_PRIORITY);
2537
2538 VLOG_DBG("%s", ds_cstr(&ds));
2539
623540e4
EJ
2540 ds_destroy(&ds);
2541 }
2542
0de8783a 2543 return flow;
72865317
BP
2544}
2545
72865317 2546static int
f5d317a1
DDP
2547flow_put_on_pmd(struct dp_netdev_pmd_thread *pmd,
2548 struct netdev_flow_key *key,
2549 struct match *match,
2550 ovs_u128 *ufid,
2551 const struct dpif_flow_put *put,
2552 struct dpif_flow_stats *stats)
72865317 2553{
1763b4b8 2554 struct dp_netdev_flow *netdev_flow;
f5d317a1 2555 int error = 0;
72865317 2556
f5d317a1
DDP
2557 if (stats) {
2558 memset(stats, 0, sizeof *stats);
70e5ed6f
JS
2559 }
2560
1c1e46ed 2561 ovs_mutex_lock(&pmd->flow_mutex);
f5d317a1 2562 netdev_flow = dp_netdev_pmd_lookup_flow(pmd, key, NULL);
1763b4b8 2563 if (!netdev_flow) {
89625d1e 2564 if (put->flags & DPIF_FP_CREATE) {
1c1e46ed 2565 if (cmap_count(&pmd->flow_table) < MAX_FLOWS) {
f5d317a1 2566 dp_netdev_flow_add(pmd, match, ufid, put->actions,
70e5ed6f 2567 put->actions_len);
0de8783a 2568 error = 0;
72865317 2569 } else {
5279f8fd 2570 error = EFBIG;
72865317
BP
2571 }
2572 } else {
5279f8fd 2573 error = ENOENT;
72865317
BP
2574 }
2575 } else {
beb75a40 2576 if (put->flags & DPIF_FP_MODIFY) {
8a4e3a85
BP
2577 struct dp_netdev_actions *new_actions;
2578 struct dp_netdev_actions *old_actions;
2579
2580 new_actions = dp_netdev_actions_create(put->actions,
2581 put->actions_len);
2582
61e7deb1
BP
2583 old_actions = dp_netdev_flow_get_actions(netdev_flow);
2584 ovsrcu_set(&netdev_flow->actions, new_actions);
679ba04c 2585
f5d317a1
DDP
2586 if (stats) {
2587 get_dpif_flow_stats(netdev_flow, stats);
a84cb64a
BP
2588 }
2589 if (put->flags & DPIF_FP_ZERO_STATS) {
97447f55
DDP
2590 /* XXX: The userspace datapath uses thread-local statistics
2591 * (for flows), which should be updated only by the owning
2592 * thread. Since we cannot write to the stats memory here,
2593 * we choose not to support this flag. Please note:
2594 * - This feature is currently used only by dpctl commands with
2595 * option --clear.
2596 * - Should the need arise, this operation can be implemented
2597 * by keeping a base value (to be updated here) for each
2598 * counter, and subtracting it before outputting the stats. */
2599 error = EOPNOTSUPP;
72865317 2600 }
8a4e3a85 2601
61e7deb1 2602 ovsrcu_postpone(dp_netdev_actions_free, old_actions);
2c0ea78f 2603 } else if (put->flags & DPIF_FP_CREATE) {
5279f8fd 2604 error = EEXIST;
2c0ea78f
GS
2605 } else {
2606 /* Overlapping flow. */
2607 error = EINVAL;
72865317
BP
2608 }
2609 }
1c1e46ed 2610 ovs_mutex_unlock(&pmd->flow_mutex);
5279f8fd 2611 return error;
72865317
BP
2612}
2613
72865317 2614static int
f5d317a1 2615dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put)
72865317
BP
2616{
2617 struct dp_netdev *dp = get_dp_netdev(dpif);
beb75a40 2618 struct netdev_flow_key key, mask;
1c1e46ed 2619 struct dp_netdev_pmd_thread *pmd;
f5d317a1
DDP
2620 struct match match;
2621 ovs_u128 ufid;
2622 int error;
f0fb825a 2623 bool probe = put->flags & DPIF_FP_PROBE;
72865317 2624
f5d317a1
DDP
2625 if (put->stats) {
2626 memset(put->stats, 0, sizeof *put->stats);
2627 }
f0fb825a
EG
2628 error = dpif_netdev_flow_from_nlattrs(put->key, put->key_len, &match.flow,
2629 probe);
f5d317a1
DDP
2630 if (error) {
2631 return error;
2632 }
2633 error = dpif_netdev_mask_from_nlattrs(put->key, put->key_len,
2634 put->mask, put->mask_len,
f0fb825a 2635 &match.flow, &match.wc, probe);
f5d317a1
DDP
2636 if (error) {
2637 return error;
1c1e46ed
AW
2638 }
2639
f5d317a1
DDP
2640 if (put->ufid) {
2641 ufid = *put->ufid;
2642 } else {
2643 dpif_flow_hash(dpif, &match.flow, sizeof match.flow, &ufid);
2644 }
2645
2646 /* Must produce a netdev_flow_key for lookup.
beb75a40
JS
2647 * Use the same method as employed to create the key when adding
2648 * the flow to the dpcls to make sure they match. */
2649 netdev_flow_mask_init(&mask, &match);
2650 netdev_flow_key_init_masked(&key, &match.flow, &mask);
f5d317a1
DDP
2651
2652 if (put->pmd_id == PMD_ID_NULL) {
2653 if (cmap_count(&dp->poll_threads) == 0) {
2654 return EINVAL;
2655 }
2656 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
2657 struct dpif_flow_stats pmd_stats;
2658 int pmd_error;
2659
2660 pmd_error = flow_put_on_pmd(pmd, &key, &match, &ufid, put,
2661 &pmd_stats);
2662 if (pmd_error) {
2663 error = pmd_error;
2664 } else if (put->stats) {
2665 put->stats->n_packets += pmd_stats.n_packets;
2666 put->stats->n_bytes += pmd_stats.n_bytes;
2667 put->stats->used = MAX(put->stats->used, pmd_stats.used);
2668 put->stats->tcp_flags |= pmd_stats.tcp_flags;
2669 }
2670 }
2671 } else {
2672 pmd = dp_netdev_get_pmd(dp, put->pmd_id);
2673 if (!pmd) {
2674 return EINVAL;
2675 }
2676 error = flow_put_on_pmd(pmd, &key, &match, &ufid, put, put->stats);
2677 dp_netdev_pmd_unref(pmd);
2678 }
2679
2680 return error;
2681}
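/* When 'put->pmd_id' is PMD_ID_NULL the flow is installed on every PMD
 * thread and the per-PMD statistics are folded together above: packet and
 * byte counts add up, 'used' keeps the most recent timestamp, and the TCP
 * flags are OR'ed, so the caller sees a single aggregate dpif_flow_stats. */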
2682
2683static int
2684flow_del_on_pmd(struct dp_netdev_pmd_thread *pmd,
2685 struct dpif_flow_stats *stats,
2686 const struct dpif_flow_del *del)
2687{
2688 struct dp_netdev_flow *netdev_flow;
2689 int error = 0;
2690
1c1e46ed
AW
2691 ovs_mutex_lock(&pmd->flow_mutex);
2692 netdev_flow = dp_netdev_pmd_find_flow(pmd, del->ufid, del->key,
2693 del->key_len);
1763b4b8 2694 if (netdev_flow) {
f5d317a1
DDP
2695 if (stats) {
2696 get_dpif_flow_stats(netdev_flow, stats);
feebdea2 2697 }
1c1e46ed 2698 dp_netdev_pmd_remove_flow(pmd, netdev_flow);
72865317 2699 } else {
5279f8fd 2700 error = ENOENT;
72865317 2701 }
1c1e46ed 2702 ovs_mutex_unlock(&pmd->flow_mutex);
f5d317a1
DDP
2703
2704 return error;
2705}
2706
2707static int
2708dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del)
2709{
2710 struct dp_netdev *dp = get_dp_netdev(dpif);
2711 struct dp_netdev_pmd_thread *pmd;
2712 int error = 0;
2713
2714 if (del->stats) {
2715 memset(del->stats, 0, sizeof *del->stats);
2716 }
2717
2718 if (del->pmd_id == PMD_ID_NULL) {
2719 if (cmap_count(&dp->poll_threads) == 0) {
2720 return EINVAL;
2721 }
2722 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
2723 struct dpif_flow_stats pmd_stats;
2724 int pmd_error;
2725
2726 pmd_error = flow_del_on_pmd(pmd, &pmd_stats, del);
2727 if (pmd_error) {
2728 error = pmd_error;
2729 } else if (del->stats) {
2730 del->stats->n_packets += pmd_stats.n_packets;
2731 del->stats->n_bytes += pmd_stats.n_bytes;
2732 del->stats->used = MAX(del->stats->used, pmd_stats.used);
2733 del->stats->tcp_flags |= pmd_stats.tcp_flags;
2734 }
2735 }
2736 } else {
2737 pmd = dp_netdev_get_pmd(dp, del->pmd_id);
2738 if (!pmd) {
2739 return EINVAL;
2740 }
2741 error = flow_del_on_pmd(pmd, del->stats, del);
2742 dp_netdev_pmd_unref(pmd);
2743 }
2744
5279f8fd
BP
2745
2746 return error;
72865317
BP
2747}
2748
ac64794a
BP
2749struct dpif_netdev_flow_dump {
2750 struct dpif_flow_dump up;
1c1e46ed
AW
2751 struct cmap_position poll_thread_pos;
2752 struct cmap_position flow_pos;
2753 struct dp_netdev_pmd_thread *cur_pmd;
d2ad7ef1
JS
2754 int status;
2755 struct ovs_mutex mutex;
e723fd32
JS
2756};
2757
ac64794a
BP
2758static struct dpif_netdev_flow_dump *
2759dpif_netdev_flow_dump_cast(struct dpif_flow_dump *dump)
72865317 2760{
ac64794a 2761 return CONTAINER_OF(dump, struct dpif_netdev_flow_dump, up);
e723fd32
JS
2762}
2763
ac64794a 2764static struct dpif_flow_dump *
7e8b7199
PB
2765dpif_netdev_flow_dump_create(const struct dpif *dpif_, bool terse,
2766 char *type OVS_UNUSED)
e723fd32 2767{
ac64794a 2768 struct dpif_netdev_flow_dump *dump;
e723fd32 2769
1c1e46ed 2770 dump = xzalloc(sizeof *dump);
ac64794a 2771 dpif_flow_dump_init(&dump->up, dpif_);
64bb477f 2772 dump->up.terse = terse;
ac64794a
BP
2773 ovs_mutex_init(&dump->mutex);
2774
2775 return &dump->up;
e723fd32
JS
2776}
2777
2778static int
ac64794a 2779dpif_netdev_flow_dump_destroy(struct dpif_flow_dump *dump_)
e723fd32 2780{
ac64794a 2781 struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_);
e723fd32 2782
ac64794a
BP
2783 ovs_mutex_destroy(&dump->mutex);
2784 free(dump);
704a1e09
BP
2785 return 0;
2786}
2787
ac64794a
BP
2788struct dpif_netdev_flow_dump_thread {
2789 struct dpif_flow_dump_thread up;
2790 struct dpif_netdev_flow_dump *dump;
8bb113da
RW
2791 struct odputil_keybuf keybuf[FLOW_DUMP_MAX_BATCH];
2792 struct odputil_keybuf maskbuf[FLOW_DUMP_MAX_BATCH];
ac64794a
BP
2793};
2794
2795static struct dpif_netdev_flow_dump_thread *
2796dpif_netdev_flow_dump_thread_cast(struct dpif_flow_dump_thread *thread)
2797{
2798 return CONTAINER_OF(thread, struct dpif_netdev_flow_dump_thread, up);
2799}
2800
2801static struct dpif_flow_dump_thread *
2802dpif_netdev_flow_dump_thread_create(struct dpif_flow_dump *dump_)
2803{
2804 struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_);
2805 struct dpif_netdev_flow_dump_thread *thread;
2806
2807 thread = xmalloc(sizeof *thread);
2808 dpif_flow_dump_thread_init(&thread->up, &dump->up);
2809 thread->dump = dump;
2810 return &thread->up;
2811}
2812
2813static void
2814dpif_netdev_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_)
2815{
2816 struct dpif_netdev_flow_dump_thread *thread
2817 = dpif_netdev_flow_dump_thread_cast(thread_);
2818
2819 free(thread);
2820}
2821
704a1e09 2822static int
ac64794a 2823dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
8bb113da 2824 struct dpif_flow *flows, int max_flows)
ac64794a
BP
2825{
2826 struct dpif_netdev_flow_dump_thread *thread
2827 = dpif_netdev_flow_dump_thread_cast(thread_);
2828 struct dpif_netdev_flow_dump *dump = thread->dump;
8bb113da 2829 struct dp_netdev_flow *netdev_flows[FLOW_DUMP_MAX_BATCH];
8bb113da
RW
2830 int n_flows = 0;
2831 int i;
14608a15 2832
ac64794a 2833 ovs_mutex_lock(&dump->mutex);
8bb113da 2834 if (!dump->status) {
1c1e46ed
AW
2835 struct dpif_netdev *dpif = dpif_netdev_cast(thread->up.dpif);
2836 struct dp_netdev *dp = get_dp_netdev(&dpif->dpif);
2837 struct dp_netdev_pmd_thread *pmd = dump->cur_pmd;
2838 int flow_limit = MIN(max_flows, FLOW_DUMP_MAX_BATCH);
2839
2840 /* The first call to dump_next() extracts the first pmd thread.
2841 * If there is no pmd thread, it returns immediately. */
2842 if (!pmd) {
2843 pmd = dp_netdev_pmd_get_next(dp, &dump->poll_thread_pos);
2844 if (!pmd) {
2845 ovs_mutex_unlock(&dump->mutex);
2846 return n_flows;
8bb113da 2847
8bb113da 2848 }
d2ad7ef1 2849 }
1c1e46ed
AW
2850
2851 do {
2852 for (n_flows = 0; n_flows < flow_limit; n_flows++) {
2853 struct cmap_node *node;
2854
2855 node = cmap_next_position(&pmd->flow_table, &dump->flow_pos);
2856 if (!node) {
2857 break;
2858 }
2859 netdev_flows[n_flows] = CONTAINER_OF(node,
2860 struct dp_netdev_flow,
2861 node);
2862 }
2863 /* When we finish dumping the current pmd thread, move on to
2864 * the next one. */
2865 if (n_flows < flow_limit) {
2866 memset(&dump->flow_pos, 0, sizeof dump->flow_pos);
2867 dp_netdev_pmd_unref(pmd);
2868 pmd = dp_netdev_pmd_get_next(dp, &dump->poll_thread_pos);
2869 if (!pmd) {
2870 dump->status = EOF;
2871 break;
2872 }
2873 }
2874 /* Keep the reference for the next caller. */
2875 dump->cur_pmd = pmd;
2876
2877 /* If the current dump is empty, do not exit the loop, since the
2878 * remaining pmds could have flows to be dumped. Just dump again
2879 * on the new 'pmd'. */
2880 } while (!n_flows);
8a4e3a85 2881 }
ac64794a 2882 ovs_mutex_unlock(&dump->mutex);
ac64794a 2883
8bb113da
RW
2884 for (i = 0; i < n_flows; i++) {
2885 struct odputil_keybuf *maskbuf = &thread->maskbuf[i];
2886 struct odputil_keybuf *keybuf = &thread->keybuf[i];
2887 struct dp_netdev_flow *netdev_flow = netdev_flows[i];
2888 struct dpif_flow *f = &flows[i];
7af12bd7 2889 struct ofpbuf key, mask;
8bb113da 2890
7af12bd7
JS
2891 ofpbuf_use_stack(&key, keybuf, sizeof *keybuf);
2892 ofpbuf_use_stack(&mask, maskbuf, sizeof *maskbuf);
64bb477f
JS
2893 dp_netdev_flow_to_dpif_flow(netdev_flow, &key, &mask, f,
2894 dump->up.terse);
8bb113da 2895 }
feebdea2 2896
8bb113da 2897 return n_flows;
72865317
BP
2898}
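/* Each call to dpif_netdev_flow_dump_next() returns at most
 * FLOW_DUMP_MAX_BATCH (50) flows, walking the per-PMD flow tables one
 * thread at a time.  'dump->mutex' serializes access to the shared cursor,
 * so several dump threads can pull disjoint batches from the same dump in
 * parallel, while the netlink formatting of each batch happens outside the
 * lock. */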
2899
2900static int
758c456d 2901dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute)
65f13b50 2902 OVS_NO_THREAD_SAFETY_ANALYSIS
72865317
BP
2903{
2904 struct dp_netdev *dp = get_dp_netdev(dpif);
65f13b50 2905 struct dp_netdev_pmd_thread *pmd;
1895cc8d 2906 struct dp_packet_batch pp;
72865317 2907
cf62fa4c
PS
2908 if (dp_packet_size(execute->packet) < ETH_HEADER_LEN ||
2909 dp_packet_size(execute->packet) > UINT16_MAX) {
72865317
BP
2910 return EINVAL;
2911 }
2912
65f13b50
AW
2913 /* Tries finding the 'pmd'. If NULL is returned, that means
2914 * the current thread is a non-pmd thread and should use
b19befae 2915 * dp_netdev_get_pmd(dp, NON_PMD_CORE_ID). */
65f13b50
AW
2916 pmd = ovsthread_getspecific(dp->per_pmd_key);
2917 if (!pmd) {
b19befae 2918 pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID);
546e57d4
DDP
2919 if (!pmd) {
2920 return EBUSY;
2921 }
65f13b50
AW
2922 }
2923
05267613
AZ
2924 if (execute->probe) {
2925 /* If this is part of a probe, drop the packet, since executing
2926 * the action may actually cause spurious packets to be sent into
2927 * the network. */
2928 return 0;
2929 }
2930
65f13b50
AW
2931 /* If the current thread is non-pmd thread, acquires
2932 * the 'non_pmd_mutex'. */
2933 if (pmd->core_id == NON_PMD_CORE_ID) {
2934 ovs_mutex_lock(&dp->non_pmd_mutex);
2935 }
1c1e46ed 2936
36d8de17
DDP
2937 /* The action processing expects the RSS hash to be valid, because
2938 * it's always initialized at the beginning of datapath processing.
2939 * In this case, though, 'execute->packet' may not have gone through
2940 * the datapath at all, it may have been generated by the upper layer
2941 * (OpenFlow packet-out, BFD frame, ...). */
2942 if (!dp_packet_rss_valid(execute->packet)) {
2943 dp_packet_set_rss_hash(execute->packet,
2944 flow_hash_5tuple(execute->flow, 0));
2945 }
2946
72c84bc2 2947 dp_packet_batch_init_packet(&pp, execute->packet);
66e4ad8a
DDP
2948 dp_netdev_execute_actions(pmd, &pp, false, execute->flow,
2949 execute->actions, execute->actions_len,
2950 time_msec());
36d8de17 2951
65f13b50
AW
2952 if (pmd->core_id == NON_PMD_CORE_ID) {
2953 ovs_mutex_unlock(&dp->non_pmd_mutex);
e9985d6a 2954 dp_netdev_pmd_unref(pmd);
65f13b50 2955 }
8a4e3a85 2956
758c456d 2957 return 0;
72865317
BP
2958}
2959
1a0c894a
BP
2960static void
2961dpif_netdev_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops)
2962{
2963 size_t i;
2964
2965 for (i = 0; i < n_ops; i++) {
2966 struct dpif_op *op = ops[i];
2967
2968 switch (op->type) {
2969 case DPIF_OP_FLOW_PUT:
2970 op->error = dpif_netdev_flow_put(dpif, &op->u.flow_put);
2971 break;
2972
2973 case DPIF_OP_FLOW_DEL:
2974 op->error = dpif_netdev_flow_del(dpif, &op->u.flow_del);
2975 break;
2976
2977 case DPIF_OP_EXECUTE:
2978 op->error = dpif_netdev_execute(dpif, &op->u.execute);
2979 break;
6fe09f8c
JS
2980
2981 case DPIF_OP_FLOW_GET:
2982 op->error = dpif_netdev_flow_get(dpif, &op->u.flow_get);
2983 break;
1a0c894a
BP
2984 }
2985 }
2986}
2987
d4f6865c
DDP
2988/* Applies datapath configuration from the database. Some of the changes are
2989 * actually applied in dpif_netdev_run(). */
f2eee189 2990static int
d4f6865c 2991dpif_netdev_set_config(struct dpif *dpif, const struct smap *other_config)
f2eee189
AW
2992{
2993 struct dp_netdev *dp = get_dp_netdev(dpif);
d4f6865c 2994 const char *cmask = smap_get(other_config, "pmd-cpu-mask");
4c30b246
CL
2995 unsigned long long insert_prob =
2996 smap_get_ullong(other_config, "emc-insert-inv-prob",
2997 DEFAULT_EM_FLOW_INSERT_INV_PROB);
2998 uint32_t insert_min, cur_min;
f2eee189 2999
a6a426d6
IM
3000 if (!nullable_string_is_equal(dp->pmd_cmask, cmask)) {
3001 free(dp->pmd_cmask);
3002 dp->pmd_cmask = nullable_xstrdup(cmask);
3003 dp_netdev_request_reconfigure(dp);
f2eee189
AW
3004 }
3005
4c30b246
CL
3006 atomic_read_relaxed(&dp->emc_insert_min, &cur_min);
3007 if (insert_prob <= UINT32_MAX) {
3008 insert_min = insert_prob == 0 ? 0 : UINT32_MAX / insert_prob;
3009 } else {
3010 insert_min = DEFAULT_EM_FLOW_INSERT_MIN;
3011 insert_prob = DEFAULT_EM_FLOW_INSERT_INV_PROB;
3012 }
3013
3014 if (insert_min != cur_min) {
3015 atomic_store_relaxed(&dp->emc_insert_min, insert_min);
3016 if (insert_min == 0) {
3017 VLOG_INFO("EMC has been disabled");
3018 } else {
3019 VLOG_INFO("EMC insertion probability changed to 1/%llu (~%.2f%%)",
3020 insert_prob, (100 / (float)insert_prob));
3021 }
3022 }
3023
f2eee189
AW
3024 return 0;
3025}
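/* For reference, both knobs handled above are set through the database,
 * e.g. (assuming a standard ovs-vsctl setup):
 *
 *   ovs-vsctl set Open_vSwitch . other_config:pmd-cpu-mask=0x6
 *   ovs-vsctl set Open_vSwitch . other_config:emc-insert-inv-prob=20
 *
 * The first requests PMD threads on cores 1 and 2; the second sets the EMC
 * insertion probability to 1/20 (5%).  The resulting reconfiguration is
 * requested here and applied later in dpif_netdev_run(). */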
3026
3eb67853
IM
3027/* Parses affinity list and returns result in 'core_ids'. */
3028static int
3029parse_affinity_list(const char *affinity_list, unsigned *core_ids, int n_rxq)
3030{
3031 unsigned i;
3032 char *list, *copy, *key, *value;
3033 int error = 0;
3034
3035 for (i = 0; i < n_rxq; i++) {
51c37a56 3036 core_ids[i] = OVS_CORE_UNSPEC;
3eb67853
IM
3037 }
3038
3039 if (!affinity_list) {
3040 return 0;
3041 }
3042
3043 list = copy = xstrdup(affinity_list);
3044
3045 while (ofputil_parse_key_value(&list, &key, &value)) {
3046 int rxq_id, core_id;
3047
3048 if (!str_to_int(key, 0, &rxq_id) || rxq_id < 0
3049 || !str_to_int(value, 0, &core_id) || core_id < 0) {
3050 error = EINVAL;
3051 break;
3052 }
3053
3054 if (rxq_id < n_rxq) {
3055 core_ids[rxq_id] = core_id;
3056 }
3057 }
3058
3059 free(copy);
3060 return error;
3061}
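/* For example, an affinity list of "0:3,1:7,3:8" pins rx queue 0 to core 3,
 * queue 1 to core 7 and queue 3 to core 8; queue 2, not being mentioned,
 * stays at OVS_CORE_UNSPEC and remains subject to automatic placement. */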
3062
3063/* Parses 'affinity_list' and applies configuration if it is valid. */
3064static int
3065dpif_netdev_port_set_rxq_affinity(struct dp_netdev_port *port,
3066 const char *affinity_list)
3067{
3068 unsigned *core_ids, i;
3069 int error = 0;
3070
3071 core_ids = xmalloc(port->n_rxq * sizeof *core_ids);
3072 if (parse_affinity_list(affinity_list, core_ids, port->n_rxq)) {
3073 error = EINVAL;
3074 goto exit;
3075 }
3076
3077 for (i = 0; i < port->n_rxq; i++) {
3078 port->rxqs[i].core_id = core_ids[i];
3079 }
3080
3081exit:
3082 free(core_ids);
3083 return error;
3084}
3085
3086/* Changes the affinity of port's rx queues. The changes are actually applied
3087 * in dpif_netdev_run(). */
3088static int
3089dpif_netdev_port_set_config(struct dpif *dpif, odp_port_t port_no,
3090 const struct smap *cfg)
3091{
3092 struct dp_netdev *dp = get_dp_netdev(dpif);
3093 struct dp_netdev_port *port;
3094 int error = 0;
3095 const char *affinity_list = smap_get(cfg, "pmd-rxq-affinity");
3096
3097 ovs_mutex_lock(&dp->port_mutex);
3098 error = get_port_by_number(dp, port_no, &port);
3099 if (error || !netdev_is_pmd(port->netdev)
3100 || nullable_string_is_equal(affinity_list, port->rxq_affinity_list)) {
3101 goto unlock;
3102 }
3103
3104 error = dpif_netdev_port_set_rxq_affinity(port, affinity_list);
3105 if (error) {
3106 goto unlock;
3107 }
3108 free(port->rxq_affinity_list);
3109 port->rxq_affinity_list = nullable_xstrdup(affinity_list);
3110
3111 dp_netdev_request_reconfigure(dp);
3112unlock:
3113 ovs_mutex_unlock(&dp->port_mutex);
3114 return error;
3115}
3116
5bf93d67
EJ
3117static int
3118dpif_netdev_queue_to_priority(const struct dpif *dpif OVS_UNUSED,
3119 uint32_t queue_id, uint32_t *priority)
3120{
3121 *priority = queue_id;
3122 return 0;
3123}
3124
72865317 3125\f
9ff55ae2 3126/* Creates and returns a new 'struct dp_netdev_actions', whose actions are
1401f6de 3127 * a copy of the 'size' bytes of the 'actions' input parameter. */
a84cb64a
BP
3128struct dp_netdev_actions *
3129dp_netdev_actions_create(const struct nlattr *actions, size_t size)
3130{
3131 struct dp_netdev_actions *netdev_actions;
3132
9ff55ae2
DDP
3133 netdev_actions = xmalloc(sizeof *netdev_actions + size);
3134 memcpy(netdev_actions->actions, actions, size);
a84cb64a
BP
3135 netdev_actions->size = size;
3136
3137 return netdev_actions;
3138}
3139
a84cb64a 3140struct dp_netdev_actions *
61e7deb1 3141dp_netdev_flow_get_actions(const struct dp_netdev_flow *flow)
a84cb64a 3142{
61e7deb1 3143 return ovsrcu_get(struct dp_netdev_actions *, &flow->actions);
a84cb64a
BP
3144}
3145
61e7deb1
BP
3146static void
3147dp_netdev_actions_free(struct dp_netdev_actions *actions)
a84cb64a 3148{
61e7deb1 3149 free(actions);
a84cb64a
BP
3150}
3151\f
55e3ca97
DDP
3152static inline unsigned long long
3153cycles_counter(void)
3154{
3155#ifdef DPDK_NETDEV
3156 return rte_get_tsc_cycles();
3157#else
3158 return 0;
3159#endif
3160}
3161
3162/* Fake mutex to make sure that the calls to cycles_count_* are balanced */
3163extern struct ovs_mutex cycles_counter_fake_mutex;
3164
3165/* Start counting cycles. Must be followed by 'cycles_count_end()' */
3166static inline void
3167cycles_count_start(struct dp_netdev_pmd_thread *pmd)
3168 OVS_ACQUIRES(&cycles_counter_fake_mutex)
3169 OVS_NO_THREAD_SAFETY_ANALYSIS
3170{
3171 pmd->last_cycles = cycles_counter();
3172}
3173
3174/* Stop counting cycles and add them to the counter 'type' */
3175static inline void
3176cycles_count_end(struct dp_netdev_pmd_thread *pmd,
3177 enum pmd_cycles_counter_type type)
3178 OVS_RELEASES(&cycles_counter_fake_mutex)
3179 OVS_NO_THREAD_SAFETY_ANALYSIS
3180{
3181 unsigned long long interval = cycles_counter() - pmd->last_cycles;
3182
3183 non_atomic_ullong_add(&pmd->cycles.n[type], interval);
3184}
e4cfed38 3185
a2ac666d
CL
3186/* Calculate the intermediate cycle result and add to the counter 'type' */
3187static inline void
3188cycles_count_intermediate(struct dp_netdev_pmd_thread *pmd,
c59e759f 3189 struct dp_netdev_rxq *rxq,
a2ac666d
CL
3190 enum pmd_cycles_counter_type type)
3191 OVS_NO_THREAD_SAFETY_ANALYSIS
3192{
3193 unsigned long long new_cycles = cycles_counter();
3194 unsigned long long interval = new_cycles - pmd->last_cycles;
3195 pmd->last_cycles = new_cycles;
3196
3197 non_atomic_ullong_add(&pmd->cycles.n[type], interval);
c59e759f
KT
3198 if (rxq && (type == PMD_CYCLES_PROCESSING)) {
3199 /* Add to the amount of current processing cycles. */
3200 non_atomic_ullong_add(&rxq->cycles[RXQ_CYCLES_PROC_CURR], interval);
3201 }
a2ac666d
CL
3202}
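/* Sketch of how the helpers above pair up (their assumed use in the PMD
 * polling loop): cycles_count_start() snapshots the TSC once when polling
 * begins, cycles_count_intermediate() is called after each rx queue poll to
 * charge the elapsed cycles to PMD_CYCLES_PROCESSING or PMD_CYCLES_IDLE
 * (also crediting the queue's RXQ_CYCLES_PROC_CURR bucket when packets were
 * processed), and cycles_count_end() closes the final interval when the
 * loop exits. */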
3203
4809891b
KT
3204static void
3205dp_netdev_rxq_set_cycles(struct dp_netdev_rxq *rx,
3206 enum rxq_cycles_counter_type type,
3207 unsigned long long cycles)
3208{
3209 atomic_store_relaxed(&rx->cycles[type], cycles);
3210}
3211
3212static uint64_t
3213dp_netdev_rxq_get_cycles(struct dp_netdev_rxq *rx,
3214 enum rxq_cycles_counter_type type)
3215{
3216 unsigned long long processing_cycles;
3217 atomic_read_relaxed(&rx->cycles[type], &processing_cycles);
3218 return processing_cycles;
3219}
3220
3221static void
3222dp_netdev_rxq_set_intrvl_cycles(struct dp_netdev_rxq *rx,
3223 unsigned long long cycles)
3224{
4ee87ad3
BP
3225 unsigned int idx = rx->intrvl_idx++ % PMD_RXQ_INTERVAL_MAX;
3226 atomic_store_relaxed(&rx->cycles_intrvl[idx], cycles);
4809891b
KT
3227}
3228
655856ef
KT
3229static uint64_t
3230dp_netdev_rxq_get_intrvl_cycles(struct dp_netdev_rxq *rx, unsigned idx)
3231{
3232 unsigned long long processing_cycles;
3233 atomic_read_relaxed(&rx->cycles_intrvl[idx], &processing_cycles);
3234 return processing_cycles;
3235}
3236
a2ac666d 3237static int
65f13b50 3238dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
947dc567
DDP
3239 struct netdev_rxq *rx,
3240 odp_port_t port_no)
e4cfed38 3241{
1895cc8d
PS
3242 struct dp_packet_batch batch;
3243 int error;
a2ac666d 3244 int batch_cnt = 0;
e4cfed38 3245
1895cc8d 3246 dp_packet_batch_init(&batch);
947dc567 3247 error = netdev_rxq_recv(rx, &batch);
e4cfed38 3248 if (!error) {
3c33f0ff 3249 *recirc_depth_get() = 0;
41ccaa24 3250
a2ac666d 3251 batch_cnt = batch.count;
947dc567 3252 dp_netdev_input(pmd, &batch, port_no);
e4cfed38 3253 } else if (error != EAGAIN && error != EOPNOTSUPP) {
3c33f0ff 3254 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
e4cfed38
PS
3255
3256 VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
947dc567 3257 netdev_rxq_get_name(rx), ovs_strerror(error));
e4cfed38 3258 }
a2ac666d
CL
3259
3260 return batch_cnt;
e4cfed38
PS
3261}
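/* Note that '*recirc_depth_get() = 0' above resets the per-thread
 * recirculation counter before each received batch enters the datapath, so
 * the MAX_RECIRC_DEPTH guard applies per batch rather than accumulating
 * across polls. */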
3262
e32971b8
DDP
3263static struct tx_port *
3264tx_port_lookup(const struct hmap *hmap, odp_port_t port_no)
3265{
3266 struct tx_port *tx;
3267
3268 HMAP_FOR_EACH_IN_BUCKET (tx, node, hash_port_no(port_no), hmap) {
3269 if (tx->port->port_no == port_no) {
3270 return tx;
3271 }
3272 }
3273
3274 return NULL;
3275}
3276
dc36593c
DDP
3277static int
3278port_reconfigure(struct dp_netdev_port *port)
3279{
3280 struct netdev *netdev = port->netdev;
dc36593c
DDP
3281 int i, err;
3282
e32971b8 3283 port->need_reconfigure = false;
dc36593c
DDP
3284
3285 /* Closes the existing 'rxq's. */
3286 for (i = 0; i < port->n_rxq; i++) {
947dc567
DDP
3287 netdev_rxq_close(port->rxqs[i].rx);
3288 port->rxqs[i].rx = NULL;
dc36593c 3289 }
4809891b 3290 unsigned last_nrxq = port->n_rxq;
dc36593c
DDP
3291 port->n_rxq = 0;
3292
050c60bf 3293 /* Allows 'netdev' to apply the pending configuration changes. */
3294 if (netdev_is_reconf_required(netdev)) {
3295 err = netdev_reconfigure(netdev);
3296 if (err && (err != EOPNOTSUPP)) {
3297 VLOG_ERR("Failed to apply new configuration to interface %s",
3298 netdev_get_name(netdev));
3299 return err;
3300 }
dc36593c 3301 }
050c60bf 3302 /* If the netdev_reconfigure() above succeeds, reopens the 'rxq's. */
3303 port->rxqs = xrealloc(port->rxqs,
3304 sizeof *port->rxqs * netdev_n_rxq(netdev));
3305 /* Realloc 'used' counters for tx queues. */
3306 free(port->txq_used);
3307 port->txq_used = xcalloc(netdev_n_txq(netdev), sizeof *port->txq_used);
3308
dc36593c 3309 for (i = 0; i < netdev_n_rxq(netdev); i++) {
3310 bool new_queue = i >= last_nrxq;
3311 if (new_queue) {
3312 memset(&port->rxqs[i], 0, sizeof port->rxqs[i]);
3313 }
3314
947dc567 3315 port->rxqs[i].port = port;
38259bd7 3316
947dc567 3317 err = netdev_rxq_open(netdev, &port->rxqs[i].rx, i);
3318 if (err) {
3319 return err;
3320 }
3321 port->n_rxq++;
3322 }
3323
3324 /* Parse affinity list to apply configuration for new queues. */
3325 dpif_netdev_port_set_rxq_affinity(port, port->rxq_affinity_list);
3326
3327 return 0;
3328}
3329
3330struct rr_numa_list {
3331 struct hmap numas; /* Contains 'struct rr_numa' */
3332};
3333
3334struct rr_numa {
3335 struct hmap_node node;
3336
3337 int numa_id;
3338
3339 /* Non isolated pmds on numa node 'numa_id' */
3340 struct dp_netdev_pmd_thread **pmds;
3341 int n_pmds;
3342
3343 int cur_index;
79da1e41 3344 bool idx_inc;
3345};
3346
3347static struct rr_numa *
3348rr_numa_list_lookup(struct rr_numa_list *rr, int numa_id)
3349{
3350 struct rr_numa *numa;
3351
3352 HMAP_FOR_EACH_WITH_HASH (numa, node, hash_int(numa_id, 0), &rr->numas) {
3353 if (numa->numa_id == numa_id) {
3354 return numa;
3355 }
3356 }
3357
3358 return NULL;
3359}
3360
3361/* Returns the next node in numa list following 'numa' in round-robin fashion.
3362 * Returns first node if 'numa' is a null pointer or the last node in 'rr'.
3363 * Returns NULL if 'rr' numa list is empty. */
3364static struct rr_numa *
3365rr_numa_list_next(struct rr_numa_list *rr, const struct rr_numa *numa)
3366{
3367 struct hmap_node *node = NULL;
3368
3369 if (numa) {
3370 node = hmap_next(&rr->numas, &numa->node);
3371 }
3372 if (!node) {
3373 node = hmap_first(&rr->numas);
3374 }
3375
3376 return (node) ? CONTAINER_OF(node, struct rr_numa, node) : NULL;
3377}
3378
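/* Illustrative sketch, not part of the original file: the wrap-around walk
 * rxq_scheduling() performs with rr_numa_list_next() for queues whose local
 * NUMA node has no pmds.  Feeding the previous result back in visits every
 * node in turn and restarts after the last one. */
static void
example_walk_numas(struct rr_numa_list *rr, int rounds)
{
    struct rr_numa *numa = NULL;

    while (rounds-- > 0) {
        numa = rr_numa_list_next(rr, numa);
        if (!numa) {
            break;              /* The numa list is empty. */
        }
        /* 'numa' is the next node in round-robin order. */
    }
}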
3379static void
3380rr_numa_list_populate(struct dp_netdev *dp, struct rr_numa_list *rr)
3381{
3382 struct dp_netdev_pmd_thread *pmd;
3383 struct rr_numa *numa;
3384
3385 hmap_init(&rr->numas);
3386
3387 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
3388 if (pmd->core_id == NON_PMD_CORE_ID || pmd->isolated) {
3389 continue;
3390 }
3391
3392 numa = rr_numa_list_lookup(rr, pmd->numa_id);
3393 if (!numa) {
3394 numa = xzalloc(sizeof *numa);
3395 numa->numa_id = pmd->numa_id;
3396 hmap_insert(&rr->numas, &numa->node, hash_int(pmd->numa_id, 0));
3397 }
3398 numa->n_pmds++;
3399 numa->pmds = xrealloc(numa->pmds, numa->n_pmds * sizeof *numa->pmds);
3400 numa->pmds[numa->n_pmds - 1] = pmd;
3401 /* At least one pmd, so initialise 'cur_index' and 'idx_inc'. */
3402 numa->cur_index = 0;
3403 numa->idx_inc = true;
3404 }
3405}
3406
3407/* Returns the next pmd from the numa node in
3408 * incrementing or decrementing order. */
3409static struct dp_netdev_pmd_thread *
3410rr_numa_get_pmd(struct rr_numa *numa)
3411{
3412 int numa_idx = numa->cur_index;
3413
3414 if (numa->idx_inc == true) {
3415 /* Incrementing through list of pmds. */
3416 if (numa->cur_index == numa->n_pmds - 1) {
3417 /* Reached the last pmd. */
3418 numa->idx_inc = false;
3419 } else {
3420 numa->cur_index++;
3421 }
3422 } else {
3423 /* Decrementing through list of pmds. */
3424 if (numa->cur_index == 0) {
3425 /* Reached the first pmd. */
3426 numa->idx_inc = true;
3427 } else {
3428 numa->cur_index--;
3429 }
3430 }
3431 return numa->pmds[numa_idx];
3432}
3433
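/* Illustrative sketch, not part of the original file: with three pmds
 * P0,P1,P2 the walk above yields P0 P1 P2 P2 P1 P0 P0 P1 P2 ..., so
 * consecutive (i.e. busiest, once sorted) queues land on different pmds.
 * The same logic as a self-contained model over indexes: */
static int
example_updown_next(int n_pmds, int *cur, bool *inc)
{
    int idx = *cur;

    if (*inc) {
        if (*cur == n_pmds - 1) {
            *inc = false;       /* Hit the last pmd; turn around. */
        } else {
            (*cur)++;
        }
    } else {
        if (*cur == 0) {
            *inc = true;        /* Hit the first pmd; turn around. */
        } else {
            (*cur)--;
        }
    }
    return idx;
}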
3434static void
3435rr_numa_list_destroy(struct rr_numa_list *rr)
3436{
3437 struct rr_numa *numa;
3438
3439 HMAP_FOR_EACH_POP (numa, node, &rr->numas) {
3440 free(numa->pmds);
3441 free(numa);
3442 }
3443 hmap_destroy(&rr->numas);
3444}
3445
3446/* Sorts rx queues in descending order of the processing cycles they consumed. */
3447static int
3448rxq_cycle_sort(const void *a, const void *b)
3449{
3450 struct dp_netdev_rxq *qa;
3451 struct dp_netdev_rxq *qb;
3452 uint64_t total_qa, total_qb;
3453 unsigned i;
3454
3455 qa = *(struct dp_netdev_rxq **) a;
3456 qb = *(struct dp_netdev_rxq **) b;
3457
3458 total_qa = total_qb = 0;
3459 for (i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) {
3460 total_qa += dp_netdev_rxq_get_intrvl_cycles(qa, i);
3461 total_qb += dp_netdev_rxq_get_intrvl_cycles(qb, i);
3462 }
3463 dp_netdev_rxq_set_cycles(qa, RXQ_CYCLES_PROC_HIST, total_qa);
3464 dp_netdev_rxq_set_cycles(qb, RXQ_CYCLES_PROC_HIST, total_qb);
3465
3466 if (total_qa >= total_qb) {
3467 return -1;
3468 }
3469 return 1;
3470}
3471
3472/* Assigns pmds to queues. If 'pinned' is true, assigns pmds to pinned
3473 * queues and marks those pmds as isolated. Otherwise, assigns non-isolated
3474 * pmds to unpinned queues.
3475 *
3476 * If 'pinned' is false queues will be sorted by processing cycles they are
3477 * consuming and then assigned to pmds in round robin order.
3478 *
3479 * The function doesn't touch the pmd threads, it just stores the assignment
3480 * in the 'pmd' member of each rxq. */
3481static void
3482rxq_scheduling(struct dp_netdev *dp, bool pinned) OVS_REQUIRES(dp->port_mutex)
3483{
3484 struct dp_netdev_port *port;
3485 struct rr_numa_list rr;
c37813fd 3486 struct rr_numa *non_local_numa = NULL;
3487 struct dp_netdev_rxq **rxqs = NULL;
3488 int i, n_rxqs = 0;
3489 struct rr_numa *numa = NULL;
3490 int numa_id;
3491
3492 HMAP_FOR_EACH (port, node, &dp->ports) {
3493 if (!netdev_is_pmd(port->netdev)) {
3494 continue;
3495 }
3496
3497 for (int qid = 0; qid < port->n_rxq; qid++) {
3498 struct dp_netdev_rxq *q = &port->rxqs[qid];
3499
3500 if (pinned && q->core_id != OVS_CORE_UNSPEC) {
3501 struct dp_netdev_pmd_thread *pmd;
3502
3503 pmd = dp_netdev_get_pmd(dp, q->core_id);
3504 if (!pmd) {
3505 VLOG_WARN("There is no PMD thread on core %d. Queue "
3506 "%d on port \'%s\' will not be polled.",
3507 q->core_id, qid, netdev_get_name(port->netdev));
3508 } else {
3509 q->pmd = pmd;
3510 pmd->isolated = true;
3511 dp_netdev_pmd_unref(pmd);
3512 }
3513 } else if (!pinned && q->core_id == OVS_CORE_UNSPEC) {
3514 if (n_rxqs == 0) {
3515 rxqs = xmalloc(sizeof *rxqs);
e32971b8 3516 } else {
655856ef 3517 rxqs = xrealloc(rxqs, sizeof *rxqs * (n_rxqs + 1));
e32971b8 3518 }
3519 /* Store the queue. */
3520 rxqs[n_rxqs++] = q;
3521 }
3522 }
3523 }
3524
3525 if (n_rxqs > 1) {
3526 /* Sort the queues in order of the processing cycles
3527 * they consumed during their last pmd interval. */
3528 qsort(rxqs, n_rxqs, sizeof *rxqs, rxq_cycle_sort);
3529 }
3530
3531 rr_numa_list_populate(dp, &rr);
3532 /* Assign the sorted queues to pmds in round robin. */
3533 for (i = 0; i < n_rxqs; i++) {
3534 numa_id = netdev_get_numa_id(rxqs[i]->port->netdev);
3535 numa = rr_numa_list_lookup(&rr, numa_id);
3536 if (!numa) {
3537 /* There are no pmds on the queue's local NUMA node.
3538 Round robin on the NUMA nodes that do have pmds. */
3539 non_local_numa = rr_numa_list_next(&rr, non_local_numa);
3540 if (!non_local_numa) {
3541 VLOG_ERR("There is no available (non-isolated) pmd "
3542 "thread for port \'%s\' queue %d. This queue "
3543 "will not be polled. Is pmd-cpu-mask set to "
3544 "zero? Or are all PMDs isolated to other "
3545 "queues?", netdev_rxq_get_name(rxqs[i]->rx),
3546 netdev_rxq_get_queue_id(rxqs[i]->rx));
3547 continue;
3548 }
3549 rxqs[i]->pmd = rr_numa_get_pmd(non_local_numa);
3550 VLOG_WARN("There's no available (non-isolated) pmd thread "
3551 "on numa node %d. Queue %d on port \'%s\' will "
3552 "be assigned to the pmd on core %d "
3553 "(numa node %d). Expect reduced performance.",
3554 numa_id, netdev_rxq_get_queue_id(rxqs[i]->rx),
3555 netdev_rxq_get_name(rxqs[i]->rx),
3556 rxqs[i]->pmd->core_id, rxqs[i]->pmd->numa_id);
3557 } else {
3558 rxqs[i]->pmd = rr_numa_get_pmd(numa);
3559 VLOG_INFO("Core %d on numa node %d assigned port \'%s\' "
3560 "rx queue %d (measured processing cycles %"PRIu64").",
3561 rxqs[i]->pmd->core_id, numa_id,
3562 netdev_rxq_get_name(rxqs[i]->rx),
3563 netdev_rxq_get_queue_id(rxqs[i]->rx),
3564 dp_netdev_rxq_get_cycles(rxqs[i], RXQ_CYCLES_PROC_HIST));
3565 }
3566 }
3567
e32971b8 3568 rr_numa_list_destroy(&rr);
655856ef 3569 free(rxqs);
3570}
3571
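/* Illustrative sketch, not part of the original file: the combined effect of
 * the sort plus the up-down round robin above for one NUMA node.  With pmds
 * {A, B} and measured queue cycles {q0:100, q1:90, q2:10}, the queues are
 * visited busiest-first, giving q0->A, q1->B, q2->B. */
static void
example_assign_sorted_queues(int n_rxqs, int n_pmds, int *assignment)
{
    int cur = 0;
    bool inc = true;

    for (int i = 0; i < n_rxqs; i++) {
        assignment[i] = cur;    /* Queue 'i' -> pmd index 'cur'. */
        if (inc && cur == n_pmds - 1) {
            inc = false;        /* Turn around at the last pmd. */
        } else if (!inc && cur == 0) {
            inc = true;         /* Turn around at the first pmd. */
        } else {
            cur += inc ? 1 : -1;
        }
    }
}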
3572static void
3573reload_affected_pmds(struct dp_netdev *dp)
3574{
3575 struct dp_netdev_pmd_thread *pmd;
3576
3577 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
3578 if (pmd->need_reload) {
3579 dp_netdev_reload_pmd__(pmd);
3580 pmd->need_reload = false;
3581 }
3582 }
3583}
3584
3585static void
3586reconfigure_pmd_threads(struct dp_netdev *dp)
3587 OVS_REQUIRES(dp->port_mutex)
3588{
3589 struct dp_netdev_pmd_thread *pmd;
3590 struct ovs_numa_dump *pmd_cores;
3591 struct ovs_numa_info_core *core;
3592 struct hmapx to_delete = HMAPX_INITIALIZER(&to_delete);
3593 struct hmapx_node *node;
e32971b8 3594 bool changed = false;
140dd699 3595 bool need_to_adjust_static_tx_qids = false;
3596
3597 /* The pmd threads should be started only if there's a pmd port in the
3598 * datapath. If the user didn't provide any "pmd-cpu-mask", we start
3599 * NR_PMD_THREADS per numa node. */
3600 if (!has_pmd_port(dp)) {
3601 pmd_cores = ovs_numa_dump_n_cores_per_numa(0);
3602 } else if (dp->pmd_cmask && dp->pmd_cmask[0]) {
3603 pmd_cores = ovs_numa_dump_cores_with_cmask(dp->pmd_cmask);
3604 } else {
3605 pmd_cores = ovs_numa_dump_n_cores_per_numa(NR_PMD_THREADS);
3606 }
3607
3608 /* We need to adjust 'static_tx_qid's only if we're reducing number of
3609 * PMD threads. Otherwise, new threads will allocate all the freed ids. */
3610 if (ovs_numa_dump_count(pmd_cores) < cmap_count(&dp->poll_threads) - 1) {
3611 /* Adjustment is required to keep 'static_tx_qid's sequential and
3612 * avoid possible issues, for example, imbalanced tx queue usage
3613 * and unnecessary locking caused by remapping at the netdev level. */
3614 need_to_adjust_static_tx_qids = true;
3615 }
3616
3617 /* Check for unwanted pmd threads */
3618 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
3619 if (pmd->core_id == NON_PMD_CORE_ID) {
3620 continue;
3621 }
3622 if (!ovs_numa_dump_contains_core(pmd_cores, pmd->numa_id,
3623 pmd->core_id)) {
3624 hmapx_add(&to_delete, pmd);
3625 } else if (need_to_adjust_static_tx_qids) {
3626 pmd->need_reload = true;
3627 }
3628 }
3629
3630 HMAPX_FOR_EACH (node, &to_delete) {
3631 pmd = (struct dp_netdev_pmd_thread *) node->data;
3632 VLOG_INFO("PMD thread on numa_id: %d, core id: %2d destroyed.",
3633 pmd->numa_id, pmd->core_id);
3634 dp_netdev_del_pmd(dp, pmd);
3635 }
3636 changed = !hmapx_is_empty(&to_delete);
3637 hmapx_destroy(&to_delete);
e32971b8 3638
3639 if (need_to_adjust_static_tx_qids) {
3640 /* 'static_tx_qid's are not sequential now.
3641 * Reload remaining threads to fix this. */
3642 reload_affected_pmds(dp);
3643 }
e32971b8 3644
3645 /* Check for required new pmd threads */
3646 FOR_EACH_CORE_ON_DUMP(core, pmd_cores) {
3647 pmd = dp_netdev_get_pmd(dp, core->core_id);
3648 if (!pmd) {
3649 pmd = xzalloc(sizeof *pmd);
e32971b8 3650 dp_netdev_configure_pmd(pmd, dp, core->core_id, core->numa_id);
e32971b8 3651 pmd->thread = ovs_thread_create("pmd", pmd_thread_main, pmd);
3652 VLOG_INFO("PMD thread on numa_id: %d, core id: %2d created.",
3653 pmd->numa_id, pmd->core_id);
3654 changed = true;
3655 } else {
3656 dp_netdev_pmd_unref(pmd);
e32971b8 3657 }
3658 }
3659
3660 if (changed) {
3661 struct ovs_numa_info_numa *numa;
3662
3663 /* Log the number of pmd threads per numa node. */
3664 FOR_EACH_NUMA_ON_DUMP (numa, pmd_cores) {
140dd699 3665 VLOG_INFO("There are %"PRIuSIZE" pmd threads on numa node %d",
3666 numa->n_cores, numa->numa_id);
3667 }
3668 }
3669
3670 ovs_numa_dump_destroy(pmd_cores);
3671}
3672
3673static void
3674pmd_remove_stale_ports(struct dp_netdev *dp,
3675 struct dp_netdev_pmd_thread *pmd)
3676 OVS_EXCLUDED(pmd->port_mutex)
3677 OVS_REQUIRES(dp->port_mutex)
3678{
3679 struct rxq_poll *poll, *poll_next;
3680 struct tx_port *tx, *tx_next;
3681
3682 ovs_mutex_lock(&pmd->port_mutex);
3683 HMAP_FOR_EACH_SAFE (poll, poll_next, node, &pmd->poll_list) {
3684 struct dp_netdev_port *port = poll->rxq->port;
3685
3686 if (port->need_reconfigure
3687 || !hmap_contains(&dp->ports, &port->node)) {
3688 dp_netdev_del_rxq_from_pmd(pmd, poll);
3689 }
3690 }
3691 HMAP_FOR_EACH_SAFE (tx, tx_next, node, &pmd->tx_ports) {
3692 struct dp_netdev_port *port = tx->port;
3693
3694 if (port->need_reconfigure
3695 || !hmap_contains(&dp->ports, &port->node)) {
3696 dp_netdev_del_port_tx_from_pmd(pmd, tx);
3697 }
3698 }
3699 ovs_mutex_unlock(&pmd->port_mutex);
3700}
3701
3702/* Must be called each time a port is added/removed or the cmask changes.
3703 * This creates and destroys pmd threads, reconfigures ports, opens their
3704 * rxqs and assigns all rxqs/txqs to pmd threads. */
3705static void
3706reconfigure_datapath(struct dp_netdev *dp)
3707 OVS_REQUIRES(dp->port_mutex)
3708{
3709 struct dp_netdev_pmd_thread *pmd;
3710 struct dp_netdev_port *port;
3711 int wanted_txqs;
6e3c6fa4 3712
3713 dp->last_reconfigure_seq = seq_read(dp->reconfigure_seq);
3714
3715 /* Step 1: Adjust the pmd threads based on the datapath ports, the cores
3716 * on the system and the user configuration. */
3717 reconfigure_pmd_threads(dp);
6e3c6fa4 3718
e32971b8 3719 wanted_txqs = cmap_count(&dp->poll_threads);
324c8374 3720
3721 /* The number of pmd threads might have changed, or a port can be new:
3722 * adjust the txqs. */
3723 HMAP_FOR_EACH (port, node, &dp->ports) {
3724 netdev_set_tx_multiq(port->netdev, wanted_txqs);
3725 }
3726
3727 /* Step 2: Remove from the pmd threads ports that have been removed or
3728 * need reconfiguration. */
3729
3730 /* Check for all the ports that need reconfiguration. We cache this in
3731 * 'port->need_reconfigure', because netdev_is_reconf_required() can
3732 * change at any time. */
3733 HMAP_FOR_EACH (port, node, &dp->ports) {
3734 if (netdev_is_reconf_required(port->netdev)) {
3735 port->need_reconfigure = true;
3736 }
3737 }
3738
3739 /* Remove from the pmd threads all the ports that have been deleted or
3740 * need reconfiguration. */
3741 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
3742 pmd_remove_stale_ports(dp, pmd);
3743 }
3744
3745 /* Reload affected pmd threads. We must wait for the pmd threads before
3746 * reconfiguring the ports, because a port cannot be reconfigured while
3747 * it's being used. */
3748 reload_affected_pmds(dp);
3749
3750 /* Step 3: Reconfigure ports. */
3751
3752 /* We only reconfigure the ports that we determined above, because they're
3753 * not being used by any pmd thread at the moment. If a port fails to
3754 * reconfigure we remove it from the datapath. */
3755 struct dp_netdev_port *next_port;
3756 HMAP_FOR_EACH_SAFE (port, next_port, node, &dp->ports) {
dc36593c 3757 int err;
6e3c6fa4 3758
3759 if (!port->need_reconfigure) {
3760 continue;
3761 }
3762
3763 err = port_reconfigure(port);
3764 if (err) {
3765 hmap_remove(&dp->ports, &port->node);
3766 seq_change(dp->port_seq);
3767 port_destroy(port);
324c8374 3768 } else {
e32971b8 3769 port->dynamic_txqs = netdev_n_txq(port->netdev) < wanted_txqs;
3770 }
3771 }
3772
3773 /* Step 4: Compute new rxq scheduling. We don't touch the pmd threads
3774 * for now, we just update the 'pmd' pointer in each rxq to point to the
3775 * wanted thread according to the scheduling policy. */
3776
3777 /* Reset all the pmd threads to non isolated. */
3778 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
3779 pmd->isolated = false;
3780 }
3781
3782 /* Reset all the queues to unassigned */
3783 HMAP_FOR_EACH (port, node, &dp->ports) {
3784 for (int i = 0; i < port->n_rxq; i++) {
3785 port->rxqs[i].pmd = NULL;
3786 }
3787 }
3788
3789 /* Add pinned queues and mark pmd threads isolated. */
3790 rxq_scheduling(dp, true);
3791
3792 /* Add non-pinned queues. */
3793 rxq_scheduling(dp, false);
3794
3795 /* Step 5: Remove queues not compliant with new scheduling. */
3796 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
3797 struct rxq_poll *poll, *poll_next;
3798
3799 ovs_mutex_lock(&pmd->port_mutex);
3800 HMAP_FOR_EACH_SAFE (poll, poll_next, node, &pmd->poll_list) {
3801 if (poll->rxq->pmd != pmd) {
3802 dp_netdev_del_rxq_from_pmd(pmd, poll);
3803 }
3804 }
3805 ovs_mutex_unlock(&pmd->port_mutex);
3806 }
3807
3808 /* Reload affected pmd threads. We must wait for the pmd threads to remove
3809 * the old queues before readding them, otherwise a queue can be polled by
3810 * two threads at the same time. */
3811 reload_affected_pmds(dp);
3812
3813 /* Step 6: Add queues from scheduling, if they're not there already. */
3814 HMAP_FOR_EACH (port, node, &dp->ports) {
3815 if (!netdev_is_pmd(port->netdev)) {
3816 continue;
3817 }
3818
3819 for (int qid = 0; qid < port->n_rxq; qid++) {
3820 struct dp_netdev_rxq *q = &port->rxqs[qid];
3821
3822 if (q->pmd) {
3823 ovs_mutex_lock(&q->pmd->port_mutex);
3824 dp_netdev_add_rxq_to_pmd(q->pmd, q);
3825 ovs_mutex_unlock(&q->pmd->port_mutex);
3826 }
3827 }
3828 }
3829
3830 /* Add every port to the tx cache of every pmd thread, if it's not
3831 * there already and if this pmd has at least one rxq to poll. */
3832 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
3833 ovs_mutex_lock(&pmd->port_mutex);
3834 if (hmap_count(&pmd->poll_list) || pmd->core_id == NON_PMD_CORE_ID) {
3835 HMAP_FOR_EACH (port, node, &dp->ports) {
3836 dp_netdev_add_port_tx_to_pmd(pmd, port);
3837 }
3838 }
3839 ovs_mutex_unlock(&pmd->port_mutex);
3840 }
3841
3842 /* Reload affected pmd threads. */
3843 reload_affected_pmds(dp);
3844}
3845
3846/* Returns true if one of the netdevs in 'dp' requires a reconfiguration */
3847static bool
3848ports_require_restart(const struct dp_netdev *dp)
3849 OVS_REQUIRES(dp->port_mutex)
3850{
3851 struct dp_netdev_port *port;
3852
3853 HMAP_FOR_EACH (port, node, &dp->ports) {
3854 if (netdev_is_reconf_required(port->netdev)) {
3855 return true;
3856 }
3857 }
3858
3859 return false;
3860}
3861
3862/* Returns true if the datapath flows need to be revalidated. */
3863static bool
3864dpif_netdev_run(struct dpif *dpif)
3865{
3866 struct dp_netdev_port *port;
3867 struct dp_netdev *dp = get_dp_netdev(dpif);
546e57d4 3868 struct dp_netdev_pmd_thread *non_pmd;
a36de779 3869 uint64_t new_tnl_seq;
a2ac666d 3870 int process_packets = 0;
e4cfed38 3871
e9985d6a 3872 ovs_mutex_lock(&dp->port_mutex);
3873 non_pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID);
3874 if (non_pmd) {
3875 ovs_mutex_lock(&dp->non_pmd_mutex);
a2ac666d 3876 cycles_count_start(non_pmd);
3877 HMAP_FOR_EACH (port, node, &dp->ports) {
3878 if (!netdev_is_pmd(port->netdev)) {
3879 int i;
55c955bd 3880
546e57d4 3881 for (i = 0; i < port->n_rxq; i++) {
3882 process_packets =
3883 dp_netdev_process_rxq_port(non_pmd,
3884 port->rxqs[i].rx,
3885 port->port_no);
3886 cycles_count_intermediate(non_pmd, NULL,
3887 process_packets
3888 ? PMD_CYCLES_PROCESSING
3889 : PMD_CYCLES_IDLE);
546e57d4 3890 }
55c955bd 3891 }
e4cfed38 3892 }
a2ac666d 3893 cycles_count_end(non_pmd, PMD_CYCLES_IDLE);
3894 dpif_netdev_xps_revalidate_pmd(non_pmd, time_msec(), false);
3895 ovs_mutex_unlock(&dp->non_pmd_mutex);
6e3c6fa4 3896
3897 dp_netdev_pmd_unref(non_pmd);
3898 }
1c1e46ed 3899
a6a426d6 3900 if (dp_netdev_is_reconf_required(dp) || ports_require_restart(dp)) {
e32971b8 3901 reconfigure_datapath(dp);
3902 }
3903 ovs_mutex_unlock(&dp->port_mutex);
3904
53902038 3905 tnl_neigh_cache_run();
7f9b8504 3906 tnl_port_map_run();
3907 new_tnl_seq = seq_read(tnl_conf_seq);
3908
3909 if (dp->last_tnl_conf_seq != new_tnl_seq) {
3910 dp->last_tnl_conf_seq = new_tnl_seq;
3911 return true;
3912 }
3913 return false;
3914}
3915
3916static void
3917dpif_netdev_wait(struct dpif *dpif)
3918{
3919 struct dp_netdev_port *port;
3920 struct dp_netdev *dp = get_dp_netdev(dpif);
3921
59e6d833 3922 ovs_mutex_lock(&dp_netdev_mutex);
3923 ovs_mutex_lock(&dp->port_mutex);
3924 HMAP_FOR_EACH (port, node, &dp->ports) {
050c60bf 3925 netdev_wait_reconf_required(port->netdev);
3926 if (!netdev_is_pmd(port->netdev)) {
3927 int i;
3928
490e82af 3929 for (i = 0; i < port->n_rxq; i++) {
947dc567 3930 netdev_rxq_wait(port->rxqs[i].rx);
55c955bd 3931 }
3932 }
3933 }
e9985d6a 3934 ovs_mutex_unlock(&dp->port_mutex);
59e6d833 3935 ovs_mutex_unlock(&dp_netdev_mutex);
a36de779 3936 seq_wait(tnl_conf_seq, dp->last_tnl_conf_seq);
3937}
3938
3939static void
3940pmd_free_cached_ports(struct dp_netdev_pmd_thread *pmd)
3941{
3942 struct tx_port *tx_port_cached;
3943
3944 /* Free all used tx queue ids. */
3945 dpif_netdev_xps_revalidate_pmd(pmd, 0, true);
3946
3947 HMAP_FOR_EACH_POP (tx_port_cached, node, &pmd->tnl_port_cache) {
3948 free(tx_port_cached);
3949 }
3950 HMAP_FOR_EACH_POP (tx_port_cached, node, &pmd->send_port_cache) {
3951 free(tx_port_cached);
3952 }
3953}
3954
3955/* Copies ports from 'pmd->tx_ports' (shared with the main thread) to
3956 * thread-local copies. A port is copied to 'pmd->tnl_port_cache' if it is
3957 * a tunnel device, and also to 'pmd->send_port_cache' if it has at least
3958 * one txq. */
3959static void
3960pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd)
3961 OVS_REQUIRES(pmd->port_mutex)
3962{
3963 struct tx_port *tx_port, *tx_port_cached;
3964
3965 pmd_free_cached_ports(pmd);
3966 hmap_shrink(&pmd->send_port_cache);
3967 hmap_shrink(&pmd->tnl_port_cache);
3968
3969 HMAP_FOR_EACH (tx_port, node, &pmd->tx_ports) {
3970 if (netdev_has_tunnel_push_pop(tx_port->port->netdev)) {
3971 tx_port_cached = xmemdup(tx_port, sizeof *tx_port_cached);
3972 hmap_insert(&pmd->tnl_port_cache, &tx_port_cached->node,
3973 hash_port_no(tx_port_cached->port->port_no));
3974 }
3975
3976 if (netdev_n_txq(tx_port->port->netdev)) {
3977 tx_port_cached = xmemdup(tx_port, sizeof *tx_port_cached);
3978 hmap_insert(&pmd->send_port_cache, &tx_port_cached->node,
3979 hash_port_no(tx_port_cached->port->port_no));
3980 }
3981 }
3982}
3983
3984static void
3985pmd_alloc_static_tx_qid(struct dp_netdev_pmd_thread *pmd)
3986{
3987 ovs_mutex_lock(&pmd->dp->tx_qid_pool_mutex);
3988 if (!id_pool_alloc_id(pmd->dp->tx_qid_pool, &pmd->static_tx_qid)) {
3989 VLOG_ABORT("static_tx_qid allocation failed for PMD on core %2d"
3990 ", numa_id %d.", pmd->core_id, pmd->numa_id);
3991 }
3992 ovs_mutex_unlock(&pmd->dp->tx_qid_pool_mutex);
3993
3994 VLOG_DBG("static_tx_qid = %d allocated for PMD thread on core %2d"
3995 ", numa_id %d.", pmd->static_tx_qid, pmd->core_id, pmd->numa_id);
3996}
3997
3998static void
3999pmd_free_static_tx_qid(struct dp_netdev_pmd_thread *pmd)
4000{
4001 ovs_mutex_lock(&pmd->dp->tx_qid_pool_mutex);
4002 id_pool_free_id(pmd->dp->tx_qid_pool, pmd->static_tx_qid);
4003 ovs_mutex_unlock(&pmd->dp->tx_qid_pool_mutex);
4004}
4005
e4cfed38 4006static int
d0cca6c3 4007pmd_load_queues_and_ports(struct dp_netdev_pmd_thread *pmd,
947dc567 4008 struct polled_queue **ppoll_list)
e4cfed38 4009{
947dc567 4010 struct polled_queue *poll_list = *ppoll_list;
4011 struct rxq_poll *poll;
4012 int i;
e4cfed38 4013
d0cca6c3 4014 ovs_mutex_lock(&pmd->port_mutex);
4015 poll_list = xrealloc(poll_list, hmap_count(&pmd->poll_list)
4016 * sizeof *poll_list);
a1fdee13 4017
ae7ad0a1 4018 i = 0;
947dc567 4019 HMAP_FOR_EACH (poll, node, &pmd->poll_list) {
922b28d4 4020 poll_list[i].rxq = poll->rxq;
4021 poll_list[i].port_no = poll->rxq->port->port_no;
4022 i++;
e4cfed38 4023 }
4024
4025 pmd_load_cached_ports(pmd);
4026
4027 ovs_mutex_unlock(&pmd->port_mutex);
e4cfed38 4028
e4cfed38 4029 *ppoll_list = poll_list;
d42f9307 4030 return i;
4031}
4032
6c3eee82 4033static void *
e4cfed38 4034pmd_thread_main(void *f_)
6c3eee82 4035{
65f13b50 4036 struct dp_netdev_pmd_thread *pmd = f_;
e4cfed38 4037 unsigned int lc = 0;
947dc567 4038 struct polled_queue *poll_list;
d42f9307 4039 bool exiting;
4040 int poll_cnt;
4041 int i;
a2ac666d 4042 int process_packets = 0;
6c3eee82 4043
4044 poll_list = NULL;
4045
4046 /* Stores the pmd thread's 'pmd' to 'per_pmd_key'. */
4047 ovsthread_setspecific(pmd->dp->per_pmd_key, pmd);
4048 ovs_numa_thread_setaffinity_core(pmd->core_id);
4049 dpdk_set_lcore_id(pmd->core_id);
d0cca6c3 4050 poll_cnt = pmd_load_queues_and_ports(pmd, &poll_list);
e215018b 4051 emc_cache_init(&pmd->flow_cache);
e4cfed38 4052reload:
140dd699 4053 pmd_alloc_static_tx_qid(pmd);
ae7ad0a1 4054
4055 /* List port/core affinity */
4056 for (i = 0; i < poll_cnt; i++) {
ce179f11 4057 VLOG_DBG("Core %d processing port \'%s\' with queue-id %d\n",
4058 pmd->core_id, netdev_rxq_get_name(poll_list[i].rxq->rx),
4059 netdev_rxq_get_queue_id(poll_list[i].rxq->rx));
4060 }
4061
4062 if (!poll_cnt) {
4063 while (seq_read(pmd->reload_seq) == pmd->last_reload_seq) {
4064 seq_wait(pmd->reload_seq, pmd->last_reload_seq);
4065 poll_block();
4066 }
4067 lc = UINT_MAX;
4068 }
4069
a2ac666d 4070 cycles_count_start(pmd);
e4cfed38 4071 for (;;) {
e4cfed38 4072 for (i = 0; i < poll_cnt; i++) {
a2ac666d 4073 process_packets =
922b28d4 4074 dp_netdev_process_rxq_port(pmd, poll_list[i].rxq->rx,
a2ac666d 4075 poll_list[i].port_no);
4809891b 4076 cycles_count_intermediate(pmd, poll_list[i].rxq,
4077 process_packets ? PMD_CYCLES_PROCESSING
4078 : PMD_CYCLES_IDLE);
4079 }
4080
4081 if (lc++ > 1024) {
14e3e12a 4082 bool reload;
6c3eee82 4083
e4cfed38 4084 lc = 0;
84067a4c 4085
fbe0962b 4086 coverage_try_clear();
4809891b 4087 dp_netdev_pmd_try_optimize(pmd, poll_list, poll_cnt);
4088 if (!ovsrcu_try_quiesce()) {
4089 emc_cache_slow_sweep(&pmd->flow_cache);
4090 }
84067a4c 4091
4092 atomic_read_relaxed(&pmd->reload, &reload);
4093 if (reload) {
4094 break;
4095 }
4096 }
e4cfed38 4097 }
6c3eee82 4098
4099 cycles_count_end(pmd, PMD_CYCLES_IDLE);
4100
d0cca6c3 4101 poll_cnt = pmd_load_queues_and_ports(pmd, &poll_list);
4102 exiting = latch_is_set(&pmd->exit_latch);
4103 /* Signal here to make sure the pmd finishes
4104 * reloading the updated configuration. */
4105 dp_netdev_pmd_reload_done(pmd);
4106
140dd699 4107 pmd_free_static_tx_qid(pmd);
9bbf1c3d 4108
d42f9307 4109 if (!exiting) {
e4cfed38
PS
4110 goto reload;
4111 }
6c3eee82 4112
e215018b 4113 emc_cache_uninit(&pmd->flow_cache);
e4cfed38 4114 free(poll_list);
d0cca6c3 4115 pmd_free_cached_ports(pmd);
4116 return NULL;
4117}
4118
4119static void
4120dp_netdev_disable_upcall(struct dp_netdev *dp)
4121 OVS_ACQUIRES(dp->upcall_rwlock)
4122{
4123 fat_rwlock_wrlock(&dp->upcall_rwlock);
4124}
4125
4126\f
4127/* Meters */
4128static void
4129dpif_netdev_meter_get_features(const struct dpif *dpif OVS_UNUSED,
4130 struct ofputil_meter_features *features)
4131{
4132 features->max_meters = MAX_METERS;
4133 features->band_types = DP_SUPPORTED_METER_BAND_TYPES;
4134 features->capabilities = DP_SUPPORTED_METER_FLAGS_MASK;
4135 features->max_bands = MAX_BANDS;
4136 features->max_color = 0;
4137}
4138
4139/* Applies the meter with 'meter_id' to 'packets_'; packets that exceed a band are dropped in place. */
4140static void
4141dp_netdev_run_meter(struct dp_netdev *dp, struct dp_packet_batch *packets_,
4142 uint32_t meter_id, long long int now)
4143{
4144 struct dp_meter *meter;
4145 struct dp_meter_band *band;
79c81260 4146 struct dp_packet *packet;
4147 long long int long_delta_t; /* msec */
4148 uint32_t delta_t; /* msec */
4149 int i;
79c81260 4150 const size_t cnt = dp_packet_batch_size(packets_);
4151 uint32_t bytes, volume;
4152 int exceeded_band[NETDEV_MAX_BURST];
4153 uint32_t exceeded_rate[NETDEV_MAX_BURST];
4154 int exceeded_pkt = cnt; /* First packet that exceeded a band rate. */
4155
4156 if (meter_id >= MAX_METERS) {
4157 return;
4158 }
4159
4160 meter_lock(dp, meter_id);
4161 meter = dp->meters[meter_id];
4162 if (!meter) {
4163 goto out;
4164 }
4165
4166 /* Initialize as negative values. */
4167 memset(exceeded_band, 0xff, cnt * sizeof *exceeded_band);
4168 /* Initialize as zeroes. */
4169 memset(exceeded_rate, 0, cnt * sizeof *exceeded_rate);
4170
4171 /* All packets will hit the meter at the same time. */
4172 long_delta_t = (now - meter->used); /* msec */
4173
4174 /* Make sure delta_t will not be too large, so that bucket will not
4175 * wrap around below. */
4176 delta_t = (long_delta_t > (long long int)meter->max_delta_t)
4177 ? meter->max_delta_t : (uint32_t)long_delta_t;
4178
4179 /* Update meter stats. */
4180 meter->used = now;
4181 meter->packet_count += cnt;
4182 bytes = 0;
4183 DP_PACKET_BATCH_FOR_EACH (packet, packets_) {
4184 bytes += dp_packet_size(packet);
4185 }
4186 meter->byte_count += bytes;
4187
4188 /* Meters can operate in terms of packets per second or kilobits per
4189 * second. */
4190 if (meter->flags & OFPMF13_PKTPS) {
4191 /* Rate in packets/second, bucket 1/1000 packets. */
4192 /* msec * packets/sec = 1/1000 packets. */
4193 volume = cnt * 1000; /* Take 'cnt' packets from the bucket. */
4194 } else {
4195 /* Rate in kbps, bucket in bits. */
4196 /* msec * kbps = bits */
4197 volume = bytes * 8;
4198 }
4199
4200 /* Update all bands and find the one hit with the highest rate for each
4201 * packet (if any). */
4202 for (int m = 0; m < meter->n_bands; ++m) {
4203 band = &meter->bands[m];
4204
4205 /* Update band's bucket. */
4206 band->bucket += delta_t * band->up.rate;
4207 if (band->bucket > band->up.burst_size) {
4208 band->bucket = band->up.burst_size;
4209 }
4210
4211 /* Drain the bucket for all the packets, if possible. */
4212 if (band->bucket >= volume) {
4213 band->bucket -= volume;
4214 } else {
4215 int band_exceeded_pkt;
4216
4217 /* Band limit hit, must process packet-by-packet. */
4218 if (meter->flags & OFPMF13_PKTPS) {
4219 band_exceeded_pkt = band->bucket / 1000;
4220 band->bucket %= 1000; /* Remainder stays in bucket. */
4221
4222 /* Update the exceeding band for each exceeding packet.
4223 * (Only one band will be fired by a packet, and that
4224 * can be different for each packet.) */
4225 for (i = band_exceeded_pkt; i < cnt; i++) {
4226 if (band->up.rate > exceeded_rate[i]) {
4227 exceeded_rate[i] = band->up.rate;
4228 exceeded_band[i] = m;
4229 }
4230 }
4231 } else {
4232 /* Packet sizes differ, must process one-by-one. */
4233 band_exceeded_pkt = cnt;
4234 DP_PACKET_BATCH_FOR_EACH (packet, packets_) {
4235 uint32_t bits = dp_packet_size(packet) * 8;
4236
4237 if (band->bucket >= bits) {
4238 band->bucket -= bits;
4239 } else {
4240 if (i < band_exceeded_pkt) {
4241 band_exceeded_pkt = i;
4242 }
4243 /* Update the exceeding band for the exceeding packet.
4244 * (Only one band will be fired by a packet, and that
4245 * can be different for each packet.) */
4246 if (band->up.rate > exceeded_rate[i]) {
4247 exceeded_rate[i] = band->up.rate;
4248 exceeded_band[i] = m;
4249 }
4250 }
4251 }
4252 }
4253 /* Remember the first exceeding packet. */
4254 if (exceeded_pkt > band_exceeded_pkt) {
4255 exceeded_pkt = band_exceeded_pkt;
4256 }
4257 }
4258 }
4259
4260 /* Fire the highest rate band exceeded by each packet.
4261 * Drop packets if needed, by swapping packet to the end that will be
4262 * ignored. */
4b27db64 4263 size_t j;
79c81260 4264 DP_PACKET_BATCH_REFILL_FOR_EACH (j, cnt, packet, packets_) {
4265 if (exceeded_band[j] >= 0) {
4266 /* Meter drop packet. */
4267 band = &meter->bands[exceeded_band[j]];
4268 band->packet_count += 1;
4269 band->byte_count += dp_packet_size(packet);
4270
4271 dp_packet_delete(packet);
4272 } else {
4273 /* Meter accepts packet. */
4274 dp_packet_batch_refill(packets_, packet, j);
4275 }
4276 }
4277 out:
4278 meter_unlock(dp, meter_id);
4279}
4280
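/* Illustrative sketch, not part of the original file: the token-bucket
 * arithmetic above for a single kbps band.  With rate = 1000 kbps and a
 * 100 ms gap the bucket gains delta_t * rate = 100 * 1000 = 100000 bits,
 * while a 64-byte packet costs 64 * 8 = 512 bits. */
static bool
example_kbps_band_admits(uint64_t *bucket_bits, uint64_t burst_bits,
                         uint32_t rate_kbps, uint32_t delta_t_ms,
                         uint32_t packet_bytes)
{
    uint64_t cost = (uint64_t) packet_bytes * 8;

    *bucket_bits += (uint64_t) delta_t_ms * rate_kbps;  /* msec * kbps = bits. */
    if (*bucket_bits > burst_bits) {
        *bucket_bits = burst_bits;  /* Never exceed the burst size. */
    }
    if (*bucket_bits >= cost) {
        *bucket_bits -= cost;       /* The packet passes this band. */
        return true;
    }
    return false;                   /* Band exceeded: the packet is dropped. */
}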
4281/* Meter set/get/del processing is still single-threaded. */
5dddf960 4282static int
4283dpif_netdev_meter_set(struct dpif *dpif, ofproto_meter_id *meter_id,
4284 struct ofputil_meter_config *config)
5dddf960 4285{
4286 struct dp_netdev *dp = get_dp_netdev(dpif);
4287 uint32_t mid = meter_id->uint32;
4288 struct dp_meter *meter;
4289 int i;
4290
4291 if (mid >= MAX_METERS) {
4292 return EFBIG; /* Meter_id out of range. */
4293 }
4294
4295 if (config->flags & ~DP_SUPPORTED_METER_FLAGS_MASK ||
4296 !(config->flags & (OFPMF13_KBPS | OFPMF13_PKTPS))) {
4297 return EBADF; /* Unsupported flags set */
4298 }
2029ce9a 4299
4300 /* Validate bands */
4301 if (config->n_bands == 0 || config->n_bands > MAX_BANDS) {
4302 return EINVAL; /* Zero or too many bands */
4303 }
4304
4305 /* Validate rates */
4306 for (i = 0; i < config->n_bands; i++) {
4307 if (config->bands[i].rate == 0) {
66a396d4 4308 return EDOM; /* rate must be non-zero */
4309 }
4310 }
4311
4312 for (i = 0; i < config->n_bands; ++i) {
4313 switch (config->bands[i].type) {
4314 case OFPMBT13_DROP:
4315 break;
4316 default:
4317 return ENODEV; /* Unsupported band type */
4318 }
4319 }
4320
4321 /* Allocate meter */
4322 meter = xzalloc(sizeof *meter
4323 + config->n_bands * sizeof(struct dp_meter_band));
4324 if (meter) {
4325 meter->flags = config->flags;
4326 meter->n_bands = config->n_bands;
4327 meter->max_delta_t = 0;
4328 meter->used = time_msec();
4329
4330 /* set up bands */
4331 for (i = 0; i < config->n_bands; ++i) {
4332 uint32_t band_max_delta_t;
4333
4334 /* Set burst size to a workable value if none specified. */
4335 if (config->bands[i].burst_size == 0) {
4336 config->bands[i].burst_size = config->bands[i].rate;
4337 }
4338
4339 meter->bands[i].up = config->bands[i];
4340 /* Convert burst size to the bucket units: */
4341 /* pkts => 1/1000 packets, kilobits => bits. */
4342 meter->bands[i].up.burst_size *= 1000;
4343 /* Initialize bucket to empty. */
4344 meter->bands[i].bucket = 0;
4345
4346 /* Figure out max delta_t that is enough to fill any bucket. */
4347 band_max_delta_t
4348 = meter->bands[i].up.burst_size / meter->bands[i].up.rate;
4349 if (band_max_delta_t > meter->max_delta_t) {
4350 meter->max_delta_t = band_max_delta_t;
4351 }
4352 }
4353
4354 meter_lock(dp, mid);
4355 dp_delete_meter(dp, mid); /* Free existing meter, if any */
4356 dp->meters[mid] = meter;
4357 meter_unlock(dp, mid);
4358
4359 return 0;
4360 }
4361 return ENOMEM;
4362}
4363
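/* Illustrative sketch, not part of the original file: the burst-size
 * conversion above in numbers.  A kbps band with rate 1000 and burst_size
 * 1000 (kilobits) is stored as 1000 * 1000 = 1000000 bucket units (bits);
 * since the bucket gains 'rate' units per millisecond, it refills from
 * empty in 1000000 / 1000 = 1000 ms, which is what band_max_delta_t
 * captures. */
static uint32_t
example_band_fill_time_ms(uint32_t burst_size, uint32_t rate)
{
    return (burst_size * 1000) / rate;  /* Units cancel for kbps and pktps. */
}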
4364static int
4365dpif_netdev_meter_get(const struct dpif *dpif,
4366 ofproto_meter_id meter_id_,
4367 struct ofputil_meter_stats *stats, uint16_t n_bands)
5dddf960 4368{
4369 const struct dp_netdev *dp = get_dp_netdev(dpif);
4370 const struct dp_meter *meter;
4371 uint32_t meter_id = meter_id_.uint32;
4372
4373 if (meter_id >= MAX_METERS) {
4374 return EFBIG;
4375 }
4376 meter = dp->meters[meter_id];
4377 if (!meter) {
4378 return ENOENT;
4379 }
4380 if (stats) {
4381 int i = 0;
4382
4383 meter_lock(dp, meter_id);
4384 stats->packet_in_count = meter->packet_count;
4385 stats->byte_in_count = meter->byte_count;
4386
4387 for (i = 0; i < n_bands && i < meter->n_bands; ++i) {
4388 stats->bands[i].packet_count = meter->bands[i].packet_count;
4389 stats->bands[i].byte_count = meter->bands[i].byte_count;
4390 }
4391 meter_unlock(dp, meter_id);
4392
4393 stats->n_bands = i;
4394 }
4395 return 0;
4396}
4397
4398static int
4399dpif_netdev_meter_del(struct dpif *dpif,
4400 ofproto_meter_id meter_id_,
4401 struct ofputil_meter_stats *stats, uint16_t n_bands)
5dddf960 4402{
4403 struct dp_netdev *dp = get_dp_netdev(dpif);
4404 int error;
4405
4406 error = dpif_netdev_meter_get(dpif, meter_id_, stats, n_bands);
4407 if (!error) {
4408 uint32_t meter_id = meter_id_.uint32;
4409
4410 meter_lock(dp, meter_id);
4411 dp_delete_meter(dp, meter_id);
4412 meter_unlock(dp, meter_id);
4413 }
4414 return error;
4415}
4416
4417\f
4418static void
4419dpif_netdev_disable_upcall(struct dpif *dpif)
4420 OVS_NO_THREAD_SAFETY_ANALYSIS
4421{
4422 struct dp_netdev *dp = get_dp_netdev(dpif);
4423 dp_netdev_disable_upcall(dp);
4424}
4425
4426static void
4427dp_netdev_enable_upcall(struct dp_netdev *dp)
4428 OVS_RELEASES(dp->upcall_rwlock)
4429{
4430 fat_rwlock_unlock(&dp->upcall_rwlock);
4431}
4432
4433static void
4434dpif_netdev_enable_upcall(struct dpif *dpif)
4435 OVS_NO_THREAD_SAFETY_ANALYSIS
4436{
4437 struct dp_netdev *dp = get_dp_netdev(dpif);
4438 dp_netdev_enable_upcall(dp);
4439}
4440
ae7ad0a1 4441static void
4442dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd)
4443{
4444 ovs_mutex_lock(&pmd->cond_mutex);
14e3e12a 4445 atomic_store_relaxed(&pmd->reload, false);
2788a1b1 4446 pmd->last_reload_seq = seq_read(pmd->reload_seq);
4447 xpthread_cond_signal(&pmd->cond);
4448 ovs_mutex_unlock(&pmd->cond_mutex);
4449}
4450
1c1e46ed 4451/* Finds and refs the dp_netdev_pmd_thread on core 'core_id'. Returns
4452 * the pointer if it succeeds, otherwise NULL (it can return NULL even if
4453 * 'core_id' is NON_PMD_CORE_ID).
4454 *
4455 * The caller must unref the returned reference. */
65f13b50 4456static struct dp_netdev_pmd_thread *
bd5131ba 4457dp_netdev_get_pmd(struct dp_netdev *dp, unsigned core_id)
4458{
4459 struct dp_netdev_pmd_thread *pmd;
55847abe 4460 const struct cmap_node *pnode;
65f13b50 4461
b19befae 4462 pnode = cmap_find(&dp->poll_threads, hash_int(core_id, 0));
4463 if (!pnode) {
4464 return NULL;
4465 }
4466 pmd = CONTAINER_OF(pnode, struct dp_netdev_pmd_thread, node);
4467
1c1e46ed 4468 return dp_netdev_pmd_try_ref(pmd) ? pmd : NULL;
4469}
4470
4471/* Sets the 'struct dp_netdev_pmd_thread' for non-pmd threads. */
4472static void
4473dp_netdev_set_nonpmd(struct dp_netdev *dp)
e9985d6a 4474 OVS_REQUIRES(dp->port_mutex)
4475{
4476 struct dp_netdev_pmd_thread *non_pmd;
4477
4478 non_pmd = xzalloc(sizeof *non_pmd);
00873463 4479 dp_netdev_configure_pmd(non_pmd, dp, NON_PMD_CORE_ID, OVS_NUMA_UNSPEC);
4480}
4481
4482/* Caller must have valid pointer to 'pmd'. */
4483static bool
4484dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd)
4485{
4486 return ovs_refcount_try_ref_rcu(&pmd->ref_cnt);
4487}
4488
4489static void
4490dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd)
4491{
4492 if (pmd && ovs_refcount_unref(&pmd->ref_cnt) == 1) {
4493 ovsrcu_postpone(dp_netdev_destroy_pmd, pmd);
4494 }
4495}
4496
4497/* Given cmap position 'pos', tries to ref the next node. If try_ref()
4498 * fails, keeps checking for next node until reaching the end of cmap.
4499 *
4500 * The caller must unref the returned reference. */
4501static struct dp_netdev_pmd_thread *
4502dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos)
4503{
4504 struct dp_netdev_pmd_thread *next;
4505
4506 do {
4507 struct cmap_node *node;
4508
4509 node = cmap_next_position(&dp->poll_threads, pos);
4510 next = node ? CONTAINER_OF(node, struct dp_netdev_pmd_thread, node)
4511 : NULL;
4512 } while (next && !dp_netdev_pmd_try_ref(next));
4513
4514 return next;
4515}
4516
65f13b50 4517/* Configures the 'pmd' based on the input argument. */
6c3eee82 4518static void
65f13b50 4519dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
00873463 4520 unsigned core_id, int numa_id)
4521{
4522 pmd->dp = dp;
4523 pmd->core_id = core_id;
4524 pmd->numa_id = numa_id;
e32971b8 4525 pmd->need_reload = false;
4526
4527 ovs_refcount_init(&pmd->ref_cnt);
65f13b50 4528 latch_init(&pmd->exit_latch);
4529 pmd->reload_seq = seq_create();
4530 pmd->last_reload_seq = seq_read(pmd->reload_seq);
14e3e12a 4531 atomic_init(&pmd->reload, false);
4532 xpthread_cond_init(&pmd->cond, NULL);
4533 ovs_mutex_init(&pmd->cond_mutex);
1c1e46ed 4534 ovs_mutex_init(&pmd->flow_mutex);
d0cca6c3 4535 ovs_mutex_init(&pmd->port_mutex);
1c1e46ed 4536 cmap_init(&pmd->flow_table);
4537 cmap_init(&pmd->classifiers);
4538 pmd->next_optimization = time_msec() + DPCLS_OPTIMIZATION_INTERVAL;
64bf452e 4539 pmd->rxq_next_cycle_store = time_msec() + PMD_RXQ_INTERVAL_LEN;
947dc567 4540 hmap_init(&pmd->poll_list);
d0cca6c3 4541 hmap_init(&pmd->tx_ports);
4542 hmap_init(&pmd->tnl_port_cache);
4543 hmap_init(&pmd->send_port_cache);
4544 /* init the 'flow_cache' since there is no
4545 * actual thread created for NON_PMD_CORE_ID. */
4546 if (core_id == NON_PMD_CORE_ID) {
4547 emc_cache_init(&pmd->flow_cache);
140dd699 4548 pmd_alloc_static_tx_qid(pmd);
4549 }
4550 cmap_insert(&dp->poll_threads, CONST_CAST(struct cmap_node *, &pmd->node),
4551 hash_int(core_id, 0));
4552}
4553
4554static void
4555dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd)
4556{
4557 struct dpcls *cls;
4558
1c1e46ed 4559 dp_netdev_pmd_flow_flush(pmd);
4560 hmap_destroy(&pmd->send_port_cache);
4561 hmap_destroy(&pmd->tnl_port_cache);
d0cca6c3 4562 hmap_destroy(&pmd->tx_ports);
947dc567 4563 hmap_destroy(&pmd->poll_list);
4564 /* All flows (including their dpcls_rules) have been deleted already */
4565 CMAP_FOR_EACH (cls, node, &pmd->classifiers) {
4566 dpcls_destroy(cls);
7c269972 4567 ovsrcu_postpone(free, cls);
4568 }
4569 cmap_destroy(&pmd->classifiers);
4570 cmap_destroy(&pmd->flow_table);
4571 ovs_mutex_destroy(&pmd->flow_mutex);
4572 latch_destroy(&pmd->exit_latch);
2788a1b1 4573 seq_destroy(pmd->reload_seq);
4574 xpthread_cond_destroy(&pmd->cond);
4575 ovs_mutex_destroy(&pmd->cond_mutex);
d0cca6c3 4576 ovs_mutex_destroy(&pmd->port_mutex);
4577 free(pmd);
4578}
4579
4580/* Stops the pmd thread, removes it from the 'dp->poll_threads',
4581 * and unrefs the struct. */
65f13b50 4582static void
e4e74c3a 4583dp_netdev_del_pmd(struct dp_netdev *dp, struct dp_netdev_pmd_thread *pmd)
6c3eee82 4584{
d0cca6c3
DDP
4585 /* NON_PMD_CORE_ID doesn't have a thread, so we don't have to synchronize,
4586 * but extra cleanup is necessary */
65f13b50 4587 if (pmd->core_id == NON_PMD_CORE_ID) {
febf4a7a 4588 ovs_mutex_lock(&dp->non_pmd_mutex);
65f13b50 4589 emc_cache_uninit(&pmd->flow_cache);
d0cca6c3 4590 pmd_free_cached_ports(pmd);
140dd699 4591 pmd_free_static_tx_qid(pmd);
febf4a7a 4592 ovs_mutex_unlock(&dp->non_pmd_mutex);
4593 } else {
4594 latch_set(&pmd->exit_latch);
4595 dp_netdev_reload_pmd__(pmd);
65f13b50
AW
4596 xpthread_join(pmd->thread, NULL);
4597 }
ae7ad0a1 4598
d0cca6c3 4599 dp_netdev_pmd_clear_ports(pmd);
ae7ad0a1 4600
4601 /* Purges the 'pmd''s flows after stopping the thread, but before
4602 * destroying the flows, so that the flow stats can be collected. */
4603 if (dp->dp_purge_cb) {
4604 dp->dp_purge_cb(dp->dp_purge_aux, pmd->core_id);
4605 }
65f13b50 4606 cmap_remove(&pmd->dp->poll_threads, &pmd->node, hash_int(pmd->core_id, 0));
1c1e46ed 4607 dp_netdev_pmd_unref(pmd);
65f13b50 4608}
6c3eee82 4609
4610/* Destroys all pmd threads. If 'non_pmd' is true it also destroys the non pmd
4611 * thread. */
65f13b50 4612static void
e32971b8 4613dp_netdev_destroy_all_pmds(struct dp_netdev *dp, bool non_pmd)
4614{
4615 struct dp_netdev_pmd_thread *pmd;
4616 struct dp_netdev_pmd_thread **pmd_list;
4617 size_t k = 0, n_pmds;
4618
e32971b8 4619 n_pmds = cmap_count(&dp->poll_threads);
d916785c 4620 pmd_list = xcalloc(n_pmds, sizeof *pmd_list);
4621
4622 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
e32971b8 4623 if (!non_pmd && pmd->core_id == NON_PMD_CORE_ID) {
4624 continue;
4625 }
4626 /* We cannot call dp_netdev_del_pmd(), since it alters
4627 * 'dp->poll_threads' (while we're iterating it) and it
4628 * might quiesce. */
4629 ovs_assert(k < n_pmds);
4630 pmd_list[k++] = pmd;
6c3eee82 4631 }
4632
4633 for (size_t i = 0; i < k; i++) {
4634 dp_netdev_del_pmd(dp, pmd_list[i]);
4635 }
4636 free(pmd_list);
65f13b50 4637}
6c3eee82 4638
4639/* Deletes all rx queues from pmd->poll_list and all the ports from
4640 * pmd->tx_ports. */
cc245ce8 4641static void
d0cca6c3 4642dp_netdev_pmd_clear_ports(struct dp_netdev_pmd_thread *pmd)
4643{
4644 struct rxq_poll *poll;
d0cca6c3 4645 struct tx_port *port;
cc245ce8 4646
d0cca6c3 4647 ovs_mutex_lock(&pmd->port_mutex);
947dc567 4648 HMAP_FOR_EACH_POP (poll, node, &pmd->poll_list) {
4649 free(poll);
4650 }
4651 HMAP_FOR_EACH_POP (port, node, &pmd->tx_ports) {
4652 free(port);
4653 }
4654 ovs_mutex_unlock(&pmd->port_mutex);
4655}
4656
e32971b8 4657/* Adds rx queue to poll_list of PMD thread, if it's not there already. */
b68872d8 4658static void
4659dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd,
4660 struct dp_netdev_rxq *rxq)
4661 OVS_REQUIRES(pmd->port_mutex)
b68872d8 4662{
4663 int qid = netdev_rxq_get_queue_id(rxq->rx);
4664 uint32_t hash = hash_2words(odp_to_u32(rxq->port->port_no), qid);
4665 struct rxq_poll *poll;
b68872d8 4666
4667 HMAP_FOR_EACH_WITH_HASH (poll, node, hash, &pmd->poll_list) {
4668 if (poll->rxq == rxq) {
4669 /* 'rxq' is already polled by this thread. Do nothing. */
4670 return;
d0cca6c3 4671 }
cc245ce8 4672 }
cc245ce8 4673
4674 poll = xmalloc(sizeof *poll);
4675 poll->rxq = rxq;
4676 hmap_insert(&pmd->poll_list, &poll->node, hash);
b68872d8 4677
e32971b8 4678 pmd->need_reload = true;
4679}
4680
e32971b8 4681/* Delete 'poll' from poll_list of PMD thread. */
ae7ad0a1 4682static void
4683dp_netdev_del_rxq_from_pmd(struct dp_netdev_pmd_thread *pmd,
4684 struct rxq_poll *poll)
d0cca6c3 4685 OVS_REQUIRES(pmd->port_mutex)
ae7ad0a1 4686{
4687 hmap_remove(&pmd->poll_list, &poll->node);
4688 free(poll);
ae7ad0a1 4689
e32971b8 4690 pmd->need_reload = true;
4691}
4692
4693/* Add 'port' to the tx port cache of 'pmd', which must be reloaded for the
4694 * changes to take effect. */
cc245ce8 4695static void
4696dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread *pmd,
4697 struct dp_netdev_port *port)
e32971b8 4698 OVS_REQUIRES(pmd->port_mutex)
d0cca6c3 4699{
4700 struct tx_port *tx;
4701
4702 tx = tx_port_lookup(&pmd->tx_ports, port->port_no);
4703 if (tx) {
4704 /* 'port' is already in this thread's tx cache. Do nothing. */
4705 return;
4706 }
4707
57eebbb4 4708 tx = xzalloc(sizeof *tx);
d0cca6c3 4709
4710 tx->port = port;
4711 tx->qid = -1;
d0cca6c3 4712
324c8374 4713 hmap_insert(&pmd->tx_ports, &tx->node, hash_port_no(tx->port->port_no));
e32971b8 4714 pmd->need_reload = true;
4715}
4716
4717/* Del 'tx' from the tx port cache of 'pmd', which must be reloaded for the
4718 * changes to take effect. */
b9584f21 4719static void
4720dp_netdev_del_port_tx_from_pmd(struct dp_netdev_pmd_thread *pmd,
4721 struct tx_port *tx)
4722 OVS_REQUIRES(pmd->port_mutex)
b9584f21 4723{
4724 hmap_remove(&pmd->tx_ports, &tx->node);
4725 free(tx);
4726 pmd->need_reload = true;
4727}
4728\f
4729static char *
4730dpif_netdev_get_datapath_version(void)
4731{
4732 return xstrdup("<built-in>");
4733}
4734
72865317 4735static void
1c1e46ed 4736dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow, int cnt, int size,
11bfdadd 4737 uint16_t tcp_flags, long long now)
72865317 4738{
eb94da30 4739 uint16_t flags;
72865317 4740
4741 atomic_store_relaxed(&netdev_flow->stats.used, now);
4742 non_atomic_ullong_add(&netdev_flow->stats.packet_count, cnt);
4743 non_atomic_ullong_add(&netdev_flow->stats.byte_count, size);
4744 atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags);
4745 flags |= tcp_flags;
4746 atomic_store_relaxed(&netdev_flow->stats.tcp_flags, flags);
4747}
4748
4749static void
4750dp_netdev_count_packet(struct dp_netdev_pmd_thread *pmd,
4751 enum dp_stat_type type, int cnt)
51852a57 4752{
eb94da30 4753 non_atomic_ullong_add(&pmd->stats.n[type], cnt);
4754}
4755
623540e4 4756static int
e14deea0 4757dp_netdev_upcall(struct dp_netdev_pmd_thread *pmd, struct dp_packet *packet_,
7af12bd7 4758 struct flow *flow, struct flow_wildcards *wc, ovs_u128 *ufid,
4759 enum dpif_upcall_type type, const struct nlattr *userdata,
4760 struct ofpbuf *actions, struct ofpbuf *put_actions)
4761{
1c1e46ed 4762 struct dp_netdev *dp = pmd->dp;
623540e4 4763
4764 if (OVS_UNLIKELY(!dp->upcall_cb)) {
4765 return ENODEV;
4766 }
4767
4768 if (OVS_UNLIKELY(!VLOG_DROP_DBG(&upcall_rl))) {
4769 struct ds ds = DS_EMPTY_INITIALIZER;
623540e4 4770 char *packet_str;
cf62fa4c 4771 struct ofpbuf key;
4772 struct odp_flow_key_parms odp_parms = {
4773 .flow = flow,
1dea1435 4774 .mask = wc ? &wc->masks : NULL,
2494ccd7 4775 .support = dp_netdev_support,
5262eea1 4776 };
4777
4778 ofpbuf_init(&key, 0);
5262eea1 4779 odp_flow_key_from_flow(&odp_parms, &key);
2482b0b0 4780 packet_str = ofp_dp_packet_to_string(packet_);
623540e4 4781
6fd6ed71 4782 odp_flow_key_format(key.data, key.size, &ds);
4783
4784 VLOG_DBG("%s: %s upcall:\n%s\n%s", dp->name,
4785 dpif_upcall_type_to_string(type), ds_cstr(&ds), packet_str);
4786
4787 ofpbuf_uninit(&key);
4788 free(packet_str);
6fd6ed71 4789
4790 ds_destroy(&ds);
4791 }
4792
4793 return dp->upcall_cb(packet_, flow, ufid, pmd->core_id, type, userdata,
4794 actions, wc, put_actions, dp->upcall_aux);
4795}
4796
4797static inline uint32_t
4798dpif_netdev_packet_get_rss_hash_orig_pkt(struct dp_packet *packet,
4799 const struct miniflow *mf)
4800{
4801 uint32_t hash;
4802
4803 if (OVS_LIKELY(dp_packet_rss_valid(packet))) {
4804 hash = dp_packet_get_rss_hash(packet);
4805 } else {
4806 hash = miniflow_hash_5tuple(mf, 0);
4807 dp_packet_set_rss_hash(packet, hash);
4808 }
4809
4810 return hash;
4811}
4812
9bbf1c3d 4813static inline uint32_t
4814dpif_netdev_packet_get_rss_hash(struct dp_packet *packet,
4815 const struct miniflow *mf)
9bbf1c3d 4816{
048963aa 4817 uint32_t hash, recirc_depth;
9bbf1c3d 4818
4819 if (OVS_LIKELY(dp_packet_rss_valid(packet))) {
4820 hash = dp_packet_get_rss_hash(packet);
4821 } else {
9bbf1c3d 4822 hash = miniflow_hash_5tuple(mf, 0);
2bc1bbd2 4823 dp_packet_set_rss_hash(packet, hash);
9bbf1c3d 4824 }
4825
4826 /* The RSS hash must account for the recirculation depth to avoid
4827 * collisions in the exact match cache */
4828 recirc_depth = *recirc_depth_get_unsafe();
4829 if (OVS_UNLIKELY(recirc_depth)) {
4830 hash = hash_finish(hash, recirc_depth);
4831 dp_packet_set_rss_hash(packet, hash);
4832 }
4833 return hash;
4834}
4835
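/* Illustrative sketch, not part of the original file: why the recirc depth
 * is folded into the hash above.  A packet seen again after recirculation
 * must not collide with its pre-recirculation EMC entry, so each depth
 * produces a distinct hash via hash_finish(): */
static uint32_t
example_rss_hash_at_depth(uint32_t rss_hash, uint32_t recirc_depth)
{
    return recirc_depth ? hash_finish(rss_hash, recirc_depth) : rss_hash;
}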
f7ce4811 4836struct packet_batch_per_flow {
4837 unsigned int byte_count;
4838 uint16_t tcp_flags;
4839 struct dp_netdev_flow *flow;
4840
1895cc8d 4841 struct dp_packet_batch array;
4842};
4843
4844static inline void
4845packet_batch_per_flow_update(struct packet_batch_per_flow *batch,
4846 struct dp_packet *packet,
4847 const struct miniflow *mf)
8cbf4f47 4848{
cf62fa4c 4849 batch->byte_count += dp_packet_size(packet);
4850 batch->tcp_flags |= miniflow_get_tcp_flags(mf);
4851 batch->array.packets[batch->array.count++] = packet;
4852}
4853
4854static inline void
4855packet_batch_per_flow_init(struct packet_batch_per_flow *batch,
4856 struct dp_netdev_flow *flow)
8cbf4f47 4857{
11e5cf1f 4858 flow->batch = batch;
8cbf4f47 4859
11e5cf1f 4860 batch->flow = flow;
1895cc8d 4861 dp_packet_batch_init(&batch->array);
4862 batch->byte_count = 0;
4863 batch->tcp_flags = 0;
4864}
4865
4866static inline void
4867packet_batch_per_flow_execute(struct packet_batch_per_flow *batch,
4868 struct dp_netdev_pmd_thread *pmd,
4869 long long now)
4870{
4871 struct dp_netdev_actions *actions;
4872 struct dp_netdev_flow *flow = batch->flow;
4873
1895cc8d 4874 dp_netdev_flow_used(flow, batch->array.count, batch->byte_count,
11bfdadd 4875 batch->tcp_flags, now);
4876
4877 actions = dp_netdev_flow_get_actions(flow);
4878
66e4ad8a 4879 dp_netdev_execute_actions(pmd, &batch->array, true, &flow->flow,
324c8374 4880 actions->actions, actions->size, now);
4881}
4882
8aaa125d 4883static inline void
e14deea0 4884dp_netdev_queue_batches(struct dp_packet *pkt,
9bbf1c3d 4885 struct dp_netdev_flow *flow, const struct miniflow *mf,
4886 struct packet_batch_per_flow *batches,
4887 size_t *n_batches)
9bbf1c3d 4888{
f7ce4811 4889 struct packet_batch_per_flow *batch = flow->batch;
11e5cf1f 4890
4891 if (OVS_UNLIKELY(!batch)) {
4892 batch = &batches[(*n_batches)++];
f7ce4811 4893 packet_batch_per_flow_init(batch, flow);
4894 }
4895
f7ce4811 4896 packet_batch_per_flow_update(batch, pkt, mf);
4897}
4898
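/* Illustrative sketch, not part of the original file: how a caller drains
 * the per-flow batches built above.  Each megaflow's actions run once per
 * batch rather than once per packet; detaching 'flow->batch' first keeps
 * the next input burst from appending to a stale batch. */
static void
example_execute_batches(struct dp_netdev_pmd_thread *pmd,
                        struct packet_batch_per_flow *batches,
                        size_t n_batches, long long now)
{
    for (size_t i = 0; i < n_batches; i++) {
        batches[i].flow->batch = NULL;
        packet_batch_per_flow_execute(&batches[i], pmd, now);
    }
}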
9bbf1c3d 4899/* Try to process all ('cnt') the 'packets' using only the exact match cache
a90ed026 4900 * 'pmd->flow_cache'. If a flow is not found for a packet 'packets[i]', the
4901 * miniflow is copied into 'keys' and the packet pointer is moved to the
4902 * beginning of the 'packets' array.
4903 *
4904 * The function returns the number of packets that need to be processed in the
4905 * 'packets' array (they have been moved to the beginning of the vector).
a90ed026 4906 *
4907 * For performance reasons a caller may choose not to initialize the metadata
4908 * in 'packets_'. If 'md_is_valid' is false, the metadata in 'packets'
4909 * is not valid and must be initialized by this function using 'port_no'.
4910 * If 'md_is_valid' is true, the metadata is already valid and 'port_no'
4911 * will be ignored.
4912 */
4913static inline size_t
4914emc_processing(struct dp_netdev_pmd_thread *pmd,
4915 struct dp_packet_batch *packets_,
1895cc8d 4916 struct netdev_flow_key *keys,
f7ce4811 4917 struct packet_batch_per_flow batches[], size_t *n_batches,
a90ed026 4918 bool md_is_valid, odp_port_t port_no)
72865317 4919{
65f13b50 4920 struct emc_cache *flow_cache = &pmd->flow_cache;
b89c678b 4921 struct netdev_flow_key *key = &keys[0];
4922 size_t n_missed = 0, n_dropped = 0;
4923 struct dp_packet *packet;
45df9fef 4924 const size_t cnt = dp_packet_batch_size(packets_);
f79b1ddb 4925 uint32_t cur_min;
72c84bc2 4926 int i;
8cbf4f47 4927
4928 atomic_read_relaxed(&pmd->dp->emc_insert_min, &cur_min);
4929
45df9fef 4930 DP_PACKET_BATCH_REFILL_FOR_EACH (i, cnt, packet, packets_) {
9bbf1c3d 4931 struct dp_netdev_flow *flow;
9bbf1c3d 4932
4933 if (OVS_UNLIKELY(dp_packet_size(packet) < ETH_HEADER_LEN)) {
4934 dp_packet_delete(packet);
3d88a620 4935 n_dropped++;
4936 continue;
4937 }
8cbf4f47 4938
45df9fef 4939 if (i != cnt - 1) {
72c84bc2 4940 struct dp_packet **packets = packets_->packets;
a90ed026 4941 /* Prefetch next packet data and metadata. */
72a5e2b8 4942 OVS_PREFETCH(dp_packet_data(packets[i+1]));
a90ed026 4943 pkt_metadata_prefetch_init(&packets[i+1]->md);
4944 }
4945
4946 if (!md_is_valid) {
4947 pkt_metadata_init(&packet->md, port_no);
4948 }
5a2fed48 4949 miniflow_extract(packet, &key->mf);
d262ac2c 4950 key->len = 0; /* Not computed yet. */
4951 /* If EMC is disabled skip hash computation and emc_lookup */
4952 if (cur_min) {
4953 if (!md_is_valid) {
4954 key->hash = dpif_netdev_packet_get_rss_hash_orig_pkt(packet,
4955 &key->mf);
4956 } else {
4957 key->hash = dpif_netdev_packet_get_rss_hash(packet, &key->mf);
4958 }
4959 flow = emc_lookup(flow_cache, key);
4960 } else {
4961 flow = NULL;
4962 }
8aaa125d 4963 if (OVS_LIKELY(flow)) {
5a2fed48 4964 dp_netdev_queue_batches(packet, flow, &key->mf, batches,
8aaa125d
DDP
4965 n_batches);
4966 } else {
d1aa0b94 4967 /* Exact match cache missed. Group missed packets together at
72c84bc2
AZ
4968 * the beginning of the 'packets' array. */
4969 dp_packet_batch_refill(packets_, packet, i);
400486f7
DDP
4970 /* 'key[n_missed]' contains the key of the current packet and it
4971 * must be returned to the caller. The next key should be extracted
4972 * to 'keys[n_missed + 1]'. */
4973 key = &keys[++n_missed];
9bbf1c3d
DDP
4974 }
4975 }
4976
47a45d86 4977 dp_netdev_count_packet(pmd, DP_STAT_EXACT_HIT,
45df9fef 4978 cnt - n_dropped - n_missed);
4f150744 4979
72c84bc2 4980 return dp_packet_batch_size(packets_);
9bbf1c3d
DDP
4981}
4982
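/* A note on 'cur_min' above: besides gating lookups, the same
 * 'emc_insert_min' value throttles insertions into the EMC.  A simplified
 * sketch of the idea behind emc_probabilistic_insert() (the real function
 * lives elsewhere in this file; the details here are illustrative):
 *
 *     uint32_t min;
 *     atomic_read_relaxed(&pmd->dp->emc_insert_min, &min);
 *     if (min && random_uint32() <= min) {
 *         emc_insert(&pmd->flow_cache, key, flow);  // e.g. ~1% of misses
 *     }
 *
 * Inserting only a sampled fraction of missed flows keeps a very large flow
 * set from thrashing the small per-PMD exact match cache. */
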
static inline void
handle_packet_upcall(struct dp_netdev_pmd_thread *pmd,
                     struct dp_packet *packet,
                     const struct netdev_flow_key *key,
                     struct ofpbuf *actions, struct ofpbuf *put_actions,
                     int *lost_cnt, long long now)
{
    struct ofpbuf *add_actions;
    struct dp_packet_batch b;
    struct match match;
    ovs_u128 ufid;
    int error;

    match.tun_md.valid = false;
    miniflow_expand(&key->mf, &match.flow);

    ofpbuf_clear(actions);
    ofpbuf_clear(put_actions);

    dpif_flow_hash(pmd->dp->dpif, &match.flow, sizeof match.flow, &ufid);
    error = dp_netdev_upcall(pmd, packet, &match.flow, &match.wc,
                             &ufid, DPIF_UC_MISS, NULL, actions,
                             put_actions);
    if (OVS_UNLIKELY(error && error != ENOSPC)) {
        dp_packet_delete(packet);
        (*lost_cnt)++;
        return;
    }

    /* The Netlink encoding of datapath flow keys cannot express
     * wildcarding the presence of a VLAN tag. Instead, a missing VLAN
     * tag is interpreted as exact match on the fact that there is no
     * VLAN. Unless we refactor a lot of code that translates between
     * Netlink and struct flow representations, we have to do the same
     * here. */
    if (!match.wc.masks.vlans[0].tci) {
        match.wc.masks.vlans[0].tci = htons(0xffff);
    }

    /* We can't allow the packet batching in the next loop to execute
     * the actions. Otherwise, if there are any slow path actions,
     * we'll send the packet up twice. */
    dp_packet_batch_init_packet(&b, packet);
    dp_netdev_execute_actions(pmd, &b, true, &match.flow,
                              actions->data, actions->size, now);

    add_actions = put_actions->size ? put_actions : actions;
    if (OVS_LIKELY(error != ENOSPC)) {
        struct dp_netdev_flow *netdev_flow;

        /* XXX: There's a race window where a flow covering this packet
         * could have already been installed since we last did the flow
         * lookup before upcall. This could be solved by moving the
         * mutex lock outside the loop, but that's an awful long time
         * to be locking everyone out of making flow installs. If we
         * move to a per-core classifier, it would be reasonable. */
        ovs_mutex_lock(&pmd->flow_mutex);
        netdev_flow = dp_netdev_pmd_lookup_flow(pmd, key, NULL);
        if (OVS_LIKELY(!netdev_flow)) {
            netdev_flow = dp_netdev_flow_add(pmd, &match, &ufid,
                                             add_actions->data,
                                             add_actions->size);
        }
        ovs_mutex_unlock(&pmd->flow_mutex);
        emc_probabilistic_insert(pmd, key, netdev_flow);
    }
}

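/* The lock / re-lookup / insert sequence above is a double-checked install
 * (the sketch below is illustrative, mirroring the code above): the lookup
 * that preceded the upcall ran without 'flow_mutex', so another thread may
 * have installed a flow covering this packet in the meantime.
 *
 *     ovs_mutex_lock(&pmd->flow_mutex);
 *     netdev_flow = dp_netdev_pmd_lookup_flow(pmd, key, NULL);   (re-check)
 *     if (OVS_LIKELY(!netdev_flow)) {
 *         netdev_flow = dp_netdev_flow_add(pmd, ...);         (install once)
 *     }
 *     ovs_mutex_unlock(&pmd->flow_mutex);
 *
 * Keeping the critical section this small avoids holding 'flow_mutex' across
 * the whole upcall. */
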
static inline void
fast_path_processing(struct dp_netdev_pmd_thread *pmd,
                     struct dp_packet_batch *packets_,
                     struct netdev_flow_key *keys,
                     struct packet_batch_per_flow batches[], size_t *n_batches,
                     odp_port_t in_port,
                     long long now)
{
    const size_t cnt = dp_packet_batch_size(packets_);
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = cnt;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct dp_packet *packet;
    struct dpcls *cls;
    struct dpcls_rule *rules[PKT_ARRAY_SIZE];
    struct dp_netdev *dp = pmd->dp;
    int miss_cnt = 0, lost_cnt = 0;
    int lookup_cnt = 0, add_lookup_cnt;
    bool any_miss;
    size_t i;

    for (i = 0; i < cnt; i++) {
        /* Key length is needed in all the cases, hash computed on demand. */
        keys[i].len = netdev_flow_key_size(miniflow_n_values(&keys[i].mf));
    }
    /* Get the classifier for the in_port. */
    cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port);
    if (OVS_LIKELY(cls)) {
        any_miss = !dpcls_lookup(cls, keys, rules, cnt, &lookup_cnt);
    } else {
        any_miss = true;
        memset(rules, 0, sizeof(rules));
    }
    if (OVS_UNLIKELY(any_miss) && !fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
        uint64_t actions_stub[512 / 8], slow_stub[512 / 8];
        struct ofpbuf actions, put_actions;

        ofpbuf_use_stub(&actions, actions_stub, sizeof actions_stub);
        ofpbuf_use_stub(&put_actions, slow_stub, sizeof slow_stub);

        DP_PACKET_BATCH_FOR_EACH (packet, packets_) {
            struct dp_netdev_flow *netdev_flow;

            if (OVS_LIKELY(rules[i])) {
                continue;
            }

            /* It's possible that an earlier slow path execution installed
             * a rule covering this flow. In this case, it's a lot cheaper
             * to catch it here than execute a miss. */
            netdev_flow = dp_netdev_pmd_lookup_flow(pmd, &keys[i],
                                                    &add_lookup_cnt);
            if (netdev_flow) {
                lookup_cnt += add_lookup_cnt;
                rules[i] = &netdev_flow->cr;
                continue;
            }

            miss_cnt++;
            handle_packet_upcall(pmd, packet, &keys[i], &actions,
                                 &put_actions, &lost_cnt, now);
        }

        ofpbuf_uninit(&actions);
        ofpbuf_uninit(&put_actions);
        fat_rwlock_unlock(&dp->upcall_rwlock);
    } else if (OVS_UNLIKELY(any_miss)) {
        DP_PACKET_BATCH_FOR_EACH (packet, packets_) {
            if (OVS_UNLIKELY(!rules[i])) {
                dp_packet_delete(packet);
                lost_cnt++;
                miss_cnt++;
            }
        }
    }

    DP_PACKET_BATCH_FOR_EACH (packet, packets_) {
        struct dp_netdev_flow *flow;

        if (OVS_UNLIKELY(!rules[i])) {
            continue;
        }

        flow = dp_netdev_flow_cast(rules[i]);

        emc_probabilistic_insert(pmd, &keys[i], flow);
        dp_netdev_queue_batches(packet, flow, &keys[i].mf, batches, n_batches);
    }

    dp_netdev_count_packet(pmd, DP_STAT_MASKED_HIT, cnt - miss_cnt);
    dp_netdev_count_packet(pmd, DP_STAT_LOOKUP_HIT, lookup_cnt);
    dp_netdev_count_packet(pmd, DP_STAT_MISS, miss_cnt);
    dp_netdev_count_packet(pmd, DP_STAT_LOST, lost_cnt);
}

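/* Worked example for the counters above (illustrative numbers): for a burst
 * where 'cnt' is 32, 4 packets missed the classifier ('miss_cnt' == 4) and
 * 1 of those could not be upcalled ('lost_cnt' == 1), the PMD accounts:
 *
 *     DP_STAT_MASKED_HIT : 32 - 4 = 28
 *     DP_STAT_MISS       : 4
 *     DP_STAT_LOST       : 1
 *
 * DP_STAT_LOOKUP_HIT accumulates the subtable positions reported by
 * dpcls_lookup(), so dividing it by the masked hits gives the average
 * number of subtables probed per hit. */
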
/* Packets enter the datapath from a port (or from recirculation) here.
 *
 * When 'md_is_valid' is true the metadata in 'packets' is already valid.
 * When false the metadata in 'packets' needs to be initialized. */
static void
dp_netdev_input__(struct dp_netdev_pmd_thread *pmd,
                  struct dp_packet_batch *packets,
                  bool md_is_valid, odp_port_t port_no)
{
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = dp_packet_batch_size(packets);
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    OVS_ALIGNED_VAR(CACHE_LINE_SIZE)
        struct netdev_flow_key keys[PKT_ARRAY_SIZE];
    struct packet_batch_per_flow batches[PKT_ARRAY_SIZE];
    long long now = time_msec();
    size_t n_batches;
    odp_port_t in_port;

    n_batches = 0;
    emc_processing(pmd, packets, keys, batches, &n_batches,
                   md_is_valid, port_no);
    if (!dp_packet_batch_is_empty(packets)) {
        /* Get ingress port from first packet's metadata. */
        in_port = packets->packets[0]->md.in_port.odp_port;
        fast_path_processing(pmd, packets, keys, batches, &n_batches,
                             in_port, now);
    }

    /* All the flow batches need to be reset before any call to
     * packet_batch_per_flow_execute() as it could potentially trigger
     * recirculation. When a packet matching flow 'j' happens to be
     * recirculated, the nested call to dp_netdev_input__() could potentially
     * classify the packet as matching another flow - say 'k'. It could happen
     * that in the previous call to dp_netdev_input__() that same flow 'k' had
     * already its own batches[k] still waiting to be served. So if its
     * 'batch' member is not reset, the recirculated packet would be wrongly
     * appended to batches[k] of the 1st call to dp_netdev_input__(). */
    size_t i;
    for (i = 0; i < n_batches; i++) {
        batches[i].flow->batch = NULL;
    }

    for (i = 0; i < n_batches; i++) {
        packet_batch_per_flow_execute(&batches[i], pmd, now);
    }
}

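/* Minimal usage sketch (illustrative; the real caller is the PMD receive
 * loop elsewhere in this file): a received burst enters the pipeline with
 * its ingress port, and metadata is initialized on the way in:
 *
 *     struct dp_packet_batch batch;
 *     dp_packet_batch_init(&batch);
 *     ... fill 'batch' from an rx queue ...
 *     dp_netdev_input(pmd, &batch, port_no);
 *
 * dp_netdev_recirculate() below is the same entry point with 'md_is_valid'
 * set, since recirculated packets already carry valid metadata. */
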
static void
dp_netdev_input(struct dp_netdev_pmd_thread *pmd,
                struct dp_packet_batch *packets,
                odp_port_t port_no)
{
    dp_netdev_input__(pmd, packets, false, port_no);
}

static void
dp_netdev_recirculate(struct dp_netdev_pmd_thread *pmd,
                      struct dp_packet_batch *packets)
{
    dp_netdev_input__(pmd, packets, true, 0);
}

struct dp_netdev_execute_aux {
    struct dp_netdev_pmd_thread *pmd;
    long long now;
    const struct flow *flow;
};

static void
dpif_netdev_register_dp_purge_cb(struct dpif *dpif, dp_purge_callback *cb,
                                 void *aux)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp->dp_purge_aux = aux;
    dp->dp_purge_cb = cb;
}

static void
dpif_netdev_register_upcall_cb(struct dpif *dpif, upcall_callback *cb,
                               void *aux)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp->upcall_aux = aux;
    dp->upcall_cb = cb;
}

static void
dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
                               long long now, bool purge)
{
    struct tx_port *tx;
    struct dp_netdev_port *port;
    long long interval;

    HMAP_FOR_EACH (tx, node, &pmd->send_port_cache) {
        if (!tx->port->dynamic_txqs) {
            continue;
        }
        interval = now - tx->last_used;
        if (tx->qid >= 0 && (purge || interval >= XPS_TIMEOUT_MS)) {
            port = tx->port;
            ovs_mutex_lock(&port->txq_used_mutex);
            port->txq_used[tx->qid]--;
            ovs_mutex_unlock(&port->txq_used_mutex);
            tx->qid = -1;
        }
    }
}

static int
dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread *pmd,
                           struct tx_port *tx, long long now)
{
    struct dp_netdev_port *port;
    long long interval;
    int i, min_cnt, min_qid;

    if (OVS_UNLIKELY(!now)) {
        now = time_msec();
    }

    interval = now - tx->last_used;
    tx->last_used = now;

    if (OVS_LIKELY(tx->qid >= 0 && interval < XPS_TIMEOUT_MS)) {
        return tx->qid;
    }

    port = tx->port;

    ovs_mutex_lock(&port->txq_used_mutex);
    if (tx->qid >= 0) {
        port->txq_used[tx->qid]--;
        tx->qid = -1;
    }

    min_cnt = -1;
    min_qid = 0;
    for (i = 0; i < netdev_n_txq(port->netdev); i++) {
        if (port->txq_used[i] < min_cnt || min_cnt == -1) {
            min_cnt = port->txq_used[i];
            min_qid = i;
        }
    }

    port->txq_used[min_qid]++;
    tx->qid = min_qid;

    ovs_mutex_unlock(&port->txq_used_mutex);

    dpif_netdev_xps_revalidate_pmd(pmd, now, false);

    VLOG_DBG("Core %d: New TX queue ID %d for port \'%s\'.",
             pmd->core_id, tx->qid, netdev_get_name(tx->port->netdev));
    return min_qid;
}

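/* Worked example for the queue selection above (illustrative numbers): with
 * 4 tx queues and 'txq_used' == { 2, 1, 1, 3 }, the scan picks the first
 * least-used queue, qid 1, and bumps the counts to { 2, 2, 1, 3 }.  A cached
 * assignment is reused for up to XPS_TIMEOUT_MS, so the scan (and the
 * 'txq_used_mutex' acquisition) stays off the fast path for steady traffic. */
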
static struct tx_port *
pmd_tnl_port_cache_lookup(const struct dp_netdev_pmd_thread *pmd,
                          odp_port_t port_no)
{
    return tx_port_lookup(&pmd->tnl_port_cache, port_no);
}

static struct tx_port *
pmd_send_port_cache_lookup(const struct dp_netdev_pmd_thread *pmd,
                           odp_port_t port_no)
{
    return tx_port_lookup(&pmd->send_port_cache, port_no);
}

static int
push_tnl_action(const struct dp_netdev_pmd_thread *pmd,
                const struct nlattr *attr,
                struct dp_packet_batch *batch)
{
    struct tx_port *tun_port;
    const struct ovs_action_push_tnl *data;
    int err;

    data = nl_attr_get(attr);

    tun_port = pmd_tnl_port_cache_lookup(pmd, data->tnl_port);
    if (!tun_port) {
        err = -EINVAL;
        goto error;
    }
    err = netdev_push_header(tun_port->port->netdev, batch, data);
    if (!err) {
        return 0;
    }
error:
    dp_packet_delete_batch(batch, true);
    return err;
}

static void
dp_execute_userspace_action(struct dp_netdev_pmd_thread *pmd,
                            struct dp_packet *packet, bool may_steal,
                            struct flow *flow, ovs_u128 *ufid,
                            struct ofpbuf *actions,
                            const struct nlattr *userdata, long long now)
{
    struct dp_packet_batch b;
    int error;

    ofpbuf_clear(actions);

    error = dp_netdev_upcall(pmd, packet, flow, NULL, ufid,
                             DPIF_UC_ACTION, userdata, actions,
                             NULL);
    if (!error || error == ENOSPC) {
        dp_packet_batch_init_packet(&b, packet);
        dp_netdev_execute_actions(pmd, &b, may_steal, flow,
                                  actions->data, actions->size, now);
    } else if (may_steal) {
        dp_packet_delete(packet);
    }
}

a36de779 5373static void
1895cc8d 5374dp_execute_cb(void *aux_, struct dp_packet_batch *packets_,
09f9da0b 5375 const struct nlattr *a, bool may_steal)
4b27db64 5376 OVS_NO_THREAD_SAFETY_ANALYSIS
9080a111
JR
5377{
5378 struct dp_netdev_execute_aux *aux = aux_;
623540e4 5379 uint32_t *depth = recirc_depth_get();
28e2fa02
DDP
5380 struct dp_netdev_pmd_thread *pmd = aux->pmd;
5381 struct dp_netdev *dp = pmd->dp;
09f9da0b 5382 int type = nl_attr_type(a);
324c8374 5383 long long now = aux->now;
d0cca6c3 5384 struct tx_port *p;
9080a111 5385
09f9da0b
JR
5386 switch ((enum ovs_action_attr)type) {
5387 case OVS_ACTION_ATTR_OUTPUT:
57eebbb4 5388 p = pmd_send_port_cache_lookup(pmd, nl_attr_get_odp_port(a));
26a5075b 5389 if (OVS_LIKELY(p)) {
347ba9bb 5390 int tx_qid;
324c8374 5391 bool dynamic_txqs;
347ba9bb 5392
324c8374
IM
5393 dynamic_txqs = p->port->dynamic_txqs;
5394 if (dynamic_txqs) {
5395 tx_qid = dpif_netdev_xps_get_tx_qid(pmd, p, now);
5396 } else {
82d765f6 5397 tx_qid = pmd->static_tx_qid;
324c8374 5398 }
347ba9bb 5399
324c8374
IM
5400 netdev_send(p->port->netdev, tx_qid, packets_, may_steal,
5401 dynamic_txqs);
ac8c2081 5402 return;
8a4e3a85 5403 }
09f9da0b
JR
5404 break;
5405
a36de779
PS
5406 case OVS_ACTION_ATTR_TUNNEL_PUSH:
5407 if (*depth < MAX_RECIRC_DEPTH) {
aaca4fe0 5408 dp_packet_batch_apply_cutlen(packets_);
7c12dfc5 5409 push_tnl_action(pmd, a, packets_);
a36de779
PS
5410 return;
5411 }
5412 break;
5413
5414 case OVS_ACTION_ATTR_TUNNEL_POP:
5415 if (*depth < MAX_RECIRC_DEPTH) {
aaca4fe0 5416 struct dp_packet_batch *orig_packets_ = packets_;
8611f9a4 5417 odp_port_t portno = nl_attr_get_odp_port(a);
a36de779 5418
57eebbb4 5419 p = pmd_tnl_port_cache_lookup(pmd, portno);
a36de779 5420 if (p) {
1895cc8d 5421 struct dp_packet_batch tnl_pkt;
a36de779
PS
5422
5423 if (!may_steal) {
aaca4fe0
WT
5424 dp_packet_batch_clone(&tnl_pkt, packets_);
5425 packets_ = &tnl_pkt;
5426 dp_packet_batch_reset_cutlen(orig_packets_);
a36de779
PS
5427 }
5428
aaca4fe0
WT
5429 dp_packet_batch_apply_cutlen(packets_);
5430
324c8374 5431 netdev_pop_header(p->port->netdev, packets_);
72c84bc2 5432 if (dp_packet_batch_is_empty(packets_)) {
1c8f98d9
PS
5433 return;
5434 }
9235b479 5435
72c84bc2
AZ
5436 struct dp_packet *packet;
5437 DP_PACKET_BATCH_FOR_EACH (packet, packets_) {
5438 packet->md.in_port.odp_port = portno;
a36de779 5439 }
9235b479
PS
5440
5441 (*depth)++;
5442 dp_netdev_recirculate(pmd, packets_);
5443 (*depth)--;
a36de779
PS
5444 return;
5445 }
5446 }
5447 break;
5448
623540e4
EJ
5449 case OVS_ACTION_ATTR_USERSPACE:
5450 if (!fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
aaca4fe0 5451 struct dp_packet_batch *orig_packets_ = packets_;
623540e4 5452 const struct nlattr *userdata;
aaca4fe0 5453 struct dp_packet_batch usr_pkt;
623540e4
EJ
5454 struct ofpbuf actions;
5455 struct flow flow;
7af12bd7 5456 ovs_u128 ufid;
aaca4fe0 5457 bool clone = false;
4fc65926 5458
623540e4
EJ
5459 userdata = nl_attr_find_nested(a, OVS_USERSPACE_ATTR_USERDATA);
5460 ofpbuf_init(&actions, 0);
8cbf4f47 5461
aaca4fe0
WT
5462 if (packets_->trunc) {
5463 if (!may_steal) {
5464 dp_packet_batch_clone(&usr_pkt, packets_);
5465 packets_ = &usr_pkt;
aaca4fe0
WT
5466 clone = true;
5467 dp_packet_batch_reset_cutlen(orig_packets_);
5468 }
5469
5470 dp_packet_batch_apply_cutlen(packets_);
5471 }
5472
72c84bc2
AZ
5473 struct dp_packet *packet;
5474 DP_PACKET_BATCH_FOR_EACH (packet, packets_) {
5475 flow_extract(packet, &flow);
7af12bd7 5476 dpif_flow_hash(dp->dpif, &flow, sizeof flow, &ufid);
72c84bc2 5477 dp_execute_userspace_action(pmd, packet, may_steal, &flow,
324c8374 5478 &ufid, &actions, userdata, now);
db73f716 5479 }
aaca4fe0
WT
5480
5481 if (clone) {
5482 dp_packet_delete_batch(packets_, true);
5483 }
5484
623540e4
EJ
5485 ofpbuf_uninit(&actions);
5486 fat_rwlock_unlock(&dp->upcall_rwlock);
6b31e073 5487
ac8c2081
DDP
5488 return;
5489 }
09f9da0b 5490 break;
572f732a 5491
adcf00ba
AZ
5492 case OVS_ACTION_ATTR_RECIRC:
5493 if (*depth < MAX_RECIRC_DEPTH) {
1895cc8d 5494 struct dp_packet_batch recirc_pkts;
572f732a 5495
28e2fa02 5496 if (!may_steal) {
1895cc8d
PS
5497 dp_packet_batch_clone(&recirc_pkts, packets_);
5498 packets_ = &recirc_pkts;
28e2fa02 5499 }
8cbf4f47 5500
72c84bc2
AZ
5501 struct dp_packet *packet;
5502 DP_PACKET_BATCH_FOR_EACH (packet, packets_) {
5503 packet->md.recirc_id = nl_attr_get_u32(a);
8cbf4f47 5504 }
28e2fa02
DDP
5505
5506 (*depth)++;
1895cc8d 5507 dp_netdev_recirculate(pmd, packets_);
adcf00ba
AZ
5508 (*depth)--;
5509
ac8c2081 5510 return;
adcf00ba 5511 }
ac8c2081
DDP
5512
5513 VLOG_WARN("Packet dropped. Max recirculation depth exceeded.");
572f732a 5514 break;
572f732a 5515
5cf3edb3
DDP
5516 case OVS_ACTION_ATTR_CT: {
5517 const struct nlattr *b;
a76a37ef 5518 bool force = false;
5cf3edb3
DDP
5519 bool commit = false;
5520 unsigned int left;
5521 uint16_t zone = 0;
5522 const char *helper = NULL;
5523 const uint32_t *setmark = NULL;
5524 const struct ovs_key_ct_labels *setlabel = NULL;
4cddb1f0
DB
5525 struct nat_action_info_t nat_action_info;
5526 struct nat_action_info_t *nat_action_info_ref = NULL;
5527 bool nat_config = false;
5cf3edb3
DDP
5528
5529 NL_ATTR_FOR_EACH_UNSAFE (b, left, nl_attr_get(a),
5530 nl_attr_get_size(a)) {
5531 enum ovs_ct_attr sub_type = nl_attr_type(b);
5532
5533 switch(sub_type) {
b80e259f 5534 case OVS_CT_ATTR_FORCE_COMMIT:
a76a37ef
JR
5535 force = true;
5536 /* fall through. */
5cf3edb3
DDP
5537 case OVS_CT_ATTR_COMMIT:
5538 commit = true;
5539 break;
5540 case OVS_CT_ATTR_ZONE:
5541 zone = nl_attr_get_u16(b);
5542 break;
5543 case OVS_CT_ATTR_HELPER:
5544 helper = nl_attr_get_string(b);
5545 break;
5546 case OVS_CT_ATTR_MARK:
5547 setmark = nl_attr_get(b);
5548 break;
5549 case OVS_CT_ATTR_LABELS:
5550 setlabel = nl_attr_get(b);
5551 break;
8e83854c
JR
5552 case OVS_CT_ATTR_EVENTMASK:
5553 /* Silently ignored, as userspace datapath does not generate
5554 * netlink events. */
5555 break;
4cddb1f0
DB
5556 case OVS_CT_ATTR_NAT: {
5557 const struct nlattr *b_nest;
5558 unsigned int left_nest;
5559 bool ip_min_specified = false;
5560 bool proto_num_min_specified = false;
5561 bool ip_max_specified = false;
5562 bool proto_num_max_specified = false;
5563 memset(&nat_action_info, 0, sizeof nat_action_info);
5564 nat_action_info_ref = &nat_action_info;
5565
5566 NL_NESTED_FOR_EACH_UNSAFE (b_nest, left_nest, b) {
5567 enum ovs_nat_attr sub_type_nest = nl_attr_type(b_nest);
5568
5569 switch (sub_type_nest) {
5570 case OVS_NAT_ATTR_SRC:
5571 case OVS_NAT_ATTR_DST:
5572 nat_config = true;
5573 nat_action_info.nat_action |=
5574 ((sub_type_nest == OVS_NAT_ATTR_SRC)
5575 ? NAT_ACTION_SRC : NAT_ACTION_DST);
5576 break;
5577 case OVS_NAT_ATTR_IP_MIN:
5578 memcpy(&nat_action_info.min_addr,
5579 nl_attr_get(b_nest),
5580 nl_attr_get_size(b_nest));
5581 ip_min_specified = true;
5582 break;
5583 case OVS_NAT_ATTR_IP_MAX:
5584 memcpy(&nat_action_info.max_addr,
5585 nl_attr_get(b_nest),
5586 nl_attr_get_size(b_nest));
5587 ip_max_specified = true;
5588 break;
5589 case OVS_NAT_ATTR_PROTO_MIN:
5590 nat_action_info.min_port =
5591 nl_attr_get_u16(b_nest);
5592 proto_num_min_specified = true;
5593 break;
5594 case OVS_NAT_ATTR_PROTO_MAX:
5595 nat_action_info.max_port =
5596 nl_attr_get_u16(b_nest);
5597 proto_num_max_specified = true;
5598 break;
5599 case OVS_NAT_ATTR_PERSISTENT:
5600 case OVS_NAT_ATTR_PROTO_HASH:
5601 case OVS_NAT_ATTR_PROTO_RANDOM:
5602 break;
5603 case OVS_NAT_ATTR_UNSPEC:
5604 case __OVS_NAT_ATTR_MAX:
5605 OVS_NOT_REACHED();
5606 }
5607 }
5608
5609 if (ip_min_specified && !ip_max_specified) {
5610 nat_action_info.max_addr = nat_action_info.min_addr;
5611 }
5612 if (proto_num_min_specified && !proto_num_max_specified) {
5613 nat_action_info.max_port = nat_action_info.min_port;
5614 }
5615 if (proto_num_min_specified || proto_num_max_specified) {
5616 if (nat_action_info.nat_action & NAT_ACTION_SRC) {
5617 nat_action_info.nat_action |= NAT_ACTION_SRC_PORT;
5618 } else if (nat_action_info.nat_action & NAT_ACTION_DST) {
5619 nat_action_info.nat_action |= NAT_ACTION_DST_PORT;
5620 }
5621 }
5622 break;
5623 }
5cf3edb3
DDP
5624 case OVS_CT_ATTR_UNSPEC:
5625 case __OVS_CT_ATTR_MAX:
5626 OVS_NOT_REACHED();
5627 }
5628 }
5629
4cddb1f0
DB
5630 /* We won't be able to function properly in this case, hence
5631 * complain loudly. */
5632 if (nat_config && !commit) {
5633 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
5634 VLOG_WARN_RL(&rl, "NAT specified without commit.");
5635 }
5636
a76a37ef 5637 conntrack_execute(&dp->conntrack, packets_, aux->flow->dl_type, force,
4cddb1f0 5638 commit, zone, setmark, setlabel, helper,
94053e66 5639 nat_action_info_ref, now);
07659514 5640 break;
5cf3edb3 5641 }
07659514 5642
5dddf960 5643 case OVS_ACTION_ATTR_METER:
4b27db64
JR
5644 dp_netdev_run_meter(pmd->dp, packets_, nl_attr_get_u32(a),
5645 time_msec());
5646 break;
5647
09f9da0b
JR
5648 case OVS_ACTION_ATTR_PUSH_VLAN:
5649 case OVS_ACTION_ATTR_POP_VLAN:
5650 case OVS_ACTION_ATTR_PUSH_MPLS:
5651 case OVS_ACTION_ATTR_POP_MPLS:
5652 case OVS_ACTION_ATTR_SET:
6d670e7f 5653 case OVS_ACTION_ATTR_SET_MASKED:
09f9da0b 5654 case OVS_ACTION_ATTR_SAMPLE:
53e1d6f1 5655 case OVS_ACTION_ATTR_HASH:
09f9da0b 5656 case OVS_ACTION_ATTR_UNSPEC:
aaca4fe0 5657 case OVS_ACTION_ATTR_TRUNC:
6fcecb85
YY
5658 case OVS_ACTION_ATTR_PUSH_ETH:
5659 case OVS_ACTION_ATTR_POP_ETH:
535e3acf 5660 case OVS_ACTION_ATTR_CLONE:
1fc11c59
JS
5661 case OVS_ACTION_ATTR_ENCAP_NSH:
5662 case OVS_ACTION_ATTR_DECAP_NSH:
09f9da0b
JR
5663 case __OVS_ACTION_ATTR_MAX:
5664 OVS_NOT_REACHED();
da546e07 5665 }
ac8c2081 5666
1895cc8d 5667 dp_packet_delete_batch(packets_, may_steal);
98403001
BP
5668}
5669
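/* Worked example for the NAT parsing above (hypothetical action): a
 * ct(commit,nat(src=10.1.1.1)) action carries only OVS_NAT_ATTR_SRC and
 * OVS_NAT_ATTR_IP_MIN nested, so after the loop:
 *
 *     nat_action_info.min_addr   == 10.1.1.1
 *     nat_action_info.max_addr   == nat_action_info.min_addr
 *     nat_action_info.nat_action == NAT_ACTION_SRC
 *
 * i.e. a single-address range.  Had a port range been given,
 * NAT_ACTION_SRC_PORT would be OR'd in the same way. */
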
4edb9ae9 5670static void
65f13b50 5671dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
1895cc8d 5672 struct dp_packet_batch *packets,
66e4ad8a 5673 bool may_steal, const struct flow *flow,
324c8374
IM
5674 const struct nlattr *actions, size_t actions_len,
5675 long long now)
72865317 5676{
66e4ad8a 5677 struct dp_netdev_execute_aux aux = { pmd, now, flow };
9080a111 5678
1895cc8d 5679 odp_execute_actions(&aux, packets, may_steal, actions,
8cbf4f47 5680 actions_len, dp_execute_cb);
72865317
BP
5681}
5682
struct dp_netdev_ct_dump {
    struct ct_dpif_dump_state up;
    struct conntrack_dump dump;
    struct conntrack *ct;
    struct dp_netdev *dp;
};

static int
dpif_netdev_ct_dump_start(struct dpif *dpif, struct ct_dpif_dump_state **dump_,
                          const uint16_t *pzone, int *ptot_bkts)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_ct_dump *dump;

    dump = xzalloc(sizeof *dump);
    dump->dp = dp;
    dump->ct = &dp->conntrack;

    conntrack_dump_start(&dp->conntrack, &dump->dump, pzone, ptot_bkts);

    *dump_ = &dump->up;

    return 0;
}

static int
dpif_netdev_ct_dump_next(struct dpif *dpif OVS_UNUSED,
                         struct ct_dpif_dump_state *dump_,
                         struct ct_dpif_entry *entry)
{
    struct dp_netdev_ct_dump *dump;

    INIT_CONTAINER(dump, dump_, up);

    return conntrack_dump_next(&dump->dump, entry);
}

static int
dpif_netdev_ct_dump_done(struct dpif *dpif OVS_UNUSED,
                         struct ct_dpif_dump_state *dump_)
{
    struct dp_netdev_ct_dump *dump;
    int err;

    INIT_CONTAINER(dump, dump_, up);

    err = conntrack_dump_done(&dump->dump);

    free(dump);

    return err;
}

static int
dpif_netdev_ct_flush(struct dpif *dpif, const uint16_t *zone)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    return conntrack_flush(&dp->conntrack, zone);
}

const struct dpif_class dpif_netdev_class = {
    "netdev",
    dpif_netdev_init,
    dpif_netdev_enumerate,
    dpif_netdev_port_open_type,
    dpif_netdev_open,
    dpif_netdev_close,
    dpif_netdev_destroy,
    dpif_netdev_run,
    dpif_netdev_wait,
    dpif_netdev_get_stats,
    dpif_netdev_port_add,
    dpif_netdev_port_del,
    dpif_netdev_port_set_config,
    dpif_netdev_port_query_by_number,
    dpif_netdev_port_query_by_name,
    NULL,                       /* port_get_pid */
    dpif_netdev_port_dump_start,
    dpif_netdev_port_dump_next,
    dpif_netdev_port_dump_done,
    dpif_netdev_port_poll,
    dpif_netdev_port_poll_wait,
    dpif_netdev_flow_flush,
    dpif_netdev_flow_dump_create,
    dpif_netdev_flow_dump_destroy,
    dpif_netdev_flow_dump_thread_create,
    dpif_netdev_flow_dump_thread_destroy,
    dpif_netdev_flow_dump_next,
    dpif_netdev_operate,
    NULL,                       /* recv_set */
    NULL,                       /* handlers_set */
    dpif_netdev_set_config,
    dpif_netdev_queue_to_priority,
    NULL,                       /* recv */
    NULL,                       /* recv_wait */
    NULL,                       /* recv_purge */
    dpif_netdev_register_dp_purge_cb,
    dpif_netdev_register_upcall_cb,
    dpif_netdev_enable_upcall,
    dpif_netdev_disable_upcall,
    dpif_netdev_get_datapath_version,
    dpif_netdev_ct_dump_start,
    dpif_netdev_ct_dump_next,
    dpif_netdev_ct_dump_done,
    dpif_netdev_ct_flush,
    dpif_netdev_meter_get_features,
    dpif_netdev_meter_set,
    dpif_netdev_meter_get,
    dpif_netdev_meter_del,
};

static void
dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED,
                              const char *argv[], void *aux OVS_UNUSED)
{
    struct dp_netdev_port *port;
    struct dp_netdev *dp;
    odp_port_t port_no;

    ovs_mutex_lock(&dp_netdev_mutex);
    dp = shash_find_data(&dp_netdevs, argv[1]);
    if (!dp || !dpif_netdev_class_is_dummy(dp->class)) {
        ovs_mutex_unlock(&dp_netdev_mutex);
        unixctl_command_reply_error(conn, "unknown datapath or not a dummy");
        return;
    }
    ovs_refcount_ref(&dp->ref_cnt);
    ovs_mutex_unlock(&dp_netdev_mutex);

    ovs_mutex_lock(&dp->port_mutex);
    if (get_port_by_name(dp, argv[2], &port)) {
        unixctl_command_reply_error(conn, "unknown port");
        goto exit;
    }

    port_no = u32_to_odp(atoi(argv[3]));
    if (!port_no || port_no == ODPP_NONE) {
        unixctl_command_reply_error(conn, "bad port number");
        goto exit;
    }
    if (dp_netdev_lookup_port(dp, port_no)) {
        unixctl_command_reply_error(conn, "port number already in use");
        goto exit;
    }

    /* Remove port. */
    hmap_remove(&dp->ports, &port->node);
    reconfigure_datapath(dp);

    /* Reinsert with new port number. */
    port->port_no = port_no;
    hmap_insert(&dp->ports, &port->node, hash_port_no(port_no));
    reconfigure_datapath(dp);

    seq_change(dp->port_seq);
    unixctl_command_reply(conn, NULL);

exit:
    ovs_mutex_unlock(&dp->port_mutex);
    dp_netdev_unref(dp);
}

static void
dpif_dummy_register__(const char *type)
{
    struct dpif_class *class;

    class = xmalloc(sizeof *class);
    *class = dpif_netdev_class;
    class->type = xstrdup(type);
    dp_register_provider(class);
}

static void
dpif_dummy_override(const char *type)
{
    int error;

    /*
     * Ignore EAFNOSUPPORT to allow --enable-dummy=system with
     * a userland-only build. It's useful for the testsuite.
     */
    error = dp_unregister_provider(type);
    if (error == 0 || error == EAFNOSUPPORT) {
        dpif_dummy_register__(type);
    }
}

void
dpif_dummy_register(enum dummy_level level)
{
    if (level == DUMMY_OVERRIDE_ALL) {
        struct sset types;
        const char *type;

        sset_init(&types);
        dp_enumerate_types(&types);
        SSET_FOR_EACH (type, &types) {
            dpif_dummy_override(type);
        }
        sset_destroy(&types);
    } else if (level == DUMMY_OVERRIDE_SYSTEM) {
        dpif_dummy_override("system");
    }

    dpif_dummy_register__("dummy");

    unixctl_command_register("dpif-dummy/change-port-number",
                             "dp port new-number",
                             3, 3, dpif_dummy_change_port_number, NULL);
}
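
/* Example invocation of the command registered above (dummy datapaths only;
 * the datapath and port names are hypothetical):
 *
 *     ovs-appctl dpif-dummy/change-port-number dp0 p1 5
 *
 * which re-inserts port 'p1' of datapath 'dp0' under port number 5, as
 * implemented by dpif_dummy_change_port_number() above. */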
\f
/* Datapath Classifier. */

/* A set of rules that all have the same fields wildcarded. */
struct dpcls_subtable {
    /* The fields are only used by writers. */
    struct cmap_node cmap_node OVS_GUARDED; /* Within dpcls 'subtables_map'. */

    /* These fields are accessed by readers. */
    struct cmap rules;           /* Contains "struct dpcls_rule"s. */
    uint32_t hit_cnt;            /* Number of match hits in subtable in current
                                    optimization interval. */
    struct netdev_flow_key mask; /* Wildcards for fields (const). */
    /* 'mask' must be the last field, additional space is allocated here. */
};

/* Initializes 'cls' as a classifier that initially contains no classification
 * rules. */
static void
dpcls_init(struct dpcls *cls)
{
    cmap_init(&cls->subtables_map);
    pvector_init(&cls->subtables);
}

static void
dpcls_destroy_subtable(struct dpcls *cls, struct dpcls_subtable *subtable)
{
    VLOG_DBG("Destroying subtable %p for in_port %d", subtable, cls->in_port);
    pvector_remove(&cls->subtables, subtable);
    cmap_remove(&cls->subtables_map, &subtable->cmap_node,
                subtable->mask.hash);
    cmap_destroy(&subtable->rules);
    ovsrcu_postpone(free, subtable);
}

/* Destroys 'cls'.  Rules within 'cls', if any, are not freed; this is the
 * caller's responsibility.
 * May only be called after all the readers have been terminated. */
static void
dpcls_destroy(struct dpcls *cls)
{
    if (cls) {
        struct dpcls_subtable *subtable;

        CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
            ovs_assert(cmap_count(&subtable->rules) == 0);
            dpcls_destroy_subtable(cls, subtable);
        }
        cmap_destroy(&cls->subtables_map);
        pvector_destroy(&cls->subtables);
    }
}

static struct dpcls_subtable *
dpcls_create_subtable(struct dpcls *cls, const struct netdev_flow_key *mask)
{
    struct dpcls_subtable *subtable;

    /* Need to add one. */
    subtable = xmalloc(sizeof *subtable
                       - sizeof subtable->mask.mf + mask->len);
    cmap_init(&subtable->rules);
    subtable->hit_cnt = 0;
    netdev_flow_key_clone(&subtable->mask, mask);
    cmap_insert(&cls->subtables_map, &subtable->cmap_node, mask->hash);
    /* Add the new subtable at the end of the pvector (with no hits yet). */
    pvector_insert(&cls->subtables, subtable, 0);
    VLOG_DBG("Creating %"PRIuSIZE". subtable %p for in_port %d",
             cmap_count(&cls->subtables_map), subtable, cls->in_port);
    pvector_publish(&cls->subtables);

    return subtable;
}

static inline struct dpcls_subtable *
dpcls_find_subtable(struct dpcls *cls, const struct netdev_flow_key *mask)
{
    struct dpcls_subtable *subtable;

    CMAP_FOR_EACH_WITH_HASH (subtable, cmap_node, mask->hash,
                             &cls->subtables_map) {
        if (netdev_flow_key_equal(&subtable->mask, mask)) {
            return subtable;
        }
    }
    return dpcls_create_subtable(cls, mask);
}

5983
3453b4d6
JS
5984
5985/* Periodically sort the dpcls subtable vectors according to hit counts */
5986static void
5987dpcls_sort_subtable_vector(struct dpcls *cls)
5988{
5989 struct pvector *pvec = &cls->subtables;
5990 struct dpcls_subtable *subtable;
5991
5992 PVECTOR_FOR_EACH (subtable, pvec) {
5993 pvector_change_priority(pvec, subtable, subtable->hit_cnt);
5994 subtable->hit_cnt = 0;
5995 }
5996 pvector_publish(pvec);
5997}
5998
5999static inline void
4809891b
KT
6000dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
6001 struct polled_queue *poll_list, int poll_cnt)
3453b4d6
JS
6002{
6003 struct dpcls *cls;
6004 long long int now = time_msec();
6005
64bf452e 6006 if (now > pmd->rxq_next_cycle_store) {
4809891b
KT
6007 /* Get the cycles that were used to process each queue and store. */
6008 for (unsigned i = 0; i < poll_cnt; i++) {
6009 uint64_t rxq_cyc_curr = dp_netdev_rxq_get_cycles(poll_list[i].rxq,
6010 RXQ_CYCLES_PROC_CURR);
6011 dp_netdev_rxq_set_intrvl_cycles(poll_list[i].rxq, rxq_cyc_curr);
6012 dp_netdev_rxq_set_cycles(poll_list[i].rxq, RXQ_CYCLES_PROC_CURR,
6013 0);
6014 }
6015 /* Start new measuring interval */
64bf452e 6016 pmd->rxq_next_cycle_store = now + PMD_RXQ_INTERVAL_LEN;
4809891b
KT
6017 }
6018
3453b4d6
JS
6019 if (now > pmd->next_optimization) {
6020 /* Try to obtain the flow lock to block out revalidator threads.
6021 * If not possible, just try next time. */
6022 if (!ovs_mutex_trylock(&pmd->flow_mutex)) {
6023 /* Optimize each classifier */
6024 CMAP_FOR_EACH (cls, node, &pmd->classifiers) {
6025 dpcls_sort_subtable_vector(cls);
6026 }
6027 ovs_mutex_unlock(&pmd->flow_mutex);
6028 /* Start new measuring interval */
6029 pmd->next_optimization = now + DPCLS_OPTIMIZATION_INTERVAL;
6030 }
6031 }
6032}
6033
/* Insert 'rule' into 'cls'. */
static void
dpcls_insert(struct dpcls *cls, struct dpcls_rule *rule,
             const struct netdev_flow_key *mask)
{
    struct dpcls_subtable *subtable = dpcls_find_subtable(cls, mask);

    /* Refer to subtable's mask, also for later removal. */
    rule->mask = &subtable->mask;
    cmap_insert(&subtable->rules, &rule->cmap_node, rule->flow.hash);
}

/* Removes 'rule' from 'cls', also destructing the 'rule'. */
static void
dpcls_remove(struct dpcls *cls, struct dpcls_rule *rule)
{
    struct dpcls_subtable *subtable;

    ovs_assert(rule->mask);

    /* Get subtable from reference in rule->mask. */
    INIT_CONTAINER(subtable, rule->mask, mask);
    if (cmap_remove(&subtable->rules, &rule->cmap_node, rule->flow.hash)
        == 0) {
        /* Delete empty subtable. */
        dpcls_destroy_subtable(cls, subtable);
        pvector_publish(&cls->subtables);
    }
}

/* Returns true if 'target' satisfies 'rule', that is, if for each 1-bit in
 * the rule's mask the corresponding bits in the rule's flow and in 'target'
 * are the same. */
static inline bool
dpcls_rule_matches_key(const struct dpcls_rule *rule,
                       const struct netdev_flow_key *target)
{
    const uint64_t *keyp = miniflow_get_values(&rule->flow.mf);
    const uint64_t *maskp = miniflow_get_values(&rule->mask->mf);
    uint64_t value;

    NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, target, rule->flow.mf.map) {
        if (OVS_UNLIKELY((value & *maskp++) != *keyp++)) {
            return false;
        }
    }
    return true;
}

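/* Worked example (illustrative values): for a subtable masking an IPv4
 * destination with /24, one 64-bit unit might compare as
 *
 *     mask  = 0xffffff00
 *     rule  = 0x0a010100      (stored pre-masked: 10.1.1.0/24)
 *     value = 0x0a010105      (target: 10.1.1.5)
 *
 *     (value & mask) == rule  -> match
 *
 * Rule values are stored already masked, which is why a single AND and
 * compare per 64-bit unit suffices. */
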
/* For each miniflow in 'keys', performs a classifier lookup writing the
 * result into the corresponding slot in 'rules'. If a particular entry in
 * 'keys' is NULL it is skipped.
 *
 * This function is optimized for use in the userspace datapath and therefore
 * does not implement a lot of features available in the standard
 * classifier_lookup() function. Specifically, it does not implement
 * priorities, instead returning any rule which matches the flow.
 *
 * Returns true if all miniflows found a corresponding rule. */
static bool
dpcls_lookup(struct dpcls *cls, const struct netdev_flow_key keys[],
             struct dpcls_rule **rules, const size_t cnt,
             int *num_lookups_p)
{
    /* The received 'cnt' miniflows are the search-keys that will be processed
     * to find a matching entry into the available subtables.
     * The number of bits in map_type is equal to NETDEV_MAX_BURST. */
    typedef uint32_t map_type;
#define MAP_BITS (sizeof(map_type) * CHAR_BIT)
    BUILD_ASSERT_DECL(MAP_BITS >= NETDEV_MAX_BURST);

    struct dpcls_subtable *subtable;

    map_type keys_map = TYPE_MAXIMUM(map_type); /* Set all bits. */
    map_type found_map;
    uint32_t hashes[MAP_BITS];
    const struct cmap_node *nodes[MAP_BITS];

    if (cnt != MAP_BITS) {
        keys_map >>= MAP_BITS - cnt; /* Clear extra bits. */
    }
    memset(rules, 0, cnt * sizeof *rules);

    int lookups_match = 0, subtable_pos = 1;

    /* The Datapath classifier - aka dpcls - is composed of subtables.
     * Subtables are dynamically created as needed when new rules are inserted.
     * Each subtable collects rules with matches on a specific subset of packet
     * fields as defined by the subtable's mask. We proceed to process every
     * search-key against each subtable, but when a match is found for a
     * search-key, the search for that key can stop because the rules are
     * non-overlapping. */
    PVECTOR_FOR_EACH (subtable, &cls->subtables) {
        int i;

        /* Compute hashes for the remaining keys. Each search-key is
         * masked with the subtable's mask to avoid hashing the wildcarded
         * bits. */
        ULLONG_FOR_EACH_1(i, keys_map) {
            hashes[i] = netdev_flow_key_hash_in_mask(&keys[i],
                                                     &subtable->mask);
        }
        /* Lookup. */
        found_map = cmap_find_batch(&subtable->rules, keys_map, hashes, nodes);
        /* Check results. When the i-th bit of found_map is set, it means
         * that a set of nodes with a matching hash value was found for the
         * i-th search-key. Due to possible hash collisions we need to check
         * which of the found rules, if any, really matches our masked
         * search-key. */
        ULLONG_FOR_EACH_1(i, found_map) {
            struct dpcls_rule *rule;

            CMAP_NODE_FOR_EACH (rule, cmap_node, nodes[i]) {
                if (OVS_LIKELY(dpcls_rule_matches_key(rule, &keys[i]))) {
                    rules[i] = rule;
                    /* Even at 20 Mpps the 32-bit hit_cnt cannot wrap
                     * within one second optimization interval. */
                    subtable->hit_cnt++;
                    lookups_match += subtable_pos;
                    goto next;
                }
            }
            /* None of the found rules was a match. Reset the i-th bit to
             * keep searching this key in the next subtable. */
            ULLONG_SET0(found_map, i); /* Did not match. */
        next:
            ; /* Keep Sparse happy. */
        }
        keys_map &= ~found_map; /* Clear the found rules. */
        if (!keys_map) {
            if (num_lookups_p) {
                *num_lookups_p = lookups_match;
            }
            return true; /* All found. */
        }
        subtable_pos++;
    }
    if (num_lookups_p) {
        *num_lookups_p = lookups_match;
    }
    return false; /* Some misses. */
}
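
/* Worked example of the bitmap bookkeeping above (illustrative): with
 * cnt == 3, 'keys_map' starts as 0b111.  If the first subtable matches only
 * key 1, 'found_map' ends as 0b010 and
 *
 *     keys_map &= ~found_map;    -> keys_map == 0b101
 *
 * so the second subtable hashes and probes only keys 0 and 2.  The search
 * ends early once 'keys_map' is empty, i.e. every key has found a rule. */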