/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dpif-netdev.h"

#include <netinet/in.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

#include <rte_cycles.h>

#include "conntrack.h"
#include "dp-packet.h"
#include "dpif-provider.h"
#include "fat-rwlock.h"
#include "netdev-vport.h"
#include "odp-execute.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/match.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/ofp-util.h"
#include "openvswitch/ofpbuf.h"
#include "openvswitch/shash.h"
#include "openvswitch/vlog.h"
#include "poll-loop.h"
#include "tnl-neigh-cache.h"
#include "tnl-ports.h"

VLOG_DEFINE_THIS_MODULE(dpif_netdev);

#define FLOW_DUMP_MAX_BATCH 50
/* Use per thread recirc_depth to prevent recirculation loop. */
#define MAX_RECIRC_DEPTH 5
DEFINE_STATIC_PER_THREAD_DATA(uint32_t, recirc_depth, 0)

/* Configuration parameters. */
enum { MAX_FLOWS = 65536 };     /* Maximum number of flows in flow table. */

/* Protects against changes to 'dp_netdevs'. */
static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dp_netdev's. */
static struct shash dp_netdevs OVS_GUARDED_BY(dp_netdev_mutex)
    = SHASH_INITIALIZER(&dp_netdevs);

static struct vlog_rate_limit upcall_rl = VLOG_RATE_LIMIT_INIT(600, 600);

#define DP_NETDEV_CS_SUPPORTED_MASK (CS_NEW | CS_ESTABLISHED | CS_RELATED \
                                     | CS_INVALID | CS_REPLY_DIR | CS_TRACKED)
#define DP_NETDEV_CS_UNSUPPORTED_MASK (~(uint32_t)DP_NETDEV_CS_SUPPORTED_MASK)

static struct odp_support dp_netdev_support = {
    .max_mpls_depth = SIZE_MAX,
};

/* Stores a miniflow with inline values */
struct netdev_flow_key {
    uint32_t hash;       /* Hash function differs for different users. */
    uint32_t len;        /* Length of the following miniflow (incl. map). */
    struct miniflow mf;
    uint64_t buf[FLOW_MAX_PACKET_U64S];
};

/* Exact match cache for frequently used flows
 *
 * The cache uses a 32-bit hash of the packet (which can be the RSS hash) to
 * search its entries for a miniflow that matches exactly the miniflow of the
 * packet. It stores the 'dpcls_rule' (rule) that matches the miniflow.
 *
 * A cache entry holds a reference to its 'dp_netdev_flow'.
 *
 * A miniflow with a given hash can be in one of EM_FLOW_HASH_SEGS different
 * entries. The 32-bit hash is split into EM_FLOW_HASH_SEGS values (each of
 * them is EM_FLOW_HASH_SHIFT bits wide and the remainder is thrown away). Each
 * value is the index of a cache entry where the miniflow could be.
 *
 * Each pmd_thread has its own private exact match cache.
 * If dp_netdev_input is not called from a pmd thread, a mutex is used.
 */

#define EM_FLOW_HASH_SHIFT 13
#define EM_FLOW_HASH_ENTRIES (1u << EM_FLOW_HASH_SHIFT)
#define EM_FLOW_HASH_MASK (EM_FLOW_HASH_ENTRIES - 1)
#define EM_FLOW_HASH_SEGS 2

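/* Illustrative example (not from the original code): with the values above,
 * a packet hash selects EM_FLOW_HASH_SEGS == 2 candidate slots out of the
 * 8192-entry cache.  For hash 0x89ABCDEF:
 *
 *     first candidate:   0x89ABCDEF & EM_FLOW_HASH_MASK          == 0x0DEF
 *     second candidate: (0x89ABCDEF >> 13) & EM_FLOW_HASH_MASK   == 0x0D5E
 *
 * A lookup therefore probes at most those two entries; the remaining
 * 32 - 2 * 13 = 6 hash bits are simply discarded. */
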
/* Default EMC insert probability is 1 / DEFAULT_EM_FLOW_INSERT_INV_PROB */
#define DEFAULT_EM_FLOW_INSERT_INV_PROB 100
#define DEFAULT_EM_FLOW_INSERT_MIN (UINT32_MAX /                     \
                                    DEFAULT_EM_FLOW_INSERT_INV_PROB)

struct emc_entry {
    struct dp_netdev_flow *flow;
    struct netdev_flow_key key;   /* key.hash used for emc hash value. */
};

struct emc_cache {
    struct emc_entry entries[EM_FLOW_HASH_ENTRIES];
    int sweep_idx;                /* For emc_cache_slow_sweep(). */
};

/* Iterate in the exact match cache through every entry that might contain a
 * miniflow with hash 'HASH'. */
#define EMC_FOR_EACH_POS_WITH_HASH(EMC, CURRENT_ENTRY, HASH)                 \
    for (uint32_t i__ = 0, srch_hash__ = (HASH);                             \
         (CURRENT_ENTRY) = &(EMC)->entries[srch_hash__ & EM_FLOW_HASH_MASK], \
         i__ < EM_FLOW_HASH_SEGS;                                            \
         i__++, srch_hash__ >>= EM_FLOW_HASH_SHIFT)

/* Simple non-wildcarding single-priority classifier. */

/* Time in ms between successive optimizations of the dpcls subtable vector */
#define DPCLS_OPTIMIZATION_INTERVAL 1000

struct dpcls {
    struct cmap_node node;      /* Within dp_netdev_pmd_thread.classifiers */
    odp_port_t in_port;
    struct cmap subtables_map;
    struct pvector subtables;
};

/* A rule to be inserted to the classifier. */
struct dpcls_rule {
    struct cmap_node cmap_node;   /* Within struct dpcls_subtable 'rules'. */
    struct netdev_flow_key *mask; /* Subtable's mask. */
    struct netdev_flow_key flow;  /* Matching key. */
    /* 'flow' must be the last field, additional space is allocated here. */
};

static void dpcls_init(struct dpcls *);
static void dpcls_destroy(struct dpcls *);
static void dpcls_sort_subtable_vector(struct dpcls *);
static void dpcls_insert(struct dpcls *, struct dpcls_rule *,
                         const struct netdev_flow_key *mask);
static void dpcls_remove(struct dpcls *, struct dpcls_rule *);
static bool dpcls_lookup(struct dpcls *cls,
                         const struct netdev_flow_key keys[],
                         struct dpcls_rule **rules, size_t cnt,
                         int *num_lookups_p);

/* Datapath based on the network device interface from netdev.h.
 *
 * Some members, marked 'const', are immutable.  Accessing other members
 * requires synchronization, as noted in more detail below.
 *
 * Acquisition order is, from outermost to innermost:
 *
 *    dp_netdev_mutex (global)
 */
struct dp_netdev {
    const struct dpif_class *const class;
    const char *const name;

    struct ovs_refcount ref_cnt;
    atomic_flag destroyed;

    /* Any lookup into 'ports' or any access to the dp_netdev_ports found
     * through 'ports' requires taking 'port_mutex'. */
    struct ovs_mutex port_mutex;
    struct hmap ports;
    struct seq *port_seq;       /* Incremented whenever a port changes. */

    /* Protects access to ofproto-dpif-upcall interface during revalidator
     * thread synchronization. */
    struct fat_rwlock upcall_rwlock;
    upcall_callback *upcall_cb; /* Callback function for executing upcalls. */
    void *upcall_aux;

    /* Callback function for notifying the purging of dp flows (during
     * resetting pmd deletion). */
    dp_purge_callback *dp_purge_cb;

    /* Stores all 'struct dp_netdev_pmd_thread's. */
    struct cmap poll_threads;

    /* Protects the access of the 'struct dp_netdev_pmd_thread'
     * instance for non-pmd thread. */
    struct ovs_mutex non_pmd_mutex;

    /* Each pmd thread will store its pointer to
     * 'struct dp_netdev_pmd_thread' in 'per_pmd_key'. */
    ovsthread_key_t per_pmd_key;

    struct seq *reconfigure_seq;
    uint64_t last_reconfigure_seq;

    /* CPU mask for pinning pmd threads. */
    char *pmd_cmask;

    uint64_t last_tnl_conf_seq;

    struct conntrack conntrack;

    /* Probability of EMC insertions is a factor of 'emc_insert_min'. */
    OVS_ALIGNED_VAR(CACHE_LINE_SIZE) atomic_uint32_t emc_insert_min;
};

static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp,
                                                    odp_port_t port_no)
    OVS_REQUIRES(dp->port_mutex);

enum dp_stat_type {
    DP_STAT_EXACT_HIT,          /* Packets that had an exact match (emc). */
    DP_STAT_MASKED_HIT,         /* Packets that matched in the flow table. */
    DP_STAT_MISS,               /* Packets that did not match. */
    DP_STAT_LOST,               /* Packets not passed up to the client. */
    DP_STAT_LOOKUP_HIT,         /* Number of subtable lookups for flow table
                                   hits. */
    DP_N_STATS
};

enum pmd_cycles_counter_type {
    PMD_CYCLES_POLLING,         /* Cycles spent polling NICs. */
    PMD_CYCLES_PROCESSING,      /* Cycles spent processing packets */
    PMD_N_CYCLES
};

#define XPS_TIMEOUT_MS 500LL

/* Contained by struct dp_netdev_port's 'rxqs' member. */
struct dp_netdev_rxq {
    struct dp_netdev_port *port;
    struct netdev_rxq *rx;
    unsigned core_id;                 /* Core to which this queue should be
                                         pinned.  OVS_CORE_UNSPEC if the
                                         queue doesn't need to be pinned to a
                                         specific core. */
    struct dp_netdev_pmd_thread *pmd; /* pmd thread that will poll this queue. */
};

/* A port in a netdev-based datapath. */
struct dp_netdev_port {
    odp_port_t port_no;
    struct netdev *netdev;
    struct hmap_node node;      /* Node in dp_netdev's 'ports'. */
    struct netdev_saved_flags *sf;
    struct dp_netdev_rxq *rxqs;
    unsigned n_rxq;             /* Number of elements in 'rxqs'. */
    bool dynamic_txqs;          /* If true XPS will be used. */
    unsigned *txq_used;         /* Number of threads that use each tx queue. */
    struct ovs_mutex txq_used_mutex;
    char *type;                 /* Port type as requested by user. */
    char *rxq_affinity_list;    /* Requested affinity of rx queues. */
    bool need_reconfigure;      /* True if we should reconfigure netdev. */
};

/* Contained by struct dp_netdev_flow's 'stats' member. */
struct dp_netdev_flow_stats {
    atomic_llong used;             /* Last used time, in monotonic msecs. */
    atomic_ullong packet_count;    /* Number of packets matched. */
    atomic_ullong byte_count;      /* Number of bytes matched. */
    atomic_uint16_t tcp_flags;     /* Bitwise-OR of seen tcp_flags values. */
};

/* A flow in 'dp_netdev_pmd_thread's 'flow_table'.
 *
 * Except near the beginning or ending of its lifespan, rule 'rule' belongs to
 * its pmd thread's classifier.  The text below calls this classifier 'cls'.
 *
 * The thread safety rules described here for "struct dp_netdev_flow" are
 * motivated by two goals:
 *
 *    - Prevent threads that read members of "struct dp_netdev_flow" from
 *      reading bad data due to changes by some thread concurrently modifying
 *      those members.
 *
 *    - Prevent two threads making changes to members of a given "struct
 *      dp_netdev_flow" from interfering with each other.
 *
 * A flow 'flow' may be accessed without a risk of being freed during an RCU
 * grace period.  Code that needs to hold onto a flow for a while
 * should try incrementing 'flow->ref_cnt' with dp_netdev_flow_ref().
 *
 * 'flow->ref_cnt' protects 'flow' from being freed.  It doesn't protect the
 * flow from being deleted from 'cls' and it doesn't protect members of 'flow'
 * from modification.
 *
 * Some members, marked 'const', are immutable.  Accessing other members
 * requires synchronization, as noted in more detail below.
 */
struct dp_netdev_flow {
    const struct flow flow;      /* Unmasked flow that created this entry. */
    /* Hash table index by unmasked flow. */
    const struct cmap_node node; /* In owning dp_netdev_pmd_thread's
                                    'flow_table'. */
    const ovs_u128 ufid;         /* Unique flow identifier. */
    const unsigned pmd_id;       /* The 'core_id' of pmd thread owning this
                                    flow. */

    /* Number of references.
     * The classifier owns one reference.
     * Any thread trying to keep a rule from being freed should hold its own
     * reference. */
    struct ovs_refcount ref_cnt;

    bool dead;

    struct dp_netdev_flow_stats stats;

    OVSRCU_TYPE(struct dp_netdev_actions *) actions;

    /* While processing a group of input packets, the datapath uses the next
     * member to store a pointer to the output batch for the flow.  It is
     * reset after the batch has been sent out (See dp_netdev_queue_batches(),
     * packet_batch_per_flow_init() and packet_batch_per_flow_execute()). */
    struct packet_batch_per_flow *batch;

    /* Packet classification. */
    struct dpcls_rule cr;        /* In owning dp_netdev's 'cls'. */
    /* 'cr' must be the last member. */
};

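/* Example (illustrative sketch, not a function in this file): the usual
 * pattern for holding onto a flow beyond the current RCU grace period,
 * following the rules above.  'lookup' stands for any RCU-protected lookup,
 * e.g. emc_lookup() or dp_netdev_pmd_lookup_flow().
 *
 *     struct dp_netdev_flow *flow = lookup(...);
 *     if (flow && dp_netdev_flow_ref(flow)) {
 *         // 'flow' is now safe to use past the grace period, even if it
 *         // is concurrently removed from the classifier.
 *         ...
 *         dp_netdev_flow_unref(flow);  // May schedule an RCU-deferred free.
 *     }
 */
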
static void dp_netdev_flow_unref(struct dp_netdev_flow *);
static bool dp_netdev_flow_ref(struct dp_netdev_flow *);
static int dpif_netdev_flow_from_nlattrs(const struct nlattr *, uint32_t,
                                         struct flow *);

/* A set of datapath actions within a "struct dp_netdev_flow".
 *
 * A struct dp_netdev_actions 'actions' is protected with RCU. */
struct dp_netdev_actions {
    /* These members are immutable: they do not change during the struct's
     * lifetime. */
    unsigned int size;          /* Size of 'actions', in bytes. */
    struct nlattr actions[];    /* Sequence of OVS_ACTION_ATTR_* attributes. */
};

struct dp_netdev_actions *dp_netdev_actions_create(const struct nlattr *,
                                                   size_t);
struct dp_netdev_actions *dp_netdev_flow_get_actions(
    const struct dp_netdev_flow *);
static void dp_netdev_actions_free(struct dp_netdev_actions *);

/* Contained by struct dp_netdev_pmd_thread's 'stats' member. */
struct dp_netdev_pmd_stats {
    /* Indexed by DP_STAT_*. */
    atomic_ullong n[DP_N_STATS];
};

/* Contained by struct dp_netdev_pmd_thread's 'cycle' member. */
struct dp_netdev_pmd_cycles {
    /* Indexed by PMD_CYCLES_*. */
    atomic_ullong n[PMD_N_CYCLES];
};

struct polled_queue {
    struct netdev_rxq *rx;
};

/* Contained by struct dp_netdev_pmd_thread's 'poll_list' member. */
struct rxq_poll {
    struct dp_netdev_rxq *rxq;
    struct hmap_node node;
};

/* Contained by struct dp_netdev_pmd_thread's 'send_port_cache',
 * 'tnl_port_cache' or 'tx_ports'. */
struct tx_port {
    struct dp_netdev_port *port;
    struct hmap_node node;
};

/* PMD: Poll mode drivers.  A PMD accesses devices via polling to eliminate
 * the performance overhead of interrupt processing.  Therefore netdev can
 * not implement rx-wait for these devices.  dpif-netdev needs to poll
 * these devices to check for recv buffer.  pmd-thread does polling for
 * devices assigned to itself.
 *
 * DPDK uses a PMD for accessing the NIC.
 *
 * Note, instance with cpu core id NON_PMD_CORE_ID will be reserved for
 * I/O of all non-pmd threads.  There will be no actual thread created
 * for the instance.
 *
 * Each struct has its own flow table and classifier.  Packets received
 * from managed ports are looked up in the corresponding pmd thread's
 * flow table, and are executed with the found actions. */
struct dp_netdev_pmd_thread {
    struct dp_netdev *dp;
    struct ovs_refcount ref_cnt;    /* Every reference must be refcount'ed. */
    struct cmap_node node;          /* In 'dp->poll_threads'. */

    pthread_cond_t cond;            /* For synchronizing pmd thread reload. */
    struct ovs_mutex cond_mutex;    /* Mutex for condition variable. */

    /* Per thread exact-match cache.  Note, the instance for cpu core
     * NON_PMD_CORE_ID can be accessed by multiple threads, and thus
     * needs to be protected by 'non_pmd_mutex'.  Every other instance
     * will only be accessed by its own pmd thread. */
    struct emc_cache flow_cache;

    /* Flow-Table and classifiers
     *
     * Writers of 'flow_table' must take the 'flow_mutex'.  Corresponding
     * changes to 'classifiers' must be made while still holding the
     * 'flow_mutex'. */
    struct ovs_mutex flow_mutex;
    struct cmap flow_table OVS_GUARDED; /* Flow table. */

    /* One classifier per in_port polled by the pmd */
    struct cmap classifiers;
    /* Periodically sort subtable vectors according to hit frequencies */
    long long int next_optimization;

    struct dp_netdev_pmd_stats stats;

    /* Cycles counters */
    struct dp_netdev_pmd_cycles cycles;

    /* Used to count cycles.  See 'cycles_counter_end()' */
    unsigned long long last_cycles;

    struct latch exit_latch;        /* For terminating the pmd thread. */
    struct seq *reload_seq;
    uint64_t last_reload_seq;
    atomic_bool reload;             /* Do we need to reload ports? */

    unsigned core_id;               /* CPU core id of this pmd thread. */
    int numa_id;                    /* numa node id of this pmd thread. */
    bool isolated;

    /* Queue id used by this pmd thread to send packets on all netdevs if
     * XPS disabled for this netdev.  All static_tx_qid's are unique and less
     * than 'cmap_count(dp->poll_threads)'. */
    const int static_tx_qid;

    struct ovs_mutex port_mutex;    /* Mutex for 'poll_list' and 'tx_ports'. */
    /* List of rx queues to poll. */
    struct hmap poll_list OVS_GUARDED;
    /* Map of 'tx_port's used for transmission.  Written by the main thread,
     * read by the pmd thread. */
    struct hmap tx_ports OVS_GUARDED;

    /* These are thread-local copies of 'tx_ports'.  One contains only tunnel
     * ports (that support push_tunnel/pop_tunnel), the other contains ports
     * with at least one txq (that support send).  A port can be in both.
     *
     * There are two separate maps to make sure that we don't try to execute
     * OUTPUT on a device which has 0 txqs or PUSH/POP on a non-tunnel device.
     *
     * The instances for cpu core NON_PMD_CORE_ID can be accessed by multiple
     * threads, and thus need to be protected by 'non_pmd_mutex'.  Every
     * other instance will only be accessed by its own pmd thread. */
    struct hmap tnl_port_cache;
    struct hmap send_port_cache;

    /* Only a pmd thread can write on its own 'cycles' and 'stats'.
     * The main thread keeps 'stats_zero' and 'cycles_zero' as base
     * values and subtracts them from 'stats' and 'cycles' before
     * reporting to the user */
    unsigned long long stats_zero[DP_N_STATS];
    uint64_t cycles_zero[PMD_N_CYCLES];

    /* Set to true if the pmd thread needs to be reloaded. */
    bool need_reload;
};

/* Interface to netdev-based datapath. */
struct dpif_netdev {
    struct dpif dpif;
    struct dp_netdev *dp;
    uint64_t last_port_seq;
};

static int get_port_by_number(struct dp_netdev *dp, odp_port_t port_no,
                              struct dp_netdev_port **portp)
    OVS_REQUIRES(dp->port_mutex);
static int get_port_by_name(struct dp_netdev *dp, const char *devname,
                            struct dp_netdev_port **portp)
    OVS_REQUIRES(dp->port_mutex);
static void dp_netdev_free(struct dp_netdev *)
    OVS_REQUIRES(dp_netdev_mutex);
static int do_add_port(struct dp_netdev *dp, const char *devname,
                       const char *type, odp_port_t port_no)
    OVS_REQUIRES(dp->port_mutex);
static void do_del_port(struct dp_netdev *dp, struct dp_netdev_port *)
    OVS_REQUIRES(dp->port_mutex);
static int dpif_netdev_open(const struct dpif_class *, const char *name,
                            bool create, struct dpif **);
static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
                                      struct dp_packet_batch *,
                                      bool may_steal, const struct flow *flow,
                                      const struct nlattr *actions,
                                      size_t actions_len);
static void dp_netdev_input(struct dp_netdev_pmd_thread *,
                            struct dp_packet_batch *, odp_port_t port_no);
static void dp_netdev_recirculate(struct dp_netdev_pmd_thread *,
                                  struct dp_packet_batch *);

static void dp_netdev_disable_upcall(struct dp_netdev *);
static void dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd,
                                    struct dp_netdev *dp, unsigned core_id,
                                    int numa_id);
static void dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_set_nonpmd(struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex);

static void *pmd_thread_main(void *);
static struct dp_netdev_pmd_thread *dp_netdev_get_pmd(struct dp_netdev *dp,
                                                      unsigned core_id);
static struct dp_netdev_pmd_thread *
dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos);
static void dp_netdev_destroy_all_pmds(struct dp_netdev *dp, bool non_pmd);
static void dp_netdev_pmd_clear_ports(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread *pmd,
                                         struct dp_netdev_port *port)
    OVS_REQUIRES(pmd->port_mutex);
static void dp_netdev_del_port_tx_from_pmd(struct dp_netdev_pmd_thread *pmd,
                                           struct tx_port *tx)
    OVS_REQUIRES(pmd->port_mutex);
static void dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd,
                                     struct dp_netdev_rxq *rxq)
    OVS_REQUIRES(pmd->port_mutex);
static void dp_netdev_del_rxq_from_pmd(struct dp_netdev_pmd_thread *pmd,
                                       struct rxq_poll *poll)
    OVS_REQUIRES(pmd->port_mutex);
static void reconfigure_datapath(struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex);
static bool dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd);
static void pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd)
    OVS_REQUIRES(pmd->port_mutex);
static inline void
dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd);

static void
dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
                               long long now, bool purge);
static int dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread *pmd,
                                      struct tx_port *tx, long long now);

static inline bool emc_entry_alive(struct emc_entry *ce);
static void emc_clear_entry(struct emc_entry *ce);

static void
emc_cache_init(struct emc_cache *flow_cache)
{
    int i;

    flow_cache->sweep_idx = 0;
    for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
        flow_cache->entries[i].flow = NULL;
        flow_cache->entries[i].key.hash = 0;
        flow_cache->entries[i].key.len = sizeof(struct miniflow);
        flowmap_init(&flow_cache->entries[i].key.mf.map);
    }
}

static void
emc_cache_uninit(struct emc_cache *flow_cache)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
        emc_clear_entry(&flow_cache->entries[i]);
    }
}

/* Check and clear dead flow references slowly (one entry at each
 * invocation). */
static void
emc_cache_slow_sweep(struct emc_cache *flow_cache)
{
    struct emc_entry *entry = &flow_cache->entries[flow_cache->sweep_idx];

    if (!emc_entry_alive(entry)) {
        emc_clear_entry(entry);
    }
    flow_cache->sweep_idx = (flow_cache->sweep_idx + 1) & EM_FLOW_HASH_MASK;
}

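/* Illustrative sketch (assumed caller, not shown in this excerpt): the pmd
 * main loop typically amortizes the sweep by invoking it once per batch of
 * iterations, e.g.:
 *
 *     if (lc++ > 1024) {                  // hypothetical iteration counter
 *         lc = 0;
 *         emc_cache_slow_sweep(&pmd->flow_cache);
 *     }
 *
 * At one entry per call, a full pass over all EM_FLOW_HASH_ENTRIES (8192)
 * slots takes 8192 invocations, keeping the per-iteration cost negligible. */
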
/* Returns true if 'dpif' is a netdev or dummy dpif, false otherwise. */
bool
dpif_is_netdev(const struct dpif *dpif)
{
    return dpif->dpif_class->open == dpif_netdev_open;
}

static struct dpif_netdev *
dpif_netdev_cast(const struct dpif *dpif)
{
    ovs_assert(dpif_is_netdev(dpif));
    return CONTAINER_OF(dpif, struct dpif_netdev, dpif);
}

static struct dp_netdev *
get_dp_netdev(const struct dpif *dpif)
{
    return dpif_netdev_cast(dpif)->dp;
}

enum pmd_info_type {
    PMD_INFO_SHOW_STATS,  /* Show how cpu cycles are spent. */
    PMD_INFO_CLEAR_STATS, /* Set the cycles count to 0. */
    PMD_INFO_SHOW_RXQ     /* Show poll-lists of pmd threads. */
};

static void
pmd_info_show_stats(struct ds *reply,
                    struct dp_netdev_pmd_thread *pmd,
                    unsigned long long stats[DP_N_STATS],
                    uint64_t cycles[PMD_N_CYCLES])
{
    unsigned long long total_packets = 0;
    uint64_t total_cycles = 0;
    int i;

    /* These loops subtract reference values ('*_zero') from the counters.
     * Since loads and stores are relaxed, it might be possible for a '*_zero'
     * value to be more recent than the current value we're reading from the
     * counter.  This is not a big problem, since these numbers are not
     * supposed to be too accurate, but we should at least make sure that
     * the result is not negative. */
    for (i = 0; i < DP_N_STATS; i++) {
        if (stats[i] > pmd->stats_zero[i]) {
            stats[i] -= pmd->stats_zero[i];
        } else {
            stats[i] = 0;
        }

        if (i != DP_STAT_LOST) {
            /* Lost packets are already included in DP_STAT_MISS */
            total_packets += stats[i];
        }
    }

    for (i = 0; i < PMD_N_CYCLES; i++) {
        if (cycles[i] > pmd->cycles_zero[i]) {
            cycles[i] -= pmd->cycles_zero[i];
        } else {
            cycles[i] = 0;
        }

        total_cycles += cycles[i];
    }

    ds_put_cstr(reply, (pmd->core_id == NON_PMD_CORE_ID)
                       ? "main thread" : "pmd thread");

    if (pmd->numa_id != OVS_NUMA_UNSPEC) {
        ds_put_format(reply, " numa_id %d", pmd->numa_id);
    }
    if (pmd->core_id != OVS_CORE_UNSPEC && pmd->core_id != NON_PMD_CORE_ID) {
        ds_put_format(reply, " core_id %u", pmd->core_id);
    }
    ds_put_cstr(reply, ":\n");

    ds_put_format(reply,
                  "\temc hits:%llu\n\tmegaflow hits:%llu\n"
                  "\tavg. subtable lookups per hit:%.2f\n"
                  "\tmiss:%llu\n\tlost:%llu\n",
                  stats[DP_STAT_EXACT_HIT], stats[DP_STAT_MASKED_HIT],
                  stats[DP_STAT_MASKED_HIT] > 0
                  ? (1.0 * stats[DP_STAT_LOOKUP_HIT]) / stats[DP_STAT_MASKED_HIT]
                  : 0,
                  stats[DP_STAT_MISS], stats[DP_STAT_LOST]);

    if (total_cycles == 0) {
        return;
    }

    ds_put_format(reply,
                  "\tpolling cycles:%"PRIu64" (%.02f%%)\n"
                  "\tprocessing cycles:%"PRIu64" (%.02f%%)\n",
                  cycles[PMD_CYCLES_POLLING],
                  cycles[PMD_CYCLES_POLLING] / (double)total_cycles * 100,
                  cycles[PMD_CYCLES_PROCESSING],
                  cycles[PMD_CYCLES_PROCESSING] / (double)total_cycles * 100);

    if (total_packets == 0) {
        return;
    }

    ds_put_format(reply,
                  "\tavg cycles per packet: %.02f (%"PRIu64"/%llu)\n",
                  total_cycles / (double)total_packets,
                  total_cycles, total_packets);

    ds_put_format(reply,
                  "\tavg processing cycles per packet: "
                  "%.02f (%"PRIu64"/%llu)\n",
                  cycles[PMD_CYCLES_PROCESSING] / (double)total_packets,
                  cycles[PMD_CYCLES_PROCESSING], total_packets);
}

static void
pmd_info_clear_stats(struct ds *reply OVS_UNUSED,
                     struct dp_netdev_pmd_thread *pmd,
                     unsigned long long stats[DP_N_STATS],
                     uint64_t cycles[PMD_N_CYCLES])
{
    int i;

    /* We cannot write 'stats' and 'cycles' (because they're written by other
     * threads) and we shouldn't change 'stats' (because they're used to count
     * datapath stats, which must not be cleared here).  Instead, we save the
     * current values and subtract them from the values to be displayed in the
     * future. */
    for (i = 0; i < DP_N_STATS; i++) {
        pmd->stats_zero[i] = stats[i];
    }
    for (i = 0; i < PMD_N_CYCLES; i++) {
        pmd->cycles_zero[i] = cycles[i];
    }
}

static int
compare_poll_list(const void *a_, const void *b_)
{
    const struct rxq_poll *a = a_;
    const struct rxq_poll *b = b_;

    const char *namea = netdev_rxq_get_name(a->rxq->rx);
    const char *nameb = netdev_rxq_get_name(b->rxq->rx);

    int cmp = strcmp(namea, nameb);
    if (!cmp) {
        return netdev_rxq_get_queue_id(a->rxq->rx)
               - netdev_rxq_get_queue_id(b->rxq->rx);
    } else {
        return cmp;
    }
}

static void
sorted_poll_list(struct dp_netdev_pmd_thread *pmd, struct rxq_poll **list,
                 size_t *n)
{
    struct rxq_poll *ret, *poll;
    size_t i;

    *n = hmap_count(&pmd->poll_list);
    if (!*n) {
        ret = NULL;
    } else {
        ret = xcalloc(*n, sizeof *ret);
        i = 0;
        HMAP_FOR_EACH (poll, node, &pmd->poll_list) {
            ret[i] = *poll;
            i++;
        }
        ovs_assert(i == *n);
        qsort(ret, *n, sizeof *ret, compare_poll_list);
    }

    *list = ret;
}

static void
pmd_info_show_rxq(struct ds *reply, struct dp_netdev_pmd_thread *pmd)
{
    if (pmd->core_id != NON_PMD_CORE_ID) {
        const char *prev_name = NULL;
        struct rxq_poll *list;
        size_t i, n;

        ds_put_format(reply,
                      "pmd thread numa_id %d core_id %u:\n\tisolated : %s\n",
                      pmd->numa_id, pmd->core_id, (pmd->isolated)
                                                  ? "true" : "false");

        ovs_mutex_lock(&pmd->port_mutex);
        sorted_poll_list(pmd, &list, &n);
        for (i = 0; i < n; i++) {
            const char *name = netdev_rxq_get_name(list[i].rxq->rx);

            if (!prev_name || strcmp(name, prev_name)) {
                if (prev_name) {
                    ds_put_cstr(reply, "\n");
                }
                ds_put_format(reply, "\tport: %s\tqueue-id:", name);
            }
            ds_put_format(reply, " %d",
                          netdev_rxq_get_queue_id(list[i].rxq->rx));
            prev_name = name;
        }
        ovs_mutex_unlock(&pmd->port_mutex);
        ds_put_cstr(reply, "\n");
        free(list);
    }
}

static void
dpif_netdev_pmd_info(struct unixctl_conn *conn, int argc, const char *argv[],
                     void *aux)
{
    struct ds reply = DS_EMPTY_INITIALIZER;
    struct dp_netdev_pmd_thread *pmd;
    struct dp_netdev *dp = NULL;
    enum pmd_info_type type = *(enum pmd_info_type *) aux;

    ovs_mutex_lock(&dp_netdev_mutex);

    if (argc == 2) {
        dp = shash_find_data(&dp_netdevs, argv[1]);
    } else if (shash_count(&dp_netdevs) == 1) {
        /* There's only one datapath */
        dp = shash_first(&dp_netdevs)->data;
    }

    if (!dp) {
        ovs_mutex_unlock(&dp_netdev_mutex);
        unixctl_command_reply_error(conn,
                                    "please specify an existing datapath");
        return;
    }

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        if (type == PMD_INFO_SHOW_RXQ) {
            pmd_info_show_rxq(&reply, pmd);
        } else {
            unsigned long long stats[DP_N_STATS];
            uint64_t cycles[PMD_N_CYCLES];
            int i;

            /* Read current stats and cycle counters */
            for (i = 0; i < ARRAY_SIZE(stats); i++) {
                atomic_read_relaxed(&pmd->stats.n[i], &stats[i]);
            }
            for (i = 0; i < ARRAY_SIZE(cycles); i++) {
                atomic_read_relaxed(&pmd->cycles.n[i], &cycles[i]);
            }

            if (type == PMD_INFO_CLEAR_STATS) {
                pmd_info_clear_stats(&reply, pmd, stats, cycles);
            } else if (type == PMD_INFO_SHOW_STATS) {
                pmd_info_show_stats(&reply, pmd, stats, cycles);
            }
        }
    }

    ovs_mutex_unlock(&dp_netdev_mutex);

    unixctl_command_reply(conn, ds_cstr(&reply));
    ds_destroy(&reply);
}

static int
dpif_netdev_init(void)
{
    static enum pmd_info_type show_aux = PMD_INFO_SHOW_STATS,
                              clear_aux = PMD_INFO_CLEAR_STATS,
                              poll_aux = PMD_INFO_SHOW_RXQ;

    unixctl_command_register("dpif-netdev/pmd-stats-show", "[dp]",
                             0, 1, dpif_netdev_pmd_info,
                             (void *)&show_aux);
    unixctl_command_register("dpif-netdev/pmd-stats-clear", "[dp]",
                             0, 1, dpif_netdev_pmd_info,
                             (void *)&clear_aux);
    unixctl_command_register("dpif-netdev/pmd-rxq-show", "[dp]",
                             0, 1, dpif_netdev_pmd_info,
                             (void *)&poll_aux);
    return 0;
}

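/* These registrations back the corresponding ovs-appctl commands, e.g.:
 *
 *     ovs-appctl dpif-netdev/pmd-stats-show
 *     ovs-appctl dpif-netdev/pmd-stats-clear
 *     ovs-appctl dpif-netdev/pmd-rxq-show
 *
 * The optional [dp] argument is only required when more than one netdev
 * datapath exists; otherwise dpif_netdev_pmd_info() falls back to the single
 * registered datapath. */
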
static int
dpif_netdev_enumerate(struct sset *all_dps,
                      const struct dpif_class *dpif_class)
{
    struct shash_node *node;

    ovs_mutex_lock(&dp_netdev_mutex);
    SHASH_FOR_EACH(node, &dp_netdevs) {
        struct dp_netdev *dp = node->data;
        if (dpif_class != dp->class) {
            /* 'dp_netdevs' contains both "netdev" and "dummy" dpifs.
             * If the class doesn't match, skip this dpif. */
            continue;
        }
        sset_add(all_dps, node->name);
    }
    ovs_mutex_unlock(&dp_netdev_mutex);

    return 0;
}

static bool
dpif_netdev_class_is_dummy(const struct dpif_class *class)
{
    return class != &dpif_netdev_class;
}

static const char *
dpif_netdev_port_open_type(const struct dpif_class *class, const char *type)
{
    return strcmp(type, "internal") ? type
           : dpif_netdev_class_is_dummy(class) ? "dummy-internal"
           : "tap";
}

static struct dpif *
create_dpif_netdev(struct dp_netdev *dp)
{
    uint16_t netflow_id = hash_string(dp->name, 0);
    struct dpif_netdev *dpif;

    ovs_refcount_ref(&dp->ref_cnt);

    dpif = xmalloc(sizeof *dpif);
    dpif_init(&dpif->dpif, dp->class, dp->name, netflow_id >> 8, netflow_id);
    dpif->dp = dp;
    dpif->last_port_seq = seq_read(dp->port_seq);

    return &dpif->dpif;
}

/* Choose an unused, non-zero port number and return it on success.
 * Return ODPP_NONE on failure. */
static odp_port_t
choose_port(struct dp_netdev *dp, const char *name)
    OVS_REQUIRES(dp->port_mutex)
{
    uint32_t port_no;

    if (dp->class != &dpif_netdev_class) {
        const char *p;
        int start_no = 0;

        /* If the port name begins with "br", start the number search at
         * 100 to make writing tests easier. */
        if (!strncmp(name, "br", 2)) {
            start_no = 100;
        }

        /* If the port name contains a number, try to assign that port number.
         * This can make writing unit tests easier because port numbers are
         * predictable. */
        for (p = name; *p != '\0'; p++) {
            if (isdigit((unsigned char) *p)) {
                port_no = start_no + strtol(p, NULL, 10);
                if (port_no > 0 && port_no != odp_to_u32(ODPP_NONE)
                    && !dp_netdev_lookup_port(dp, u32_to_odp(port_no))) {
                    return u32_to_odp(port_no);
                }
                break;
            }
        }
    }

    for (port_no = 1; port_no <= UINT16_MAX; port_no++) {
        if (!dp_netdev_lookup_port(dp, u32_to_odp(port_no))) {
            return u32_to_odp(port_no);
        }
    }

    return ODPP_NONE;
}

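/* Worked example (for illustration): on a non-dummy datapath, a port named
 * "br0" requests number 100 ("br" starts the search at 100 and the embedded
 * "0" adds nothing), while "eth2" requests number 2.  If the requested number
 * is taken, the final loop falls back to the first free number from 1 up. */
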
static int
create_dp_netdev(const char *name, const struct dpif_class *class,
                 struct dp_netdev **dpp)
    OVS_REQUIRES(dp_netdev_mutex)
{
    struct dp_netdev *dp;
    int error;

    dp = xzalloc(sizeof *dp);
    shash_add(&dp_netdevs, name, dp);

    *CONST_CAST(const struct dpif_class **, &dp->class) = class;
    *CONST_CAST(const char **, &dp->name) = xstrdup(name);
    ovs_refcount_init(&dp->ref_cnt);
    atomic_flag_clear(&dp->destroyed);

    ovs_mutex_init(&dp->port_mutex);
    hmap_init(&dp->ports);
    dp->port_seq = seq_create();
    fat_rwlock_init(&dp->upcall_rwlock);

    dp->reconfigure_seq = seq_create();
    dp->last_reconfigure_seq = seq_read(dp->reconfigure_seq);

    /* Disable upcalls by default. */
    dp_netdev_disable_upcall(dp);
    dp->upcall_aux = NULL;
    dp->upcall_cb = NULL;

    conntrack_init(&dp->conntrack);

    atomic_init(&dp->emc_insert_min, DEFAULT_EM_FLOW_INSERT_MIN);

    cmap_init(&dp->poll_threads);
    ovs_mutex_init_recursive(&dp->non_pmd_mutex);
    ovsthread_key_create(&dp->per_pmd_key, NULL);

    ovs_mutex_lock(&dp->port_mutex);
    dp_netdev_set_nonpmd(dp);
    error = do_add_port(dp, name, dpif_netdev_port_open_type(dp->class,
                                                             "internal"),
                        ODPP_LOCAL);
    ovs_mutex_unlock(&dp->port_mutex);
    if (error) {
        dp_netdev_free(dp);
        return error;
    }

    dp->last_tnl_conf_seq = seq_read(tnl_conf_seq);
    *dpp = dp;
    return 0;
}

static void
dp_netdev_request_reconfigure(struct dp_netdev *dp)
{
    seq_change(dp->reconfigure_seq);
}

static bool
dp_netdev_is_reconf_required(struct dp_netdev *dp)
{
    return seq_read(dp->reconfigure_seq) != dp->last_reconfigure_seq;
}

static int
dpif_netdev_open(const struct dpif_class *class, const char *name,
                 bool create, struct dpif **dpifp)
{
    struct dp_netdev *dp;
    int error;

    ovs_mutex_lock(&dp_netdev_mutex);
    dp = shash_find_data(&dp_netdevs, name);
    if (!dp) {
        error = create ? create_dp_netdev(name, class, &dp) : ENODEV;
    } else {
        error = (dp->class != class ? EINVAL
                 : create ? EEXIST
                 : 0);
    }
    if (!error) {
        *dpifp = create_dpif_netdev(dp);
    }
    ovs_mutex_unlock(&dp_netdev_mutex);

    return error;
}

static void
dp_netdev_destroy_upcall_lock(struct dp_netdev *dp)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    /* Check that upcalls are disabled, i.e. that the rwlock is taken */
    ovs_assert(fat_rwlock_tryrdlock(&dp->upcall_rwlock));

    /* Before freeing a lock we should release it */
    fat_rwlock_unlock(&dp->upcall_rwlock);
    fat_rwlock_destroy(&dp->upcall_rwlock);
}

/* Requires dp_netdev_mutex so that we can't get a new reference to 'dp'
 * through the 'dp_netdevs' shash while freeing 'dp'. */
static void
dp_netdev_free(struct dp_netdev *dp)
    OVS_REQUIRES(dp_netdev_mutex)
{
    struct dp_netdev_port *port, *next;

    shash_find_and_delete(&dp_netdevs, dp->name);

    ovs_mutex_lock(&dp->port_mutex);
    HMAP_FOR_EACH_SAFE (port, next, node, &dp->ports) {
        do_del_port(dp, port);
    }
    ovs_mutex_unlock(&dp->port_mutex);
    dp_netdev_destroy_all_pmds(dp, true);
    cmap_destroy(&dp->poll_threads);

    ovs_mutex_destroy(&dp->non_pmd_mutex);
    ovsthread_key_delete(dp->per_pmd_key);

    conntrack_destroy(&dp->conntrack);

    seq_destroy(dp->reconfigure_seq);

    seq_destroy(dp->port_seq);
    hmap_destroy(&dp->ports);
    ovs_mutex_destroy(&dp->port_mutex);

    /* Upcalls must be disabled at this point */
    dp_netdev_destroy_upcall_lock(dp);

    free(dp->pmd_cmask);
    free(CONST_CAST(char *, dp->name));
    free(dp);
}

static void
dp_netdev_unref(struct dp_netdev *dp)
{
    if (dp) {
        /* Take dp_netdev_mutex so that, if dp->ref_cnt falls to zero, we
         * can't get a new reference to 'dp' through the 'dp_netdevs' shash. */
        ovs_mutex_lock(&dp_netdev_mutex);
        if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) {
            dp_netdev_free(dp);
        }
        ovs_mutex_unlock(&dp_netdev_mutex);
    }
}

static void
dpif_netdev_close(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    dp_netdev_unref(dp);
    free(dpif);
}

static int
dpif_netdev_destroy(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    if (!atomic_flag_test_and_set(&dp->destroyed)) {
        if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) {
            /* Can't happen: 'dpif' still owns a reference to 'dp'. */
            OVS_NOT_REACHED();
        }
    }

    return 0;
}

/* Add 'n' to the atomic variable 'var' non-atomically and using relaxed
 * load/store semantics.  While the increment is not atomic, the load and
 * store operations are, making it impossible to read inconsistent values.
 *
 * This is used to update thread local stats counters. */
static void
non_atomic_ullong_add(atomic_ullong *var, unsigned long long n)
{
    unsigned long long tmp;

    atomic_read_relaxed(var, &tmp);
    tmp += n;
    atomic_store_relaxed(var, tmp);
}

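/* Note (for illustration): this read-modify-write is only safe because each
 * such counter has a single writer, its own pmd thread.  A concurrent reader
 * may miss an in-flight increment but can never observe a torn value.
 * Typical use:
 *
 *     non_atomic_ullong_add(&pmd->stats.n[DP_STAT_MISS], 1);
 *
 * With two writers an increment could be lost, so this must not be used on
 * shared counters. */
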
static int
dpif_netdev_get_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_pmd_thread *pmd;

    stats->n_flows = stats->n_hit = stats->n_missed = stats->n_lost = 0;
    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        unsigned long long n;
        stats->n_flows += cmap_count(&pmd->flow_table);

        atomic_read_relaxed(&pmd->stats.n[DP_STAT_MASKED_HIT], &n);
        stats->n_hit += n;
        atomic_read_relaxed(&pmd->stats.n[DP_STAT_EXACT_HIT], &n);
        stats->n_hit += n;
        atomic_read_relaxed(&pmd->stats.n[DP_STAT_MISS], &n);
        stats->n_missed += n;
        atomic_read_relaxed(&pmd->stats.n[DP_STAT_LOST], &n);
        stats->n_lost += n;
    }
    stats->n_masks = UINT32_MAX;
    stats->n_mask_hit = UINT64_MAX;

    return 0;
}

static void
dp_netdev_reload_pmd__(struct dp_netdev_pmd_thread *pmd)
{
    if (pmd->core_id == NON_PMD_CORE_ID) {
        ovs_mutex_lock(&pmd->dp->non_pmd_mutex);
        ovs_mutex_lock(&pmd->port_mutex);
        pmd_load_cached_ports(pmd);
        ovs_mutex_unlock(&pmd->port_mutex);
        ovs_mutex_unlock(&pmd->dp->non_pmd_mutex);
        return;
    }

    ovs_mutex_lock(&pmd->cond_mutex);
    seq_change(pmd->reload_seq);
    atomic_store_relaxed(&pmd->reload, true);
    ovs_mutex_cond_wait(&pmd->cond, &pmd->cond_mutex);
    ovs_mutex_unlock(&pmd->cond_mutex);
}

static uint32_t
hash_port_no(odp_port_t port_no)
{
    return hash_int(odp_to_u32(port_no), 0);
}

static int
port_create(const char *devname, const char *type,
            odp_port_t port_no, struct dp_netdev_port **portp)
{
    struct netdev_saved_flags *sf;
    struct dp_netdev_port *port;
    enum netdev_flags flags;
    struct netdev *netdev;
    int error;

    *portp = NULL;

    /* Open and validate network device. */
    error = netdev_open(devname, type, &netdev);
    if (error) {
        return error;
    }
    /* XXX reject non-Ethernet devices */

    netdev_get_flags(netdev, &flags);
    if (flags & NETDEV_LOOPBACK) {
        VLOG_ERR("%s: cannot add a loopback device", devname);
        error = EINVAL;
        goto out;
    }

    error = netdev_turn_flags_on(netdev, NETDEV_PROMISC, &sf);
    if (error) {
        VLOG_ERR("%s: cannot set promisc flag", devname);
        goto out;
    }

    port = xzalloc(sizeof *port);
    port->port_no = port_no;
    port->netdev = netdev;
    port->type = xstrdup(type);
    port->sf = sf;
    port->need_reconfigure = true;
    ovs_mutex_init(&port->txq_used_mutex);

    *portp = port;

    return 0;

out:
    netdev_close(netdev);
    return error;
}

static int
do_add_port(struct dp_netdev *dp, const char *devname, const char *type,
            odp_port_t port_no)
    OVS_REQUIRES(dp->port_mutex)
{
    struct dp_netdev_port *port;
    int error;

    /* Reject devices already in 'dp'. */
    if (!get_port_by_name(dp, devname, &port)) {
        return EEXIST;
    }

    error = port_create(devname, type, port_no, &port);
    if (error) {
        return error;
    }

    hmap_insert(&dp->ports, &port->node, hash_port_no(port_no));
    seq_change(dp->port_seq);

    reconfigure_datapath(dp);

    return 0;
}

static int
dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev,
                     odp_port_t *port_nop)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
    const char *dpif_port;
    odp_port_t port_no;
    int error;

    ovs_mutex_lock(&dp->port_mutex);
    dpif_port = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
    if (*port_nop != ODPP_NONE) {
        port_no = *port_nop;
        error = dp_netdev_lookup_port(dp, *port_nop) ? EBUSY : 0;
    } else {
        port_no = choose_port(dp, dpif_port);
        error = port_no == ODPP_NONE ? EFBIG : 0;
    }
    if (!error) {
        *port_nop = port_no;
        error = do_add_port(dp, dpif_port, netdev_get_type(netdev), port_no);
    }
    ovs_mutex_unlock(&dp->port_mutex);

    return error;
}

static int
dpif_netdev_port_del(struct dpif *dpif, odp_port_t port_no)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    int error;

    ovs_mutex_lock(&dp->port_mutex);
    if (port_no == ODPP_LOCAL) {
        error = EINVAL;
    } else {
        struct dp_netdev_port *port;

        error = get_port_by_number(dp, port_no, &port);
        if (!error) {
            do_del_port(dp, port);
        }
    }
    ovs_mutex_unlock(&dp->port_mutex);

    return error;
}

static bool
is_valid_port_number(odp_port_t port_no)
{
    return port_no != ODPP_NONE;
}

static struct dp_netdev_port *
dp_netdev_lookup_port(const struct dp_netdev *dp, odp_port_t port_no)
    OVS_REQUIRES(dp->port_mutex)
{
    struct dp_netdev_port *port;

    HMAP_FOR_EACH_WITH_HASH (port, node, hash_port_no(port_no), &dp->ports) {
        if (port->port_no == port_no) {
            return port;
        }
    }
    return NULL;
}

static int
get_port_by_number(struct dp_netdev *dp,
                   odp_port_t port_no, struct dp_netdev_port **portp)
    OVS_REQUIRES(dp->port_mutex)
{
    if (!is_valid_port_number(port_no)) {
        *portp = NULL;
        return EINVAL;
    } else {
        *portp = dp_netdev_lookup_port(dp, port_no);
        return *portp ? 0 : ENODEV;
    }
}

static void
port_destroy(struct dp_netdev_port *port)
{
    if (!port) {
        return;
    }

    netdev_close(port->netdev);
    netdev_restore_flags(port->sf);

    for (unsigned i = 0; i < port->n_rxq; i++) {
        netdev_rxq_close(port->rxqs[i].rx);
    }
    ovs_mutex_destroy(&port->txq_used_mutex);
    free(port->rxq_affinity_list);
    free(port->txq_used);
    free(port->rxqs);
    free(port->type);
    free(port);
}

static int
get_port_by_name(struct dp_netdev *dp,
                 const char *devname, struct dp_netdev_port **portp)
    OVS_REQUIRES(dp->port_mutex)
{
    struct dp_netdev_port *port;

    HMAP_FOR_EACH (port, node, &dp->ports) {
        if (!strcmp(netdev_get_name(port->netdev), devname)) {
            *portp = port;
            return 0;
        }
    }

    /* Callers of dpif_netdev_port_query_by_name() expect ENODEV for a non
     * existing port. */
    return ENODEV;
}

/* Returns 'true' if there is a port with pmd netdev. */
static bool
has_pmd_port(struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex)
{
    struct dp_netdev_port *port;

    HMAP_FOR_EACH (port, node, &dp->ports) {
        if (netdev_is_pmd(port->netdev)) {
            return true;
        }
    }

    return false;
}

static void
do_del_port(struct dp_netdev *dp, struct dp_netdev_port *port)
    OVS_REQUIRES(dp->port_mutex)
{
    hmap_remove(&dp->ports, &port->node);
    seq_change(dp->port_seq);

    reconfigure_datapath(dp);

    port_destroy(port);
}

static void
answer_port_query(const struct dp_netdev_port *port,
                  struct dpif_port *dpif_port)
{
    dpif_port->name = xstrdup(netdev_get_name(port->netdev));
    dpif_port->type = xstrdup(port->type);
    dpif_port->port_no = port->port_no;
}

static int
dpif_netdev_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
                                 struct dpif_port *dpif_port)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_port *port;
    int error;

    ovs_mutex_lock(&dp->port_mutex);
    error = get_port_by_number(dp, port_no, &port);
    if (!error && dpif_port) {
        answer_port_query(port, dpif_port);
    }
    ovs_mutex_unlock(&dp->port_mutex);

    return error;
}

static int
dpif_netdev_port_query_by_name(const struct dpif *dpif, const char *devname,
                               struct dpif_port *dpif_port)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_port *port;
    int error;

    ovs_mutex_lock(&dp->port_mutex);
    error = get_port_by_name(dp, devname, &port);
    if (!error && dpif_port) {
        answer_port_query(port, dpif_port);
    }
    ovs_mutex_unlock(&dp->port_mutex);

    return error;
}

static void
dp_netdev_flow_free(struct dp_netdev_flow *flow)
{
    dp_netdev_actions_free(dp_netdev_flow_get_actions(flow));
    free(flow);
}

static void dp_netdev_flow_unref(struct dp_netdev_flow *flow)
{
    if (ovs_refcount_unref_relaxed(&flow->ref_cnt) == 1) {
        ovsrcu_postpone(dp_netdev_flow_free, flow);
    }
}

static uint32_t
dp_netdev_flow_hash(const ovs_u128 *ufid)
{
    return ufid->u32[0];
}

static inline struct dpcls *
dp_netdev_pmd_lookup_dpcls(struct dp_netdev_pmd_thread *pmd,
                           odp_port_t in_port)
{
    struct dpcls *cls;
    uint32_t hash = hash_port_no(in_port);
    CMAP_FOR_EACH_WITH_HASH (cls, node, hash, &pmd->classifiers) {
        if (cls->in_port == in_port) {
            /* Port classifier exists already */
            return cls;
        }
    }
    return NULL;
}

static inline struct dpcls *
dp_netdev_pmd_find_dpcls(struct dp_netdev_pmd_thread *pmd,
                         odp_port_t in_port)
    OVS_REQUIRES(pmd->flow_mutex)
{
    struct dpcls *cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port);
    uint32_t hash = hash_port_no(in_port);

    if (!cls) {
        /* Create new classifier for in_port */
        cls = xmalloc(sizeof(*cls));
        dpcls_init(cls);
        cls->in_port = in_port;
        cmap_insert(&pmd->classifiers, &cls->node, hash);
        VLOG_DBG("Creating dpcls %p for in_port %d", cls, in_port);
    }
    return cls;
}

static void
dp_netdev_pmd_remove_flow(struct dp_netdev_pmd_thread *pmd,
                          struct dp_netdev_flow *flow)
    OVS_REQUIRES(pmd->flow_mutex)
{
    struct cmap_node *node = CONST_CAST(struct cmap_node *, &flow->node);
    struct dpcls *cls;
    odp_port_t in_port = flow->flow.in_port.odp_port;

    cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port);
    ovs_assert(cls != NULL);
    dpcls_remove(cls, &flow->cr);
    cmap_remove(&pmd->flow_table, node, dp_netdev_flow_hash(&flow->ufid));
    flow->dead = true;

    dp_netdev_flow_unref(flow);
}

static void
dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd)
{
    struct dp_netdev_flow *netdev_flow;

    ovs_mutex_lock(&pmd->flow_mutex);
    CMAP_FOR_EACH (netdev_flow, node, &pmd->flow_table) {
        dp_netdev_pmd_remove_flow(pmd, netdev_flow);
    }
    ovs_mutex_unlock(&pmd->flow_mutex);
}

static int
dpif_netdev_flow_flush(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_pmd_thread *pmd;

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        dp_netdev_pmd_flow_flush(pmd);
    }

    return 0;
}

struct dp_netdev_port_state {
    struct hmap_position position;
    char *name;
};

static int
dpif_netdev_port_dump_start(const struct dpif *dpif OVS_UNUSED, void **statep)
{
    *statep = xzalloc(sizeof(struct dp_netdev_port_state));
    return 0;
}

static int
dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,
                           struct dpif_port *dpif_port)
{
    struct dp_netdev_port_state *state = state_;
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct hmap_node *node;
    int retval;

    ovs_mutex_lock(&dp->port_mutex);
    node = hmap_at_position(&dp->ports, &state->position);
    if (node) {
        struct dp_netdev_port *port;

        port = CONTAINER_OF(node, struct dp_netdev_port, node);

        free(state->name);
        state->name = xstrdup(netdev_get_name(port->netdev));
        dpif_port->name = state->name;
        dpif_port->type = port->type;
        dpif_port->port_no = port->port_no;

        retval = 0;
    } else {
        retval = EOF;
    }
    ovs_mutex_unlock(&dp->port_mutex);

    return retval;
}

static int
dpif_netdev_port_dump_done(const struct dpif *dpif OVS_UNUSED, void *state_)
{
    struct dp_netdev_port_state *state = state_;

    free(state->name);
    free(state);
    return 0;
}

static int
dpif_netdev_port_poll(const struct dpif *dpif_, char **devnamep OVS_UNUSED)
{
    struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);
    uint64_t new_port_seq;
    int error;

    new_port_seq = seq_read(dpif->dp->port_seq);
    if (dpif->last_port_seq != new_port_seq) {
        dpif->last_port_seq = new_port_seq;
        error = ENOBUFS;
    } else {
        error = EAGAIN;
    }

    return error;
}

static void
dpif_netdev_port_poll_wait(const struct dpif *dpif_)
{
    struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);

    seq_wait(dpif->dp->port_seq, dpif->last_port_seq);
}

static struct dp_netdev_flow *
dp_netdev_flow_cast(const struct dpcls_rule *cr)
{
    return cr ? CONTAINER_OF(cr, struct dp_netdev_flow, cr) : NULL;
}

static bool dp_netdev_flow_ref(struct dp_netdev_flow *flow)
{
    return ovs_refcount_try_ref_rcu(&flow->ref_cnt);
}

/* netdev_flow_key utilities.
 *
 * netdev_flow_key is basically a miniflow.  We use these functions
 * (netdev_flow_key_clone, netdev_flow_key_equal, ...) instead of the miniflow
 * functions (miniflow_clone_inline, miniflow_equal, ...), because:
 *
 * - Since we are dealing exclusively with miniflows created by
 *   miniflow_extract(), if the map is different the miniflow is different.
 *   Therefore we can be faster by comparing the map and the miniflow in a
 *   single memcmp().
 * - These functions can be inlined by the compiler. */

/* Given the number of bits set in miniflow's maps, returns the size of the
 * 'netdev_flow_key.mf' */
static inline size_t
netdev_flow_key_size(size_t flow_u64s)
{
    return sizeof(struct miniflow) + MINIFLOW_VALUES_SIZE(flow_u64s);
}

static inline bool
netdev_flow_key_equal(const struct netdev_flow_key *a,
                      const struct netdev_flow_key *b)
{
    /* 'b->len' may not be set yet. */
    return a->hash == b->hash && !memcmp(&a->mf, &b->mf, a->len);
}

/* Used to compare 'netdev_flow_key' in the exact match cache to a miniflow.
 * The maps are compared bitwise, so both 'key->mf' and 'mf' must have been
 * generated by miniflow_extract. */
static inline bool
netdev_flow_key_equal_mf(const struct netdev_flow_key *key,
                         const struct miniflow *mf)
{
    return !memcmp(&key->mf, mf, key->len);
}

static inline void
netdev_flow_key_clone(struct netdev_flow_key *dst,
                      const struct netdev_flow_key *src)
{
    memcpy(dst, src,
           offsetof(struct netdev_flow_key, mf) + src->len);
}

static inline void
netdev_flow_key_from_flow(struct netdev_flow_key *dst,
                          const struct flow *src)
{
    struct dp_packet packet;
    uint64_t buf_stub[512 / 8];

    dp_packet_use_stub(&packet, buf_stub, sizeof buf_stub);
    pkt_metadata_from_flow(&packet.md, src);
    flow_compose(&packet, src);
    miniflow_extract(&packet, &dst->mf);
    dp_packet_uninit(&packet);

    dst->len = netdev_flow_key_size(miniflow_n_values(&dst->mf));
    dst->hash = 0; /* Not computed yet. */
}

/* Initialize a netdev_flow_key 'mask' from 'match'. */
static inline void
netdev_flow_mask_init(struct netdev_flow_key *mask,
                      const struct match *match)
{
    uint64_t *dst = miniflow_values(&mask->mf);
    struct flowmap fmap;
    uint32_t hash = 0;
    size_t idx;

    /* Only check masks that make sense for the flow. */
    flow_wc_map(&match->flow, &fmap);
    flowmap_init(&mask->mf.map);

    FLOWMAP_FOR_EACH_INDEX(idx, fmap) {
        uint64_t mask_u64 = flow_u64_value(&match->wc.masks, idx);

        if (mask_u64) {
            flowmap_set(&mask->mf.map, idx, 1);
            *dst++ = mask_u64;
            hash = hash_add64(hash, mask_u64);
        }
    }

    map_t map;

    FLOWMAP_FOR_EACH_MAP (map, mask->mf.map) {
        hash = hash_add64(hash, map);
    }

    size_t n = dst - miniflow_get_values(&mask->mf);

    mask->hash = hash_finish(hash, n * 8);
    mask->len = netdev_flow_key_size(n);
}

/* Initializes 'dst' as a copy of 'flow' masked with 'mask'. */
static inline void
netdev_flow_key_init_masked(struct netdev_flow_key *dst,
                            const struct flow *flow,
                            const struct netdev_flow_key *mask)
{
    uint64_t *dst_u64 = miniflow_values(&dst->mf);
    const uint64_t *mask_u64 = miniflow_get_values(&mask->mf);
    uint32_t hash = 0;
    uint64_t value;

    dst->len = mask->len;
    dst->mf = mask->mf;   /* Copy maps. */

    FLOW_FOR_EACH_IN_MAPS(value, flow, mask->mf.map) {
        *dst_u64 = value & *mask_u64++;
        hash = hash_add64(hash, *dst_u64++);
    }
    dst->hash = hash_finish(hash,
                            (dst_u64 - miniflow_get_values(&dst->mf)) * 8);
}

/* Iterate through netdev_flow_key TNL u64 values specified by 'FLOWMAP'. */
#define NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(VALUE, KEY, FLOWMAP)   \
    MINIFLOW_FOR_EACH_IN_FLOWMAP(VALUE, &(KEY)->mf, FLOWMAP)

/* Returns a hash value for the bits of 'key' where there are 1-bits in
 * 'mask'. */
static inline uint32_t
netdev_flow_key_hash_in_mask(const struct netdev_flow_key *key,
                             const struct netdev_flow_key *mask)
{
    const uint64_t *p = miniflow_get_values(&mask->mf);
    uint32_t hash = 0;
    uint64_t value;

    NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, key, mask->mf.map) {
        hash = hash_add64(hash, value & *p++);
    }

    return hash_finish(hash, (p - miniflow_get_values(&mask->mf)) * 8);
}

static inline bool
emc_entry_alive(struct emc_entry *ce)
{
    return ce->flow && !ce->flow->dead;
}

static void
emc_clear_entry(struct emc_entry *ce)
{
    if (ce->flow) {
        dp_netdev_flow_unref(ce->flow);
        ce->flow = NULL;
    }
}

static inline void
emc_change_entry(struct emc_entry *ce, struct dp_netdev_flow *flow,
                 const struct netdev_flow_key *key)
{
    if (ce->flow != flow) {
        if (ce->flow) {
            dp_netdev_flow_unref(ce->flow);
        }

        if (dp_netdev_flow_ref(flow)) {
            ce->flow = flow;
        } else {
            ce->flow = NULL;
        }
    }
    if (key) {
        netdev_flow_key_clone(&ce->key, key);
    }
}

static inline void
emc_insert(struct emc_cache *cache, const struct netdev_flow_key *key,
           struct dp_netdev_flow *flow)
{
    struct emc_entry *to_be_replaced = NULL;
    struct emc_entry *current_entry;

    EMC_FOR_EACH_POS_WITH_HASH(cache, current_entry, key->hash) {
        if (netdev_flow_key_equal(&current_entry->key, key)) {
            /* We found the entry with the 'mf' miniflow */
            emc_change_entry(current_entry, flow, NULL);
            return;
        }

        /* Replacement policy: put the flow in an empty (not alive) entry, or
         * in the first entry where it can be */
        if (!to_be_replaced
            || (emc_entry_alive(to_be_replaced)
                && !emc_entry_alive(current_entry))
            || current_entry->key.hash < to_be_replaced->key.hash) {
            to_be_replaced = current_entry;
        }
    }
    /* We didn't find the miniflow in the cache.
     * The 'to_be_replaced' entry is where the new flow will be stored */

    emc_change_entry(to_be_replaced, flow, key);
}

static inline void
emc_probabilistic_insert(struct dp_netdev_pmd_thread *pmd,
                         const struct netdev_flow_key *key,
                         struct dp_netdev_flow *flow)
{
    /* Insert an entry into the EMC based on probability value 'min'.  By
     * default the value is UINT32_MAX / 100 which yields an insertion
     * probability of 1/100, i.e. 1%. */

    uint32_t min;
    atomic_read_relaxed(&pmd->dp->emc_insert_min, &min);

#ifdef DPDK_NETDEV
    if (min && (key->hash ^ (uint32_t) pmd->last_cycles) <= min) {
#else
    if (min && (key->hash ^ random_uint32()) <= min) {
#endif
        emc_insert(&pmd->flow_cache, key, flow);
    }
}

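/* For illustration: with EM_FLOW_HASH_SEGS == 2 the policy above reduces to:
 * reuse an entry whose key matches exactly; otherwise prefer a dead entry;
 * otherwise evict the candidate with the smaller key.hash.  The hash
 * comparison is an arbitrary but cheap tie-breaker that avoids keeping any
 * per-entry LRU state. */
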
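/* For illustration: XORing the flow's 32-bit hash with a (pseudo)random
 * 32-bit value yields a roughly uniform 32-bit number, so the '<= min' test
 * succeeds with probability about min / UINT32_MAX.  With the default
 * min == UINT32_MAX / 100, roughly 1 in 100 packets of a flow triggers an
 * EMC insertion, which limits cache thrashing when many short-lived flows
 * compete for the same two slots. */
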
static inline struct dp_netdev_flow *
emc_lookup(struct emc_cache *cache, const struct netdev_flow_key *key)
{
    struct emc_entry *current_entry;

    EMC_FOR_EACH_POS_WITH_HASH(cache, current_entry, key->hash) {
        if (current_entry->key.hash == key->hash
            && emc_entry_alive(current_entry)
            && netdev_flow_key_equal_mf(&current_entry->key, &key->mf)) {

            /* We found the entry with the 'key->mf' miniflow */
            return current_entry->flow;
        }
    }

    return NULL;
}

static struct dp_netdev_flow *
dp_netdev_pmd_lookup_flow(struct dp_netdev_pmd_thread *pmd,
                          const struct netdev_flow_key *key,
                          int *lookup_num_p)
{
    struct dpcls *cls;
    struct dpcls_rule *rule;
    odp_port_t in_port = u32_to_odp(MINIFLOW_GET_U32(&key->mf, in_port));
    struct dp_netdev_flow *netdev_flow = NULL;

    cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port);
    if (OVS_LIKELY(cls)) {
        dpcls_lookup(cls, key, &rule, 1, lookup_num_p);
        netdev_flow = dp_netdev_flow_cast(rule);
    }
    return netdev_flow;
}

static struct dp_netdev_flow *
dp_netdev_pmd_find_flow(const struct dp_netdev_pmd_thread *pmd,
                        const ovs_u128 *ufidp, const struct nlattr *key,
                        size_t key_len)
{
    struct dp_netdev_flow *netdev_flow;
    struct flow flow;
    ovs_u128 ufid;

    /* If a UFID is not provided, determine one based on the key. */
    if (!ufidp && key && key_len
        && !dpif_netdev_flow_from_nlattrs(key, key_len, &flow)) {
        dpif_flow_hash(pmd->dp->dpif, &flow, sizeof flow, &ufid);
        ufidp = &ufid;
    }

    CMAP_FOR_EACH_WITH_HASH (netdev_flow, node, dp_netdev_flow_hash(ufidp),
                             &pmd->flow_table) {
        if (ovs_u128_equals(netdev_flow->ufid, *ufidp)) {
            return netdev_flow;
        }
    }

    return NULL;
}

static void
get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow_,
                    struct dpif_flow_stats *stats)
{
    struct dp_netdev_flow *netdev_flow;
    unsigned long long n;
    long long used;
    uint16_t flags;

    netdev_flow = CONST_CAST(struct dp_netdev_flow *, netdev_flow_);

    atomic_read_relaxed(&netdev_flow->stats.packet_count, &n);
    stats->n_packets = n;
    atomic_read_relaxed(&netdev_flow->stats.byte_count, &n);
    stats->n_bytes = n;
    atomic_read_relaxed(&netdev_flow->stats.used, &used);
    stats->used = used;
    atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags);
    stats->tcp_flags = flags;
}

/* Converts to the dpif_flow format, using 'key_buf' and 'mask_buf' for
 * storing the netlink-formatted key/mask.  'key_buf' may be the same as
 * 'mask_buf'.  Actions will be returned without copying, by relying on RCU
 * to postpone their freeing. */
static void
dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow *netdev_flow,
                            struct ofpbuf *key_buf, struct ofpbuf *mask_buf,
                            struct dpif_flow *flow, bool terse)
{
    if (terse) {
        memset(flow, 0, sizeof *flow);
    } else {
        struct flow_wildcards wc;
        struct dp_netdev_actions *actions;
        size_t offset;
        struct odp_flow_key_parms odp_parms = {
            .flow = &netdev_flow->flow,
            .mask = &wc.masks,
            .support = dp_netdev_support,
        };

        miniflow_expand(&netdev_flow->cr.mask->mf, &wc.masks);
        /* in_port is exact matched, but we have left it out from the mask for
         * optimization reasons.  Add in_port back to the mask. */
        wc.masks.in_port.odp_port = ODPP_NONE;

        offset = key_buf->size;
        flow->key = ofpbuf_tail(key_buf);
        odp_flow_key_from_flow(&odp_parms, key_buf);
        flow->key_len = key_buf->size - offset;

        offset = mask_buf->size;
        flow->mask = ofpbuf_tail(mask_buf);
        odp_parms.key_buf = key_buf;
        odp_flow_key_from_mask(&odp_parms, mask_buf);
        flow->mask_len = mask_buf->size - offset;

        actions = dp_netdev_flow_get_actions(netdev_flow);
        flow->actions = actions->actions;
        flow->actions_len = actions->size;
    }

    flow->ufid = netdev_flow->ufid;
    flow->ufid_present = true;
    flow->pmd_id = netdev_flow->pmd_id;
    get_dpif_flow_stats(netdev_flow, &flow->stats);
}

static int
dpif_netdev_mask_from_nlattrs(const struct nlattr *key, uint32_t key_len,
                              const struct nlattr *mask_key,
                              uint32_t mask_key_len, const struct flow *flow,
                              struct flow_wildcards *wc)
{
    enum odp_key_fitness fitness;

    fitness = odp_flow_key_to_mask(mask_key, mask_key_len, wc, flow);
    if (fitness) {
        /* This should not happen: it indicates that
         * odp_flow_key_from_mask() and odp_flow_key_to_mask()
         * disagree on the acceptable form of a mask.  Log the problem
         * as an error, with enough details to enable debugging. */
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        if (!VLOG_DROP_ERR(&rl)) {
            struct ds s;

            ds_init(&s);
            odp_flow_format(key, key_len, mask_key, mask_key_len, NULL, &s,
                            true);
            VLOG_ERR("internal error parsing flow mask %s (%s)",
                     ds_cstr(&s), odp_key_fitness_to_string(fitness));
            ds_destroy(&s);
        }

        return EINVAL;
    }

    return 0;
}

static int
dpif_netdev_flow_from_nlattrs(const struct nlattr *key, uint32_t key_len,
                              struct flow *flow)
{
    odp_port_t in_port;

    if (odp_flow_key_to_flow(key, key_len, flow)) {
        /* This should not happen: it indicates that odp_flow_key_from_flow()
         * and odp_flow_key_to_flow() disagree on the acceptable form of a
         * flow.  Log the problem as an error, with enough details to enable
         * debugging. */
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        if (!VLOG_DROP_ERR(&rl)) {
            struct ds s;

            ds_init(&s);
            odp_flow_format(key, key_len, NULL, 0, NULL, &s, true);
            VLOG_ERR("internal error parsing flow key %s", ds_cstr(&s));
            ds_destroy(&s);
        }

        return EINVAL;
    }

    in_port = flow->in_port.odp_port;
    if (!is_valid_port_number(in_port) && in_port != ODPP_NONE) {
        return EINVAL;
    }

    if (flow->ct_state & DP_NETDEV_CS_UNSUPPORTED_MASK) {
        return EINVAL;
    }

    return 0;
}

static int
dpif_netdev_flow_get(const struct dpif *dpif, const struct dpif_flow_get *get)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_flow *netdev_flow;
    struct dp_netdev_pmd_thread *pmd;
    struct hmapx to_find = HMAPX_INITIALIZER(&to_find);
    struct hmapx_node *node;
    int error = EINVAL;

    if (get->pmd_id == PMD_ID_NULL) {
        CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
            if (dp_netdev_pmd_try_ref(pmd) && !hmapx_add(&to_find, pmd)) {
                dp_netdev_pmd_unref(pmd);
            }
        }
    } else {
        pmd = dp_netdev_get_pmd(dp, get->pmd_id);
        if (!pmd) {
            goto out;
        }
        hmapx_add(&to_find, pmd);
    }

    if (!hmapx_count(&to_find)) {
        goto out;
    }

    HMAPX_FOR_EACH (node, &to_find) {
        pmd = (struct dp_netdev_pmd_thread *) node->data;
        netdev_flow = dp_netdev_pmd_find_flow(pmd, get->ufid, get->key,
                                              get->key_len);
        if (netdev_flow) {
            dp_netdev_flow_to_dpif_flow(netdev_flow, get->buffer, get->buffer,
                                        get->flow, false);
            error = 0;
            break;
        } else {
            error = ENOENT;
        }
    }

    HMAPX_FOR_EACH (node, &to_find) {
        pmd = (struct dp_netdev_pmd_thread *) node->data;
        dp_netdev_pmd_unref(pmd);
    }
out:
    hmapx_destroy(&to_find);
    return error;
}
static struct dp_netdev_flow *
dp_netdev_flow_add(struct dp_netdev_pmd_thread *pmd,
                   struct match *match, const ovs_u128 *ufid,
                   const struct nlattr *actions, size_t actions_len)
    OVS_REQUIRES(pmd->flow_mutex)
{
    struct dp_netdev_flow *flow;
    struct netdev_flow_key mask;
    struct dpcls *cls;

    /* Make sure in_port is exact matched before we read it. */
    ovs_assert(match->wc.masks.in_port.odp_port == ODPP_NONE);
    odp_port_t in_port = match->flow.in_port.odp_port;

    /* As we select the dpcls based on the port number, each netdev flow
     * belonging to the same dpcls will have the same odp_port value.
     * For performance reasons we wildcard odp_port here in the mask. In the
     * typical case dp_hash is also wildcarded, and the resulting 8-byte
     * chunk {dp_hash, in_port} will be ignored by netdev_flow_mask_init() and
     * will not be part of the subtable mask.
     * This will speed up the hash computation during dpcls_lookup() because
     * there is one less call to hash_add64() in this case. */
    match->wc.masks.in_port.odp_port = 0;
    netdev_flow_mask_init(&mask, match);
    match->wc.masks.in_port.odp_port = ODPP_NONE;

    /* Make sure wc does not have metadata. */
    ovs_assert(!FLOWMAP_HAS_FIELD(&mask.mf.map, metadata)
               && !FLOWMAP_HAS_FIELD(&mask.mf.map, regs));

    /* Do not allocate extra space. */
    flow = xmalloc(sizeof *flow - sizeof flow->cr.flow.mf + mask.len);
    memset(&flow->stats, 0, sizeof flow->stats);
    flow->dead = false;
    flow->batch = NULL;
    *CONST_CAST(unsigned *, &flow->pmd_id) = pmd->core_id;
    *CONST_CAST(struct flow *, &flow->flow) = match->flow;
    *CONST_CAST(ovs_u128 *, &flow->ufid) = *ufid;
    ovs_refcount_init(&flow->ref_cnt);
    ovsrcu_set(&flow->actions, dp_netdev_actions_create(actions, actions_len));

    netdev_flow_key_init_masked(&flow->cr.flow, &match->flow, &mask);

    /* Select dpcls for in_port. Relies on in_port to be exact match. */
    cls = dp_netdev_pmd_find_dpcls(pmd, in_port);
    dpcls_insert(cls, &flow->cr, &mask);

    cmap_insert(&pmd->flow_table, CONST_CAST(struct cmap_node *, &flow->node),
                dp_netdev_flow_hash(&flow->ufid));

    if (OVS_UNLIKELY(VLOG_IS_DBG_ENABLED())) {
        struct ds ds = DS_EMPTY_INITIALIZER;
        struct ofpbuf key_buf, mask_buf;
        struct odp_flow_key_parms odp_parms = {
            .flow = &match->flow,
            .mask = &match->wc.masks,
            .support = dp_netdev_support,
        };

        ofpbuf_init(&key_buf, 0);
        ofpbuf_init(&mask_buf, 0);

        odp_flow_key_from_flow(&odp_parms, &key_buf);
        odp_parms.key_buf = &key_buf;
        odp_flow_key_from_mask(&odp_parms, &mask_buf);

        ds_put_cstr(&ds, "flow_add: ");
        odp_format_ufid(ufid, &ds);
        ds_put_cstr(&ds, " ");
        odp_flow_format(key_buf.data, key_buf.size,
                        mask_buf.data, mask_buf.size,
                        NULL, &ds, false);
        ds_put_cstr(&ds, ", actions:");
        format_odp_actions(&ds, actions, actions_len);

        VLOG_DBG_RL(&upcall_rl, "%s", ds_cstr(&ds));

        ofpbuf_uninit(&key_buf);
        ofpbuf_uninit(&mask_buf);
        ds_destroy(&ds);
    }

    return flow;
}
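
/* Editor's illustrative sketch, not part of the original source (kept under
 * #if 0 so it is never compiled): the xmalloc() in dp_netdev_flow_add()
 * above over-allocates a single struct so that the trailing miniflow buffer
 * is exactly 'mask.len' bytes instead of the maximum possible size.  The
 * struct and helper below are hypothetical stand-ins for that pattern. */
#if 0
struct example_node {
    int id;
    size_t data_len;
    uint64_t data[];            /* Variable-size tail, like the miniflow. */
};

static struct example_node *
example_node_create(int id, size_t data_len)
{
    /* Allocate only as much tail storage as this node actually needs. */
    struct example_node *node = xmalloc(sizeof *node
                                        + data_len * sizeof node->data[0]);

    node->id = id;
    node->data_len = data_len;
    return node;
}
#endif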
static int
flow_put_on_pmd(struct dp_netdev_pmd_thread *pmd,
                struct netdev_flow_key *key,
                struct match *match,
                ovs_u128 *ufid,
                const struct dpif_flow_put *put,
                struct dpif_flow_stats *stats)
{
    struct dp_netdev_flow *netdev_flow;
    int error = 0;

    if (stats) {
        memset(stats, 0, sizeof *stats);
    }

    ovs_mutex_lock(&pmd->flow_mutex);
    netdev_flow = dp_netdev_pmd_lookup_flow(pmd, key, NULL);
    if (!netdev_flow) {
        if (put->flags & DPIF_FP_CREATE) {
            if (cmap_count(&pmd->flow_table) < MAX_FLOWS) {
                dp_netdev_flow_add(pmd, match, ufid, put->actions,
                                   put->actions_len);
            } else {
                error = EFBIG;
            }
        } else {
            error = ENOENT;
        }
    } else {
        if (put->flags & DPIF_FP_MODIFY
            && flow_equal(&match->flow, &netdev_flow->flow)) {
            struct dp_netdev_actions *new_actions;
            struct dp_netdev_actions *old_actions;

            new_actions = dp_netdev_actions_create(put->actions,
                                                   put->actions_len);

            old_actions = dp_netdev_flow_get_actions(netdev_flow);
            ovsrcu_set(&netdev_flow->actions, new_actions);

            if (stats) {
                get_dpif_flow_stats(netdev_flow, stats);
            }
            if (put->flags & DPIF_FP_ZERO_STATS) {
                /* XXX: The userspace datapath uses thread local statistics
                 * (for flows), which should be updated only by the owning
                 * thread. Since we cannot write on stats memory here,
                 * we choose not to support this flag. Please note:
                 * - This feature is currently used only by dpctl commands with
                 *   option --clear.
                 * - Should the need arise, this operation can be implemented
                 *   by keeping a base value (to be updated here) for each
                 *   counter, and subtracting it before outputting the stats */
                error = EOPNOTSUPP;
            }

            ovsrcu_postpone(dp_netdev_actions_free, old_actions);
        } else if (put->flags & DPIF_FP_CREATE) {
            error = EEXIST;
        } else {
            /* Overlapping flow. */
            error = EINVAL;
        }
    }
    ovs_mutex_unlock(&pmd->flow_mutex);
    return error;
}
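
/* Editor's illustrative sketch, not part of the original source (kept under
 * #if 0 so it is never compiled): the modify path above swaps the flow's
 * actions with ovsrcu_set() and defers freeing the old actions with
 * ovsrcu_postpone(), so packet-processing threads still executing the old
 * actions never see freed memory.  A minimal, hypothetical reduction of
 * that pattern: */
#if 0
static void
example_replace_actions(struct dp_netdev_flow *flow,
                        const struct nlattr *acts, size_t acts_len)
{
    struct dp_netdev_actions *new_acts
        = dp_netdev_actions_create(acts, acts_len);
    struct dp_netdev_actions *old_acts = dp_netdev_flow_get_actions(flow);

    /* Publish the new actions: concurrent readers see either the old or
     * the new block, never a mix.  The old block is freed only after a
     * full RCU grace period. */
    ovsrcu_set(&flow->actions, new_acts);
    ovsrcu_postpone(dp_netdev_actions_free, old_acts);
}
#endif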
static int
dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct netdev_flow_key key;
    struct dp_netdev_pmd_thread *pmd;
    struct match match;
    ovs_u128 ufid;
    int error;

    if (put->stats) {
        memset(put->stats, 0, sizeof *put->stats);
    }
    error = dpif_netdev_flow_from_nlattrs(put->key, put->key_len, &match.flow);
    if (error) {
        return error;
    }
    error = dpif_netdev_mask_from_nlattrs(put->key, put->key_len,
                                          put->mask, put->mask_len,
                                          &match.flow, &match.wc);
    if (error) {
        return error;
    }

    if (put->ufid) {
        ufid = *put->ufid;
    } else {
        dpif_flow_hash(dpif, &match.flow, sizeof match.flow, &ufid);
    }

    /* Must produce a netdev_flow_key for lookup.
     * This interface is no longer performance critical, since it is not used
     * for upcall processing any more. */
    netdev_flow_key_from_flow(&key, &match.flow);

    if (put->pmd_id == PMD_ID_NULL) {
        if (cmap_count(&dp->poll_threads) == 0) {
            return EINVAL;
        }
        CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
            struct dpif_flow_stats pmd_stats;
            int pmd_error;

            pmd_error = flow_put_on_pmd(pmd, &key, &match, &ufid, put,
                                        &pmd_stats);
            if (pmd_error) {
                error = pmd_error;
            } else if (put->stats) {
                put->stats->n_packets += pmd_stats.n_packets;
                put->stats->n_bytes += pmd_stats.n_bytes;
                put->stats->used = MAX(put->stats->used, pmd_stats.used);
                put->stats->tcp_flags |= pmd_stats.tcp_flags;
            }
        }
    } else {
        pmd = dp_netdev_get_pmd(dp, put->pmd_id);
        if (!pmd) {
            return EINVAL;
        }
        error = flow_put_on_pmd(pmd, &key, &match, &ufid, put, put->stats);
        dp_netdev_pmd_unref(pmd);
    }

    return error;
}
static int
flow_del_on_pmd(struct dp_netdev_pmd_thread *pmd,
                struct dpif_flow_stats *stats,
                const struct dpif_flow_del *del)
{
    struct dp_netdev_flow *netdev_flow;
    int error = 0;

    ovs_mutex_lock(&pmd->flow_mutex);
    netdev_flow = dp_netdev_pmd_find_flow(pmd, del->ufid, del->key,
                                          del->key_len);
    if (netdev_flow) {
        if (stats) {
            get_dpif_flow_stats(netdev_flow, stats);
        }
        dp_netdev_pmd_remove_flow(pmd, netdev_flow);
    } else {
        error = ENOENT;
    }
    ovs_mutex_unlock(&pmd->flow_mutex);

    return error;
}
static int
dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_pmd_thread *pmd;
    int error = 0;

    if (del->stats) {
        memset(del->stats, 0, sizeof *del->stats);
    }

    if (del->pmd_id == PMD_ID_NULL) {
        if (cmap_count(&dp->poll_threads) == 0) {
            return EINVAL;
        }
        CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
            struct dpif_flow_stats pmd_stats;
            int pmd_error;

            pmd_error = flow_del_on_pmd(pmd, &pmd_stats, del);
            if (pmd_error) {
                error = pmd_error;
            } else if (del->stats) {
                del->stats->n_packets += pmd_stats.n_packets;
                del->stats->n_bytes += pmd_stats.n_bytes;
                del->stats->used = MAX(del->stats->used, pmd_stats.used);
                del->stats->tcp_flags |= pmd_stats.tcp_flags;
            }
        }
    } else {
        pmd = dp_netdev_get_pmd(dp, del->pmd_id);
        if (!pmd) {
            return EINVAL;
        }
        error = flow_del_on_pmd(pmd, del->stats, del);
        dp_netdev_pmd_unref(pmd);
    }

    return error;
}
struct dpif_netdev_flow_dump {
    struct dpif_flow_dump up;
    struct cmap_position poll_thread_pos;
    struct cmap_position flow_pos;
    struct dp_netdev_pmd_thread *cur_pmd;
    int status;
    struct ovs_mutex mutex;
};

static struct dpif_netdev_flow_dump *
dpif_netdev_flow_dump_cast(struct dpif_flow_dump *dump)
{
    return CONTAINER_OF(dump, struct dpif_netdev_flow_dump, up);
}

static struct dpif_flow_dump *
dpif_netdev_flow_dump_create(const struct dpif *dpif_, bool terse)
{
    struct dpif_netdev_flow_dump *dump;

    dump = xzalloc(sizeof *dump);
    dpif_flow_dump_init(&dump->up, dpif_);
    dump->up.terse = terse;
    ovs_mutex_init(&dump->mutex);

    return &dump->up;
}

static int
dpif_netdev_flow_dump_destroy(struct dpif_flow_dump *dump_)
{
    struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_);

    ovs_mutex_destroy(&dump->mutex);
    free(dump);
    return 0;
}
struct dpif_netdev_flow_dump_thread {
    struct dpif_flow_dump_thread up;
    struct dpif_netdev_flow_dump *dump;
    struct odputil_keybuf keybuf[FLOW_DUMP_MAX_BATCH];
    struct odputil_keybuf maskbuf[FLOW_DUMP_MAX_BATCH];
};

static struct dpif_netdev_flow_dump_thread *
dpif_netdev_flow_dump_thread_cast(struct dpif_flow_dump_thread *thread)
{
    return CONTAINER_OF(thread, struct dpif_netdev_flow_dump_thread, up);
}

static struct dpif_flow_dump_thread *
dpif_netdev_flow_dump_thread_create(struct dpif_flow_dump *dump_)
{
    struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_);
    struct dpif_netdev_flow_dump_thread *thread;

    thread = xmalloc(sizeof *thread);
    dpif_flow_dump_thread_init(&thread->up, &dump->up);
    thread->dump = dump;
    return &thread->up;
}

static void
dpif_netdev_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_)
{
    struct dpif_netdev_flow_dump_thread *thread
        = dpif_netdev_flow_dump_thread_cast(thread_);

    free(thread);
}
static int
dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
                           struct dpif_flow *flows, int max_flows)
{
    struct dpif_netdev_flow_dump_thread *thread
        = dpif_netdev_flow_dump_thread_cast(thread_);
    struct dpif_netdev_flow_dump *dump = thread->dump;
    struct dp_netdev_flow *netdev_flows[FLOW_DUMP_MAX_BATCH];
    int n_flows = 0;
    int i;

    ovs_mutex_lock(&dump->mutex);
    if (!dump->status) {
        struct dpif_netdev *dpif = dpif_netdev_cast(thread->up.dpif);
        struct dp_netdev *dp = get_dp_netdev(&dpif->dpif);
        struct dp_netdev_pmd_thread *pmd = dump->cur_pmd;
        int flow_limit = MIN(max_flows, FLOW_DUMP_MAX_BATCH);

        /* First call to dump_next(), extracts the first pmd thread.
         * If there is no pmd thread, returns immediately. */
        if (!pmd) {
            pmd = dp_netdev_pmd_get_next(dp, &dump->poll_thread_pos);
            if (!pmd) {
                ovs_mutex_unlock(&dump->mutex);
                return n_flows;
            }
        }

        do {
            for (n_flows = 0; n_flows < flow_limit; n_flows++) {
                struct cmap_node *node;

                node = cmap_next_position(&pmd->flow_table, &dump->flow_pos);
                if (!node) {
                    break;
                }
                netdev_flows[n_flows] = CONTAINER_OF(node,
                                                     struct dp_netdev_flow,
                                                     node);
            }
            /* When finishing dumping the current pmd thread, moves to
             * the next. */
            if (n_flows < flow_limit) {
                memset(&dump->flow_pos, 0, sizeof dump->flow_pos);
                dp_netdev_pmd_unref(pmd);
                pmd = dp_netdev_pmd_get_next(dp, &dump->poll_thread_pos);
                if (!pmd) {
                    dump->status = EOF;
                    break;
                }
            }
            /* Keeps the reference to next caller. */
            dump->cur_pmd = pmd;

            /* If the current dump is empty, do not exit the loop, since the
             * remaining pmds could have flows to be dumped. Just dumps again
             * on the new 'pmd'. */
        } while (!n_flows);
    }
    ovs_mutex_unlock(&dump->mutex);

    for (i = 0; i < n_flows; i++) {
        struct odputil_keybuf *maskbuf = &thread->maskbuf[i];
        struct odputil_keybuf *keybuf = &thread->keybuf[i];
        struct dp_netdev_flow *netdev_flow = netdev_flows[i];
        struct dpif_flow *f = &flows[i];
        struct ofpbuf key, mask;

        ofpbuf_use_stack(&key, keybuf, sizeof *keybuf);
        ofpbuf_use_stack(&mask, maskbuf, sizeof *maskbuf);
        dp_netdev_flow_to_dpif_flow(netdev_flow, &key, &mask, f,
                                    dump->up.terse);
    }

    return n_flows;
}
static int
dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_pmd_thread *pmd;
    struct dp_packet_batch pp;

    if (dp_packet_size(execute->packet) < ETH_HEADER_LEN ||
        dp_packet_size(execute->packet) > UINT16_MAX) {
        return EINVAL;
    }

    /* Tries finding the 'pmd'. If NULL is returned, that means
     * the current thread is a non-pmd thread and should use
     * dp_netdev_get_pmd(dp, NON_PMD_CORE_ID). */
    pmd = ovsthread_getspecific(dp->per_pmd_key);
    if (!pmd) {
        pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID);
        if (!pmd) {
            return EBUSY;
        }
    }

    if (execute->probe) {
        /* If this is part of a probe, drop the packet, since executing
         * the action may actually cause spurious packets to be sent into
         * the network. */
        if (pmd->core_id == NON_PMD_CORE_ID) {
            dp_netdev_pmd_unref(pmd);
        }
        return 0;
    }

    /* If the current thread is non-pmd thread, acquires
     * the 'non_pmd_mutex'. */
    if (pmd->core_id == NON_PMD_CORE_ID) {
        ovs_mutex_lock(&dp->non_pmd_mutex);
    }

    /* The action processing expects the RSS hash to be valid, because
     * it's always initialized at the beginning of datapath processing.
     * In this case, though, 'execute->packet' may not have gone through
     * the datapath at all, it may have been generated by the upper layer
     * (OpenFlow packet-out, BFD frame, ...). */
    if (!dp_packet_rss_valid(execute->packet)) {
        dp_packet_set_rss_hash(execute->packet,
                               flow_hash_5tuple(execute->flow, 0));
    }

    dp_packet_batch_init_packet(&pp, execute->packet);
    dp_netdev_execute_actions(pmd, &pp, false, execute->flow,
                              execute->actions, execute->actions_len,
                              time_msec());

    if (pmd->core_id == NON_PMD_CORE_ID) {
        ovs_mutex_unlock(&dp->non_pmd_mutex);
        dp_netdev_pmd_unref(pmd);
    }

    return 0;
}
static void
dpif_netdev_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops)
{
    size_t i;

    for (i = 0; i < n_ops; i++) {
        struct dpif_op *op = ops[i];

        switch (op->type) {
        case DPIF_OP_FLOW_PUT:
            op->error = dpif_netdev_flow_put(dpif, &op->u.flow_put);
            break;

        case DPIF_OP_FLOW_DEL:
            op->error = dpif_netdev_flow_del(dpif, &op->u.flow_del);
            break;

        case DPIF_OP_EXECUTE:
            op->error = dpif_netdev_execute(dpif, &op->u.execute);
            break;

        case DPIF_OP_FLOW_GET:
            op->error = dpif_netdev_flow_get(dpif, &op->u.flow_get);
            break;
        }
    }
}
/* Applies datapath configuration from the database. Some of the changes are
 * actually applied in dpif_netdev_run(). */
static int
dpif_netdev_set_config(struct dpif *dpif, const struct smap *other_config)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    const char *cmask = smap_get(other_config, "pmd-cpu-mask");
    unsigned long long insert_prob =
        smap_get_ullong(other_config, "emc-insert-inv-prob",
                        DEFAULT_EM_FLOW_INSERT_INV_PROB);
    uint32_t insert_min, cur_min;

    if (!nullable_string_is_equal(dp->pmd_cmask, cmask)) {
        free(dp->pmd_cmask);
        dp->pmd_cmask = nullable_xstrdup(cmask);
        dp_netdev_request_reconfigure(dp);
    }

    atomic_read_relaxed(&dp->emc_insert_min, &cur_min);
    if (insert_prob <= UINT32_MAX) {
        insert_min = insert_prob == 0 ? 0 : UINT32_MAX / insert_prob;
    } else {
        insert_min = DEFAULT_EM_FLOW_INSERT_MIN;
        insert_prob = DEFAULT_EM_FLOW_INSERT_INV_PROB;
    }

    if (insert_min != cur_min) {
        atomic_store_relaxed(&dp->emc_insert_min, insert_min);
        if (insert_min == 0) {
            VLOG_INFO("EMC has been disabled");
        } else {
            VLOG_INFO("EMC insertion probability changed to 1/%llu (~%.2f%%)",
                      insert_prob, (100 / (float)insert_prob));
        }
    }

    return 0;
}
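
/* Editor's illustrative sketch, not part of the original source (kept under
 * #if 0 so it is never compiled): with 'emc-insert-inv-prob' set to N, the
 * code above stores insert_min = UINT32_MAX / N, so a flow is inserted into
 * the exact match cache only when a uniform 32-bit random draw falls below
 * insert_min, i.e. with probability roughly 1/N.  A hypothetical standalone
 * version of that check: */
#if 0
static bool
example_should_insert_emc(unsigned long long inv_prob)
{
    uint32_t insert_min = inv_prob == 0 ? 0 : UINT32_MAX / inv_prob;

    /* random_uint32() is uniform over the whole 32-bit range, so this
     * comparison succeeds for about one draw in 'inv_prob'. */
    return random_uint32() < insert_min;
}
#endif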
/* Parses affinity list and returns result in 'core_ids'. */
static int
parse_affinity_list(const char *affinity_list, unsigned *core_ids, int n_rxq)
{
    int i;
    char *list, *copy, *key, *value;
    int error = 0;

    for (i = 0; i < n_rxq; i++) {
        core_ids[i] = OVS_CORE_UNSPEC;
    }

    if (!affinity_list) {
        return 0;
    }

    list = copy = xstrdup(affinity_list);

    while (ofputil_parse_key_value(&list, &key, &value)) {
        int rxq_id, core_id;

        if (!str_to_int(key, 0, &rxq_id) || rxq_id < 0
            || !str_to_int(value, 0, &core_id) || core_id < 0) {
            error = EINVAL;
            break;
        }

        if (rxq_id < n_rxq) {
            core_ids[rxq_id] = core_id;
        }
    }

    free(copy);
    return error;
}
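
/* Editor's illustrative sketch, not part of the original source (kept under
 * #if 0 so it is never compiled): parse_affinity_list() consumes "rxq:core"
 * pairs, so an input such as "0:3,1:7" maps queue 0 to core 3 and queue 1 to
 * core 7, leaving the remaining queues at OVS_CORE_UNSPEC.  A hypothetical
 * caller: */
#if 0
static void
example_parse_affinity(void)
{
    unsigned core_ids[4];

    if (!parse_affinity_list("0:3,1:7", core_ids, 4)) {
        /* core_ids is now {3, 7, OVS_CORE_UNSPEC, OVS_CORE_UNSPEC}. */
    }
}
#endif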
/* Parses 'affinity_list' and applies configuration if it is valid. */
static int
dpif_netdev_port_set_rxq_affinity(struct dp_netdev_port *port,
                                  const char *affinity_list)
{
    unsigned *core_ids, i;
    int error = 0;

    core_ids = xmalloc(port->n_rxq * sizeof *core_ids);
    if (parse_affinity_list(affinity_list, core_ids, port->n_rxq)) {
        error = EINVAL;
        goto exit;
    }

    for (i = 0; i < port->n_rxq; i++) {
        port->rxqs[i].core_id = core_ids[i];
    }

exit:
    free(core_ids);
    return error;
}
/* Changes the affinity of port's rx queues. The changes are actually applied
 * in dpif_netdev_run(). */
static int
dpif_netdev_port_set_config(struct dpif *dpif, odp_port_t port_no,
                            const struct smap *cfg)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_port *port;
    int error = 0;
    const char *affinity_list = smap_get(cfg, "pmd-rxq-affinity");

    ovs_mutex_lock(&dp->port_mutex);
    error = get_port_by_number(dp, port_no, &port);
    if (error || !netdev_is_pmd(port->netdev)
        || nullable_string_is_equal(affinity_list, port->rxq_affinity_list)) {
        goto unlock;
    }

    error = dpif_netdev_port_set_rxq_affinity(port, affinity_list);
    if (error) {
        goto unlock;
    }
    free(port->rxq_affinity_list);
    port->rxq_affinity_list = nullable_xstrdup(affinity_list);

    dp_netdev_request_reconfigure(dp);
unlock:
    ovs_mutex_unlock(&dp->port_mutex);
    return error;
}
static int
dpif_netdev_queue_to_priority(const struct dpif *dpif OVS_UNUSED,
                              uint32_t queue_id, uint32_t *priority)
{
    *priority = queue_id;
    return 0;
}
/* Creates and returns a new 'struct dp_netdev_actions', whose actions are
 * a copy of the 'size' bytes of 'actions'. */
struct dp_netdev_actions *
dp_netdev_actions_create(const struct nlattr *actions, size_t size)
{
    struct dp_netdev_actions *netdev_actions;

    netdev_actions = xmalloc(sizeof *netdev_actions + size);
    memcpy(netdev_actions->actions, actions, size);
    netdev_actions->size = size;

    return netdev_actions;
}

struct dp_netdev_actions *
dp_netdev_flow_get_actions(const struct dp_netdev_flow *flow)
{
    return ovsrcu_get(struct dp_netdev_actions *, &flow->actions);
}

static void
dp_netdev_actions_free(struct dp_netdev_actions *actions)
{
    free(actions);
}
static inline unsigned long long
cycles_counter(void)
{
#ifdef DPDK_NETDEV
    return rte_get_tsc_cycles();
#else
    return 0;
#endif
}

/* Fake mutex to make sure that the calls to cycles_count_* are balanced */
extern struct ovs_mutex cycles_counter_fake_mutex;

/* Start counting cycles. Must be followed by 'cycles_count_end()' */
static inline void
cycles_count_start(struct dp_netdev_pmd_thread *pmd)
    OVS_ACQUIRES(&cycles_counter_fake_mutex)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    pmd->last_cycles = cycles_counter();
}

/* Stop counting cycles and add them to the counter 'type' */
static inline void
cycles_count_end(struct dp_netdev_pmd_thread *pmd,
                 enum pmd_cycles_counter_type type)
    OVS_RELEASES(&cycles_counter_fake_mutex)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    unsigned long long interval = cycles_counter() - pmd->last_cycles;

    non_atomic_ullong_add(&pmd->cycles.n[type], interval);
}
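
/* Editor's illustrative sketch, not part of the original source (kept under
 * #if 0 so it is never compiled): cycles_count_start() and
 * cycles_count_end() must always come in pairs; the fake mutex above lets
 * Clang's thread-safety analysis enforce that.  A hypothetical instrumented
 * section: */
#if 0
static void
example_measure(struct dp_netdev_pmd_thread *pmd)
{
    cycles_count_start(pmd);
    /* ... do the work whose cycles should be attributed ... */
    cycles_count_end(pmd, PMD_CYCLES_PROCESSING);
}
#endif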
static void
dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
                           struct netdev_rxq *rx,
                           odp_port_t port_no)
{
    struct dp_packet_batch batch;
    int error;

    dp_packet_batch_init(&batch);
    cycles_count_start(pmd);
    error = netdev_rxq_recv(rx, &batch);
    cycles_count_end(pmd, PMD_CYCLES_POLLING);
    if (!error) {
        *recirc_depth_get() = 0;

        cycles_count_start(pmd);
        dp_netdev_input(pmd, &batch, port_no);
        cycles_count_end(pmd, PMD_CYCLES_PROCESSING);
    } else if (error != EAGAIN && error != EOPNOTSUPP) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
                    netdev_rxq_get_name(rx), ovs_strerror(error));
    }
}
static struct tx_port *
tx_port_lookup(const struct hmap *hmap, odp_port_t port_no)
{
    struct tx_port *tx;

    HMAP_FOR_EACH_IN_BUCKET (tx, node, hash_port_no(port_no), hmap) {
        if (tx->port->port_no == port_no) {
            return tx;
        }
    }

    return NULL;
}
static int
port_reconfigure(struct dp_netdev_port *port)
{
    struct netdev *netdev = port->netdev;
    int i, err;

    port->need_reconfigure = false;

    /* Closes the existing 'rxq's. */
    for (i = 0; i < port->n_rxq; i++) {
        netdev_rxq_close(port->rxqs[i].rx);
        port->rxqs[i].rx = NULL;
    }
    port->n_rxq = 0;

    /* Allows 'netdev' to apply the pending configuration changes. */
    if (netdev_is_reconf_required(netdev)) {
        err = netdev_reconfigure(netdev);
        if (err && (err != EOPNOTSUPP)) {
            VLOG_ERR("Failed to set interface %s new configuration",
                     netdev_get_name(netdev));
            return err;
        }
    }
    /* If the netdev_reconfigure() above succeeds, reopens the 'rxq's. */
    port->rxqs = xrealloc(port->rxqs,
                          sizeof *port->rxqs * netdev_n_rxq(netdev));
    /* Realloc 'used' counters for tx queues. */
    free(port->txq_used);
    port->txq_used = xcalloc(netdev_n_txq(netdev), sizeof *port->txq_used);

    for (i = 0; i < netdev_n_rxq(netdev); i++) {
        port->rxqs[i].port = port;
        err = netdev_rxq_open(netdev, &port->rxqs[i].rx, i);
        if (err) {
            return err;
        }
        port->n_rxq++;
    }

    /* Parse affinity list to apply configuration for new queues. */
    dpif_netdev_port_set_rxq_affinity(port, port->rxq_affinity_list);

    return 0;
}
struct rr_numa_list {
    struct hmap numas;  /* Contains 'struct rr_numa' */
};

struct rr_numa {
    struct hmap_node node;

    int numa_id;

    /* Non isolated pmds on numa node 'numa_id' */
    struct dp_netdev_pmd_thread **pmds;
    int n_pmds;

    int cur_index;
};

static struct rr_numa *
rr_numa_list_lookup(struct rr_numa_list *rr, int numa_id)
{
    struct rr_numa *numa;

    HMAP_FOR_EACH_WITH_HASH (numa, node, hash_int(numa_id, 0), &rr->numas) {
        if (numa->numa_id == numa_id) {
            return numa;
        }
    }

    return NULL;
}

static void
rr_numa_list_populate(struct dp_netdev *dp, struct rr_numa_list *rr)
{
    struct dp_netdev_pmd_thread *pmd;
    struct rr_numa *numa;

    hmap_init(&rr->numas);

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        if (pmd->core_id == NON_PMD_CORE_ID || pmd->isolated) {
            continue;
        }

        numa = rr_numa_list_lookup(rr, pmd->numa_id);
        if (!numa) {
            numa = xzalloc(sizeof *numa);
            numa->numa_id = pmd->numa_id;
            hmap_insert(&rr->numas, &numa->node, hash_int(pmd->numa_id, 0));
        }
        numa->n_pmds++;
        numa->pmds = xrealloc(numa->pmds, numa->n_pmds * sizeof *numa->pmds);
        numa->pmds[numa->n_pmds - 1] = pmd;
    }
}

static struct dp_netdev_pmd_thread *
rr_numa_get_pmd(struct rr_numa *numa)
{
    return numa->pmds[numa->cur_index++ % numa->n_pmds];
}

static void
rr_numa_list_destroy(struct rr_numa_list *rr)
{
    struct rr_numa *numa;

    HMAP_FOR_EACH_POP (numa, node, &rr->numas) {
        free(numa->pmds);
        free(numa);
    }
    hmap_destroy(&rr->numas);
}
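
/* Editor's illustrative sketch, not part of the original source (kept under
 * #if 0 so it is never compiled): rr_numa_get_pmd() above distributes rx
 * queues by walking the per-NUMA pmd array round-robin.  With three pmds,
 * successive calls pick pmds[0], pmds[1], pmds[2], pmds[0], and so on.  The
 * same idea in a hypothetical generic form: */
#if 0
static int
example_round_robin_next(int *cur_index, int n_items)
{
    /* Post-increment then wrap, so each call returns the next slot. */
    return (*cur_index)++ % n_items;
}
#endif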
/* Assign pmds to queues. If 'pinned' is true, assign pmds to pinned
 * queues and marks the pmds as isolated. Otherwise, assign non isolated
 * pmds to unpinned queues.
 *
 * The function doesn't touch the pmd threads, it just stores the assignment
 * in the 'pmd' member of each rxq. */
static void
rxq_scheduling(struct dp_netdev *dp, bool pinned) OVS_REQUIRES(dp->port_mutex)
{
    struct dp_netdev_port *port;
    struct rr_numa_list rr;

    rr_numa_list_populate(dp, &rr);

    HMAP_FOR_EACH (port, node, &dp->ports) {
        struct rr_numa *numa;
        int numa_id;

        if (!netdev_is_pmd(port->netdev)) {
            continue;
        }

        numa_id = netdev_get_numa_id(port->netdev);
        numa = rr_numa_list_lookup(&rr, numa_id);

        for (int qid = 0; qid < port->n_rxq; qid++) {
            struct dp_netdev_rxq *q = &port->rxqs[qid];

            if (pinned && q->core_id != OVS_CORE_UNSPEC) {
                struct dp_netdev_pmd_thread *pmd;

                pmd = dp_netdev_get_pmd(dp, q->core_id);
                if (!pmd) {
                    VLOG_WARN("There is no PMD thread on core %d. Queue "
                              "%d on port \'%s\' will not be polled.",
                              q->core_id, qid, netdev_get_name(port->netdev));
                } else {
                    q->pmd = pmd;
                    pmd->isolated = true;
                    dp_netdev_pmd_unref(pmd);
                }
            } else if (!pinned && q->core_id == OVS_CORE_UNSPEC) {
                if (!numa) {
                    VLOG_WARN("There's no available (non isolated) pmd thread "
                              "on numa node %d. Queue %d on port \'%s\' will "
                              "not be polled.",
                              numa_id, qid, netdev_get_name(port->netdev));
                } else {
                    q->pmd = rr_numa_get_pmd(numa);
                }
            }
        }
    }

    rr_numa_list_destroy(&rr);
}
static void
reconfigure_pmd_threads(struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex)
{
    struct dp_netdev_pmd_thread *pmd;
    struct ovs_numa_dump *pmd_cores;
    bool changed = false;

    /* The pmd threads should be started only if there's a pmd port in the
     * datapath. If the user didn't provide any "pmd-cpu-mask", we start
     * NR_PMD_THREADS per numa node. */
    if (!has_pmd_port(dp)) {
        pmd_cores = ovs_numa_dump_n_cores_per_numa(0);
    } else if (dp->pmd_cmask && dp->pmd_cmask[0]) {
        pmd_cores = ovs_numa_dump_cores_with_cmask(dp->pmd_cmask);
    } else {
        pmd_cores = ovs_numa_dump_n_cores_per_numa(NR_PMD_THREADS);
    }

    /* Check for changed configuration */
    if (ovs_numa_dump_count(pmd_cores) != cmap_count(&dp->poll_threads) - 1) {
        changed = true;
    } else {
        CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
            if (pmd->core_id != NON_PMD_CORE_ID
                && !ovs_numa_dump_contains_core(pmd_cores,
                                                pmd->numa_id,
                                                pmd->core_id)) {
                changed = true;
                break;
            }
        }
    }

    /* Destroy the old and recreate the new pmd threads. We don't perform an
     * incremental update because we would have to adjust 'static_tx_qid'. */
    if (changed) {
        struct ovs_numa_info_core *core;
        struct ovs_numa_info_numa *numa;

        /* Do not destroy the non pmd thread. */
        dp_netdev_destroy_all_pmds(dp, false);
        FOR_EACH_CORE_ON_DUMP (core, pmd_cores) {
            struct dp_netdev_pmd_thread *pmd = xzalloc(sizeof *pmd);

            dp_netdev_configure_pmd(pmd, dp, core->core_id, core->numa_id);

            pmd->thread = ovs_thread_create("pmd", pmd_thread_main, pmd);
        }

        /* Log the number of pmd threads per numa node. */
        FOR_EACH_NUMA_ON_DUMP (numa, pmd_cores) {
            VLOG_INFO("Created %"PRIuSIZE" pmd threads on numa node %d",
                      numa->n_cores, numa->numa_id);
        }
    }

    ovs_numa_dump_destroy(pmd_cores);
}
static void
reload_affected_pmds(struct dp_netdev *dp)
{
    struct dp_netdev_pmd_thread *pmd;

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        if (pmd->need_reload) {
            dp_netdev_reload_pmd__(pmd);
            pmd->need_reload = false;
        }
    }
}
static void
pmd_remove_stale_ports(struct dp_netdev *dp,
                       struct dp_netdev_pmd_thread *pmd)
    OVS_EXCLUDED(pmd->port_mutex)
    OVS_REQUIRES(dp->port_mutex)
{
    struct rxq_poll *poll, *poll_next;
    struct tx_port *tx, *tx_next;

    ovs_mutex_lock(&pmd->port_mutex);
    HMAP_FOR_EACH_SAFE (poll, poll_next, node, &pmd->poll_list) {
        struct dp_netdev_port *port = poll->rxq->port;

        if (port->need_reconfigure
            || !hmap_contains(&dp->ports, &port->node)) {
            dp_netdev_del_rxq_from_pmd(pmd, poll);
        }
    }
    HMAP_FOR_EACH_SAFE (tx, tx_next, node, &pmd->tx_ports) {
        struct dp_netdev_port *port = tx->port;

        if (port->need_reconfigure
            || !hmap_contains(&dp->ports, &port->node)) {
            dp_netdev_del_port_tx_from_pmd(pmd, tx);
        }
    }
    ovs_mutex_unlock(&pmd->port_mutex);
}
/* Must be called each time a port is added/removed or the cmask changes.
 * This creates and destroys pmd threads, reconfigures ports, opens their
 * rxqs and assigns all rxqs/txqs to pmd threads. */
static void
reconfigure_datapath(struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex)
{
    struct dp_netdev_pmd_thread *pmd;
    struct dp_netdev_port *port;
    int wanted_txqs;

    dp->last_reconfigure_seq = seq_read(dp->reconfigure_seq);

    /* Step 1: Adjust the pmd threads based on the datapath ports, the cores
     * on the system and the user configuration. */

    reconfigure_pmd_threads(dp);

    wanted_txqs = cmap_count(&dp->poll_threads);

    /* The number of pmd threads might have changed, or a port can be new:
     * adjust the txqs. */
    HMAP_FOR_EACH (port, node, &dp->ports) {
        netdev_set_tx_multiq(port->netdev, wanted_txqs);
    }

    /* Step 2: Remove from the pmd threads ports that have been removed or
     * need reconfiguration. */

    /* Check for all the ports that need reconfiguration. We cache this in
     * 'port->need_reconfigure', because netdev_is_reconf_required() can
     * change at any time. */
    HMAP_FOR_EACH (port, node, &dp->ports) {
        if (netdev_is_reconf_required(port->netdev)) {
            port->need_reconfigure = true;
        }
    }

    /* Remove from the pmd threads all the ports that have been deleted or
     * need reconfiguration. */
    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        pmd_remove_stale_ports(dp, pmd);
    }

    /* Reload affected pmd threads. We must wait for the pmd threads before
     * reconfiguring the ports, because a port cannot be reconfigured while
     * it's being used. */
    reload_affected_pmds(dp);

    /* Step 3: Reconfigure ports. */

    /* We only reconfigure the ports that we determined above, because they're
     * not being used by any pmd thread at the moment. If a port fails to
     * reconfigure we remove it from the datapath. */
    HMAP_FOR_EACH (port, node, &dp->ports) {
        int err;

        if (!port->need_reconfigure) {
            continue;
        }

        err = port_reconfigure(port);
        if (err) {
            hmap_remove(&dp->ports, &port->node);
            seq_change(dp->port_seq);
            port_destroy(port);
        } else {
            port->dynamic_txqs = netdev_n_txq(port->netdev) < wanted_txqs;
        }
    }

    /* Step 4: Compute new rxq scheduling. We don't touch the pmd threads
     * for now, we just update the 'pmd' pointer in each rxq to point to the
     * wanted thread according to the scheduling policy. */

    /* Reset all the pmd threads to non isolated. */
    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        pmd->isolated = false;
    }

    /* Reset all the queues to unassigned */
    HMAP_FOR_EACH (port, node, &dp->ports) {
        for (int i = 0; i < port->n_rxq; i++) {
            port->rxqs[i].pmd = NULL;
        }
    }

    /* Add pinned queues and mark pmd threads isolated. */
    rxq_scheduling(dp, true);

    /* Add non-pinned queues. */
    rxq_scheduling(dp, false);

    /* Step 5: Remove queues not compliant with new scheduling. */
    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        struct rxq_poll *poll, *poll_next;

        ovs_mutex_lock(&pmd->port_mutex);
        HMAP_FOR_EACH_SAFE (poll, poll_next, node, &pmd->poll_list) {
            if (poll->rxq->pmd != pmd) {
                dp_netdev_del_rxq_from_pmd(pmd, poll);
            }
        }
        ovs_mutex_unlock(&pmd->port_mutex);
    }

    /* Reload affected pmd threads. We must wait for the pmd threads to remove
     * the old queues before readding them, otherwise a queue can be polled by
     * two threads at the same time. */
    reload_affected_pmds(dp);

    /* Step 6: Add queues from scheduling, if they're not there already. */
    HMAP_FOR_EACH (port, node, &dp->ports) {
        if (!netdev_is_pmd(port->netdev)) {
            continue;
        }

        for (int qid = 0; qid < port->n_rxq; qid++) {
            struct dp_netdev_rxq *q = &port->rxqs[qid];

            if (q->pmd) {
                ovs_mutex_lock(&q->pmd->port_mutex);
                dp_netdev_add_rxq_to_pmd(q->pmd, q);
                ovs_mutex_unlock(&q->pmd->port_mutex);
            }
        }
    }

    /* Add every port to the tx cache of every pmd thread, if it's not
     * there already and if this pmd has at least one rxq to poll. */
    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        ovs_mutex_lock(&pmd->port_mutex);
        if (hmap_count(&pmd->poll_list) || pmd->core_id == NON_PMD_CORE_ID) {
            HMAP_FOR_EACH (port, node, &dp->ports) {
                dp_netdev_add_port_tx_to_pmd(pmd, port);
            }
        }
        ovs_mutex_unlock(&pmd->port_mutex);
    }

    /* Reload affected pmd threads. */
    reload_affected_pmds(dp);
}
/* Returns true if one of the netdevs in 'dp' requires a reconfiguration */
static bool
ports_require_restart(const struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex)
{
    struct dp_netdev_port *port;

    HMAP_FOR_EACH (port, node, &dp->ports) {
        if (netdev_is_reconf_required(port->netdev)) {
            return true;
        }
    }

    return false;
}
/* Return true if needs to revalidate datapath flows. */
static bool
dpif_netdev_run(struct dpif *dpif)
{
    struct dp_netdev_port *port;
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_pmd_thread *non_pmd;
    uint64_t new_tnl_seq;

    ovs_mutex_lock(&dp->port_mutex);
    non_pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID);
    if (non_pmd) {
        ovs_mutex_lock(&dp->non_pmd_mutex);
        HMAP_FOR_EACH (port, node, &dp->ports) {
            if (!netdev_is_pmd(port->netdev)) {
                int i;

                for (i = 0; i < port->n_rxq; i++) {
                    dp_netdev_process_rxq_port(non_pmd, port->rxqs[i].rx,
                                               port->port_no);
                }
            }
        }
        dpif_netdev_xps_revalidate_pmd(non_pmd, time_msec(), false);
        ovs_mutex_unlock(&dp->non_pmd_mutex);

        dp_netdev_pmd_unref(non_pmd);
    }

    if (dp_netdev_is_reconf_required(dp) || ports_require_restart(dp)) {
        reconfigure_datapath(dp);
    }
    ovs_mutex_unlock(&dp->port_mutex);

    tnl_neigh_cache_run();
    tnl_port_map_run();
    new_tnl_seq = seq_read(tnl_conf_seq);

    if (dp->last_tnl_conf_seq != new_tnl_seq) {
        dp->last_tnl_conf_seq = new_tnl_seq;
        return true;
    }

    return false;
}
static void
dpif_netdev_wait(struct dpif *dpif)
{
    struct dp_netdev_port *port;
    struct dp_netdev *dp = get_dp_netdev(dpif);

    ovs_mutex_lock(&dp_netdev_mutex);
    ovs_mutex_lock(&dp->port_mutex);
    HMAP_FOR_EACH (port, node, &dp->ports) {
        netdev_wait_reconf_required(port->netdev);
        if (!netdev_is_pmd(port->netdev)) {
            int i;

            for (i = 0; i < port->n_rxq; i++) {
                netdev_rxq_wait(port->rxqs[i].rx);
            }
        }
    }
    ovs_mutex_unlock(&dp->port_mutex);
    ovs_mutex_unlock(&dp_netdev_mutex);
    seq_wait(tnl_conf_seq, dp->last_tnl_conf_seq);
}
static void
pmd_free_cached_ports(struct dp_netdev_pmd_thread *pmd)
{
    struct tx_port *tx_port_cached;

    /* Free all used tx queue ids. */
    dpif_netdev_xps_revalidate_pmd(pmd, 0, true);

    HMAP_FOR_EACH_POP (tx_port_cached, node, &pmd->tnl_port_cache) {
        free(tx_port_cached);
    }
    HMAP_FOR_EACH_POP (tx_port_cached, node, &pmd->send_port_cache) {
        free(tx_port_cached);
    }
}

/* Copies ports from 'pmd->tx_ports' (shared with the main thread) to the
 * thread-local 'pmd->tnl_port_cache' and 'pmd->send_port_cache'. */
static void
pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd)
    OVS_REQUIRES(pmd->port_mutex)
{
    struct tx_port *tx_port, *tx_port_cached;

    pmd_free_cached_ports(pmd);
    hmap_shrink(&pmd->send_port_cache);
    hmap_shrink(&pmd->tnl_port_cache);

    HMAP_FOR_EACH (tx_port, node, &pmd->tx_ports) {
        if (netdev_has_tunnel_push_pop(tx_port->port->netdev)) {
            tx_port_cached = xmemdup(tx_port, sizeof *tx_port_cached);
            hmap_insert(&pmd->tnl_port_cache, &tx_port_cached->node,
                        hash_port_no(tx_port_cached->port->port_no));
        }

        if (netdev_n_txq(tx_port->port->netdev)) {
            tx_port_cached = xmemdup(tx_port, sizeof *tx_port_cached);
            hmap_insert(&pmd->send_port_cache, &tx_port_cached->node,
                        hash_port_no(tx_port_cached->port->port_no));
        }
    }
}
static int
pmd_load_queues_and_ports(struct dp_netdev_pmd_thread *pmd,
                          struct polled_queue **ppoll_list)
{
    struct polled_queue *poll_list = *ppoll_list;
    struct rxq_poll *poll;
    int i;

    ovs_mutex_lock(&pmd->port_mutex);
    poll_list = xrealloc(poll_list, hmap_count(&pmd->poll_list)
                                    * sizeof *poll_list);

    i = 0;
    HMAP_FOR_EACH (poll, node, &pmd->poll_list) {
        poll_list[i].rx = poll->rxq->rx;
        poll_list[i].port_no = poll->rxq->port->port_no;
        i++;
    }

    pmd_load_cached_ports(pmd);

    ovs_mutex_unlock(&pmd->port_mutex);

    *ppoll_list = poll_list;
    return i;
}
static void *
pmd_thread_main(void *f_)
{
    struct dp_netdev_pmd_thread *pmd = f_;
    unsigned int lc = 0;
    struct polled_queue *poll_list;
    bool exiting;
    int poll_cnt;
    int i;

    poll_list = NULL;

    /* Stores the pmd thread's 'pmd' to 'per_pmd_key'. */
    ovsthread_setspecific(pmd->dp->per_pmd_key, pmd);
    ovs_numa_thread_setaffinity_core(pmd->core_id);
    dpdk_set_lcore_id(pmd->core_id);
    poll_cnt = pmd_load_queues_and_ports(pmd, &poll_list);
reload:
    emc_cache_init(&pmd->flow_cache);

    /* List port/core affinity */
    for (i = 0; i < poll_cnt; i++) {
       VLOG_DBG("Core %d processing port \'%s\' with queue-id %d\n",
                pmd->core_id, netdev_rxq_get_name(poll_list[i].rx),
                netdev_rxq_get_queue_id(poll_list[i].rx));
    }

    if (!poll_cnt) {
        while (seq_read(pmd->reload_seq) == pmd->last_reload_seq) {
            seq_wait(pmd->reload_seq, pmd->last_reload_seq);
            poll_block();
        }
        lc = UINT_MAX;
    }

    for (;;) {
        for (i = 0; i < poll_cnt; i++) {
            dp_netdev_process_rxq_port(pmd, poll_list[i].rx,
                                       poll_list[i].port_no);
        }

        if (lc++ > 1024) {
            bool reload;

            lc = 0;

            coverage_try_clear();
            dp_netdev_pmd_try_optimize(pmd);
            if (!ovsrcu_try_quiesce()) {
                emc_cache_slow_sweep(&pmd->flow_cache);
            }

            atomic_read_relaxed(&pmd->reload, &reload);
            if (reload) {
                break;
            }
        }
    }

    poll_cnt = pmd_load_queues_and_ports(pmd, &poll_list);
    exiting = latch_is_set(&pmd->exit_latch);
    /* Signal here to make sure the pmd finishes
     * reloading the updated configuration. */
    dp_netdev_pmd_reload_done(pmd);

    emc_cache_uninit(&pmd->flow_cache);

    if (!exiting) {
        goto reload;
    }

    free(poll_list);
    pmd_free_cached_ports(pmd);
    return NULL;
}
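
/* Editor's illustrative sketch, not part of the original source (kept under
 * #if 0 so it is never compiled): the main loop above notices configuration
 * changes by polling an atomic 'reload' flag and a sequence number, with no
 * locks on the fast path.  A hypothetical reduction of that handshake
 * between a control thread and a pmd worker: */
#if 0
static void
example_request_reload(struct dp_netdev_pmd_thread *pmd)
{
    /* Control side: raise the flag, then bump the seq so an idle pmd
     * blocked in seq_wait() wakes up. */
    atomic_store_relaxed(&pmd->reload, true);
    seq_change(pmd->reload_seq);
}

static void
example_worker_check(struct dp_netdev_pmd_thread *pmd)
{
    bool reload;

    /* Worker side: a relaxed read suffices, since the pmd only needs to
     * notice the request eventually, not instantly. */
    atomic_read_relaxed(&pmd->reload, &reload);
    if (reload) {
        /* ... re-read queues/ports, then call dp_netdev_pmd_reload_done(). */
    }
}
#endif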
static void
dp_netdev_disable_upcall(struct dp_netdev *dp)
    OVS_ACQUIRES(dp->upcall_rwlock)
{
    fat_rwlock_wrlock(&dp->upcall_rwlock);
}

static void
dpif_netdev_disable_upcall(struct dpif *dpif)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp_netdev_disable_upcall(dp);
}

static void
dp_netdev_enable_upcall(struct dp_netdev *dp)
    OVS_RELEASES(dp->upcall_rwlock)
{
    fat_rwlock_unlock(&dp->upcall_rwlock);
}

static void
dpif_netdev_enable_upcall(struct dpif *dpif)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp_netdev_enable_upcall(dp);
}

static void
dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd)
{
    ovs_mutex_lock(&pmd->cond_mutex);
    atomic_store_relaxed(&pmd->reload, false);
    pmd->last_reload_seq = seq_read(pmd->reload_seq);
    xpthread_cond_signal(&pmd->cond);
    ovs_mutex_unlock(&pmd->cond_mutex);
}
/* Finds and refs the dp_netdev_pmd_thread on core 'core_id'. Returns
 * the pointer if it succeeds, otherwise, NULL (it can return NULL even if
 * 'core_id' is NON_PMD_CORE_ID).
 *
 * Caller must unref the returned reference. */
static struct dp_netdev_pmd_thread *
dp_netdev_get_pmd(struct dp_netdev *dp, unsigned core_id)
{
    struct dp_netdev_pmd_thread *pmd;
    const struct cmap_node *pnode;

    pnode = cmap_find(&dp->poll_threads, hash_int(core_id, 0));
    if (!pnode) {
        return NULL;
    }
    pmd = CONTAINER_OF(pnode, struct dp_netdev_pmd_thread, node);

    return dp_netdev_pmd_try_ref(pmd) ? pmd : NULL;
}
/* Sets the 'struct dp_netdev_pmd_thread' for non-pmd threads. */
static void
dp_netdev_set_nonpmd(struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex)
{
    struct dp_netdev_pmd_thread *non_pmd;

    non_pmd = xzalloc(sizeof *non_pmd);
    dp_netdev_configure_pmd(non_pmd, dp, NON_PMD_CORE_ID, OVS_NUMA_UNSPEC);
}

/* Caller must have valid pointer to 'pmd'. */
static bool
dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd)
{
    return ovs_refcount_try_ref_rcu(&pmd->ref_cnt);
}

static void
dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd)
{
    if (pmd && ovs_refcount_unref(&pmd->ref_cnt) == 1) {
        ovsrcu_postpone(dp_netdev_destroy_pmd, pmd);
    }
}
/* Given cmap position 'pos', tries to ref the next node. If try_ref()
 * fails, keeps checking for next node until reaching the end of cmap.
 *
 * Caller must unref the returned reference. */
static struct dp_netdev_pmd_thread *
dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos)
{
    struct dp_netdev_pmd_thread *next;

    do {
        struct cmap_node *node;

        node = cmap_next_position(&dp->poll_threads, pos);
        next = node ? CONTAINER_OF(node, struct dp_netdev_pmd_thread, node)
                    : NULL;
    } while (next && !dp_netdev_pmd_try_ref(next));

    return next;
}
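
/* Editor's illustrative sketch, not part of the original source (kept under
 * #if 0 so it is never compiled): dp_netdev_pmd_get_next() skips entries
 * whose refcount already hit zero, the usual pattern for iterating an
 * RCU-protected cmap while taking references.  A hypothetical caller
 * visiting every pmd: */
#if 0
static void
example_visit_all_pmds(struct dp_netdev *dp)
{
    struct cmap_position pos = {0};
    struct dp_netdev_pmd_thread *pmd;

    while ((pmd = dp_netdev_pmd_get_next(dp, &pos))) {
        /* ... use 'pmd' ... */
        dp_netdev_pmd_unref(pmd);   /* Caller must drop the reference. */
    }
}
#endif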
/* Configures the 'pmd' based on the input argument. */
static void
dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
                        unsigned core_id, int numa_id)
{
    pmd->dp = dp;
    pmd->core_id = core_id;
    pmd->numa_id = numa_id;
    pmd->need_reload = false;

    *CONST_CAST(int *, &pmd->static_tx_qid) = cmap_count(&dp->poll_threads);

    ovs_refcount_init(&pmd->ref_cnt);
    latch_init(&pmd->exit_latch);
    pmd->reload_seq = seq_create();
    pmd->last_reload_seq = seq_read(pmd->reload_seq);
    atomic_init(&pmd->reload, false);
    xpthread_cond_init(&pmd->cond, NULL);
    ovs_mutex_init(&pmd->cond_mutex);
    ovs_mutex_init(&pmd->flow_mutex);
    ovs_mutex_init(&pmd->port_mutex);
    cmap_init(&pmd->flow_table);
    cmap_init(&pmd->classifiers);
    pmd->next_optimization = time_msec() + DPCLS_OPTIMIZATION_INTERVAL;
    hmap_init(&pmd->poll_list);
    hmap_init(&pmd->tx_ports);
    hmap_init(&pmd->tnl_port_cache);
    hmap_init(&pmd->send_port_cache);
    /* init the 'flow_cache' since there is no
     * actual thread created for NON_PMD_CORE_ID. */
    if (core_id == NON_PMD_CORE_ID) {
        emc_cache_init(&pmd->flow_cache);
    }
    cmap_insert(&dp->poll_threads, CONST_CAST(struct cmap_node *, &pmd->node),
                hash_int(core_id, 0));
}
static void
dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd)
{
    struct dpcls *cls;

    dp_netdev_pmd_flow_flush(pmd);
    hmap_destroy(&pmd->send_port_cache);
    hmap_destroy(&pmd->tnl_port_cache);
    hmap_destroy(&pmd->tx_ports);
    hmap_destroy(&pmd->poll_list);
    /* All flows (including their dpcls_rules) have been deleted already */
    CMAP_FOR_EACH (cls, node, &pmd->classifiers) {
        dpcls_destroy(cls);
        ovsrcu_postpone(free, cls);
    }
    cmap_destroy(&pmd->classifiers);
    cmap_destroy(&pmd->flow_table);
    ovs_mutex_destroy(&pmd->flow_mutex);
    latch_destroy(&pmd->exit_latch);
    seq_destroy(pmd->reload_seq);
    xpthread_cond_destroy(&pmd->cond);
    ovs_mutex_destroy(&pmd->cond_mutex);
    ovs_mutex_destroy(&pmd->port_mutex);
    free(pmd);
}
/* Stops the pmd thread, removes it from the 'dp->poll_threads',
 * and unrefs the struct. */
static void
dp_netdev_del_pmd(struct dp_netdev *dp, struct dp_netdev_pmd_thread *pmd)
{
    /* NON_PMD_CORE_ID doesn't have a thread, so we don't have to synchronize,
     * but extra cleanup is necessary */
    if (pmd->core_id == NON_PMD_CORE_ID) {
        ovs_mutex_lock(&dp->non_pmd_mutex);
        emc_cache_uninit(&pmd->flow_cache);
        pmd_free_cached_ports(pmd);
        ovs_mutex_unlock(&dp->non_pmd_mutex);
    } else {
        latch_set(&pmd->exit_latch);
        dp_netdev_reload_pmd__(pmd);
        xpthread_join(pmd->thread, NULL);
    }

    dp_netdev_pmd_clear_ports(pmd);

    /* Purges the 'pmd''s flows after stopping the thread, but before
     * destroying the flows, so that the flow stats can be collected. */
    if (dp->dp_purge_cb) {
        dp->dp_purge_cb(dp->dp_purge_aux, pmd->core_id);
    }
    cmap_remove(&pmd->dp->poll_threads, &pmd->node, hash_int(pmd->core_id, 0));
    dp_netdev_pmd_unref(pmd);
}
/* Destroys all pmd threads. If 'non_pmd' is true it also destroys the non pmd
 * thread. */
static void
dp_netdev_destroy_all_pmds(struct dp_netdev *dp, bool non_pmd)
{
    struct dp_netdev_pmd_thread *pmd;
    struct dp_netdev_pmd_thread **pmd_list;
    size_t k = 0, n_pmds;

    n_pmds = cmap_count(&dp->poll_threads);
    pmd_list = xcalloc(n_pmds, sizeof *pmd_list);

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        if (!non_pmd && pmd->core_id == NON_PMD_CORE_ID) {
            continue;
        }
        /* We cannot call dp_netdev_del_pmd(), since it alters
         * 'dp->poll_threads' (while we're iterating it) and it
         * might quiesce. */
        ovs_assert(k < n_pmds);
        pmd_list[k++] = pmd;
    }

    for (size_t i = 0; i < k; i++) {
        dp_netdev_del_pmd(dp, pmd_list[i]);
    }
    free(pmd_list);
}
/* Deletes all rx queues from pmd->poll_list and all the ports from
 * pmd->tx_ports. */
static void
dp_netdev_pmd_clear_ports(struct dp_netdev_pmd_thread *pmd)
{
    struct rxq_poll *poll;
    struct tx_port *port;

    ovs_mutex_lock(&pmd->port_mutex);
    HMAP_FOR_EACH_POP (poll, node, &pmd->poll_list) {
        free(poll);
    }
    HMAP_FOR_EACH_POP (port, node, &pmd->tx_ports) {
        free(port);
    }
    ovs_mutex_unlock(&pmd->port_mutex);
}
/* Adds rx queue to poll_list of PMD thread, if it's not there already. */
static void
dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd,
                         struct dp_netdev_rxq *rxq)
    OVS_REQUIRES(pmd->port_mutex)
{
    int qid = netdev_rxq_get_queue_id(rxq->rx);
    uint32_t hash = hash_2words(odp_to_u32(rxq->port->port_no), qid);
    struct rxq_poll *poll;

    HMAP_FOR_EACH_WITH_HASH (poll, node, hash, &pmd->poll_list) {
        if (poll->rxq == rxq) {
            /* 'rxq' is already polled by this thread. Do nothing. */
            return;
        }
    }

    poll = xmalloc(sizeof *poll);
    poll->rxq = rxq;
    hmap_insert(&pmd->poll_list, &poll->node, hash);

    pmd->need_reload = true;
}

/* Delete 'poll' from poll_list of PMD thread. */
static void
dp_netdev_del_rxq_from_pmd(struct dp_netdev_pmd_thread *pmd,
                           struct rxq_poll *poll)
    OVS_REQUIRES(pmd->port_mutex)
{
    hmap_remove(&pmd->poll_list, &poll->node);
    free(poll);

    pmd->need_reload = true;
}
/* Add 'port' to the tx port cache of 'pmd', which must be reloaded for the
 * changes to take effect. */
static void
dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread *pmd,
                             struct dp_netdev_port *port)
    OVS_REQUIRES(pmd->port_mutex)
{
    struct tx_port *tx;

    tx = tx_port_lookup(&pmd->tx_ports, port->port_no);
    if (tx) {
        /* 'port' is already on this thread tx cache. Do nothing. */
        return;
    }

    tx = xzalloc(sizeof *tx);

    tx->port = port;
    tx->qid = -1;

    hmap_insert(&pmd->tx_ports, &tx->node, hash_port_no(tx->port->port_no));
    pmd->need_reload = true;
}

/* Del 'tx' from the tx port cache of 'pmd', which must be reloaded for the
 * changes to take effect. */
static void
dp_netdev_del_port_tx_from_pmd(struct dp_netdev_pmd_thread *pmd,
                               struct tx_port *tx)
    OVS_REQUIRES(pmd->port_mutex)
{
    hmap_remove(&pmd->tx_ports, &tx->node);
    free(tx);

    pmd->need_reload = true;
}
static char *
dpif_netdev_get_datapath_version(void)
{
    return xstrdup("<built-in>");
}
static void
dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow, int cnt, int size,
                    uint16_t tcp_flags, long long now)
{
    uint16_t flags;

    atomic_store_relaxed(&netdev_flow->stats.used, now);
    non_atomic_ullong_add(&netdev_flow->stats.packet_count, cnt);
    non_atomic_ullong_add(&netdev_flow->stats.byte_count, size);
    atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags);
    flags |= tcp_flags;
    atomic_store_relaxed(&netdev_flow->stats.tcp_flags, flags);
}

static void
dp_netdev_count_packet(struct dp_netdev_pmd_thread *pmd,
                       enum dp_stat_type type, int cnt)
{
    non_atomic_ullong_add(&pmd->stats.n[type], cnt);
}
static int
dp_netdev_upcall(struct dp_netdev_pmd_thread *pmd, struct dp_packet *packet_,
                 struct flow *flow, struct flow_wildcards *wc, ovs_u128 *ufid,
                 enum dpif_upcall_type type, const struct nlattr *userdata,
                 struct ofpbuf *actions, struct ofpbuf *put_actions)
{
    struct dp_netdev *dp = pmd->dp;

    if (OVS_UNLIKELY(!dp->upcall_cb)) {
        return ENODEV;
    }

    if (OVS_UNLIKELY(!VLOG_DROP_DBG(&upcall_rl))) {
        struct ds ds = DS_EMPTY_INITIALIZER;
        struct ofpbuf key;
        char *packet_str;

        struct odp_flow_key_parms odp_parms = {
            .flow = flow,
            .mask = wc ? &wc->masks : NULL,
            .support = dp_netdev_support,
        };

        ofpbuf_init(&key, 0);
        odp_flow_key_from_flow(&odp_parms, &key);
        packet_str = ofp_packet_to_string(dp_packet_data(packet_),
                                          dp_packet_size(packet_));

        odp_flow_key_format(key.data, key.size, &ds);

        VLOG_DBG("%s: %s upcall:\n%s\n%s", dp->name,
                 dpif_upcall_type_to_string(type), ds_cstr(&ds), packet_str);

        ofpbuf_uninit(&key);
        free(packet_str);
        ds_destroy(&ds);
    }

    return dp->upcall_cb(packet_, flow, ufid, pmd->core_id, type, userdata,
                         actions, wc, put_actions, dp->upcall_aux);
}
static inline uint32_t
dpif_netdev_packet_get_rss_hash(struct dp_packet *packet,
                                const struct miniflow *mf)
{
    uint32_t hash, recirc_depth;

    if (OVS_LIKELY(dp_packet_rss_valid(packet))) {
        hash = dp_packet_get_rss_hash(packet);
    } else {
        hash = miniflow_hash_5tuple(mf, 0);
        dp_packet_set_rss_hash(packet, hash);
    }

    /* The RSS hash must account for the recirculation depth to avoid
     * collisions in the exact match cache */
    recirc_depth = *recirc_depth_get_unsafe();
    if (OVS_UNLIKELY(recirc_depth)) {
        hash = hash_finish(hash, recirc_depth);
        dp_packet_set_rss_hash(packet, hash);
    }
    return hash;
}
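
/* Editor's illustrative sketch, not part of the original source (kept under
 * #if 0 so it is never compiled): folding the recirculation depth into the
 * RSS hash, as done above, keeps a packet's exact-match-cache entry before
 * recirculation distinct from its entry afterwards, even though the 5-tuple
 * is unchanged.  A hypothetical reduction: */
#if 0
static uint32_t
example_recirc_hash(uint32_t rss_hash, uint32_t recirc_depth)
{
    /* Same hash at depth 0; a different, deterministic hash afterwards. */
    return recirc_depth ? hash_finish(rss_hash, recirc_depth) : rss_hash;
}
#endif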
struct packet_batch_per_flow {
    unsigned int byte_count;
    uint16_t tcp_flags;
    struct dp_netdev_flow *flow;

    struct dp_packet_batch array;
};

static inline void
packet_batch_per_flow_update(struct packet_batch_per_flow *batch,
                             struct dp_packet *packet,
                             const struct miniflow *mf)
{
    batch->byte_count += dp_packet_size(packet);
    batch->tcp_flags |= miniflow_get_tcp_flags(mf);
    batch->array.packets[batch->array.count++] = packet;
}

static inline void
packet_batch_per_flow_init(struct packet_batch_per_flow *batch,
                           struct dp_netdev_flow *flow)
{
    flow->batch = batch;

    batch->flow = flow;
    dp_packet_batch_init(&batch->array);
    batch->byte_count = 0;
    batch->tcp_flags = 0;
}

static inline void
packet_batch_per_flow_execute(struct packet_batch_per_flow *batch,
                              struct dp_netdev_pmd_thread *pmd,
                              long long now)
{
    struct dp_netdev_actions *actions;
    struct dp_netdev_flow *flow = batch->flow;

    dp_netdev_flow_used(flow, batch->array.count, batch->byte_count,
                        batch->tcp_flags, now);

    actions = dp_netdev_flow_get_actions(flow);

    dp_netdev_execute_actions(pmd, &batch->array, true, &flow->flow,
                              actions->actions, actions->size, now);
}

static inline void
dp_netdev_queue_batches(struct dp_packet *pkt,
                        struct dp_netdev_flow *flow, const struct miniflow *mf,
                        struct packet_batch_per_flow *batches,
                        size_t *n_batches)
{
    struct packet_batch_per_flow *batch = flow->batch;

    if (OVS_UNLIKELY(!batch)) {
        batch = &batches[(*n_batches)++];
        packet_batch_per_flow_init(batch, flow);
    }

    packet_batch_per_flow_update(batch, pkt, mf);
}
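
/* Editor's illustrative sketch, not part of the original source (kept under
 * #if 0 so it is never compiled): grouping packets by matched flow means a
 * flow's actions run once per batch rather than once per packet.  If 32
 * received packets match only two flows, dp_netdev_queue_batches() builds
 * two batches and the hypothetical loop below makes two action calls, not
 * 32: */
#if 0
static void
example_execute_batches(struct dp_netdev_pmd_thread *pmd,
                        struct packet_batch_per_flow *batches,
                        size_t n_batches, long long now)
{
    /* One actions invocation per flow batch, however many packets each
     * batch holds. */
    for (size_t i = 0; i < n_batches; i++) {
        packet_batch_per_flow_execute(&batches[i], pmd, now);
    }
}
#endif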
/* Try to process all ('cnt') the 'packets' using only the exact match cache
 * 'pmd->flow_cache'. If a flow is not found for a packet 'packets[i]', the
 * miniflow is copied into 'keys' and the packet pointer is moved at the
 * beginning of the 'packets' array.
 *
 * The function returns the number of packets that need to be processed in the
 * 'packets' array (they have been moved to the beginning of the vector).
 *
 * If 'md_is_valid' is false, the metadata in 'packets' is not valid and must
 * be initialized by this function using 'port_no'.
 */
static inline size_t
emc_processing(struct dp_netdev_pmd_thread *pmd,
               struct dp_packet_batch *packets_,
               struct netdev_flow_key *keys,
               struct packet_batch_per_flow batches[], size_t *n_batches,
               bool md_is_valid, odp_port_t port_no)
{
    struct emc_cache *flow_cache = &pmd->flow_cache;
    struct netdev_flow_key *key = &keys[0];
    size_t n_missed = 0, n_dropped = 0;
    struct dp_packet *packet;
    const size_t size = dp_packet_batch_size(packets_);
    int i;

    DP_PACKET_BATCH_REFILL_FOR_EACH (i, size, packet, packets_) {
        struct dp_netdev_flow *flow;

        if (OVS_UNLIKELY(dp_packet_size(packet) < ETH_HEADER_LEN)) {
            dp_packet_delete(packet);
            n_dropped++;
            continue;
        }

        if (i != size - 1) {
            struct dp_packet **packets = packets_->packets;
            /* Prefetch next packet data and metadata. */
            OVS_PREFETCH(dp_packet_data(packets[i+1]));
            pkt_metadata_prefetch_init(&packets[i+1]->md);
        }

        if (!md_is_valid) {
            pkt_metadata_init(&packet->md, port_no);
        }
        miniflow_extract(packet, &key->mf);
        key->len = 0; /* Not computed yet. */
        key->hash = dpif_netdev_packet_get_rss_hash(packet, &key->mf);

        flow = emc_lookup(flow_cache, key);
        if (OVS_LIKELY(flow)) {
            dp_netdev_queue_batches(packet, flow, &key->mf, batches,
                                    n_batches);
        } else {
            /* Exact match cache missed. Group missed packets together at
             * the beginning of the 'packets' array. */
            dp_packet_batch_refill(packets_, packet, i);
            /* 'key[n_missed]' contains the key of the current packet and it
             * must be returned to the caller. The next key should be
             * extracted to 'keys[n_missed + 1]'. */
            key = &keys[++n_missed];
        }
    }

    dp_netdev_count_packet(pmd, DP_STAT_EXACT_HIT,
                           size - n_dropped - n_missed);

    return dp_packet_batch_size(packets_);
}
static inline void
handle_packet_upcall(struct dp_netdev_pmd_thread *pmd,
                     struct dp_packet *packet,
                     const struct netdev_flow_key *key,
                     struct ofpbuf *actions, struct ofpbuf *put_actions,
                     int *lost_cnt, long long now)
{
    struct ofpbuf *add_actions;
    struct dp_packet_batch b;
    struct match match;
    ovs_u128 ufid;
    int error;

    match.tun_md.valid = false;
    miniflow_expand(&key->mf, &match.flow);

    ofpbuf_clear(actions);
    ofpbuf_clear(put_actions);

    dpif_flow_hash(pmd->dp->dpif, &match.flow, sizeof match.flow, &ufid);
    error = dp_netdev_upcall(pmd, packet, &match.flow, &match.wc,
                             &ufid, DPIF_UC_MISS, NULL, actions,
                             put_actions);
    if (OVS_UNLIKELY(error && error != ENOSPC)) {
        dp_packet_delete(packet);
        (*lost_cnt)++;
        return;
    }

    /* The Netlink encoding of datapath flow keys cannot express
     * wildcarding the presence of a VLAN tag. Instead, a missing VLAN
     * tag is interpreted as exact match on the fact that there is no
     * VLAN. Unless we refactor a lot of code that translates between
     * Netlink and struct flow representations, we have to do the same
     * here. */
    if (!match.wc.masks.vlan_tci) {
        match.wc.masks.vlan_tci = htons(0xffff);
    }

    /* We can't allow the packet batching in the next loop to execute
     * the actions. Otherwise, if there are any slow path actions,
     * we'll send the packet up twice. */
    dp_packet_batch_init_packet(&b, packet);
    dp_netdev_execute_actions(pmd, &b, true, &match.flow,
                              actions->data, actions->size, now);

    add_actions = put_actions->size ? put_actions : actions;
    if (OVS_LIKELY(error != ENOSPC)) {
        struct dp_netdev_flow *netdev_flow;

        /* XXX: There's a race window where a flow covering this packet
         * could have already been installed since we last did the flow
         * lookup before upcall. This could be solved by moving the
         * mutex lock outside the loop, but that's an awful long time
         * to be locking everyone out of making flow installs. If we
         * move to a per-core classifier, it would be reasonable. */
        ovs_mutex_lock(&pmd->flow_mutex);
        netdev_flow = dp_netdev_pmd_lookup_flow(pmd, key, NULL);
        if (OVS_LIKELY(!netdev_flow)) {
            netdev_flow = dp_netdev_flow_add(pmd, &match, &ufid,
                                             add_actions->data,
                                             add_actions->size);
        }
        ovs_mutex_unlock(&pmd->flow_mutex);
        emc_probabilistic_insert(pmd, key, netdev_flow);
    }
}
static inline void
fast_path_processing(struct dp_netdev_pmd_thread *pmd,
                     struct dp_packet_batch *packets_,
                     struct netdev_flow_key *keys,
                     struct packet_batch_per_flow batches[], size_t *n_batches,
                     odp_port_t in_port,
                     long long now)
{
    int cnt = packets_->count;
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = cnt;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct dp_packet **packets = packets_->packets;
    struct dpcls *cls;
    struct dpcls_rule *rules[PKT_ARRAY_SIZE];
    struct dp_netdev *dp = pmd->dp;
    int miss_cnt = 0, lost_cnt = 0;
    int lookup_cnt = 0, add_lookup_cnt;
    bool any_miss;
    size_t i;

    for (i = 0; i < cnt; i++) {
        /* Key length is needed in all the cases, hash computed on demand. */
        keys[i].len = netdev_flow_key_size(miniflow_n_values(&keys[i].mf));
    }
    /* Get the classifier for the in_port */
    cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port);
    if (OVS_LIKELY(cls)) {
        any_miss = !dpcls_lookup(cls, keys, rules, cnt, &lookup_cnt);
    } else {
        any_miss = true;
        memset(rules, 0, sizeof(rules));
    }
    if (OVS_UNLIKELY(any_miss) && !fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
        uint64_t actions_stub[512 / 8], slow_stub[512 / 8];
        struct ofpbuf actions, put_actions;

        ofpbuf_use_stub(&actions, actions_stub, sizeof actions_stub);
        ofpbuf_use_stub(&put_actions, slow_stub, sizeof slow_stub);

        for (i = 0; i < cnt; i++) {
            struct dp_netdev_flow *netdev_flow;

            if (OVS_LIKELY(rules[i])) {
                continue;
            }

            /* It's possible that an earlier slow path execution installed
             * a rule covering this flow.  In this case, it's a lot cheaper
             * to catch it here than execute a miss. */
            netdev_flow = dp_netdev_pmd_lookup_flow(pmd, &keys[i],
                                                    &add_lookup_cnt);
            if (netdev_flow) {
                lookup_cnt += add_lookup_cnt;
                rules[i] = &netdev_flow->cr;
                continue;
            }

            miss_cnt++;
            handle_packet_upcall(pmd, packets[i], &keys[i], &actions,
                                 &put_actions, &lost_cnt, now);
        }

        ofpbuf_uninit(&actions);
        ofpbuf_uninit(&put_actions);
        fat_rwlock_unlock(&dp->upcall_rwlock);
    } else if (OVS_UNLIKELY(any_miss)) {
        for (i = 0; i < cnt; i++) {
            if (OVS_UNLIKELY(!rules[i])) {
                dp_packet_delete(packets[i]);
                lost_cnt++;
                miss_cnt++;
            }
        }
    }

    for (i = 0; i < cnt; i++) {
        struct dp_packet *packet = packets[i];
        struct dp_netdev_flow *flow;

        if (OVS_UNLIKELY(!rules[i])) {
            continue;
        }

        flow = dp_netdev_flow_cast(rules[i]);

        emc_probabilistic_insert(pmd, &keys[i], flow);
        dp_netdev_queue_batches(packet, flow, &keys[i].mf, batches, n_batches);
    }

    dp_netdev_count_packet(pmd, DP_STAT_MASKED_HIT, cnt - miss_cnt);
    dp_netdev_count_packet(pmd, DP_STAT_LOOKUP_HIT, lookup_cnt);
    dp_netdev_count_packet(pmd, DP_STAT_MISS, miss_cnt);
    dp_netdev_count_packet(pmd, DP_STAT_LOST, lost_cnt);
}
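/* For illustration, consider a batch of 32 packets where 30 hit the
 * classifier directly, one is resolved by the dp_netdev_pmd_lookup_flow()
 * retry (a flow installed by an earlier upcall in this same loop), and one
 * needs an upcall that fails:
 *
 *     DP_STAT_MASKED_HIT  += 31         (cnt - miss_cnt)
 *     DP_STAT_LOOKUP_HIT  += lookup_cnt (subtable probes behind the hits)
 *     DP_STAT_MISS        += 1
 *     DP_STAT_LOST        += 1          (the failed upcall dropped it)
 */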
/* Packets enter the datapath from a port (or from recirculation) here.
 *
 * For performance reasons a caller may choose not to initialize the metadata
 * in 'packets': in this case 'md_is_valid' is false and this function needs
 * to initialize it using 'port_no'.  If the metadata in 'packets' is already
 * valid, 'md_is_valid' must be true and 'port_no' will be ignored. */
static void
dp_netdev_input__(struct dp_netdev_pmd_thread *pmd,
                  struct dp_packet_batch *packets,
                  bool md_is_valid, odp_port_t port_no)
{
    int cnt = packets->count;
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = cnt;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    OVS_ALIGNED_VAR(CACHE_LINE_SIZE)
        struct netdev_flow_key keys[PKT_ARRAY_SIZE];
    struct packet_batch_per_flow batches[PKT_ARRAY_SIZE];
    long long now = time_msec();
    size_t n_batches, i;
    odp_port_t in_port;

    n_batches = 0;
    emc_processing(pmd, packets, keys, batches, &n_batches,
                   md_is_valid, port_no);
    if (!dp_packet_batch_is_empty(packets)) {
        /* Get ingress port from first packet's metadata. */
        in_port = packets->packets[0]->md.in_port.odp_port;
        fast_path_processing(pmd, packets, keys, batches, &n_batches,
                             in_port, now);
    }

    /* All the flow batches need to be reset before any call to
     * packet_batch_per_flow_execute() as it could potentially trigger
     * recirculation.  When a packet matching flow 'j' happens to be
     * recirculated, the nested call to dp_netdev_input__() could potentially
     * classify the packet as matching another flow - say 'k'.  It could
     * happen that in the previous call to dp_netdev_input__() that same flow
     * 'k' had already its own batches[k] still waiting to be served.  So if
     * its 'batch' member is not reset, the recirculated packet would be
     * wrongly appended to batches[k] of the 1st call to
     * dp_netdev_input__(). */
    for (i = 0; i < n_batches; i++) {
        batches[i].flow->batch = NULL;
    }

    for (i = 0; i < n_batches; i++) {
        packet_batch_per_flow_execute(&batches[i], pmd, now);
    }
}
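/* Concrete illustration of the reset above: suppose batches[0] holds packets
 * for flow A whose actions include a recirc.  Executing batches[0] re-enters
 * dp_netdev_input__(), which may classify the recirculated packet as flow B.
 * If flow B's 'batch' pointer still referenced batches[1] of this (outer)
 * call, dp_netdev_queue_batches() would append the packet to a batch the
 * outer loop is already iterating over, instead of a fresh batch belonging
 * to the nested call.  NULLing every flow->batch first avoids that. */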
static void
dp_netdev_input(struct dp_netdev_pmd_thread *pmd,
                struct dp_packet_batch *packets,
                odp_port_t port_no)
{
    dp_netdev_input__(pmd, packets, false, port_no);
}

static void
dp_netdev_recirculate(struct dp_netdev_pmd_thread *pmd,
                      struct dp_packet_batch *packets)
{
    dp_netdev_input__(pmd, packets, true, 0);
}
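/* Usage sketch, not taken from this file: a receive loop that polls an rxq
 * would hand the batch straight in and let dp_netdev_input() initialize the
 * metadata from the ingress port:
 *
 *     struct dp_packet_batch batch;
 *     dp_packet_batch_init(&batch);
 *     ... fill 'batch' from the port's rx queue ...
 *     dp_netdev_input(pmd, &batch, port_no);
 *
 * Actions that re-inject packets (tunnel push/pop, recirc) instead call
 * dp_netdev_recirculate(), because the metadata is already valid and must
 * not be overwritten. */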
struct dp_netdev_execute_aux {
    struct dp_netdev_pmd_thread *pmd;
    long long now;
    const struct flow *flow;
};
static void
dpif_netdev_register_dp_purge_cb(struct dpif *dpif, dp_purge_callback *cb,
                                 void *aux)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp->dp_purge_aux = aux;
    dp->dp_purge_cb = cb;
}

static void
dpif_netdev_register_upcall_cb(struct dpif *dpif, upcall_callback *cb,
                               void *aux)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp->upcall_aux = aux;
    dp->upcall_cb = cb;
}
static void
dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
                               long long now, bool purge)
{
    struct tx_port *tx;
    struct dp_netdev_port *port;
    long long interval;

    HMAP_FOR_EACH (tx, node, &pmd->send_port_cache) {
        if (!tx->port->dynamic_txqs) {
            continue;
        }
        interval = now - tx->last_used;
        if (tx->qid >= 0 && (purge || interval >= XPS_TIMEOUT_MS)) {
            port = tx->port;
            ovs_mutex_lock(&port->txq_used_mutex);
            port->txq_used[tx->qid]--;
            ovs_mutex_unlock(&port->txq_used_mutex);
            tx->qid = -1;
        }
    }
}
static int
dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread *pmd,
                           struct tx_port *tx, long long now)
{
    struct dp_netdev_port *port;
    long long interval;
    int i, min_cnt, min_qid;

    if (OVS_UNLIKELY(!now)) {
        now = time_msec();
    }

    interval = now - tx->last_used;
    tx->last_used = now;

    if (OVS_LIKELY(tx->qid >= 0 && interval < XPS_TIMEOUT_MS)) {
        return tx->qid;
    }

    port = tx->port;

    ovs_mutex_lock(&port->txq_used_mutex);
    if (tx->qid >= 0) {
        port->txq_used[tx->qid]--;
        tx->qid = -1;
    }

    min_cnt = -1;
    min_qid = 0;
    for (i = 0; i < netdev_n_txq(port->netdev); i++) {
        if (port->txq_used[i] < min_cnt || min_cnt == -1) {
            min_cnt = port->txq_used[i];
            min_qid = i;
        }
    }

    port->txq_used[min_qid]++;
    tx->qid = min_qid;

    ovs_mutex_unlock(&port->txq_used_mutex);

    dpif_netdev_xps_revalidate_pmd(pmd, now, false);

    VLOG_DBG("Core %d: New TX queue ID %d for port \'%s\'.",
             pmd->core_id, tx->qid, netdev_get_name(tx->port->netdev));

    return min_qid;
}
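/* For illustration: with 3 txqs and txq_used = {2, 0, 1}, the scan above
 * selects min_qid = 1, bumps txq_used to {2, 1, 1}, and caches qid 1 in
 * 'tx' until it sits idle for XPS_TIMEOUT_MS, after which the next send
 * re-runs the election.  This spreads concurrent PMD threads across the
 * available hardware queues without any global assignment step. */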
static struct tx_port *
pmd_tnl_port_cache_lookup(const struct dp_netdev_pmd_thread *pmd,
                          odp_port_t port_no)
{
    return tx_port_lookup(&pmd->tnl_port_cache, port_no);
}

static struct tx_port *
pmd_send_port_cache_lookup(const struct dp_netdev_pmd_thread *pmd,
                           odp_port_t port_no)
{
    return tx_port_lookup(&pmd->send_port_cache, port_no);
}
static int
push_tnl_action(const struct dp_netdev_pmd_thread *pmd,
                const struct nlattr *attr,
                struct dp_packet_batch *batch)
{
    struct tx_port *tun_port;
    const struct ovs_action_push_tnl *data;
    int err;

    data = nl_attr_get(attr);

    tun_port = pmd_tnl_port_cache_lookup(pmd, u32_to_odp(data->tnl_port));
    if (!tun_port) {
        err = -EINVAL;
        goto error;
    }
    err = netdev_push_header(tun_port->port->netdev, batch, data);
    if (!err) {
        return 0;
    }
error:
    dp_packet_delete_batch(batch, true);
    return err;
}
static void
dp_execute_userspace_action(struct dp_netdev_pmd_thread *pmd,
                            struct dp_packet *packet, bool may_steal,
                            struct flow *flow, ovs_u128 *ufid,
                            struct ofpbuf *actions,
                            const struct nlattr *userdata, long long now)
{
    struct dp_packet_batch b;
    int error;

    ofpbuf_clear(actions);

    error = dp_netdev_upcall(pmd, packet, flow, NULL, ufid,
                             DPIF_UC_ACTION, userdata, actions,
                             NULL);
    if (!error || error == ENOSPC) {
        dp_packet_batch_init_packet(&b, packet);
        dp_netdev_execute_actions(pmd, &b, may_steal, flow,
                                  actions->data, actions->size, now);
    } else if (may_steal) {
        dp_packet_delete(packet);
    }
}
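/* Unlike handle_packet_upcall() above, this upcall is DPIF_UC_ACTION: it is
 * triggered by an explicit userspace action (with optional 'userdata')
 * rather than a flow-table miss, so no datapath flow is installed
 * afterwards; the returned actions are executed once and forgotten. */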
static void
dp_execute_cb(void *aux_, struct dp_packet_batch *packets_,
              const struct nlattr *a, bool may_steal)
{
    struct dp_netdev_execute_aux *aux = aux_;
    uint32_t *depth = recirc_depth_get();
    struct dp_netdev_pmd_thread *pmd = aux->pmd;
    struct dp_netdev *dp = pmd->dp;
    int type = nl_attr_type(a);
    long long now = aux->now;
    struct tx_port *p;

    switch ((enum ovs_action_attr)type) {
    case OVS_ACTION_ATTR_OUTPUT:
        p = pmd_send_port_cache_lookup(pmd, nl_attr_get_odp_port(a));
        if (OVS_LIKELY(p)) {
            int tx_qid;
            bool dynamic_txqs;

            dynamic_txqs = p->port->dynamic_txqs;
            if (dynamic_txqs) {
                tx_qid = dpif_netdev_xps_get_tx_qid(pmd, p, now);
            } else {
                tx_qid = pmd->static_tx_qid;
            }

            netdev_send(p->port->netdev, tx_qid, packets_, may_steal,
                        dynamic_txqs);
            return;
        }
        break;

    case OVS_ACTION_ATTR_TUNNEL_PUSH:
        if (*depth < MAX_RECIRC_DEPTH) {
            struct dp_packet_batch tnl_pkt;
            struct dp_packet_batch *orig_packets_ = packets_;
            int err;

            if (!may_steal) {
                dp_packet_batch_clone(&tnl_pkt, packets_);
                packets_ = &tnl_pkt;
                dp_packet_batch_reset_cutlen(orig_packets_);
            }

            dp_packet_batch_apply_cutlen(packets_);

            err = push_tnl_action(pmd, a, packets_);
            if (!err) {
                (*depth)++;
                dp_netdev_recirculate(pmd, packets_);
                (*depth)--;
            }
            return;
        }
        break;

    case OVS_ACTION_ATTR_TUNNEL_POP:
        if (*depth < MAX_RECIRC_DEPTH) {
            struct dp_packet_batch *orig_packets_ = packets_;
            odp_port_t portno = nl_attr_get_odp_port(a);

            p = pmd_tnl_port_cache_lookup(pmd, portno);
            if (p) {
                struct dp_packet_batch tnl_pkt;

                if (!may_steal) {
                    dp_packet_batch_clone(&tnl_pkt, packets_);
                    packets_ = &tnl_pkt;
                    dp_packet_batch_reset_cutlen(orig_packets_);
                }

                dp_packet_batch_apply_cutlen(packets_);

                netdev_pop_header(p->port->netdev, packets_);
                if (dp_packet_batch_is_empty(packets_)) {
                    return;
                }

                struct dp_packet *packet;
                DP_PACKET_BATCH_FOR_EACH (packet, packets_) {
                    packet->md.in_port.odp_port = portno;
                }

                (*depth)++;
                dp_netdev_recirculate(pmd, packets_);
                (*depth)--;
                return;
            }
        }
        break;

    case OVS_ACTION_ATTR_USERSPACE:
        if (!fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
            struct dp_packet_batch *orig_packets_ = packets_;
            const struct nlattr *userdata;
            struct dp_packet_batch usr_pkt;
            struct ofpbuf actions;
            struct flow flow;
            ovs_u128 ufid;
            bool clone = false;

            userdata = nl_attr_find_nested(a, OVS_USERSPACE_ATTR_USERDATA);
            ofpbuf_init(&actions, 0);

            if (packets_->trunc) {
                if (!may_steal) {
                    dp_packet_batch_clone(&usr_pkt, packets_);
                    packets_ = &usr_pkt;
                    clone = true;
                    dp_packet_batch_reset_cutlen(orig_packets_);
                }

                dp_packet_batch_apply_cutlen(packets_);
            }

            struct dp_packet *packet;
            DP_PACKET_BATCH_FOR_EACH (packet, packets_) {
                flow_extract(packet, &flow);
                dpif_flow_hash(dp->dpif, &flow, sizeof flow, &ufid);
                dp_execute_userspace_action(pmd, packet, may_steal, &flow,
                                            &ufid, &actions, userdata, now);
            }

            if (clone) {
                dp_packet_delete_batch(packets_, true);
            }

            ofpbuf_uninit(&actions);
            fat_rwlock_unlock(&dp->upcall_rwlock);

            return;
        }
        break;

    case OVS_ACTION_ATTR_RECIRC:
        if (*depth < MAX_RECIRC_DEPTH) {
            struct dp_packet_batch recirc_pkts;

            if (!may_steal) {
                dp_packet_batch_clone(&recirc_pkts, packets_);
                packets_ = &recirc_pkts;
            }

            struct dp_packet *packet;
            DP_PACKET_BATCH_FOR_EACH (packet, packets_) {
                packet->md.recirc_id = nl_attr_get_u32(a);
            }

            (*depth)++;
            dp_netdev_recirculate(pmd, packets_);
            (*depth)--;

            return;
        }

        VLOG_WARN("Packet dropped. Max recirculation depth exceeded.");
        break;

    case OVS_ACTION_ATTR_CT: {
        const struct nlattr *b;
        bool commit = false;
        unsigned int left;
        uint16_t zone = 0;
        const char *helper = NULL;
        const uint32_t *setmark = NULL;
        const struct ovs_key_ct_labels *setlabel = NULL;

        NL_ATTR_FOR_EACH_UNSAFE (b, left, nl_attr_get(a),
                                 nl_attr_get_size(a)) {
            enum ovs_ct_attr sub_type = nl_attr_type(b);

            switch (sub_type) {
            case OVS_CT_ATTR_COMMIT:
                commit = true;
                break;
            case OVS_CT_ATTR_ZONE:
                zone = nl_attr_get_u16(b);
                break;
            case OVS_CT_ATTR_HELPER:
                helper = nl_attr_get_string(b);
                break;
            case OVS_CT_ATTR_MARK:
                setmark = nl_attr_get(b);
                break;
            case OVS_CT_ATTR_LABELS:
                setlabel = nl_attr_get(b);
                break;
            case OVS_CT_ATTR_NAT:
            case OVS_CT_ATTR_UNSPEC:
            case __OVS_CT_ATTR_MAX:
                OVS_NOT_REACHED();
            }
        }

        conntrack_execute(&dp->conntrack, packets_, aux->flow->dl_type, commit,
                          zone, setmark, setlabel, helper);
        break;
    }

    case OVS_ACTION_ATTR_PUSH_VLAN:
    case OVS_ACTION_ATTR_POP_VLAN:
    case OVS_ACTION_ATTR_PUSH_MPLS:
    case OVS_ACTION_ATTR_POP_MPLS:
    case OVS_ACTION_ATTR_SET:
    case OVS_ACTION_ATTR_SET_MASKED:
    case OVS_ACTION_ATTR_SAMPLE:
    case OVS_ACTION_ATTR_HASH:
    case OVS_ACTION_ATTR_UNSPEC:
    case OVS_ACTION_ATTR_TRUNC:
    case OVS_ACTION_ATTR_CLONE:
    case __OVS_ACTION_ATTR_MAX:
        OVS_NOT_REACHED();
    }

    dp_packet_delete_batch(packets_, may_steal);
}
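/* A note on the 'may_steal' pattern used throughout dp_execute_cb(): when
 * the caller retains ownership (may_steal == false), every action that
 * consumes or rewrites the batch (tunnel push/pop, userspace, recirc) first
 * switches to a clone, e.g.:
 *
 *     if (!may_steal) {
 *         dp_packet_batch_clone(&tnl_pkt, packets_);
 *         packets_ = &tnl_pkt;
 *         dp_packet_batch_reset_cutlen(orig_packets_);
 *     }
 *
 * Only the final dp_packet_delete_batch(packets_, may_steal) can actually
 * free the caller's packets, and only when stealing was allowed. */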
static void
dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
                          struct dp_packet_batch *packets,
                          bool may_steal, const struct flow *flow,
                          const struct nlattr *actions, size_t actions_len,
                          long long now)
{
    struct dp_netdev_execute_aux aux = { pmd, now, flow };

    odp_execute_actions(&aux, packets, may_steal, actions,
                        actions_len, dp_execute_cb);
}
struct dp_netdev_ct_dump {
    struct ct_dpif_dump_state up;
    struct conntrack_dump dump;
    struct conntrack *ct;
    struct dp_netdev *dp;
};
static int
dpif_netdev_ct_dump_start(struct dpif *dpif, struct ct_dpif_dump_state **dump_,
                          const uint16_t *pzone)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_ct_dump *dump;

    dump = xzalloc(sizeof *dump);
    dump->dp = dp;
    dump->ct = &dp->conntrack;

    conntrack_dump_start(&dp->conntrack, &dump->dump, pzone);

    *dump_ = &dump->up;

    return 0;
}

static int
dpif_netdev_ct_dump_next(struct dpif *dpif OVS_UNUSED,
                         struct ct_dpif_dump_state *dump_,
                         struct ct_dpif_entry *entry)
{
    struct dp_netdev_ct_dump *dump;

    INIT_CONTAINER(dump, dump_, up);

    return conntrack_dump_next(&dump->dump, entry);
}

static int
dpif_netdev_ct_dump_done(struct dpif *dpif OVS_UNUSED,
                         struct ct_dpif_dump_state *dump_)
{
    struct dp_netdev_ct_dump *dump;
    int err;

    INIT_CONTAINER(dump, dump_, up);

    err = conntrack_dump_done(&dump->dump);

    free(dump);

    return err;
}

static int
dpif_netdev_ct_flush(struct dpif *dpif, const uint16_t *zone)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    return conntrack_flush(&dp->conntrack, zone);
}
const struct dpif_class dpif_netdev_class = {
    "netdev",
    dpif_netdev_init,
    dpif_netdev_enumerate,
    dpif_netdev_port_open_type,
    dpif_netdev_open,
    dpif_netdev_close,
    dpif_netdev_destroy,
    dpif_netdev_run,
    dpif_netdev_wait,
    dpif_netdev_get_stats,
    dpif_netdev_port_add,
    dpif_netdev_port_del,
    dpif_netdev_port_set_config,
    dpif_netdev_port_query_by_number,
    dpif_netdev_port_query_by_name,
    NULL,                       /* port_get_pid */
    dpif_netdev_port_dump_start,
    dpif_netdev_port_dump_next,
    dpif_netdev_port_dump_done,
    dpif_netdev_port_poll,
    dpif_netdev_port_poll_wait,
    dpif_netdev_flow_flush,
    dpif_netdev_flow_dump_create,
    dpif_netdev_flow_dump_destroy,
    dpif_netdev_flow_dump_thread_create,
    dpif_netdev_flow_dump_thread_destroy,
    dpif_netdev_flow_dump_next,
    dpif_netdev_operate,
    NULL,                       /* recv_set */
    NULL,                       /* handlers_set */
    dpif_netdev_set_config,
    dpif_netdev_queue_to_priority,
    NULL,                       /* recv */
    NULL,                       /* recv_wait */
    NULL,                       /* recv_purge */
    dpif_netdev_register_dp_purge_cb,
    dpif_netdev_register_upcall_cb,
    dpif_netdev_enable_upcall,
    dpif_netdev_disable_upcall,
    dpif_netdev_get_datapath_version,
    dpif_netdev_ct_dump_start,
    dpif_netdev_ct_dump_next,
    dpif_netdev_ct_dump_done,
    dpif_netdev_ct_flush,
};
static void
dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED,
                              const char *argv[], void *aux OVS_UNUSED)
{
    struct dp_netdev_port *port;
    struct dp_netdev *dp;
    odp_port_t port_no;

    ovs_mutex_lock(&dp_netdev_mutex);
    dp = shash_find_data(&dp_netdevs, argv[1]);
    if (!dp || !dpif_netdev_class_is_dummy(dp->class)) {
        ovs_mutex_unlock(&dp_netdev_mutex);
        unixctl_command_reply_error(conn, "unknown datapath or not a dummy");
        return;
    }
    ovs_refcount_ref(&dp->ref_cnt);
    ovs_mutex_unlock(&dp_netdev_mutex);

    ovs_mutex_lock(&dp->port_mutex);
    if (get_port_by_name(dp, argv[2], &port)) {
        unixctl_command_reply_error(conn, "unknown port");
        goto exit;
    }

    port_no = u32_to_odp(atoi(argv[3]));
    if (!port_no || port_no == ODPP_NONE) {
        unixctl_command_reply_error(conn, "bad port number");
        goto exit;
    }
    if (dp_netdev_lookup_port(dp, port_no)) {
        unixctl_command_reply_error(conn, "port number already in use");
        goto exit;
    }

    /* Remove port. */
    hmap_remove(&dp->ports, &port->node);
    reconfigure_datapath(dp);

    /* Reinsert with new port number. */
    port->port_no = port_no;
    hmap_insert(&dp->ports, &port->node, hash_port_no(port_no));
    reconfigure_datapath(dp);

    seq_change(dp->port_seq);
    unixctl_command_reply(conn, NULL);

exit:
    ovs_mutex_unlock(&dp->port_mutex);
    dp_netdev_unref(dp);
}
static void
dpif_dummy_register__(const char *type)
{
    struct dpif_class *class;

    class = xmalloc(sizeof *class);
    *class = dpif_netdev_class;
    class->type = xstrdup(type);
    dp_register_provider(class);
}
static void
dpif_dummy_override(const char *type)
{
    int error;

    /*
     * Ignore EAFNOSUPPORT to allow --enable-dummy=system with
     * a userland-only build.  It's useful for testsuite.
     */
    error = dp_unregister_provider(type);
    if (error == 0 || error == EAFNOSUPPORT) {
        dpif_dummy_register__(type);
    }
}
void
dpif_dummy_register(enum dummy_level level)
{
    if (level == DUMMY_OVERRIDE_ALL) {
        struct sset types;
        const char *type;

        sset_init(&types);
        dp_enumerate_types(&types);
        SSET_FOR_EACH (type, &types) {
            dpif_dummy_override(type);
        }
        sset_destroy(&types);
    } else if (level == DUMMY_OVERRIDE_SYSTEM) {
        dpif_dummy_override("system");
    }
    dpif_dummy_register__("dummy");

    unixctl_command_register("dpif-dummy/change-port-number",
                             "dp port new-number",
                             3, 3, dpif_dummy_change_port_number, NULL);
}
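/* The command registered above can be exercised from the testsuite like so
 * (illustrative invocation; the dummy datapath must already exist):
 *
 *     ovs-appctl dpif-dummy/change-port-number <dp> <port> <new-number>
 *
 * It takes exactly three arguments, matching the "dp port new-number" usage
 * string and the min/max argument counts passed to
 * unixctl_command_register(). */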
/* Datapath Classifier. */

/* A set of rules that all have the same fields wildcarded. */
struct dpcls_subtable {
    /* The fields are only used by writers. */
    struct cmap_node cmap_node OVS_GUARDED; /* Within dpcls 'subtables_map'. */

    /* These fields are accessed by readers. */
    struct cmap rules;           /* Contains "struct dpcls_rule"s. */
    uint32_t hit_cnt;            /* Number of match hits in subtable in current
                                    optimization interval. */
    struct netdev_flow_key mask; /* Wildcards for fields (const). */
    /* 'mask' must be the last field, additional space is allocated here. */
};
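/* For example, rules that match only on nw_dst share one subtable, while
 * rules that match on both nw_dst and tp_dst live in another: a subtable is
 * identified by its 'mask', so every rule inside it wildcards exactly the
 * same fields.  A lookup therefore probes at most one subtable per distinct
 * mask in use, which is why fewer distinct masks means cheaper
 * classification. */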
/* Initializes 'cls' as a classifier that initially contains no classification
 * rules. */
static void
dpcls_init(struct dpcls *cls)
{
    cmap_init(&cls->subtables_map);
    pvector_init(&cls->subtables);
}
static void
dpcls_destroy_subtable(struct dpcls *cls, struct dpcls_subtable *subtable)
{
    VLOG_DBG("Destroying subtable %p for in_port %d", subtable, cls->in_port);
    pvector_remove(&cls->subtables, subtable);
    cmap_remove(&cls->subtables_map, &subtable->cmap_node,
                subtable->mask.hash);
    cmap_destroy(&subtable->rules);
    ovsrcu_postpone(free, subtable);
}
/* Destroys 'cls'.  Rules within 'cls', if any, are not freed; this is the
 * caller's responsibility.
 * May only be called after all the readers have been terminated. */
static void
dpcls_destroy(struct dpcls *cls)
{
    if (cls) {
        struct dpcls_subtable *subtable;

        CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
            ovs_assert(cmap_count(&subtable->rules) == 0);
            dpcls_destroy_subtable(cls, subtable);
        }
        cmap_destroy(&cls->subtables_map);
        pvector_destroy(&cls->subtables);
    }
}
static struct dpcls_subtable *
dpcls_create_subtable(struct dpcls *cls, const struct netdev_flow_key *mask)
{
    struct dpcls_subtable *subtable;

    /* Need to add one. */
    subtable = xmalloc(sizeof *subtable
                       - sizeof subtable->mask.mf + mask->len);
    cmap_init(&subtable->rules);
    subtable->hit_cnt = 0;
    netdev_flow_key_clone(&subtable->mask, mask);
    cmap_insert(&cls->subtables_map, &subtable->cmap_node, mask->hash);
    /* Add the new subtable at the end of the pvector (with no hits yet) */
    pvector_insert(&cls->subtables, subtable, 0);
    VLOG_DBG("Creating %"PRIuSIZE". subtable %p for in_port %d",
             cmap_count(&cls->subtables_map), subtable, cls->in_port);
    pvector_publish(&cls->subtables);

    return subtable;
}
static inline struct dpcls_subtable *
dpcls_find_subtable(struct dpcls *cls, const struct netdev_flow_key *mask)
{
    struct dpcls_subtable *subtable;

    CMAP_FOR_EACH_WITH_HASH (subtable, cmap_node, mask->hash,
                             &cls->subtables_map) {
        if (netdev_flow_key_equal(&subtable->mask, mask)) {
            return subtable;
        }
    }
    return dpcls_create_subtable(cls, mask);
}
/* Periodically sort the dpcls subtable vectors according to hit counts */
static void
dpcls_sort_subtable_vector(struct dpcls *cls)
{
    struct pvector *pvec = &cls->subtables;
    struct dpcls_subtable *subtable;

    PVECTOR_FOR_EACH (subtable, pvec) {
        pvector_change_priority(pvec, subtable, subtable->hit_cnt);
        subtable->hit_cnt = 0;
    }
    pvector_publish(pvec);
}
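/* Example: if subtables A, B, and C ended the last interval with hit counts
 * 10, 500, and 0, the pvector re-ranks them as B, A, C, so the next
 * interval's lookups probe the busiest subtable first.  Resetting hit_cnt
 * to zero makes each interval's ranking reflect only recent traffic. */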
static inline void
dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd)
{
    struct dpcls *cls;
    long long int now = time_msec();

    if (now > pmd->next_optimization) {
        /* Try to obtain the flow lock to block out revalidator threads.
         * If not possible, just try next time. */
        if (!ovs_mutex_trylock(&pmd->flow_mutex)) {
            /* Optimize each classifier */
            CMAP_FOR_EACH (cls, node, &pmd->classifiers) {
                dpcls_sort_subtable_vector(cls);
            }
            ovs_mutex_unlock(&pmd->flow_mutex);
            /* Start new measuring interval */
            pmd->next_optimization = now + DPCLS_OPTIMIZATION_INTERVAL;
        }
    }
}
/* Insert 'rule' into 'cls'. */
static void
dpcls_insert(struct dpcls *cls, struct dpcls_rule *rule,
             const struct netdev_flow_key *mask)
{
    struct dpcls_subtable *subtable = dpcls_find_subtable(cls, mask);

    /* Refer to subtable's mask, also for later removal. */
    rule->mask = &subtable->mask;
    cmap_insert(&subtable->rules, &rule->cmap_node, rule->flow.hash);
}
/* Removes 'rule' from 'cls', also destructing the 'rule'. */
static void
dpcls_remove(struct dpcls *cls, struct dpcls_rule *rule)
{
    struct dpcls_subtable *subtable;

    ovs_assert(rule->mask);

    /* Get subtable from reference in rule->mask. */
    INIT_CONTAINER(subtable, rule->mask, mask);
    if (cmap_remove(&subtable->rules, &rule->cmap_node, rule->flow.hash)
        == 0) {
        /* Delete empty subtable. */
        dpcls_destroy_subtable(cls, subtable);
        pvector_publish(&cls->subtables);
    }
}
/* Returns true if 'target' satisfies 'rule', that is, for each 1-bit in the
 * rule's mask, the corresponding bits in the rule's flow and in 'target' are
 * the same. */
static inline bool
dpcls_rule_matches_key(const struct dpcls_rule *rule,
                       const struct netdev_flow_key *target)
{
    const uint64_t *keyp = miniflow_get_values(&rule->flow.mf);
    const uint64_t *maskp = miniflow_get_values(&rule->mask->mf);
    uint64_t value;

    NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, target, rule->flow.mf.map) {
        if (OVS_UNLIKELY((value & *maskp++) != *keyp++)) {
            return false;
        }
    }
    return true;
}
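/* Worked example on a single 64-bit unit, purely illustrative:
 *
 *     target = 0x0a000001  (packet field value)
 *     mask   = 0xffffff00  (the subtable wildcards the low byte)
 *     key    = 0x0a000000  (the rule's value, stored pre-masked)
 *
 *     (target & mask) == key  ->  0x0a000000 == 0x0a000000  ->  match
 *
 * The rule's miniflow values are stored already ANDed with its mask, which
 * is why the loop can compare '(value & *maskp) != *keyp' directly. */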
/* For each miniflow in 'keys' performs a classifier lookup writing the result
 * into the corresponding slot in 'rules'.  If a particular entry in 'keys' is
 * NULL it is skipped.
 *
 * This function is optimized for use in the userspace datapath and therefore
 * does not implement a lot of features available in the standard
 * classifier_lookup() function.  Specifically, it does not implement
 * priorities, instead returning any rule which matches the flow.
 *
 * Returns true if all miniflows found a corresponding rule. */
static bool
dpcls_lookup(struct dpcls *cls, const struct netdev_flow_key keys[],
             struct dpcls_rule **rules, const size_t cnt,
             int *num_lookups_p)
{
    /* The received 'cnt' miniflows are the search-keys that will be processed
     * to find a matching entry into the available subtables.
     * The number of bits in map_type is equal to NETDEV_MAX_BURST. */
    typedef uint32_t map_type;
#define MAP_BITS (sizeof(map_type) * CHAR_BIT)
    BUILD_ASSERT_DECL(MAP_BITS >= NETDEV_MAX_BURST);

    struct dpcls_subtable *subtable;

    map_type keys_map = TYPE_MAXIMUM(map_type); /* Set all bits. */
    map_type found_map;
    uint32_t hashes[MAP_BITS];
    const struct cmap_node *nodes[MAP_BITS];

    if (cnt != MAP_BITS) {
        keys_map >>= MAP_BITS - cnt; /* Clear extra bits. */
    }
    memset(rules, 0, cnt * sizeof *rules);

    int lookups_match = 0, subtable_pos = 1;

    /* The Datapath classifier - aka dpcls - is composed of subtables.
     * Subtables are dynamically created as needed when new rules are
     * inserted.  Each subtable collects rules with matches on a specific
     * subset of packet fields as defined by the subtable's mask.  We proceed
     * to process every search-key against each subtable, but when a match is
     * found for a search-key, the search for that key can stop because the
     * rules are non-overlapping. */
    PVECTOR_FOR_EACH (subtable, &cls->subtables) {
        int i;

        /* Compute hashes for the remaining keys.  Each search-key is
         * masked with the subtable's mask to avoid hashing the wildcarded
         * bits. */
        ULLONG_FOR_EACH_1(i, keys_map) {
            hashes[i] = netdev_flow_key_hash_in_mask(&keys[i],
                                                     &subtable->mask);
        }
        /* Lookup. */
        found_map = cmap_find_batch(&subtable->rules, keys_map, hashes, nodes);
        /* Check results.  When the i-th bit of found_map is set, it means
         * that a set of nodes with a matching hash value was found for the
         * i-th search-key.  Due to possible hash collisions we need to check
         * which of the found rules, if any, really matches our masked
         * search-key. */
        ULLONG_FOR_EACH_1(i, found_map) {
            struct dpcls_rule *rule;

            CMAP_NODE_FOR_EACH (rule, cmap_node, nodes[i]) {
                if (OVS_LIKELY(dpcls_rule_matches_key(rule, &keys[i]))) {
                    rules[i] = rule;
                    /* Even at 20 Mpps the 32-bit hit_cnt cannot wrap
                     * within one second optimization interval. */
                    subtable->hit_cnt++;
                    lookups_match += subtable_pos;
                    goto next;
                }
            }
            /* None of the found rules was a match.  Reset the i-th bit to
             * keep searching this key in the next subtable. */
            ULLONG_SET0(found_map, i);  /* Did not match. */
        next:
            ;                     /* Keep Sparse happy. */
        }
        keys_map &= ~found_map;   /* Clear the found rules. */
        if (!keys_map) {
            if (num_lookups_p) {
                *num_lookups_p = lookups_match;
            }
            return true;          /* All found. */
        }
        subtable_pos++;
    }
    if (num_lookups_p) {
        *num_lookups_p = lookups_match;
    }
    return false;                 /* Some misses. */
}
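/* Worked example of the bitmap bookkeeping above, with cnt = 4 packets:
 *
 *     keys_map  = 0b1111             all four keys still unresolved
 *     subtable 1: found_map = 0b0101, but key 0 was a hash collision
 *                 -> ULLONG_SET0 clears bit 0, found_map = 0b0100
 *     keys_map &= ~found_map         -> 0b1011 left for subtable 2
 *
 * Key 2 matched in the first subtable, so lookups_match grows by 1; a key
 * that matches in the second subtable adds 2, and so on.  That is how
 * 'num_lookups_p' ends up reporting the total number of subtable probes
 * attributable to the hits, which feeds the DP_STAT_LOOKUP_HIT counter in
 * fast_path_processing(). */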