/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dpif-netdev.h"

#include <netinet/in.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

#ifdef DPDK_NETDEV
#include <rte_cycles.h>
#endif

#include "conntrack.h"
#include "dp-packet.h"
#include "dpif-provider.h"
#include "fat-rwlock.h"
#include "netdev-vport.h"
#include "odp-execute.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/match.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/ofp-util.h"
#include "openvswitch/ofpbuf.h"
#include "openvswitch/shash.h"
#include "openvswitch/vlog.h"
#include "poll-loop.h"
#include "tnl-neigh-cache.h"
#include "tnl-ports.h"

VLOG_DEFINE_THIS_MODULE(dpif_netdev);

#define FLOW_DUMP_MAX_BATCH 50

/* Use per thread recirc_depth to prevent recirculation loop. */
#define MAX_RECIRC_DEPTH 5
DEFINE_STATIC_PER_THREAD_DATA(uint32_t, recirc_depth, 0)

/* Configuration parameters. */
enum { MAX_FLOWS = 65536 };     /* Maximum number of flows in flow table. */

/* Protects against changes to 'dp_netdevs'. */
static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dp_netdev's. */
static struct shash dp_netdevs OVS_GUARDED_BY(dp_netdev_mutex)
    = SHASH_INITIALIZER(&dp_netdevs);

static struct vlog_rate_limit upcall_rl = VLOG_RATE_LIMIT_INIT(600, 600);

#define DP_NETDEV_CS_SUPPORTED_MASK (CS_NEW | CS_ESTABLISHED | CS_RELATED \
                                     | CS_INVALID | CS_REPLY_DIR | CS_TRACKED)
#define DP_NETDEV_CS_UNSUPPORTED_MASK (~(uint32_t)DP_NETDEV_CS_SUPPORTED_MASK)

static struct odp_support dp_netdev_support = {
    .max_mpls_depth = SIZE_MAX,
};

/* Stores a miniflow with inline values */

struct netdev_flow_key {
    uint32_t hash;       /* Hash function differs for different users. */
    uint32_t len;        /* Length of the following miniflow (incl. map). */
    struct miniflow mf;
    uint64_t buf[FLOW_MAX_PACKET_U64S];
};

/* Exact match cache for frequently used flows
 *
 * The cache uses a 32-bit hash of the packet (which can be the RSS hash) to
 * search its entries for a miniflow that matches exactly the miniflow of the
 * packet.  It stores the 'dpcls_rule' (rule) that matches the miniflow.
 *
 * A cache entry holds a reference to its 'dp_netdev_flow'.
 *
 * A miniflow with a given hash can be in one of EM_FLOW_HASH_SEGS different
 * entries.  The 32-bit hash is split into EM_FLOW_HASH_SEGS values (each of
 * them is EM_FLOW_HASH_SHIFT bits wide and the remainder is thrown away).
 * Each value is the index of a cache entry where the miniflow could be.
 *
 * Each pmd_thread has its own private exact match cache.
 * If dp_netdev_input is not called from a pmd thread, a mutex is used.
 */

#define EM_FLOW_HASH_SHIFT 13
#define EM_FLOW_HASH_ENTRIES (1u << EM_FLOW_HASH_SHIFT)
#define EM_FLOW_HASH_MASK (EM_FLOW_HASH_ENTRIES - 1)
#define EM_FLOW_HASH_SEGS 2
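/* Worked example (illustrative, not from the original source): with
 * EM_FLOW_HASH_SHIFT of 13 there are 8192 entries and EM_FLOW_HASH_MASK is
 * 0x1fff.  A packet whose 32-bit hash is 0x00c0ffee may therefore live in
 * one of EM_FLOW_HASH_SEGS == 2 slots:
 *
 *     0x00c0ffee & 0x1fff           == 0x1fee   (segment 0)
 *     (0x00c0ffee >> 13) & 0x1fff   == 0x0607   (segment 1)
 *
 * The remaining 32 - 2*13 = 6 high bits of the hash are thrown away. */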
struct emc_entry {
    struct dp_netdev_flow *flow;
    struct netdev_flow_key key;   /* key.hash used for emc hash value. */
};

struct emc_cache {
    struct emc_entry entries[EM_FLOW_HASH_ENTRIES];
    int sweep_idx;                /* For emc_cache_slow_sweep(). */
};

/* Iterate in the exact match cache through every entry that might contain a
 * miniflow with hash 'HASH'. */
#define EMC_FOR_EACH_POS_WITH_HASH(EMC, CURRENT_ENTRY, HASH)                 \
    for (uint32_t i__ = 0, srch_hash__ = (HASH);                             \
         (CURRENT_ENTRY) = &(EMC)->entries[srch_hash__ & EM_FLOW_HASH_MASK], \
         i__ < EM_FLOW_HASH_SEGS;                                            \
         i__++, srch_hash__ >>= EM_FLOW_HASH_SHIFT)
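/* Usage sketch (illustrative only): the loop condition assigns CURRENT_ENTRY
 * before testing 'i__', so the body runs exactly EM_FLOW_HASH_SEGS times,
 * once per candidate slot:
 *
 *     struct emc_entry *entry;
 *
 *     EMC_FOR_EACH_POS_WITH_HASH (cache, entry, key->hash) {
 *         if (emc_entry_alive(entry)
 *             && netdev_flow_key_equal_mf(&entry->key, &key->mf)) {
 *             ...hit...
 *         }
 *     }
 */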
/* Simple non-wildcarding single-priority classifier. */

/* Time in ms between successive optimizations of the dpcls subtable vector */
#define DPCLS_OPTIMIZATION_INTERVAL 1000

struct dpcls {
    struct cmap_node node;      /* Within dp_netdev_pmd_thread.classifiers */
    odp_port_t in_port;
    struct cmap subtables_map;
    struct pvector subtables;
};

/* A rule to be inserted to the classifier. */
struct dpcls_rule {
    struct cmap_node cmap_node;   /* Within struct dpcls_subtable 'rules'. */
    struct netdev_flow_key *mask; /* Subtable's mask. */
    struct netdev_flow_key flow;  /* Matching key. */
    /* 'flow' must be the last field, additional space is allocated here. */
};

static void dpcls_init(struct dpcls *);
static void dpcls_destroy(struct dpcls *);
static void dpcls_sort_subtable_vector(struct dpcls *);
static void dpcls_insert(struct dpcls *, struct dpcls_rule *,
                         const struct netdev_flow_key *mask);
static void dpcls_remove(struct dpcls *, struct dpcls_rule *);
static bool dpcls_lookup(struct dpcls *cls,
                         const struct netdev_flow_key keys[],
                         struct dpcls_rule **rules, size_t cnt,
                         int *num_lookups_p);

/* Datapath based on the network device interface from netdev.h.
 *
 * Some members, marked 'const', are immutable.  Accessing other members
 * requires synchronization, as noted in more detail below.
 *
 * Acquisition order is, from outermost to innermost:
 *
 *    dp_netdev_mutex (global)
 */
struct dp_netdev {
    const struct dpif_class *const class;
    const char *const name;
    struct dpif *dpif;
    struct ovs_refcount ref_cnt;
    atomic_flag destroyed;

    /* Ports.
     *
     * Any lookup into 'ports' or any access to the dp_netdev_ports found
     * through 'ports' requires taking 'port_mutex'. */
    struct ovs_mutex port_mutex;
    struct hmap ports;
    struct seq *port_seq;       /* Incremented whenever a port changes. */

    /* Protects access to ofproto-dpif-upcall interface during revalidator
     * thread synchronization. */
    struct fat_rwlock upcall_rwlock;
    upcall_callback *upcall_cb; /* Callback function for executing upcalls. */
    void *upcall_aux;

    /* Callback function for notifying the purging of dp flows (during
     * resetting pmd deletion). */
    dp_purge_callback *dp_purge_cb;

    /* Stores all 'struct dp_netdev_pmd_thread's. */
    struct cmap poll_threads;

    /* Protects the access of the 'struct dp_netdev_pmd_thread'
     * instance for non-pmd thread. */
    struct ovs_mutex non_pmd_mutex;

    /* Each pmd thread will store its pointer to
     * 'struct dp_netdev_pmd_thread' in 'per_pmd_key'. */
    ovsthread_key_t per_pmd_key;

    struct seq *reconfigure_seq;
    uint64_t last_reconfigure_seq;

    /* Cpu mask for pin of pmd threads. */
    char *pmd_cmask;

    uint64_t last_tnl_conf_seq;

    struct conntrack conntrack;
};

static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp,
                                                    odp_port_t port_no)
    OVS_REQUIRES(dp->port_mutex);

enum dp_stat_type {
    DP_STAT_EXACT_HIT,          /* Packets that had an exact match (emc). */
    DP_STAT_MASKED_HIT,         /* Packets that matched in the flow table. */
    DP_STAT_MISS,               /* Packets that did not match. */
    DP_STAT_LOST,               /* Packets not passed up to the client. */
    DP_STAT_LOOKUP_HIT,         /* Number of subtable lookups for flow table
                                   hits. */
    DP_N_STATS
};

enum pmd_cycles_counter_type {
    PMD_CYCLES_POLLING,         /* Cycles spent polling NICs. */
    PMD_CYCLES_PROCESSING,      /* Cycles spent processing packets. */
    PMD_N_CYCLES
};

#define XPS_TIMEOUT_MS 500LL
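/* Illustrative note (an inference, not from the original source):
 * XPS_TIMEOUT_MS bounds how long a dynamically assigned tx queue id stays
 * bound to a pmd thread without being used.  dpif_netdev_xps_revalidate_pmd()
 * (declared below) is expected to release mappings idle for more than 500 ms,
 * so an idle pmd thread eventually stops holding a queue that other threads
 * could use. */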
/* Contained by struct dp_netdev_port's 'rxqs' member. */
struct dp_netdev_rxq {
    struct netdev_rxq *rxq;
    unsigned core_id;           /* Core to which this queue is pinned. */
};

/* A port in a netdev-based datapath. */
struct dp_netdev_port {
    odp_port_t port_no;
    struct netdev *netdev;
    struct hmap_node node;      /* Node in dp_netdev's 'ports'. */
    struct netdev_saved_flags *sf;
    struct dp_netdev_rxq *rxqs;
    unsigned n_rxq;             /* Number of elements in 'rxqs'. */
    bool dynamic_txqs;          /* If true XPS will be used. */
    unsigned *txq_used;         /* Number of threads that use each tx queue. */
    struct ovs_mutex txq_used_mutex;
    char *type;                 /* Port type as requested by user. */
    char *rxq_affinity_list;    /* Requested affinity of rx queues. */
};

/* Contained by struct dp_netdev_flow's 'stats' member.  */
struct dp_netdev_flow_stats {
    atomic_llong used;             /* Last used time, in monotonic msecs. */
    atomic_ullong packet_count;    /* Number of packets matched. */
    atomic_ullong byte_count;      /* Number of bytes matched. */
    atomic_uint16_t tcp_flags;     /* Bitwise-OR of seen tcp_flags values. */
};

/* A flow in 'dp_netdev_pmd_thread's 'flow_table'.
 *
 * Except near the beginning or ending of its lifespan, rule 'rule' belongs to
 * its pmd thread's classifier.  The text below calls this classifier 'cls'.
 *
 * The thread safety rules described here for "struct dp_netdev_flow" are
 * motivated by two goals:
 *
 *    - Prevent threads that read members of "struct dp_netdev_flow" from
 *      reading bad data due to changes by some thread concurrently modifying
 *      those members.
 *
 *    - Prevent two threads making changes to members of a given "struct
 *      dp_netdev_flow" from interfering with each other.
 *
 * A flow 'flow' may be accessed without a risk of being freed during an RCU
 * grace period.  Code that needs to hold onto a flow for a while
 * should try incrementing 'flow->ref_cnt' with dp_netdev_flow_ref().
 *
 * 'flow->ref_cnt' protects 'flow' from being freed.  It doesn't protect the
 * flow from being deleted from 'cls' and it doesn't protect members of 'flow'
 * from modification.
 *
 * Some members, marked 'const', are immutable.  Accessing other members
 * requires synchronization, as noted in more detail below.
 */
struct dp_netdev_flow {
    const struct flow flow;      /* Unmasked flow that created this entry. */
    /* Hash table index by unmasked flow. */
    const struct cmap_node node; /* In owning dp_netdev_pmd_thread's */
                                 /* 'flow_table'. */
    const ovs_u128 ufid;         /* Unique flow identifier. */
    const unsigned pmd_id;       /* The 'core_id' of pmd thread owning this */
                                 /* flow. */

    /* Number of references.
     * The classifier owns one reference.
     * Any thread trying to keep a rule from being freed should hold its own
     * reference. */
    struct ovs_refcount ref_cnt;

    bool dead;

    /* Statistics. */
    struct dp_netdev_flow_stats stats;

    /* Actions. */
    OVSRCU_TYPE(struct dp_netdev_actions *) actions;

    /* While processing a group of input packets, the datapath uses the next
     * member to store a pointer to the output batch for the flow.  It is
     * reset after the batch has been sent out (See dp_netdev_queue_batches(),
     * packet_batch_per_flow_init() and packet_batch_per_flow_execute()). */
    struct packet_batch_per_flow *batch;

    /* Packet classification. */
    struct dpcls_rule cr;        /* In owning dp_netdev's 'cls'. */
    /* 'cr' must be the last member. */
};

static void dp_netdev_flow_unref(struct dp_netdev_flow *);
static bool dp_netdev_flow_ref(struct dp_netdev_flow *);
static int dpif_netdev_flow_from_nlattrs(const struct nlattr *, uint32_t,
                                         struct flow *);

/* A set of datapath actions within a "struct dp_netdev_flow".
 *
 * A struct dp_netdev_actions 'actions' is protected with RCU. */
struct dp_netdev_actions {
    /* These members are immutable: they do not change during the struct's
     * lifetime. */
    unsigned int size;          /* Size of 'actions', in bytes. */
    struct nlattr actions[];    /* Sequence of OVS_ACTION_ATTR_* attributes. */
};

struct dp_netdev_actions *dp_netdev_actions_create(const struct nlattr *,
                                                   size_t);
struct dp_netdev_actions *dp_netdev_flow_get_actions(
    const struct dp_netdev_flow *);
static void dp_netdev_actions_free(struct dp_netdev_actions *);

/* Contained by struct dp_netdev_pmd_thread's 'stats' member.  */
struct dp_netdev_pmd_stats {
    /* Indexed by DP_STAT_*. */
    atomic_ullong n[DP_N_STATS];
};

/* Contained by struct dp_netdev_pmd_thread's 'cycles' member.  */
struct dp_netdev_pmd_cycles {
    /* Indexed by PMD_CYCLES_*. */
    atomic_ullong n[PMD_N_CYCLES];
};

/* Contained by struct dp_netdev_pmd_thread's 'poll_list' member. */
struct rxq_poll {
    struct dp_netdev_port *port;
    struct netdev_rxq *rx;
    struct ovs_list node;
};

/* Contained by struct dp_netdev_pmd_thread's 'port_cache' or 'tx_ports'. */
struct tx_port {
    struct dp_netdev_port *port;
    struct hmap_node node;
};

/* PMD: Poll mode drivers.  PMD accesses devices via polling to eliminate
 * the performance overhead of interrupt processing.  Therefore netdev can
 * not implement rx-wait for these devices.  dpif-netdev needs to poll
 * these devices to check for received packets.  A pmd thread does the
 * polling for devices assigned to itself.
 *
 * DPDK uses PMD for accessing NICs.
 *
 * Note, the instance with cpu core id NON_PMD_CORE_ID will be reserved for
 * I/O of all non-pmd threads.  There will be no actual thread created
 * for the instance.
 *
 * Each struct has its own flow table and classifier.  Packets received
 * from managed ports are looked up in the corresponding pmd thread's
 * flow table, and are executed with the found actions.
 */
struct dp_netdev_pmd_thread {
    struct dp_netdev *dp;
    struct ovs_refcount ref_cnt;    /* Every reference must be refcount'ed. */
    struct cmap_node node;          /* In 'dp->poll_threads'. */

    pthread_cond_t cond;            /* For synchronizing pmd thread reload. */
    struct ovs_mutex cond_mutex;    /* Mutex for condition variable. */

    /* Per thread exact-match cache.  Note, the instance for cpu core
     * NON_PMD_CORE_ID can be accessed by multiple threads, and thusly
     * need to be protected by 'non_pmd_mutex'.  Every other instance
     * will only be accessed by its own pmd thread. */
    struct emc_cache flow_cache;

    /* Flow-Table and classifiers
     *
     * Writers of 'flow_table' must take the 'flow_mutex'.  Corresponding
     * changes to 'classifiers' must be made while still holding the
     * 'flow_mutex'. */
    struct ovs_mutex flow_mutex;
    struct cmap flow_table OVS_GUARDED; /* Flow table. */

    /* One classifier per in_port polled by the pmd */
    struct cmap classifiers;
    /* Periodically sort subtable vectors according to hit frequencies */
    long long int next_optimization;

    /* Statistics. */
    struct dp_netdev_pmd_stats stats;

    /* Cycles counters */
    struct dp_netdev_pmd_cycles cycles;

    /* Used to count cycles.  See 'cycles_counter_end()'. */
    unsigned long long last_cycles;

    struct latch exit_latch;        /* For terminating the pmd thread. */
    atomic_uint change_seq;         /* For reloading pmd ports. */

    unsigned core_id;               /* CPU core id of this pmd thread. */
    int numa_id;                    /* numa node id of this pmd thread. */
    bool isolated;

    /* Queue id used by this pmd thread to send packets on all netdevs if
     * XPS disabled for this netdev.  All static_tx_qid's are unique and less
     * than 'ovs_numa_get_n_cores() + 1'. */
    atomic_int static_tx_qid;

    struct ovs_mutex port_mutex;    /* Mutex for 'poll_list' and 'tx_ports'. */
    /* List of rx queues to poll. */
    struct ovs_list poll_list OVS_GUARDED;
    /* Number of elements in 'poll_list' */

    /* Map of 'tx_port's used for transmission.  Written by the main thread,
     * read by the pmd thread. */
    struct hmap tx_ports OVS_GUARDED;

    /* Map of 'tx_port' used in the fast path.  This is a thread-local copy of
     * 'tx_ports'.  The instance for cpu core NON_PMD_CORE_ID can be accessed
     * by multiple threads, and thusly need to be protected by
     * 'non_pmd_mutex'.  Every other instance will only be accessed by its own
     * pmd thread. */
    struct hmap port_cache;

    /* Only a pmd thread can write on its own 'cycles' and 'stats'.
     * The main thread keeps 'stats_zero' and 'cycles_zero' as base
     * values and subtracts them from 'stats' and 'cycles' before
     * reporting to the user */
    unsigned long long stats_zero[DP_N_STATS];
    uint64_t cycles_zero[PMD_N_CYCLES];
};

#define PMD_INITIAL_SEQ 1

/* Interface to netdev-based datapath. */
struct dpif_netdev {
    struct dpif dpif;
    struct dp_netdev *dp;
    uint64_t last_port_seq;
};

static int get_port_by_number(struct dp_netdev *dp, odp_port_t port_no,
                              struct dp_netdev_port **portp)
    OVS_REQUIRES(dp->port_mutex);
static int get_port_by_name(struct dp_netdev *dp, const char *devname,
                            struct dp_netdev_port **portp)
    OVS_REQUIRES(dp->port_mutex);
static void dp_netdev_free(struct dp_netdev *)
    OVS_REQUIRES(dp_netdev_mutex);
static int do_add_port(struct dp_netdev *dp, const char *devname,
                       const char *type, odp_port_t port_no)
    OVS_REQUIRES(dp->port_mutex);
static void do_del_port(struct dp_netdev *dp, struct dp_netdev_port *)
    OVS_REQUIRES(dp->port_mutex);
static int dpif_netdev_open(const struct dpif_class *, const char *name,
                            bool create, struct dpif **);
static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
                                      struct dp_packet_batch *,
                                      bool may_steal, const struct flow *flow,
                                      const struct nlattr *actions,
                                      size_t actions_len,
                                      long long now);
static void dp_netdev_input(struct dp_netdev_pmd_thread *,
                            struct dp_packet_batch *, odp_port_t port_no);
static void dp_netdev_recirculate(struct dp_netdev_pmd_thread *,
                                  struct dp_packet_batch *);

static void dp_netdev_disable_upcall(struct dp_netdev *);
static void dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd,
                                    struct dp_netdev *dp, unsigned core_id,
                                    int numa_id);
static void dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_set_nonpmd(struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex);

static struct dp_netdev_pmd_thread *dp_netdev_get_pmd(struct dp_netdev *dp,
                                                      unsigned core_id);
static struct dp_netdev_pmd_thread *
dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos);
static void dp_netdev_destroy_all_pmds(struct dp_netdev *dp);
static void dp_netdev_del_pmds_on_numa(struct dp_netdev *dp, int numa_id);
static void dp_netdev_set_pmds_on_numa(struct dp_netdev *dp, int numa_id)
    OVS_REQUIRES(dp->port_mutex);
static void dp_netdev_pmd_clear_ports(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_del_port_from_all_pmds(struct dp_netdev *dp,
                                             struct dp_netdev_port *port);
static void dp_netdev_add_port_to_pmds(struct dp_netdev *dp,
                                       struct dp_netdev_port *port);
static void dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread *pmd,
                                         struct dp_netdev_port *port);
static void dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd,
                                     struct dp_netdev_port *port,
                                     struct netdev_rxq *rx);
static struct dp_netdev_pmd_thread *
dp_netdev_less_loaded_pmd_on_numa(struct dp_netdev *dp, int numa_id);
static void dp_netdev_reset_pmd_threads(struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex);
static void reconfigure_pmd_threads(struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex);
static bool dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd);
static void pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd)
    OVS_REQUIRES(pmd->port_mutex);
static inline void
dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd);

static void
dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
                               long long now, bool purge);
static int dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread *pmd,
                                      struct tx_port *tx, long long now);

static inline bool emc_entry_alive(struct emc_entry *ce);
static void emc_clear_entry(struct emc_entry *ce);

static void
emc_cache_init(struct emc_cache *flow_cache)
{
    int i;

    flow_cache->sweep_idx = 0;
    for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
        flow_cache->entries[i].flow = NULL;
        flow_cache->entries[i].key.hash = 0;
        flow_cache->entries[i].key.len = sizeof(struct miniflow);
        flowmap_init(&flow_cache->entries[i].key.mf.map);
    }
}

static void
emc_cache_uninit(struct emc_cache *flow_cache)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
        emc_clear_entry(&flow_cache->entries[i]);
    }
}

/* Check and clear dead flow references slowly (one entry at each
 * invocation). */
static void
emc_cache_slow_sweep(struct emc_cache *flow_cache)
{
    struct emc_entry *entry = &flow_cache->entries[flow_cache->sweep_idx];

    if (!emc_entry_alive(entry)) {
        emc_clear_entry(entry);
    }
    flow_cache->sweep_idx = (flow_cache->sweep_idx + 1) & EM_FLOW_HASH_MASK;
}
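/* Illustrative arithmetic (not from the original source): the cache has
 * EM_FLOW_HASH_ENTRIES == 8192 entries, and each call above advances
 * 'sweep_idx' by one, so a full pass over the cache takes 8192 invocations
 * of emc_cache_slow_sweep().  Dead entries therefore linger until the sweep
 * wraps around to them, unless a lookup or insertion replaces them first. */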
/* Returns true if 'dpif' is a netdev or dummy dpif, false otherwise. */
bool
dpif_is_netdev(const struct dpif *dpif)
{
    return dpif->dpif_class->open == dpif_netdev_open;
}

static struct dpif_netdev *
dpif_netdev_cast(const struct dpif *dpif)
{
    ovs_assert(dpif_is_netdev(dpif));
    return CONTAINER_OF(dpif, struct dpif_netdev, dpif);
}

static struct dp_netdev *
get_dp_netdev(const struct dpif *dpif)
{
    return dpif_netdev_cast(dpif)->dp;
}

enum pmd_info_type {
    PMD_INFO_SHOW_STATS,  /* Show how cpu cycles are spent. */
    PMD_INFO_CLEAR_STATS, /* Set the cycles count to 0. */
    PMD_INFO_SHOW_RXQ     /* Show poll-lists of pmd threads. */
};

static void
pmd_info_show_stats(struct ds *reply,
                    struct dp_netdev_pmd_thread *pmd,
                    unsigned long long stats[DP_N_STATS],
                    uint64_t cycles[PMD_N_CYCLES])
{
    unsigned long long total_packets = 0;
    uint64_t total_cycles = 0;
    int i;

    /* These loops subtract reference values ('*_zero') from the counters.
     * Since loads and stores are relaxed, it might be possible for a '*_zero'
     * value to be more recent than the current value we're reading from the
     * counter.  This is not a big problem, since these numbers are not
     * supposed to be too accurate, but we should at least make sure that
     * the result is not negative. */
    for (i = 0; i < DP_N_STATS; i++) {
        if (stats[i] > pmd->stats_zero[i]) {
            stats[i] -= pmd->stats_zero[i];
        } else {
            stats[i] = 0;
        }

        if (i != DP_STAT_LOST) {
            /* Lost packets are already included in DP_STAT_MISS */
            total_packets += stats[i];
        }
    }

    for (i = 0; i < PMD_N_CYCLES; i++) {
        if (cycles[i] > pmd->cycles_zero[i]) {
            cycles[i] -= pmd->cycles_zero[i];
        } else {
            cycles[i] = 0;
        }

        total_cycles += cycles[i];
    }

    ds_put_cstr(reply, (pmd->core_id == NON_PMD_CORE_ID)
                       ? "main thread" : "pmd thread");

    if (pmd->numa_id != OVS_NUMA_UNSPEC) {
        ds_put_format(reply, " numa_id %d", pmd->numa_id);
    }
    if (pmd->core_id != OVS_CORE_UNSPEC && pmd->core_id != NON_PMD_CORE_ID) {
        ds_put_format(reply, " core_id %u", pmd->core_id);
    }
    ds_put_cstr(reply, ":\n");

    ds_put_format(reply,
                  "\temc hits:%llu\n\tmegaflow hits:%llu\n"
                  "\tavg. subtable lookups per hit:%.2f\n"
                  "\tmiss:%llu\n\tlost:%llu\n",
                  stats[DP_STAT_EXACT_HIT], stats[DP_STAT_MASKED_HIT],
                  stats[DP_STAT_MASKED_HIT] > 0
                  ? (1.0 * stats[DP_STAT_LOOKUP_HIT])
                    / stats[DP_STAT_MASKED_HIT]
                  : 0,
                  stats[DP_STAT_MISS], stats[DP_STAT_LOST]);

    if (total_cycles == 0) {
        return;
    }

    ds_put_format(reply,
                  "\tpolling cycles:%"PRIu64" (%.02f%%)\n"
                  "\tprocessing cycles:%"PRIu64" (%.02f%%)\n",
                  cycles[PMD_CYCLES_POLLING],
                  cycles[PMD_CYCLES_POLLING] / (double) total_cycles * 100,
                  cycles[PMD_CYCLES_PROCESSING],
                  cycles[PMD_CYCLES_PROCESSING] / (double) total_cycles * 100);

    if (total_packets == 0) {
        return;
    }

    ds_put_format(reply,
                  "\tavg cycles per packet: %.02f (%"PRIu64"/%llu)\n",
                  total_cycles / (double) total_packets,
                  total_cycles, total_packets);

    ds_put_format(reply,
                  "\tavg processing cycles per packet: "
                  "%.02f (%"PRIu64"/%llu)\n",
                  cycles[PMD_CYCLES_PROCESSING] / (double) total_packets,
                  cycles[PMD_CYCLES_PROCESSING], total_packets);
}

static void
pmd_info_clear_stats(struct ds *reply OVS_UNUSED,
                     struct dp_netdev_pmd_thread *pmd,
                     unsigned long long stats[DP_N_STATS],
                     uint64_t cycles[PMD_N_CYCLES])
{
    int i;

    /* We cannot write 'stats' and 'cycles' (because they're written by other
     * threads) and we shouldn't change 'stats' (because they're used to count
     * datapath stats, which must not be cleared here).  Instead, we save the
     * current values and subtract them from the values to be displayed in the
     * future. */
    for (i = 0; i < DP_N_STATS; i++) {
        pmd->stats_zero[i] = stats[i];
    }
    for (i = 0; i < PMD_N_CYCLES; i++) {
        pmd->cycles_zero[i] = cycles[i];
    }
}

static void
pmd_info_show_rxq(struct ds *reply, struct dp_netdev_pmd_thread *pmd)
{
    if (pmd->core_id != NON_PMD_CORE_ID) {
        struct rxq_poll *poll;
        const char *prev_name = NULL;

        ds_put_format(reply,
                      "pmd thread numa_id %d core_id %u:\n\tisolated : %s\n",
                      pmd->numa_id, pmd->core_id,
                      (pmd->isolated) ? "true" : "false");

        ovs_mutex_lock(&pmd->port_mutex);
        LIST_FOR_EACH (poll, node, &pmd->poll_list) {
            const char *name = netdev_get_name(poll->port->netdev);

            if (!prev_name || strcmp(name, prev_name)) {
                if (prev_name) {
                    ds_put_cstr(reply, "\n");
                }
                ds_put_format(reply, "\tport: %s\tqueue-id:",
                              netdev_get_name(poll->port->netdev));
                prev_name = name;
            }
            ds_put_format(reply, " %d", netdev_rxq_get_queue_id(poll->rx));
        }
        ovs_mutex_unlock(&pmd->port_mutex);
        ds_put_cstr(reply, "\n");
    }
}

static void
dpif_netdev_pmd_info(struct unixctl_conn *conn, int argc, const char *argv[],
                     void *aux)
{
    struct ds reply = DS_EMPTY_INITIALIZER;
    struct dp_netdev_pmd_thread *pmd;
    struct dp_netdev *dp = NULL;
    enum pmd_info_type type = *(enum pmd_info_type *) aux;

    ovs_mutex_lock(&dp_netdev_mutex);

    if (argc == 2) {
        dp = shash_find_data(&dp_netdevs, argv[1]);
    } else if (shash_count(&dp_netdevs) == 1) {
        /* There's only one datapath */
        dp = shash_first(&dp_netdevs)->data;
    }

    if (!dp) {
        ovs_mutex_unlock(&dp_netdev_mutex);
        unixctl_command_reply_error(conn,
                                    "please specify an existing datapath");
        return;
    }

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        if (type == PMD_INFO_SHOW_RXQ) {
            pmd_info_show_rxq(&reply, pmd);
        } else {
            unsigned long long stats[DP_N_STATS];
            uint64_t cycles[PMD_N_CYCLES];
            int i;

            /* Read current stats and cycle counters */
            for (i = 0; i < ARRAY_SIZE(stats); i++) {
                atomic_read_relaxed(&pmd->stats.n[i], &stats[i]);
            }
            for (i = 0; i < ARRAY_SIZE(cycles); i++) {
                atomic_read_relaxed(&pmd->cycles.n[i], &cycles[i]);
            }

            if (type == PMD_INFO_CLEAR_STATS) {
                pmd_info_clear_stats(&reply, pmd, stats, cycles);
            } else if (type == PMD_INFO_SHOW_STATS) {
                pmd_info_show_stats(&reply, pmd, stats, cycles);
            }
        }
    }

    ovs_mutex_unlock(&dp_netdev_mutex);

    unixctl_command_reply(conn, ds_cstr(&reply));
    ds_destroy(&reply);
}

static int
dpif_netdev_init(void)
{
    static enum pmd_info_type show_aux = PMD_INFO_SHOW_STATS,
                              clear_aux = PMD_INFO_CLEAR_STATS,
                              poll_aux = PMD_INFO_SHOW_RXQ;

    unixctl_command_register("dpif-netdev/pmd-stats-show", "[dp]",
                             0, 1, dpif_netdev_pmd_info,
                             (void *) &show_aux);
    unixctl_command_register("dpif-netdev/pmd-stats-clear", "[dp]",
                             0, 1, dpif_netdev_pmd_info,
                             (void *) &clear_aux);
    unixctl_command_register("dpif-netdev/pmd-rxq-show", "[dp]",
                             0, 1, dpif_netdev_pmd_info,
                             (void *) &poll_aux);
    return 0;
}

static int
dpif_netdev_enumerate(struct sset *all_dps,
                      const struct dpif_class *dpif_class)
{
    struct shash_node *node;

    ovs_mutex_lock(&dp_netdev_mutex);
    SHASH_FOR_EACH(node, &dp_netdevs) {
        struct dp_netdev *dp = node->data;
        if (dpif_class != dp->class) {
            /* 'dp_netdevs' contains both "netdev" and "dummy" dpifs.
             * If the class doesn't match, skip this dpif. */
            continue;
        }
        sset_add(all_dps, node->name);
    }
    ovs_mutex_unlock(&dp_netdev_mutex);

    return 0;
}

static bool
dpif_netdev_class_is_dummy(const struct dpif_class *class)
{
    return class != &dpif_netdev_class;
}

static const char *
dpif_netdev_port_open_type(const struct dpif_class *class, const char *type)
{
    return strcmp(type, "internal") ? type
           : dpif_netdev_class_is_dummy(class) ? "dummy-internal"
           : "tap";
}
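/* Example (illustrative): for the real "netdev" class an "internal" port is
 * opened as a "tap" device, while for a dummy class it becomes
 * "dummy-internal"; every other requested type is passed through
 * unchanged. */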
static struct dpif *
create_dpif_netdev(struct dp_netdev *dp)
{
    uint16_t netflow_id = hash_string(dp->name, 0);
    struct dpif_netdev *dpif;

    ovs_refcount_ref(&dp->ref_cnt);

    dpif = xmalloc(sizeof *dpif);
    dpif_init(&dpif->dpif, dp->class, dp->name, netflow_id >> 8, netflow_id);
    dpif->dp = dp;
    dpif->last_port_seq = seq_read(dp->port_seq);

    return &dpif->dpif;
}

/* Choose an unused, non-zero port number and return it on success.
 * Return ODPP_NONE on failure. */
static odp_port_t
choose_port(struct dp_netdev *dp, const char *name)
    OVS_REQUIRES(dp->port_mutex)
{
    uint32_t port_no;

    if (dp->class != &dpif_netdev_class) {
        const char *p;
        int start_no = 0;

        /* If the port name begins with "br", start the number search at
         * 100 to make writing tests easier. */
        if (!strncmp(name, "br", 2)) {
            start_no = 100;
        }

        /* If the port name contains a number, try to assign that port number.
         * This can make writing unit tests easier because port numbers are
         * predictable. */
        for (p = name; *p != '\0'; p++) {
            if (isdigit((unsigned char) *p)) {
                port_no = start_no + strtol(p, NULL, 10);
                if (port_no > 0 && port_no != odp_to_u32(ODPP_NONE)
                    && !dp_netdev_lookup_port(dp, u32_to_odp(port_no))) {
                    return u32_to_odp(port_no);
                }
                break;
            }
        }
    }

    for (port_no = 1; port_no <= UINT16_MAX; port_no++) {
        if (!dp_netdev_lookup_port(dp, u32_to_odp(port_no))) {
            return u32_to_odp(port_no);
        }
    }

    return ODPP_NONE;
}
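/* Examples (illustrative, for classes where name-based numbering applies):
 * a port named "eth3" asks for port number 3; a port named "br1" starts the
 * search at 100 and asks for 101.  If the requested number is taken (or the
 * name contains no digits), the linear scan at the end of choose_port()
 * picks the lowest free number starting from 1. */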
static int
create_dp_netdev(const char *name, const struct dpif_class *class,
                 struct dp_netdev **dpp)
    OVS_REQUIRES(dp_netdev_mutex)
{
    struct dp_netdev *dp;
    int error;

    dp = xzalloc(sizeof *dp);
    shash_add(&dp_netdevs, name, dp);

    *CONST_CAST(const struct dpif_class **, &dp->class) = class;
    *CONST_CAST(const char **, &dp->name) = xstrdup(name);
    ovs_refcount_init(&dp->ref_cnt);
    atomic_flag_clear(&dp->destroyed);

    ovs_mutex_init(&dp->port_mutex);
    hmap_init(&dp->ports);
    dp->port_seq = seq_create();
    fat_rwlock_init(&dp->upcall_rwlock);

    dp->reconfigure_seq = seq_create();
    dp->last_reconfigure_seq = seq_read(dp->reconfigure_seq);

    /* Disable upcalls by default. */
    dp_netdev_disable_upcall(dp);
    dp->upcall_aux = NULL;
    dp->upcall_cb = NULL;

    conntrack_init(&dp->conntrack);

    cmap_init(&dp->poll_threads);
    ovs_mutex_init_recursive(&dp->non_pmd_mutex);
    ovsthread_key_create(&dp->per_pmd_key, NULL);

    ovs_mutex_lock(&dp->port_mutex);
    dp_netdev_set_nonpmd(dp);

    error = do_add_port(dp, name, dpif_netdev_port_open_type(dp->class,
                                                             "internal"),
                        ODPP_LOCAL);
    ovs_mutex_unlock(&dp->port_mutex);
    if (error) {
        dp_netdev_free(dp);
        return error;
    }

    dp->last_tnl_conf_seq = seq_read(tnl_conf_seq);
    *dpp = dp;
    return 0;
}

static void
dp_netdev_request_reconfigure(struct dp_netdev *dp)
{
    seq_change(dp->reconfigure_seq);
}

static bool
dp_netdev_is_reconf_required(struct dp_netdev *dp)
{
    return seq_read(dp->reconfigure_seq) != dp->last_reconfigure_seq;
}

static int
dpif_netdev_open(const struct dpif_class *class, const char *name,
                 bool create, struct dpif **dpifp)
{
    struct dp_netdev *dp;
    int error;

    ovs_mutex_lock(&dp_netdev_mutex);
    dp = shash_find_data(&dp_netdevs, name);
    if (!dp) {
        error = create ? create_dp_netdev(name, class, &dp) : ENODEV;
    } else {
        error = (dp->class != class ? EINVAL
                 : create ? EEXIST
                 : 0);
    }
    if (!error) {
        *dpifp = create_dpif_netdev(dp);
        dp->dpif = *dpifp;
    }
    ovs_mutex_unlock(&dp_netdev_mutex);

    return error;
}

static void
dp_netdev_destroy_upcall_lock(struct dp_netdev *dp)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    /* Check that upcalls are disabled, i.e. that the rwlock is taken */
    ovs_assert(fat_rwlock_tryrdlock(&dp->upcall_rwlock));

    /* Before freeing a lock we should release it */
    fat_rwlock_unlock(&dp->upcall_rwlock);
    fat_rwlock_destroy(&dp->upcall_rwlock);
}

/* Requires dp_netdev_mutex so that we can't get a new reference to 'dp'
 * through the 'dp_netdevs' shash while freeing 'dp'. */
static void
dp_netdev_free(struct dp_netdev *dp)
    OVS_REQUIRES(dp_netdev_mutex)
{
    struct dp_netdev_port *port, *next;

    shash_find_and_delete(&dp_netdevs, dp->name);

    dp_netdev_destroy_all_pmds(dp);
    ovs_mutex_destroy(&dp->non_pmd_mutex);
    ovsthread_key_delete(dp->per_pmd_key);

    conntrack_destroy(&dp->conntrack);

    ovs_mutex_lock(&dp->port_mutex);
    HMAP_FOR_EACH_SAFE (port, next, node, &dp->ports) {
        do_del_port(dp, port);
    }
    ovs_mutex_unlock(&dp->port_mutex);
    cmap_destroy(&dp->poll_threads);

    seq_destroy(dp->reconfigure_seq);

    seq_destroy(dp->port_seq);
    hmap_destroy(&dp->ports);
    ovs_mutex_destroy(&dp->port_mutex);

    /* Upcalls must be disabled at this point */
    dp_netdev_destroy_upcall_lock(dp);

    free(dp->pmd_cmask);
    free(CONST_CAST(char *, dp->name));
    free(dp);
}

static void
dp_netdev_unref(struct dp_netdev *dp)
{
    if (dp) {
        /* Take dp_netdev_mutex so that, if dp->ref_cnt falls to zero, we
         * can't get a new reference to 'dp' through the 'dp_netdevs'
         * shash. */
        ovs_mutex_lock(&dp_netdev_mutex);
        if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) {
            dp_netdev_free(dp);
        }
        ovs_mutex_unlock(&dp_netdev_mutex);
    }
}

static void
dpif_netdev_close(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    dp_netdev_unref(dp);
    free(dpif);
}

static int
dpif_netdev_destroy(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    if (!atomic_flag_test_and_set(&dp->destroyed)) {
        if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) {
            /* Can't happen: 'dpif' still owns a reference to 'dp'. */
            OVS_NOT_REACHED();
        }
    }

    return 0;
}

/* Add 'n' to the atomic variable 'var' non-atomically and using relaxed
 * load/store semantics.  While the increment is not atomic, the load and
 * store operations are, making it impossible to read inconsistent values.
 *
 * This is used to update thread local stats counters. */
static void
non_atomic_ullong_add(atomic_ullong *var, unsigned long long n)
{
    unsigned long long tmp;

    atomic_read_relaxed(var, &tmp);
    tmp += n;
    atomic_store_relaxed(var, tmp);
}
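/* Usage sketch (illustrative only): a pmd thread bumps one of its own
 * counters like this, e.g. after processing a batch of 'cnt' packets:
 *
 *     non_atomic_ullong_add(&pmd->stats.n[DP_STAT_EXACT_HIT], cnt);
 *
 * This is safe only because each counter has a single writer (its pmd
 * thread); other threads may read a slightly stale value, but never a torn
 * one. */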
static int
dpif_netdev_get_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_pmd_thread *pmd;

    stats->n_flows = stats->n_hit = stats->n_missed = stats->n_lost = 0;
    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        unsigned long long n;
        stats->n_flows += cmap_count(&pmd->flow_table);

        atomic_read_relaxed(&pmd->stats.n[DP_STAT_MASKED_HIT], &n);
        stats->n_hit += n;
        atomic_read_relaxed(&pmd->stats.n[DP_STAT_EXACT_HIT], &n);
        stats->n_hit += n;
        atomic_read_relaxed(&pmd->stats.n[DP_STAT_MISS], &n);
        stats->n_missed += n;
        atomic_read_relaxed(&pmd->stats.n[DP_STAT_LOST], &n);
        stats->n_lost += n;
    }
    stats->n_masks = UINT32_MAX;
    stats->n_mask_hit = UINT64_MAX;

    return 0;
}

static void
dp_netdev_reload_pmd__(struct dp_netdev_pmd_thread *pmd)
{
    int old_seq;

    if (pmd->core_id == NON_PMD_CORE_ID) {
        ovs_mutex_lock(&pmd->dp->non_pmd_mutex);
        ovs_mutex_lock(&pmd->port_mutex);
        pmd_load_cached_ports(pmd);
        ovs_mutex_unlock(&pmd->port_mutex);
        ovs_mutex_unlock(&pmd->dp->non_pmd_mutex);
        return;
    }

    ovs_mutex_lock(&pmd->cond_mutex);
    atomic_add_relaxed(&pmd->change_seq, 1, &old_seq);
    ovs_mutex_cond_wait(&pmd->cond, &pmd->cond_mutex);
    ovs_mutex_unlock(&pmd->cond_mutex);
}

static uint32_t
hash_port_no(odp_port_t port_no)
{
    return hash_int(odp_to_u32(port_no), 0);
}

static int
port_create(const char *devname, const char *type,
            odp_port_t port_no, struct dp_netdev_port **portp)
{
    struct netdev_saved_flags *sf;
    struct dp_netdev_port *port;
    enum netdev_flags flags;
    struct netdev *netdev;
    int n_open_rxqs = 0;
    int n_cores = 0;
    int i, error;
    bool dynamic_txqs = false;

    *portp = NULL;

    /* Open and validate network device. */
    error = netdev_open(devname, type, &netdev);
    if (error) {
        return error;
    }
    /* XXX reject non-Ethernet devices */

    netdev_get_flags(netdev, &flags);
    if (flags & NETDEV_LOOPBACK) {
        VLOG_ERR("%s: cannot add a loopback device", devname);
        error = EINVAL;
        goto out;
    }

    if (netdev_is_pmd(netdev)) {
        n_cores = ovs_numa_get_n_cores();

        if (n_cores == OVS_CORE_UNSPEC) {
            VLOG_ERR("%s, cannot get cpu core info", devname);
            error = ENOENT;
            goto out;
        }
        /* There can only be ovs_numa_get_n_cores() pmd threads,
         * so creates a txq for each, and one extra for the non
         * pmd threads. */
        error = netdev_set_tx_multiq(netdev, n_cores + 1);
        if (error && (error != EOPNOTSUPP)) {
            VLOG_ERR("%s, cannot set multiq", devname);
            goto out;
        }
    }

    if (netdev_is_reconf_required(netdev)) {
        error = netdev_reconfigure(netdev);
        if (error) {
            goto out;
        }
    }

    if (netdev_is_pmd(netdev)) {
        if (netdev_n_txq(netdev) < n_cores + 1) {
            dynamic_txqs = true;
        }
    }

    port = xzalloc(sizeof *port);
    port->port_no = port_no;
    port->netdev = netdev;
    port->n_rxq = netdev_n_rxq(netdev);
    port->rxqs = xcalloc(port->n_rxq, sizeof *port->rxqs);
    port->txq_used = xcalloc(netdev_n_txq(netdev), sizeof *port->txq_used);
    port->type = xstrdup(type);
    ovs_mutex_init(&port->txq_used_mutex);
    port->dynamic_txqs = dynamic_txqs;

    for (i = 0; i < port->n_rxq; i++) {
        error = netdev_rxq_open(netdev, &port->rxqs[i].rxq, i);
        if (error) {
            VLOG_ERR("%s: cannot receive packets on this network device (%s)",
                     devname, ovs_strerror(errno));
            goto out_rxq_close;
        }
        port->rxqs[i].core_id = OVS_CORE_UNSPEC;
        n_open_rxqs++;
    }

    error = netdev_turn_flags_on(netdev, NETDEV_PROMISC, &sf);
    if (error) {
        goto out_rxq_close;
    }
    port->sf = sf;

    *portp = port;

    return 0;

out_rxq_close:
    for (i = 0; i < n_open_rxqs; i++) {
        netdev_rxq_close(port->rxqs[i].rxq);
    }
    ovs_mutex_destroy(&port->txq_used_mutex);
    free(port->type);
    free(port->txq_used);
    free(port->rxqs);
    free(port);

out:
    netdev_close(netdev);
    return error;
}

static int
do_add_port(struct dp_netdev *dp, const char *devname, const char *type,
            odp_port_t port_no)
    OVS_REQUIRES(dp->port_mutex)
{
    struct dp_netdev_port *port;
    int error;

    /* Reject devices already in 'dp'. */
    if (!get_port_by_name(dp, devname, &port)) {
        return EEXIST;
    }

    error = port_create(devname, type, port_no, &port);
    if (error) {
        return error;
    }

    if (netdev_is_pmd(port->netdev)) {
        int numa_id = netdev_get_numa_id(port->netdev);

        ovs_assert(ovs_numa_numa_id_is_valid(numa_id));
        dp_netdev_set_pmds_on_numa(dp, numa_id);
    }

    dp_netdev_add_port_to_pmds(dp, port);

    hmap_insert(&dp->ports, &port->node, hash_port_no(port_no));
    seq_change(dp->port_seq);

    return 0;
}

static int
dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev,
                     odp_port_t *port_nop)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
    const char *dpif_port;
    odp_port_t port_no;
    int error;

    ovs_mutex_lock(&dp->port_mutex);
    dpif_port = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
    if (*port_nop != ODPP_NONE) {
        port_no = *port_nop;
        error = dp_netdev_lookup_port(dp, *port_nop) ? EBUSY : 0;
    } else {
        port_no = choose_port(dp, dpif_port);
        error = port_no == ODPP_NONE ? EFBIG : 0;
    }
    if (!error) {
        *port_nop = port_no;
        error = do_add_port(dp, dpif_port, netdev_get_type(netdev), port_no);
    }
    ovs_mutex_unlock(&dp->port_mutex);

    return error;
}

static int
dpif_netdev_port_del(struct dpif *dpif, odp_port_t port_no)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    int error;

    ovs_mutex_lock(&dp->port_mutex);
    if (port_no == ODPP_LOCAL) {
        error = EINVAL;
    } else {
        struct dp_netdev_port *port;

        error = get_port_by_number(dp, port_no, &port);
        if (!error) {
            do_del_port(dp, port);
        }
    }
    ovs_mutex_unlock(&dp->port_mutex);

    return error;
}

static bool
is_valid_port_number(odp_port_t port_no)
{
    return port_no != ODPP_NONE;
}

static struct dp_netdev_port *
dp_netdev_lookup_port(const struct dp_netdev *dp, odp_port_t port_no)
    OVS_REQUIRES(dp->port_mutex)
{
    struct dp_netdev_port *port;

    HMAP_FOR_EACH_WITH_HASH (port, node, hash_port_no(port_no), &dp->ports) {
        if (port->port_no == port_no) {
            return port;
        }
    }
    return NULL;
}

static int
get_port_by_number(struct dp_netdev *dp,
                   odp_port_t port_no, struct dp_netdev_port **portp)
    OVS_REQUIRES(dp->port_mutex)
{
    if (!is_valid_port_number(port_no)) {
        *portp = NULL;
        return EINVAL;
    } else {
        *portp = dp_netdev_lookup_port(dp, port_no);
        return *portp ? 0 : ENODEV;
    }
}

static void
port_destroy(struct dp_netdev_port *port)
{
    if (!port) {
        return;
    }

    netdev_close(port->netdev);
    netdev_restore_flags(port->sf);

    for (unsigned i = 0; i < port->n_rxq; i++) {
        netdev_rxq_close(port->rxqs[i].rxq);
    }
    ovs_mutex_destroy(&port->txq_used_mutex);
    free(port->rxq_affinity_list);
    free(port->txq_used);
    free(port->rxqs);
    free(port->type);
    free(port);
}

static int
get_port_by_name(struct dp_netdev *dp,
                 const char *devname, struct dp_netdev_port **portp)
    OVS_REQUIRES(dp->port_mutex)
{
    struct dp_netdev_port *port;

    HMAP_FOR_EACH (port, node, &dp->ports) {
        if (!strcmp(netdev_get_name(port->netdev), devname)) {
            *portp = port;
            return 0;
        }
    }

    /* Callers of dpif_netdev_port_query_by_name() expect ENODEV for a
     * non-existent port. */
    return ENODEV;
}

static int
get_n_pmd_threads(struct dp_netdev *dp)
{
    /* There is one non pmd thread in dp->poll_threads */
    return cmap_count(&dp->poll_threads) - 1;
}

static int
get_n_pmd_threads_on_numa(struct dp_netdev *dp, int numa_id)
{
    struct dp_netdev_pmd_thread *pmd;
    int n_pmds = 0;

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        if (pmd->numa_id == numa_id) {
            n_pmds++;
        }
    }

    return n_pmds;
}

/* Returns 'true' if there is a port with pmd netdev on numa node 'numa_id'
 * or with an rx queue assigned to a core on that numa node. */
static bool
has_pmd_rxq_for_numa(struct dp_netdev *dp, int numa_id)
    OVS_REQUIRES(dp->port_mutex)
{
    struct dp_netdev_port *port;

    HMAP_FOR_EACH (port, node, &dp->ports) {
        if (netdev_is_pmd(port->netdev)) {
            int i;

            if (netdev_get_numa_id(port->netdev) == numa_id) {
                return true;
            }

            for (i = 0; i < port->n_rxq; i++) {
                unsigned core_id = port->rxqs[i].core_id;

                if (core_id != OVS_CORE_UNSPEC
                    && ovs_numa_get_numa_id(core_id) == numa_id) {
                    return true;
                }
            }
        }
    }

    return false;
}

static void
do_del_port(struct dp_netdev *dp, struct dp_netdev_port *port)
    OVS_REQUIRES(dp->port_mutex)
{
    hmap_remove(&dp->ports, &port->node);
    seq_change(dp->port_seq);

    dp_netdev_del_port_from_all_pmds(dp, port);

    if (netdev_is_pmd(port->netdev)) {
        int numa_id = netdev_get_numa_id(port->netdev);

        /* PMD threads can not be on invalid numa node. */
        ovs_assert(ovs_numa_numa_id_is_valid(numa_id));
        /* If there is no netdev on the numa node, deletes the pmd threads
         * for that numa. */
        if (!has_pmd_rxq_for_numa(dp, numa_id)) {
            dp_netdev_del_pmds_on_numa(dp, numa_id);
        }
    }

    port_destroy(port);
}

static void
answer_port_query(const struct dp_netdev_port *port,
                  struct dpif_port *dpif_port)
{
    dpif_port->name = xstrdup(netdev_get_name(port->netdev));
    dpif_port->type = xstrdup(port->type);
    dpif_port->port_no = port->port_no;
}

static int
dpif_netdev_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
                                 struct dpif_port *dpif_port)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_port *port;
    int error;

    ovs_mutex_lock(&dp->port_mutex);
    error = get_port_by_number(dp, port_no, &port);
    if (!error && dpif_port) {
        answer_port_query(port, dpif_port);
    }
    ovs_mutex_unlock(&dp->port_mutex);

    return error;
}

static int
dpif_netdev_port_query_by_name(const struct dpif *dpif, const char *devname,
                               struct dpif_port *dpif_port)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_port *port;
    int error;

    ovs_mutex_lock(&dp->port_mutex);
    error = get_port_by_name(dp, devname, &port);
    if (!error && dpif_port) {
        answer_port_query(port, dpif_port);
    }
    ovs_mutex_unlock(&dp->port_mutex);

    return error;
}

static void
dp_netdev_flow_free(struct dp_netdev_flow *flow)
{
    dp_netdev_actions_free(dp_netdev_flow_get_actions(flow));
    free(flow);
}

static void dp_netdev_flow_unref(struct dp_netdev_flow *flow)
{
    if (ovs_refcount_unref_relaxed(&flow->ref_cnt) == 1) {
        ovsrcu_postpone(dp_netdev_flow_free, flow);
    }
}

static uint32_t
dp_netdev_flow_hash(const ovs_u128 *ufid)
{
    return ufid->u32[0];
}

static inline struct dpcls *
dp_netdev_pmd_lookup_dpcls(struct dp_netdev_pmd_thread *pmd,
                           odp_port_t in_port)
{
    struct dpcls *cls;
    uint32_t hash = hash_port_no(in_port);
    CMAP_FOR_EACH_WITH_HASH (cls, node, hash, &pmd->classifiers) {
        if (cls->in_port == in_port) {
            /* Port classifier exists already */
            return cls;
        }
    }
    return NULL;
}

static inline struct dpcls *
dp_netdev_pmd_find_dpcls(struct dp_netdev_pmd_thread *pmd,
                         odp_port_t in_port)
    OVS_REQUIRES(pmd->flow_mutex)
{
    struct dpcls *cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port);
    uint32_t hash = hash_port_no(in_port);

    if (!cls) {
        /* Create new classifier for in_port */
        cls = xmalloc(sizeof(*cls));
        dpcls_init(cls);
        cls->in_port = in_port;
        cmap_insert(&pmd->classifiers, &cls->node, hash);
        VLOG_DBG("Creating dpcls %p for in_port %d", cls, in_port);
    }
    return cls;
}

static void
dp_netdev_pmd_remove_flow(struct dp_netdev_pmd_thread *pmd,
                          struct dp_netdev_flow *flow)
    OVS_REQUIRES(pmd->flow_mutex)
{
    struct cmap_node *node = CONST_CAST(struct cmap_node *, &flow->node);
    struct dpcls *cls;
    odp_port_t in_port = flow->flow.in_port.odp_port;

    cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port);
    ovs_assert(cls != NULL);
    dpcls_remove(cls, &flow->cr);
    cmap_remove(&pmd->flow_table, node, dp_netdev_flow_hash(&flow->ufid));
    flow->dead = true;

    dp_netdev_flow_unref(flow);
}

static void
dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd)
{
    struct dp_netdev_flow *netdev_flow;

    ovs_mutex_lock(&pmd->flow_mutex);
    CMAP_FOR_EACH (netdev_flow, node, &pmd->flow_table) {
        dp_netdev_pmd_remove_flow(pmd, netdev_flow);
    }
    ovs_mutex_unlock(&pmd->flow_mutex);
}

static int
dpif_netdev_flow_flush(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_pmd_thread *pmd;

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        dp_netdev_pmd_flow_flush(pmd);
    }

    return 0;
}

struct dp_netdev_port_state {
    struct hmap_position position;
    char *name;
};

static int
dpif_netdev_port_dump_start(const struct dpif *dpif OVS_UNUSED, void **statep)
{
    *statep = xzalloc(sizeof(struct dp_netdev_port_state));
    return 0;
}

static int
dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,
                           struct dpif_port *dpif_port)
{
    struct dp_netdev_port_state *state = state_;
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct hmap_node *node;
    int retval;

    ovs_mutex_lock(&dp->port_mutex);
    node = hmap_at_position(&dp->ports, &state->position);
    if (node) {
        struct dp_netdev_port *port;

        port = CONTAINER_OF(node, struct dp_netdev_port, node);

        free(state->name);
        state->name = xstrdup(netdev_get_name(port->netdev));
        dpif_port->name = state->name;
        dpif_port->type = port->type;
        dpif_port->port_no = port->port_no;

        retval = 0;
    } else {
        retval = EOF;
    }
    ovs_mutex_unlock(&dp->port_mutex);

    return retval;
}

static int
dpif_netdev_port_dump_done(const struct dpif *dpif OVS_UNUSED, void *state_)
{
    struct dp_netdev_port_state *state = state_;

    free(state->name);
    free(state);
    return 0;
}

static int
dpif_netdev_port_poll(const struct dpif *dpif_, char **devnamep OVS_UNUSED)
{
    struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);
    uint64_t new_port_seq;
    int error;

    new_port_seq = seq_read(dpif->dp->port_seq);
    if (dpif->last_port_seq != new_port_seq) {
        dpif->last_port_seq = new_port_seq;
        error = ENOBUFS;
    } else {
        error = EAGAIN;
    }

    return error;
}

static void
dpif_netdev_port_poll_wait(const struct dpif *dpif_)
{
    struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);

    seq_wait(dpif->dp->port_seq, dpif->last_port_seq);
}

static struct dp_netdev_flow *
dp_netdev_flow_cast(const struct dpcls_rule *cr)
{
    return cr ? CONTAINER_OF(cr, struct dp_netdev_flow, cr) : NULL;
}

static bool dp_netdev_flow_ref(struct dp_netdev_flow *flow)
{
    return ovs_refcount_try_ref_rcu(&flow->ref_cnt);
}

/* netdev_flow_key utilities.
 *
 * netdev_flow_key is basically a miniflow.  We use these functions
 * (netdev_flow_key_clone, netdev_flow_key_equal, ...) instead of the miniflow
 * functions (miniflow_clone_inline, miniflow_equal, ...), because:
 *
 * - Since we are dealing exclusively with miniflows created by
 *   miniflow_extract(), if the map is different the miniflow is different.
 *   Therefore we can be faster by comparing the map and the miniflow in a
 *   single memcmp().
 * - These functions can be inlined by the compiler. */

/* Given the number of bits set in miniflow's maps, returns the size of the
 * 'netdev_flow_key.mf' */
static inline size_t
netdev_flow_key_size(size_t flow_u64s)
{
    return sizeof(struct miniflow) + MINIFLOW_VALUES_SIZE(flow_u64s);
}
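/* Worked example (illustrative): if a miniflow has 3 u64 values set,
 * netdev_flow_key_size(3) is sizeof(struct miniflow) + 3 * sizeof(uint64_t),
 * i.e. the map header followed by the three inline values.  This is also the
 * number of bytes netdev_flow_key_equal() below compares via memcmp(). */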
static inline bool
netdev_flow_key_equal(const struct netdev_flow_key *a,
                      const struct netdev_flow_key *b)
{
    /* 'b->len' may be not set yet. */
    return a->hash == b->hash && !memcmp(&a->mf, &b->mf, a->len);
}

/* Used to compare 'netdev_flow_key' in the exact match cache to a miniflow.
 * The maps are compared bitwise, so both 'key->mf' and 'mf' must have been
 * generated by miniflow_extract. */
static inline bool
netdev_flow_key_equal_mf(const struct netdev_flow_key *key,
                         const struct miniflow *mf)
{
    return !memcmp(&key->mf, mf, key->len);
}

static inline void
netdev_flow_key_clone(struct netdev_flow_key *dst,
                      const struct netdev_flow_key *src)
{
    memcpy(dst, src,
           offsetof(struct netdev_flow_key, mf) + src->len);
}

static inline void
netdev_flow_key_from_flow(struct netdev_flow_key *dst,
                          const struct flow *src)
{
    struct dp_packet packet;
    uint64_t buf_stub[512 / 8];

    dp_packet_use_stub(&packet, buf_stub, sizeof buf_stub);
    pkt_metadata_from_flow(&packet.md, src);
    flow_compose(&packet, src);
    miniflow_extract(&packet, &dst->mf);
    dp_packet_uninit(&packet);

    dst->len = netdev_flow_key_size(miniflow_n_values(&dst->mf));
    dst->hash = 0; /* Not computed yet. */
}

/* Initialize a netdev_flow_key 'mask' from 'match'. */
static inline void
netdev_flow_mask_init(struct netdev_flow_key *mask,
                      const struct match *match)
{
    uint64_t *dst = miniflow_values(&mask->mf);
    struct flowmap fmap;
    uint32_t hash = 0;
    size_t idx;

    /* Only check masks that make sense for the flow. */
    flow_wc_map(&match->flow, &fmap);
    flowmap_init(&mask->mf.map);

    FLOWMAP_FOR_EACH_INDEX(idx, fmap) {
        uint64_t mask_u64 = flow_u64_value(&match->wc.masks, idx);

        if (mask_u64) {
            flowmap_set(&mask->mf.map, idx, 1);
            *dst++ = mask_u64;
            hash = hash_add64(hash, mask_u64);
        }
    }

    map_t map;

    FLOWMAP_FOR_EACH_MAP (map, mask->mf.map) {
        hash = hash_add64(hash, map);
    }

    size_t n = dst - miniflow_get_values(&mask->mf);

    mask->hash = hash_finish(hash, n * 8);
    mask->len = netdev_flow_key_size(n);
}

/* Initializes 'dst' as a copy of 'flow' masked with 'mask'. */
static inline void
netdev_flow_key_init_masked(struct netdev_flow_key *dst,
                            const struct flow *flow,
                            const struct netdev_flow_key *mask)
{
    uint64_t *dst_u64 = miniflow_values(&dst->mf);
    const uint64_t *mask_u64 = miniflow_get_values(&mask->mf);
    uint32_t hash = 0;
    uint64_t value;

    dst->len = mask->len;
    dst->mf = mask->mf;   /* Copy maps. */

    FLOW_FOR_EACH_IN_MAPS(value, flow, mask->mf.map) {
        *dst_u64 = value & *mask_u64++;
        hash = hash_add64(hash, *dst_u64++);
    }
    dst->hash = hash_finish(hash,
                            (dst_u64 - miniflow_get_values(&dst->mf)) * 8);
}
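/* Illustrative example (not from the original source): if the mask's miniflow
 * covers, say, two u64 units, netdev_flow_key_init_masked() stores exactly
 * two masked values from 'flow' and hashes only those bytes.  Two keys built
 * with the same mask share the same map, so they can be compared cheaply
 * with netdev_flow_key_equal() above. */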
/* Iterate through netdev_flow_key TNL u64 values specified by 'FLOWMAP'. */
#define NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(VALUE, KEY, FLOWMAP)   \
    MINIFLOW_FOR_EACH_IN_FLOWMAP(VALUE, &(KEY)->mf, FLOWMAP)

/* Returns a hash value for the bits of 'key' where there are 1-bits in
 * 'mask'. */
static inline uint32_t
netdev_flow_key_hash_in_mask(const struct netdev_flow_key *key,
                             const struct netdev_flow_key *mask)
{
    const uint64_t *p = miniflow_get_values(&mask->mf);
    uint32_t hash = 0;
    uint64_t value;

    NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, key, mask->mf.map) {
        hash = hash_add64(hash, value & *p++);
    }

    return hash_finish(hash, (p - miniflow_get_values(&mask->mf)) * 8);
}

static inline bool
emc_entry_alive(struct emc_entry *ce)
{
    return ce->flow && !ce->flow->dead;
}

static void
emc_clear_entry(struct emc_entry *ce)
{
    if (ce->flow) {
        dp_netdev_flow_unref(ce->flow);
        ce->flow = NULL;
    }
}

static inline void
emc_change_entry(struct emc_entry *ce, struct dp_netdev_flow *flow,
                 const struct netdev_flow_key *key)
{
    if (ce->flow != flow) {
        if (ce->flow) {
            dp_netdev_flow_unref(ce->flow);
        }

        if (dp_netdev_flow_ref(flow)) {
            ce->flow = flow;
        } else {
            ce->flow = NULL;
        }
    }
    if (key) {
        netdev_flow_key_clone(&ce->key, key);
    }
}

static inline void
emc_insert(struct emc_cache *cache, const struct netdev_flow_key *key,
           struct dp_netdev_flow *flow)
{
    struct emc_entry *to_be_replaced = NULL;
    struct emc_entry *current_entry;

    EMC_FOR_EACH_POS_WITH_HASH(cache, current_entry, key->hash) {
        if (netdev_flow_key_equal(&current_entry->key, key)) {
            /* We found the entry with the 'mf' miniflow */
            emc_change_entry(current_entry, flow, NULL);
            return;
        }

        /* Replacement policy: put the flow in an empty (not alive) entry, or
         * in the first entry where it can be */
        if (!to_be_replaced
            || (emc_entry_alive(to_be_replaced)
                && !emc_entry_alive(current_entry))
            || current_entry->key.hash < to_be_replaced->key.hash) {
            to_be_replaced = current_entry;
        }
    }
    /* We didn't find the miniflow in the cache.
     * The 'to_be_replaced' entry is where the new flow will be stored */

    emc_change_entry(to_be_replaced, flow, key);
}
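/* Replacement-policy example (illustrative): suppose the two candidate slots
 * for 'key' hold (a) a dead entry and (b) a live entry.  The loop above
 * prefers (a), since evicting a dead entry is free.  If both are alive, the
 * entry with the smaller 'key.hash' becomes the victim; that pseudo-random
 * tie-break spreads evictions instead of always victimizing the same slot. */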
1997 static inline struct dp_netdev_flow
*
1998 emc_lookup(struct emc_cache
*cache
, const struct netdev_flow_key
*key
)
2000 struct emc_entry
*current_entry
;
2002 EMC_FOR_EACH_POS_WITH_HASH(cache
, current_entry
, key
->hash
) {
2003 if (current_entry
->key
.hash
== key
->hash
2004 && emc_entry_alive(current_entry
)
2005 && netdev_flow_key_equal_mf(¤t_entry
->key
, &key
->mf
)) {
2007 /* We found the entry with the 'key->mf' miniflow */
2008 return current_entry
->flow
;
2015 static struct dp_netdev_flow
*
2016 dp_netdev_pmd_lookup_flow(struct dp_netdev_pmd_thread
*pmd
,
2017 const struct netdev_flow_key
*key
,
2021 struct dpcls_rule
*rule
;
2022 odp_port_t in_port
= u32_to_odp(MINIFLOW_GET_U32(&key
->mf
, in_port
));
2023 struct dp_netdev_flow
*netdev_flow
= NULL
;
2025 cls
= dp_netdev_pmd_lookup_dpcls(pmd
, in_port
);
2026 if (OVS_LIKELY(cls
)) {
2027 dpcls_lookup(cls
, key
, &rule
, 1, lookup_num_p
);
2028 netdev_flow
= dp_netdev_flow_cast(rule
);
2033 static struct dp_netdev_flow
*
2034 dp_netdev_pmd_find_flow(const struct dp_netdev_pmd_thread
*pmd
,
2035 const ovs_u128
*ufidp
, const struct nlattr
*key
,
2038 struct dp_netdev_flow
*netdev_flow
;
2042 /* If a UFID is not provided, determine one based on the key. */
2043 if (!ufidp
&& key
&& key_len
2044 && !dpif_netdev_flow_from_nlattrs(key
, key_len
, &flow
)) {
2045 dpif_flow_hash(pmd
->dp
->dpif
, &flow
, sizeof flow
, &ufid
);
2050 CMAP_FOR_EACH_WITH_HASH (netdev_flow
, node
, dp_netdev_flow_hash(ufidp
),
2052 if (ovs_u128_equals(netdev_flow
->ufid
, *ufidp
)) {
2062 get_dpif_flow_stats(const struct dp_netdev_flow
*netdev_flow_
,
2063 struct dpif_flow_stats
*stats
)
2065 struct dp_netdev_flow
*netdev_flow
;
2066 unsigned long long n
;
2070 netdev_flow
= CONST_CAST(struct dp_netdev_flow
*, netdev_flow_
);
2072 atomic_read_relaxed(&netdev_flow
->stats
.packet_count
, &n
);
2073 stats
->n_packets
= n
;
2074 atomic_read_relaxed(&netdev_flow
->stats
.byte_count
, &n
);
2076 atomic_read_relaxed(&netdev_flow
->stats
.used
, &used
);
2078 atomic_read_relaxed(&netdev_flow
->stats
.tcp_flags
, &flags
);
2079 stats
->tcp_flags
= flags
;
2082 /* Converts to the dpif_flow format, using 'key_buf' and 'mask_buf' for
2083 * storing the netlink-formatted key/mask. 'key_buf' may be the same as
2084 * 'mask_buf'. Actions will be returned without copying, by relying on RCU to
2087 dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow
*netdev_flow
,
2088 struct ofpbuf
*key_buf
, struct ofpbuf
*mask_buf
,
2089 struct dpif_flow
*flow
, bool terse
)
2092 memset(flow
, 0, sizeof *flow
);
2094 struct flow_wildcards wc
;
2095 struct dp_netdev_actions
*actions
;
2097 struct odp_flow_key_parms odp_parms
= {
2098 .flow
= &netdev_flow
->flow
,
2100 .support
= dp_netdev_support
,
2103 miniflow_expand(&netdev_flow
->cr
.mask
->mf
, &wc
.masks
);
2104 /* in_port is exact matched, but we have left it out from the mask for
2105 * optimnization reasons. Add in_port back to the mask. */
2106 wc
.masks
.in_port
.odp_port
= ODPP_NONE
;
2109 offset
= key_buf
->size
;
2110 flow
->key
= ofpbuf_tail(key_buf
);
2111 odp_flow_key_from_flow(&odp_parms
, key_buf
);
2112 flow
->key_len
= key_buf
->size
- offset
;
2115 offset
= mask_buf
->size
;
2116 flow
->mask
= ofpbuf_tail(mask_buf
);
2117 odp_parms
.key_buf
= key_buf
;
2118 odp_flow_key_from_mask(&odp_parms
, mask_buf
);
2119 flow
->mask_len
= mask_buf
->size
- offset
;
2122 actions
= dp_netdev_flow_get_actions(netdev_flow
);
2123 flow
->actions
= actions
->actions
;
2124 flow
->actions_len
= actions
->size
;
2127 flow
->ufid
= netdev_flow
->ufid
;
2128 flow
->ufid_present
= true;
2129 flow
->pmd_id
= netdev_flow
->pmd_id
;
2130 get_dpif_flow_stats(netdev_flow
, &flow
->stats
);
2134 dpif_netdev_mask_from_nlattrs(const struct nlattr
*key
, uint32_t key_len
,
2135 const struct nlattr
*mask_key
,
2136 uint32_t mask_key_len
, const struct flow
*flow
,
2137 struct flow_wildcards
*wc
)
2139 enum odp_key_fitness fitness
;
2141 fitness
= odp_flow_key_to_mask(mask_key
, mask_key_len
, wc
, flow
);
2143 /* This should not happen: it indicates that
2144 * odp_flow_key_from_mask() and odp_flow_key_to_mask()
2145 * disagree on the acceptable form of a mask. Log the problem
2146 * as an error, with enough details to enable debugging. */
2147 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
2149 if (!VLOG_DROP_ERR(&rl
)) {
2153 odp_flow_format(key
, key_len
, mask_key
, mask_key_len
, NULL
, &s
,
2155 VLOG_ERR("internal error parsing flow mask %s (%s)",
2156 ds_cstr(&s
), odp_key_fitness_to_string(fitness
));
dpif_netdev_flow_from_nlattrs(const struct nlattr *key, uint32_t key_len,
    if (odp_flow_key_to_flow(key, key_len, flow)) {
        /* This should not happen: it indicates that odp_flow_key_from_flow()
         * and odp_flow_key_to_flow() disagree on the acceptable form of a
         * flow. Log the problem as an error, with enough details to enable
         * debugging. */
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        if (!VLOG_DROP_ERR(&rl)) {
            odp_flow_format(key, key_len, NULL, 0, NULL, &s, true);
            VLOG_ERR("internal error parsing flow key %s", ds_cstr(&s));

    in_port = flow->in_port.odp_port;
    if (!is_valid_port_number(in_port) && in_port != ODPP_NONE) {

    if (flow->ct_state & DP_NETDEV_CS_UNSUPPORTED_MASK) {
dpif_netdev_flow_get(const struct dpif *dpif, const struct dpif_flow_get *get)
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_flow *netdev_flow;
    struct dp_netdev_pmd_thread *pmd;
    struct hmapx to_find = HMAPX_INITIALIZER(&to_find);
    struct hmapx_node *node;

    if (get->pmd_id == PMD_ID_NULL) {
        CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
            if (dp_netdev_pmd_try_ref(pmd) && !hmapx_add(&to_find, pmd)) {
                dp_netdev_pmd_unref(pmd);
        pmd = dp_netdev_get_pmd(dp, get->pmd_id);
            hmapx_add(&to_find, pmd);

    if (!hmapx_count(&to_find)) {

    HMAPX_FOR_EACH (node, &to_find) {
        pmd = (struct dp_netdev_pmd_thread *) node->data;
        netdev_flow = dp_netdev_pmd_find_flow(pmd, get->ufid, get->key,
            dp_netdev_flow_to_dpif_flow(netdev_flow, get->buffer, get->buffer,

    HMAPX_FOR_EACH (node, &to_find) {
        pmd = (struct dp_netdev_pmd_thread *) node->data;
        dp_netdev_pmd_unref(pmd);
    hmapx_destroy(&to_find);
static struct dp_netdev_flow *
dp_netdev_flow_add(struct dp_netdev_pmd_thread *pmd,
                   struct match *match, const ovs_u128 *ufid,
                   const struct nlattr *actions, size_t actions_len)
    OVS_REQUIRES(pmd->flow_mutex)
    struct dp_netdev_flow *flow;
    struct netdev_flow_key mask;

    /* Make sure in_port is exact matched before we read it. */
    ovs_assert(match->wc.masks.in_port.odp_port == ODPP_NONE);
    odp_port_t in_port = match->flow.in_port.odp_port;

    /* As we select the dpcls based on the port number, each netdev flow
     * belonging to the same dpcls will have the same odp_port value.
     * For performance reasons we wildcard odp_port here in the mask. In the
     * typical case dp_hash is also wildcarded, and the resulting 8-byte
     * chunk {dp_hash, in_port} will be ignored by netdev_flow_mask_init() and
     * will not be part of the subtable mask.
     * This will speed up the hash computation during dpcls_lookup() because
     * there is one less call to hash_add64() in this case. */
    match->wc.masks.in_port.odp_port = 0;
    netdev_flow_mask_init(&mask, match);
    match->wc.masks.in_port.odp_port = ODPP_NONE;

    /* Make sure wc does not have metadata. */
    ovs_assert(!FLOWMAP_HAS_FIELD(&mask.mf.map, metadata)
               && !FLOWMAP_HAS_FIELD(&mask.mf.map, regs));

    /* Do not allocate extra space. */
    flow = xmalloc(sizeof *flow - sizeof flow->cr.flow.mf + mask.len);
    memset(&flow->stats, 0, sizeof flow->stats);
    *CONST_CAST(unsigned *, &flow->pmd_id) = pmd->core_id;
    *CONST_CAST(struct flow *, &flow->flow) = match->flow;
    *CONST_CAST(ovs_u128 *, &flow->ufid) = *ufid;
    ovs_refcount_init(&flow->ref_cnt);
    ovsrcu_set(&flow->actions, dp_netdev_actions_create(actions, actions_len));

    netdev_flow_key_init_masked(&flow->cr.flow, &match->flow, &mask);

    /* Select dpcls for in_port. Relies on in_port to be exact match. */
    cls = dp_netdev_pmd_find_dpcls(pmd, in_port);
    dpcls_insert(cls, &flow->cr, &mask);

    cmap_insert(&pmd->flow_table, CONST_CAST(struct cmap_node *, &flow->node),
                dp_netdev_flow_hash(&flow->ufid));

    if (OVS_UNLIKELY(VLOG_IS_DBG_ENABLED())) {
        struct ds ds = DS_EMPTY_INITIALIZER;
        struct ofpbuf key_buf, mask_buf;
        struct odp_flow_key_parms odp_parms = {
            .flow = &match->flow,
            .mask = &match->wc.masks,
            .support = dp_netdev_support,

        ofpbuf_init(&key_buf, 0);
        ofpbuf_init(&mask_buf, 0);

        odp_flow_key_from_flow(&odp_parms, &key_buf);
        odp_parms.key_buf = &key_buf;
        odp_flow_key_from_mask(&odp_parms, &mask_buf);

        ds_put_cstr(&ds, "flow_add: ");
        odp_format_ufid(ufid, &ds);
        ds_put_cstr(&ds, " ");
        odp_flow_format(key_buf.data, key_buf.size,
                        mask_buf.data, mask_buf.size,
        ds_put_cstr(&ds, ", actions:");
        format_odp_actions(&ds, actions, actions_len);

        VLOG_DBG_RL(&upcall_rl, "%s", ds_cstr(&ds));

        ofpbuf_uninit(&key_buf);
        ofpbuf_uninit(&mask_buf);
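/* Illustrative note (not part of the original source): because in_port is
 * wildcarded only while netdev_flow_mask_init() runs above, the 8-byte
 * {dp_hash, in_port} chunk normally drops out of the subtable mask, so
 * dpcls_lookup() hashes one fewer 64-bit unit per packet for this subtable.
 * in_port itself is still matched exactly: it selects which per-port dpcls
 * the flow is inserted into via dp_netdev_pmd_find_dpcls(). */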
2340 dpif_netdev_flow_put(struct dpif
*dpif
, const struct dpif_flow_put
*put
)
2342 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
2343 struct dp_netdev_flow
*netdev_flow
;
2344 struct netdev_flow_key key
;
2345 struct dp_netdev_pmd_thread
*pmd
;
2348 unsigned pmd_id
= put
->pmd_id
== PMD_ID_NULL
2349 ? NON_PMD_CORE_ID
: put
->pmd_id
;
2352 error
= dpif_netdev_flow_from_nlattrs(put
->key
, put
->key_len
, &match
.flow
);
2356 error
= dpif_netdev_mask_from_nlattrs(put
->key
, put
->key_len
,
2357 put
->mask
, put
->mask_len
,
2358 &match
.flow
, &match
.wc
);
2363 pmd
= dp_netdev_get_pmd(dp
, pmd_id
);
2368 /* Must produce a netdev_flow_key for lookup.
2369 * This interface is no longer performance critical, since it is not used
2370 * for upcall processing any more. */
2371 netdev_flow_key_from_flow(&key
, &match
.flow
);
2376 dpif_flow_hash(dpif
, &match
.flow
, sizeof match
.flow
, &ufid
);
2379 ovs_mutex_lock(&pmd
->flow_mutex
);
2380 netdev_flow
= dp_netdev_pmd_lookup_flow(pmd
, &key
, NULL
);
2382 if (put
->flags
& DPIF_FP_CREATE
) {
2383 if (cmap_count(&pmd
->flow_table
) < MAX_FLOWS
) {
2385 memset(put
->stats
, 0, sizeof *put
->stats
);
2387 dp_netdev_flow_add(pmd
, &match
, &ufid
, put
->actions
,
2397 if (put
->flags
& DPIF_FP_MODIFY
2398 && flow_equal(&match
.flow
, &netdev_flow
->flow
)) {
2399 struct dp_netdev_actions
*new_actions
;
2400 struct dp_netdev_actions
*old_actions
;
2402 new_actions
= dp_netdev_actions_create(put
->actions
,
2405 old_actions
= dp_netdev_flow_get_actions(netdev_flow
);
2406 ovsrcu_set(&netdev_flow
->actions
, new_actions
);
2409 get_dpif_flow_stats(netdev_flow
, put
->stats
);
2411 if (put
->flags
& DPIF_FP_ZERO_STATS
) {
2412 /* XXX: The userspace datapath uses thread local statistics
2413 * (for flows), which should be updated only by the owning
2414 * thread. Since we cannot write on stats memory here,
2415 * we choose not to support this flag. Please note:
2416 * - This feature is currently used only by dpctl commands with
2418 * - Should the need arise, this operation can be implemented
2419 * by keeping a base value (to be update here) for each
2420 * counter, and subtracting it before outputting the stats */
2424 ovsrcu_postpone(dp_netdev_actions_free
, old_actions
);
2425 } else if (put
->flags
& DPIF_FP_CREATE
) {
2428 /* Overlapping flow. */
2432 ovs_mutex_unlock(&pmd
->flow_mutex
);
2433 dp_netdev_pmd_unref(pmd
);
2439 dpif_netdev_flow_del(struct dpif
*dpif
, const struct dpif_flow_del
*del
)
2441 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
2442 struct dp_netdev_flow
*netdev_flow
;
2443 struct dp_netdev_pmd_thread
*pmd
;
2444 unsigned pmd_id
= del
->pmd_id
== PMD_ID_NULL
2445 ? NON_PMD_CORE_ID
: del
->pmd_id
;
2448 pmd
= dp_netdev_get_pmd(dp
, pmd_id
);
2453 ovs_mutex_lock(&pmd
->flow_mutex
);
2454 netdev_flow
= dp_netdev_pmd_find_flow(pmd
, del
->ufid
, del
->key
,
2458 get_dpif_flow_stats(netdev_flow
, del
->stats
);
2460 dp_netdev_pmd_remove_flow(pmd
, netdev_flow
);
2464 ovs_mutex_unlock(&pmd
->flow_mutex
);
2465 dp_netdev_pmd_unref(pmd
);
2470 struct dpif_netdev_flow_dump
{
2471 struct dpif_flow_dump up
;
2472 struct cmap_position poll_thread_pos
;
2473 struct cmap_position flow_pos
;
2474 struct dp_netdev_pmd_thread
*cur_pmd
;
2476 struct ovs_mutex mutex
;
2479 static struct dpif_netdev_flow_dump
*
2480 dpif_netdev_flow_dump_cast(struct dpif_flow_dump
*dump
)
2482 return CONTAINER_OF(dump
, struct dpif_netdev_flow_dump
, up
);
2485 static struct dpif_flow_dump
*
2486 dpif_netdev_flow_dump_create(const struct dpif
*dpif_
, bool terse
)
2488 struct dpif_netdev_flow_dump
*dump
;
2490 dump
= xzalloc(sizeof *dump
);
2491 dpif_flow_dump_init(&dump
->up
, dpif_
);
2492 dump
->up
.terse
= terse
;
2493 ovs_mutex_init(&dump
->mutex
);
2499 dpif_netdev_flow_dump_destroy(struct dpif_flow_dump
*dump_
)
2501 struct dpif_netdev_flow_dump
*dump
= dpif_netdev_flow_dump_cast(dump_
);
2503 ovs_mutex_destroy(&dump
->mutex
);
2508 struct dpif_netdev_flow_dump_thread
{
2509 struct dpif_flow_dump_thread up
;
2510 struct dpif_netdev_flow_dump
*dump
;
2511 struct odputil_keybuf keybuf
[FLOW_DUMP_MAX_BATCH
];
2512 struct odputil_keybuf maskbuf
[FLOW_DUMP_MAX_BATCH
];
2515 static struct dpif_netdev_flow_dump_thread
*
2516 dpif_netdev_flow_dump_thread_cast(struct dpif_flow_dump_thread
*thread
)
2518 return CONTAINER_OF(thread
, struct dpif_netdev_flow_dump_thread
, up
);
2521 static struct dpif_flow_dump_thread
*
2522 dpif_netdev_flow_dump_thread_create(struct dpif_flow_dump
*dump_
)
2524 struct dpif_netdev_flow_dump
*dump
= dpif_netdev_flow_dump_cast(dump_
);
2525 struct dpif_netdev_flow_dump_thread
*thread
;
2527 thread
= xmalloc(sizeof *thread
);
2528 dpif_flow_dump_thread_init(&thread
->up
, &dump
->up
);
2529 thread
->dump
= dump
;
2534 dpif_netdev_flow_dump_thread_destroy(struct dpif_flow_dump_thread
*thread_
)
2536 struct dpif_netdev_flow_dump_thread
*thread
2537 = dpif_netdev_flow_dump_thread_cast(thread_
);
2543 dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread
*thread_
,
2544 struct dpif_flow
*flows
, int max_flows
)
2546 struct dpif_netdev_flow_dump_thread
*thread
2547 = dpif_netdev_flow_dump_thread_cast(thread_
);
2548 struct dpif_netdev_flow_dump
*dump
= thread
->dump
;
2549 struct dp_netdev_flow
*netdev_flows
[FLOW_DUMP_MAX_BATCH
];
2553 ovs_mutex_lock(&dump
->mutex
);
2554 if (!dump
->status
) {
2555 struct dpif_netdev
*dpif
= dpif_netdev_cast(thread
->up
.dpif
);
2556 struct dp_netdev
*dp
= get_dp_netdev(&dpif
->dpif
);
2557 struct dp_netdev_pmd_thread
*pmd
= dump
->cur_pmd
;
2558 int flow_limit
= MIN(max_flows
, FLOW_DUMP_MAX_BATCH
);
2560 /* First call to dump_next(), extracts the first pmd thread.
2561 * If there is no pmd thread, returns immediately. */
2563 pmd
= dp_netdev_pmd_get_next(dp
, &dump
->poll_thread_pos
);
2565 ovs_mutex_unlock(&dump
->mutex
);
2572 for (n_flows
= 0; n_flows
< flow_limit
; n_flows
++) {
2573 struct cmap_node
*node
;
2575 node
= cmap_next_position(&pmd
->flow_table
, &dump
->flow_pos
);
2579 netdev_flows
[n_flows
] = CONTAINER_OF(node
,
2580 struct dp_netdev_flow
,
2583 /* When finishing dumping the current pmd thread, moves to
2585 if (n_flows
< flow_limit
) {
2586 memset(&dump
->flow_pos
, 0, sizeof dump
->flow_pos
);
2587 dp_netdev_pmd_unref(pmd
);
2588 pmd
= dp_netdev_pmd_get_next(dp
, &dump
->poll_thread_pos
);
2594 /* Keeps the reference to next caller. */
2595 dump
->cur_pmd
= pmd
;
2597 /* If the current dump is empty, do not exit the loop, since the
2598 * remaining pmds could have flows to be dumped. Just dumps again
2599 * on the new 'pmd'. */
2602 ovs_mutex_unlock(&dump
->mutex
);
2604 for (i
= 0; i
< n_flows
; i
++) {
2605 struct odputil_keybuf
*maskbuf
= &thread
->maskbuf
[i
];
2606 struct odputil_keybuf
*keybuf
= &thread
->keybuf
[i
];
2607 struct dp_netdev_flow
*netdev_flow
= netdev_flows
[i
];
2608 struct dpif_flow
*f
= &flows
[i
];
2609 struct ofpbuf key
, mask
;
2611 ofpbuf_use_stack(&key
, keybuf
, sizeof *keybuf
);
2612 ofpbuf_use_stack(&mask
, maskbuf
, sizeof *maskbuf
);
2613 dp_netdev_flow_to_dpif_flow(netdev_flow
, &key
, &mask
, f
,
2621 dpif_netdev_execute(struct dpif
*dpif
, struct dpif_execute
*execute
)
2622 OVS_NO_THREAD_SAFETY_ANALYSIS
2624 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
2625 struct dp_netdev_pmd_thread
*pmd
;
2626 struct dp_packet_batch pp
;
2628 if (dp_packet_size(execute
->packet
) < ETH_HEADER_LEN
||
2629 dp_packet_size(execute
->packet
) > UINT16_MAX
) {
2633 /* Tries finding the 'pmd'. If NULL is returned, that means
2634 * the current thread is a non-pmd thread and should use
2635 * dp_netdev_get_pmd(dp, NON_PMD_CORE_ID). */
2636 pmd
= ovsthread_getspecific(dp
->per_pmd_key
);
2638 pmd
= dp_netdev_get_pmd(dp
, NON_PMD_CORE_ID
);
2644 /* If the current thread is non-pmd thread, acquires
2645 * the 'non_pmd_mutex'. */
2646 if (pmd
->core_id
== NON_PMD_CORE_ID
) {
2647 ovs_mutex_lock(&dp
->non_pmd_mutex
);
2650 /* The action processing expects the RSS hash to be valid, because
2651 * it's always initialized at the beginning of datapath processing.
2652 * In this case, though, 'execute->packet' may not have gone through
2653 * the datapath at all, it may have been generated by the upper layer
2654 * (OpenFlow packet-out, BFD frame, ...). */
2655 if (!dp_packet_rss_valid(execute
->packet
)) {
2656 dp_packet_set_rss_hash(execute
->packet
,
2657 flow_hash_5tuple(execute
->flow
, 0));
2660 packet_batch_init_packet(&pp
, execute
->packet
);
2661 dp_netdev_execute_actions(pmd
, &pp
, false, execute
->flow
,
2662 execute
->actions
, execute
->actions_len
,
2665 if (pmd
->core_id
== NON_PMD_CORE_ID
) {
2666 ovs_mutex_unlock(&dp
->non_pmd_mutex
);
2667 dp_netdev_pmd_unref(pmd
);
2674 dpif_netdev_operate(struct dpif
*dpif
, struct dpif_op
**ops
, size_t n_ops
)
2678 for (i
= 0; i
< n_ops
; i
++) {
2679 struct dpif_op
*op
= ops
[i
];
2682 case DPIF_OP_FLOW_PUT
:
2683 op
->error
= dpif_netdev_flow_put(dpif
, &op
->u
.flow_put
);
2686 case DPIF_OP_FLOW_DEL
:
2687 op
->error
= dpif_netdev_flow_del(dpif
, &op
->u
.flow_del
);
2690 case DPIF_OP_EXECUTE
:
2691 op
->error
= dpif_netdev_execute(dpif
, &op
->u
.execute
);
2694 case DPIF_OP_FLOW_GET
:
2695 op
->error
= dpif_netdev_flow_get(dpif
, &op
->u
.flow_get
);
/* Changes the number or the affinity of pmd threads. The changes are actually
 * applied in dpif_netdev_run(). */
dpif_netdev_pmd_set(struct dpif *dpif, const char *cmask)
    struct dp_netdev *dp = get_dp_netdev(dpif);

    if (!nullable_string_is_equal(dp->pmd_cmask, cmask)) {
        free(dp->pmd_cmask);
        dp->pmd_cmask = nullable_xstrdup(cmask);
        dp_netdev_request_reconfigure(dp);
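/* Usage note (assumption, matching the documented configuration knob): the
 * 'cmask' string normally arrives from the database, e.g.
 *
 *     ovs-vsctl set Open_vSwitch . other_config:pmd-cpu-mask=0x6
 *
 * which requests PMD threads on cores 1 and 2; the actual thread changes are
 * deferred until dpif_netdev_run() triggers reconfigure_pmd_threads(). */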
/* Parses affinity list and returns result in 'core_ids'. */
parse_affinity_list(const char *affinity_list, unsigned *core_ids, int n_rxq)
    char *list, *copy, *key, *value;

    for (i = 0; i < n_rxq; i++) {
        core_ids[i] = OVS_CORE_UNSPEC;

    if (!affinity_list) {

    list = copy = xstrdup(affinity_list);

    while (ofputil_parse_key_value(&list, &key, &value)) {
        int rxq_id, core_id;

        if (!str_to_int(key, 0, &rxq_id) || rxq_id < 0
            || !str_to_int(value, 0, &core_id) || core_id < 0) {

        if (rxq_id < n_rxq) {
            core_ids[rxq_id] = core_id;
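/* Example (illustrative): the affinity list is a comma-separated set of
 * "rxq:core" pairs, so "0:3,1:7" pins rx queue 0 to core 3 and rx queue 1 to
 * core 7.  Queues not mentioned keep OVS_CORE_UNSPEC and remain available
 * for automatic distribution across non-isolated PMD threads. */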
/* Parses 'affinity_list' and applies configuration if it is valid. */
dpif_netdev_port_set_rxq_affinity(struct dp_netdev_port *port,
                                  const char *affinity_list)
    unsigned *core_ids, i;

    core_ids = xmalloc(port->n_rxq * sizeof *core_ids);
    if (parse_affinity_list(affinity_list, core_ids, port->n_rxq)) {

    for (i = 0; i < port->n_rxq; i++) {
        port->rxqs[i].core_id = core_ids[i];
/* Changes the affinity of port's rx queues. The changes are actually applied
 * in dpif_netdev_run(). */
dpif_netdev_port_set_config(struct dpif *dpif, odp_port_t port_no,
                            const struct smap *cfg)
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_port *port;
    const char *affinity_list = smap_get(cfg, "pmd-rxq-affinity");

    ovs_mutex_lock(&dp->port_mutex);
    error = get_port_by_number(dp, port_no, &port);
    if (error || !netdev_is_pmd(port->netdev)
        || nullable_string_is_equal(affinity_list, port->rxq_affinity_list)) {

    error = dpif_netdev_port_set_rxq_affinity(port, affinity_list);

    free(port->rxq_affinity_list);
    port->rxq_affinity_list = nullable_xstrdup(affinity_list);

    dp_netdev_request_reconfigure(dp);
    ovs_mutex_unlock(&dp->port_mutex);
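/* Usage note (assumption, following the "pmd-rxq-affinity" key read above):
 * the per-port setting is normally supplied through the Interface table, e.g.
 *
 *     ovs-vsctl set Interface dpdk0 other_config:pmd-rxq-affinity="0:3,1:7"
 *
 * where "dpdk0" is just an example port name.  Like the cpu mask, the change
 * only takes effect on the next dpif_netdev_run() reconfiguration pass. */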
dpif_netdev_queue_to_priority(const struct dpif *dpif OVS_UNUSED,
                              uint32_t queue_id, uint32_t *priority)
    *priority = queue_id;
/* Creates and returns a new 'struct dp_netdev_actions', whose actions are
 * a copy of the 'size' bytes of 'actions'. */
struct dp_netdev_actions *
dp_netdev_actions_create(const struct nlattr *actions, size_t size)
    struct dp_netdev_actions *netdev_actions;

    netdev_actions = xmalloc(sizeof *netdev_actions + size);
    memcpy(netdev_actions->actions, actions, size);
    netdev_actions->size = size;

    return netdev_actions;
struct dp_netdev_actions *
dp_netdev_flow_get_actions(const struct dp_netdev_flow *flow)
    return ovsrcu_get(struct dp_netdev_actions *, &flow->actions);

dp_netdev_actions_free(struct dp_netdev_actions *actions)
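/* Illustrative sketch (not part of the original source) of the RCU pattern
 * these helpers support, as used by the flow modify path earlier in this
 * file:
 *
 *     new_actions = dp_netdev_actions_create(put->actions, put->actions_len);
 *     old_actions = dp_netdev_flow_get_actions(netdev_flow);
 *     ovsrcu_set(&netdev_flow->actions, new_actions);
 *     ovsrcu_postpone(dp_netdev_actions_free, old_actions);
 *
 * Readers keep using 'old_actions' until they quiesce, after which the
 * postponed free runs, so the packet processing path needs no lock. */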
static inline unsigned long long
cycles_counter(void)
    return rte_get_tsc_cycles();

/* Fake mutex to make sure that the calls to cycles_count_* are balanced */
extern struct ovs_mutex cycles_counter_fake_mutex;

/* Start counting cycles. Must be followed by 'cycles_count_end()' */
cycles_count_start(struct dp_netdev_pmd_thread *pmd)
    OVS_ACQUIRES(&cycles_counter_fake_mutex)
    OVS_NO_THREAD_SAFETY_ANALYSIS
    pmd->last_cycles = cycles_counter();

/* Stop counting cycles and add them to the counter 'type' */
cycles_count_end(struct dp_netdev_pmd_thread *pmd,
                 enum pmd_cycles_counter_type type)
    OVS_RELEASES(&cycles_counter_fake_mutex)
    OVS_NO_THREAD_SAFETY_ANALYSIS
    unsigned long long interval = cycles_counter() - pmd->last_cycles;

    non_atomic_ullong_add(&pmd->cycles.n[type], interval);
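/* Usage sketch (taken from dp_netdev_process_rxq_port() below): the two calls
 * must always be paired, which is what the fake-mutex annotations above let
 * the thread-safety analysis enforce at compile time:
 *
 *     cycles_count_start(pmd);
 *     error = netdev_rxq_recv(rxq, &batch);
 *     cycles_count_end(pmd, PMD_CYCLES_POLLING);
 */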
dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
                           struct dp_netdev_port *port,
                           struct netdev_rxq *rxq)
    struct dp_packet_batch batch;

    dp_packet_batch_init(&batch);
    cycles_count_start(pmd);
    error = netdev_rxq_recv(rxq, &batch);
    cycles_count_end(pmd, PMD_CYCLES_POLLING);
        *recirc_depth_get() = 0;

        cycles_count_start(pmd);
        dp_netdev_input(pmd, &batch, port->port_no);
        cycles_count_end(pmd, PMD_CYCLES_PROCESSING);
    } else if (error != EAGAIN && error != EOPNOTSUPP) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
                    netdev_get_name(port->netdev), ovs_strerror(error));
2903 port_reconfigure(struct dp_netdev_port
*port
)
2905 struct netdev
*netdev
= port
->netdev
;
2908 if (!netdev_is_reconf_required(netdev
)) {
2912 /* Closes the existing 'rxq's. */
2913 for (i
= 0; i
< port
->n_rxq
; i
++) {
2914 netdev_rxq_close(port
->rxqs
[i
].rxq
);
2915 port
->rxqs
[i
].rxq
= NULL
;
2919 /* Allows 'netdev' to apply the pending configuration changes. */
2920 err
= netdev_reconfigure(netdev
);
2921 if (err
&& (err
!= EOPNOTSUPP
)) {
2922 VLOG_ERR("Failed to set interface %s new configuration",
2923 netdev_get_name(netdev
));
2926 /* If the netdev_reconfigure() above succeeds, reopens the 'rxq's. */
2927 port
->rxqs
= xrealloc(port
->rxqs
,
2928 sizeof *port
->rxqs
* netdev_n_rxq(netdev
));
2929 /* Realloc 'used' counters for tx queues. */
2930 free(port
->txq_used
);
2931 port
->txq_used
= xcalloc(netdev_n_txq(netdev
), sizeof *port
->txq_used
);
2933 for (i
= 0; i
< netdev_n_rxq(netdev
); i
++) {
2934 err
= netdev_rxq_open(netdev
, &port
->rxqs
[i
].rxq
, i
);
2941 /* Parse affinity list to apply configuration for new queues. */
2942 dpif_netdev_port_set_rxq_affinity(port
, port
->rxq_affinity_list
);
2948 reconfigure_pmd_threads(struct dp_netdev
*dp
)
2949 OVS_REQUIRES(dp
->port_mutex
)
2951 struct dp_netdev_port
*port
, *next
;
2954 dp
->last_reconfigure_seq
= seq_read(dp
->reconfigure_seq
);
2956 dp_netdev_destroy_all_pmds(dp
);
2958 /* Reconfigures the cpu mask. */
2959 ovs_numa_set_cpu_mask(dp
->pmd_cmask
);
2961 n_cores
= ovs_numa_get_n_cores();
2962 if (n_cores
== OVS_CORE_UNSPEC
) {
2963 VLOG_ERR("Cannot get cpu core info");
2967 HMAP_FOR_EACH_SAFE (port
, next
, node
, &dp
->ports
) {
2970 err
= port_reconfigure(port
);
2972 hmap_remove(&dp
->ports
, &port
->node
);
2973 seq_change(dp
->port_seq
);
2976 port
->dynamic_txqs
= netdev_n_txq(port
->netdev
) < n_cores
+ 1;
2979 /* Restores the non-pmd. */
2980 dp_netdev_set_nonpmd(dp
);
2981 /* Restores all pmd threads. */
2982 dp_netdev_reset_pmd_threads(dp
);
2985 /* Returns true if one of the netdevs in 'dp' requires a reconfiguration */
2987 ports_require_restart(const struct dp_netdev
*dp
)
2988 OVS_REQUIRES(dp
->port_mutex
)
2990 struct dp_netdev_port
*port
;
2992 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
2993 if (netdev_is_reconf_required(port
->netdev
)) {
3001 /* Return true if needs to revalidate datapath flows. */
3003 dpif_netdev_run(struct dpif
*dpif
)
3005 struct dp_netdev_port
*port
;
3006 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
3007 struct dp_netdev_pmd_thread
*non_pmd
;
3008 uint64_t new_tnl_seq
;
3010 ovs_mutex_lock(&dp
->port_mutex
);
3011 non_pmd
= dp_netdev_get_pmd(dp
, NON_PMD_CORE_ID
);
3013 ovs_mutex_lock(&dp
->non_pmd_mutex
);
3014 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
3015 if (!netdev_is_pmd(port
->netdev
)) {
3018 for (i
= 0; i
< port
->n_rxq
; i
++) {
3019 dp_netdev_process_rxq_port(non_pmd
, port
,
3024 dpif_netdev_xps_revalidate_pmd(non_pmd
, time_msec(), false);
3025 ovs_mutex_unlock(&dp
->non_pmd_mutex
);
3027 dp_netdev_pmd_unref(non_pmd
);
3030 if (dp_netdev_is_reconf_required(dp
) || ports_require_restart(dp
)) {
3031 reconfigure_pmd_threads(dp
);
3033 ovs_mutex_unlock(&dp
->port_mutex
);
3035 tnl_neigh_cache_run();
3037 new_tnl_seq
= seq_read(tnl_conf_seq
);
3039 if (dp
->last_tnl_conf_seq
!= new_tnl_seq
) {
3040 dp
->last_tnl_conf_seq
= new_tnl_seq
;
3047 dpif_netdev_wait(struct dpif
*dpif
)
3049 struct dp_netdev_port
*port
;
3050 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
3052 ovs_mutex_lock(&dp_netdev_mutex
);
3053 ovs_mutex_lock(&dp
->port_mutex
);
3054 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
3055 netdev_wait_reconf_required(port
->netdev
);
3056 if (!netdev_is_pmd(port
->netdev
)) {
3059 for (i
= 0; i
< port
->n_rxq
; i
++) {
3060 netdev_rxq_wait(port
->rxqs
[i
].rxq
);
3064 ovs_mutex_unlock(&dp
->port_mutex
);
3065 ovs_mutex_unlock(&dp_netdev_mutex
);
3066 seq_wait(tnl_conf_seq
, dp
->last_tnl_conf_seq
);
3070 pmd_free_cached_ports(struct dp_netdev_pmd_thread
*pmd
)
3072 struct tx_port
*tx_port_cached
;
3074 /* Free all used tx queue ids. */
3075 dpif_netdev_xps_revalidate_pmd(pmd
, 0, true);
3077 HMAP_FOR_EACH_POP (tx_port_cached
, node
, &pmd
->port_cache
) {
3078 free(tx_port_cached
);
3082 /* Copies ports from 'pmd->tx_ports' (shared with the main thread) to
3083 * 'pmd->port_cache' (thread local) */
3085 pmd_load_cached_ports(struct dp_netdev_pmd_thread
*pmd
)
3086 OVS_REQUIRES(pmd
->port_mutex
)
3088 struct tx_port
*tx_port
, *tx_port_cached
;
3090 pmd_free_cached_ports(pmd
);
3091 hmap_shrink(&pmd
->port_cache
);
3093 HMAP_FOR_EACH (tx_port
, node
, &pmd
->tx_ports
) {
3094 tx_port_cached
= xmemdup(tx_port
, sizeof *tx_port_cached
);
3095 hmap_insert(&pmd
->port_cache
, &tx_port_cached
->node
,
3096 hash_port_no(tx_port_cached
->port
->port_no
));
3101 pmd_load_queues_and_ports(struct dp_netdev_pmd_thread
*pmd
,
3102 struct rxq_poll
**ppoll_list
)
3104 struct rxq_poll
*poll_list
= *ppoll_list
;
3105 struct rxq_poll
*poll
;
3108 ovs_mutex_lock(&pmd
->port_mutex
);
3109 poll_list
= xrealloc(poll_list
, pmd
->poll_cnt
* sizeof *poll_list
);
3112 LIST_FOR_EACH (poll
, node
, &pmd
->poll_list
) {
3113 poll_list
[i
++] = *poll
;
3116 pmd_load_cached_ports(pmd
);
3118 ovs_mutex_unlock(&pmd
->port_mutex
);
3120 *ppoll_list
= poll_list
;
3125 pmd_thread_main(void *f_
)
3127 struct dp_netdev_pmd_thread
*pmd
= f_
;
3128 unsigned int lc
= 0;
3129 struct rxq_poll
*poll_list
;
3130 unsigned int port_seq
= PMD_INITIAL_SEQ
;
3137 /* Stores the pmd thread's 'pmd' to 'per_pmd_key'. */
3138 ovsthread_setspecific(pmd
->dp
->per_pmd_key
, pmd
);
3139 ovs_numa_thread_setaffinity_core(pmd
->core_id
);
3140 dpdk_set_lcore_id(pmd
->core_id
);
3141 poll_cnt
= pmd_load_queues_and_ports(pmd
, &poll_list
);
3143 emc_cache_init(&pmd
->flow_cache
);
3145 /* List port/core affinity */
3146 for (i
= 0; i
< poll_cnt
; i
++) {
3147 VLOG_DBG("Core %d processing port \'%s\' with queue-id %d\n",
3148 pmd
->core_id
, netdev_get_name(poll_list
[i
].port
->netdev
),
3149 netdev_rxq_get_queue_id(poll_list
[i
].rx
));
3153 for (i
= 0; i
< poll_cnt
; i
++) {
3154 dp_netdev_process_rxq_port(pmd
, poll_list
[i
].port
, poll_list
[i
].rx
);
3162 coverage_try_clear();
3163 dp_netdev_pmd_try_optimize(pmd
);
3164 if (!ovsrcu_try_quiesce()) {
3165 emc_cache_slow_sweep(&pmd
->flow_cache
);
3168 atomic_read_relaxed(&pmd
->change_seq
, &seq
);
3169 if (seq
!= port_seq
) {
3176 poll_cnt
= pmd_load_queues_and_ports(pmd
, &poll_list
);
3177 exiting
= latch_is_set(&pmd
->exit_latch
);
3178 /* Signal here to make sure the pmd finishes
3179 * reloading the updated configuration. */
3180 dp_netdev_pmd_reload_done(pmd
);
3182 emc_cache_uninit(&pmd
->flow_cache
);
3189 pmd_free_cached_ports(pmd
);
3194 dp_netdev_disable_upcall(struct dp_netdev
*dp
)
3195 OVS_ACQUIRES(dp
->upcall_rwlock
)
3197 fat_rwlock_wrlock(&dp
->upcall_rwlock
);
3201 dpif_netdev_disable_upcall(struct dpif
*dpif
)
3202 OVS_NO_THREAD_SAFETY_ANALYSIS
3204 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
3205 dp_netdev_disable_upcall(dp
);
3209 dp_netdev_enable_upcall(struct dp_netdev
*dp
)
3210 OVS_RELEASES(dp
->upcall_rwlock
)
3212 fat_rwlock_unlock(&dp
->upcall_rwlock
);
3216 dpif_netdev_enable_upcall(struct dpif
*dpif
)
3217 OVS_NO_THREAD_SAFETY_ANALYSIS
3219 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
3220 dp_netdev_enable_upcall(dp
);
3224 dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread
*pmd
)
3226 ovs_mutex_lock(&pmd
->cond_mutex
);
3227 xpthread_cond_signal(&pmd
->cond
);
3228 ovs_mutex_unlock(&pmd
->cond_mutex
);
3231 /* Finds and refs the dp_netdev_pmd_thread on core 'core_id'. Returns
3232 * the pointer if succeeds, otherwise, NULL (it can return NULL even if
3233 * 'core_id' is NON_PMD_CORE_ID).
3235 * Caller must unrefs the returned reference. */
3236 static struct dp_netdev_pmd_thread
*
3237 dp_netdev_get_pmd(struct dp_netdev
*dp
, unsigned core_id
)
3239 struct dp_netdev_pmd_thread
*pmd
;
3240 const struct cmap_node
*pnode
;
3242 pnode
= cmap_find(&dp
->poll_threads
, hash_int(core_id
, 0));
3246 pmd
= CONTAINER_OF(pnode
, struct dp_netdev_pmd_thread
, node
);
3248 return dp_netdev_pmd_try_ref(pmd
) ? pmd
: NULL
;
3251 /* Sets the 'struct dp_netdev_pmd_thread' for non-pmd threads. */
3253 dp_netdev_set_nonpmd(struct dp_netdev
*dp
)
3254 OVS_REQUIRES(dp
->port_mutex
)
3256 struct dp_netdev_pmd_thread
*non_pmd
;
3257 struct dp_netdev_port
*port
;
3259 non_pmd
= xzalloc(sizeof *non_pmd
);
3260 dp_netdev_configure_pmd(non_pmd
, dp
, NON_PMD_CORE_ID
, OVS_NUMA_UNSPEC
);
3262 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
3263 dp_netdev_add_port_tx_to_pmd(non_pmd
, port
);
3266 dp_netdev_reload_pmd__(non_pmd
);
3269 /* Caller must have valid pointer to 'pmd'. */
3271 dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread
*pmd
)
3273 return ovs_refcount_try_ref_rcu(&pmd
->ref_cnt
);
3277 dp_netdev_pmd_unref(struct dp_netdev_pmd_thread
*pmd
)
3279 if (pmd
&& ovs_refcount_unref(&pmd
->ref_cnt
) == 1) {
3280 ovsrcu_postpone(dp_netdev_destroy_pmd
, pmd
);
3284 /* Given cmap position 'pos', tries to ref the next node. If try_ref()
3285 * fails, keeps checking for next node until reaching the end of cmap.
3287 * Caller must unrefs the returned reference. */
3288 static struct dp_netdev_pmd_thread
*
3289 dp_netdev_pmd_get_next(struct dp_netdev
*dp
, struct cmap_position
*pos
)
3291 struct dp_netdev_pmd_thread
*next
;
3294 struct cmap_node
*node
;
3296 node
= cmap_next_position(&dp
->poll_threads
, pos
);
3297 next
= node
? CONTAINER_OF(node
, struct dp_netdev_pmd_thread
, node
)
3299 } while (next
&& !dp_netdev_pmd_try_ref(next
));
3304 /* Configures the 'pmd' based on the input argument. */
3306 dp_netdev_configure_pmd(struct dp_netdev_pmd_thread
*pmd
, struct dp_netdev
*dp
,
3307 unsigned core_id
, int numa_id
)
3310 pmd
->core_id
= core_id
;
3311 pmd
->numa_id
= numa_id
;
3314 atomic_init(&pmd
->static_tx_qid
,
3315 (core_id
== NON_PMD_CORE_ID
)
3316 ? ovs_numa_get_n_cores()
3317 : get_n_pmd_threads(dp
));
3319 ovs_refcount_init(&pmd
->ref_cnt
);
3320 latch_init(&pmd
->exit_latch
);
3321 atomic_init(&pmd
->change_seq
, PMD_INITIAL_SEQ
);
3322 xpthread_cond_init(&pmd
->cond
, NULL
);
3323 ovs_mutex_init(&pmd
->cond_mutex
);
3324 ovs_mutex_init(&pmd
->flow_mutex
);
3325 ovs_mutex_init(&pmd
->port_mutex
);
3326 cmap_init(&pmd
->flow_table
);
3327 cmap_init(&pmd
->classifiers
);
3328 pmd
->next_optimization
= time_msec() + DPCLS_OPTIMIZATION_INTERVAL
;
3329 ovs_list_init(&pmd
->poll_list
);
3330 hmap_init(&pmd
->tx_ports
);
3331 hmap_init(&pmd
->port_cache
);
3332 /* init the 'flow_cache' since there is no
3333 * actual thread created for NON_PMD_CORE_ID. */
3334 if (core_id
== NON_PMD_CORE_ID
) {
3335 emc_cache_init(&pmd
->flow_cache
);
3337 cmap_insert(&dp
->poll_threads
, CONST_CAST(struct cmap_node
*, &pmd
->node
),
3338 hash_int(core_id
, 0));
3342 dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread
*pmd
)
3346 dp_netdev_pmd_flow_flush(pmd
);
3347 hmap_destroy(&pmd
->port_cache
);
3348 hmap_destroy(&pmd
->tx_ports
);
3349 /* All flows (including their dpcls_rules) have been deleted already */
3350 CMAP_FOR_EACH (cls
, node
, &pmd
->classifiers
) {
3352 ovsrcu_postpone(free
, cls
);
3354 cmap_destroy(&pmd
->classifiers
);
3355 cmap_destroy(&pmd
->flow_table
);
3356 ovs_mutex_destroy(&pmd
->flow_mutex
);
3357 latch_destroy(&pmd
->exit_latch
);
3358 xpthread_cond_destroy(&pmd
->cond
);
3359 ovs_mutex_destroy(&pmd
->cond_mutex
);
3360 ovs_mutex_destroy(&pmd
->port_mutex
);
3364 /* Stops the pmd thread, removes it from the 'dp->poll_threads',
3365 * and unrefs the struct. */
3367 dp_netdev_del_pmd(struct dp_netdev
*dp
, struct dp_netdev_pmd_thread
*pmd
)
3369 /* NON_PMD_CORE_ID doesn't have a thread, so we don't have to synchronize,
3370 * but extra cleanup is necessary */
3371 if (pmd
->core_id
== NON_PMD_CORE_ID
) {
3372 ovs_mutex_lock(&dp
->non_pmd_mutex
);
3373 emc_cache_uninit(&pmd
->flow_cache
);
3374 pmd_free_cached_ports(pmd
);
3375 ovs_mutex_unlock(&dp
->non_pmd_mutex
);
3377 latch_set(&pmd
->exit_latch
);
3378 dp_netdev_reload_pmd__(pmd
);
3379 ovs_numa_unpin_core(pmd
->core_id
);
3380 xpthread_join(pmd
->thread
, NULL
);
3383 dp_netdev_pmd_clear_ports(pmd
);
3385 /* Purges the 'pmd''s flows after stopping the thread, but before
3386 * destroying the flows, so that the flow stats can be collected. */
3387 if (dp
->dp_purge_cb
) {
3388 dp
->dp_purge_cb(dp
->dp_purge_aux
, pmd
->core_id
);
3390 cmap_remove(&pmd
->dp
->poll_threads
, &pmd
->node
, hash_int(pmd
->core_id
, 0));
3391 dp_netdev_pmd_unref(pmd
);
3394 /* Destroys all pmd threads. */
3396 dp_netdev_destroy_all_pmds(struct dp_netdev
*dp
)
3398 struct dp_netdev_pmd_thread
*pmd
;
3399 struct dp_netdev_pmd_thread
**pmd_list
;
3400 size_t k
= 0, n_pmds
;
3402 n_pmds
= cmap_count(&dp
->poll_threads
);
3403 pmd_list
= xcalloc(n_pmds
, sizeof *pmd_list
);
3405 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
3406 /* We cannot call dp_netdev_del_pmd(), since it alters
3407 * 'dp->poll_threads' (while we're iterating it) and it
3409 ovs_assert(k
< n_pmds
);
3410 pmd_list
[k
++] = pmd
;
3413 for (size_t i
= 0; i
< k
; i
++) {
3414 dp_netdev_del_pmd(dp
, pmd_list
[i
]);
3419 /* Deletes all pmd threads on numa node 'numa_id' and
3420 * fixes static_tx_qids of other threads to keep them sequential. */
3422 dp_netdev_del_pmds_on_numa(struct dp_netdev
*dp
, int numa_id
)
3424 struct dp_netdev_pmd_thread
*pmd
;
3425 int n_pmds_on_numa
, n_pmds
;
3426 int *free_idx
, k
= 0;
3427 struct dp_netdev_pmd_thread
**pmd_list
;
3429 n_pmds_on_numa
= get_n_pmd_threads_on_numa(dp
, numa_id
);
3430 free_idx
= xcalloc(n_pmds_on_numa
, sizeof *free_idx
);
3431 pmd_list
= xcalloc(n_pmds_on_numa
, sizeof *pmd_list
);
3433 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
3434 /* We cannot call dp_netdev_del_pmd(), since it alters
3435 * 'dp->poll_threads' (while we're iterating it) and it
3437 if (pmd
->numa_id
== numa_id
&& pmd
->core_id
!= NON_PMD_CORE_ID
) {
3438 atomic_read_relaxed(&pmd
->static_tx_qid
, &free_idx
[k
]);
3440 ovs_assert(k
< n_pmds_on_numa
);
3445 for (int i
= 0; i
< k
; i
++) {
3446 dp_netdev_del_pmd(dp
, pmd_list
[i
]);
3449 n_pmds
= get_n_pmd_threads(dp
);
3450 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
3453 atomic_read_relaxed(&pmd
->static_tx_qid
, &old_tx_qid
);
3455 if (old_tx_qid
>= n_pmds
&& pmd
->core_id
!= NON_PMD_CORE_ID
) {
3456 int new_tx_qid
= free_idx
[--k
];
3458 atomic_store_relaxed(&pmd
->static_tx_qid
, new_tx_qid
);
3466 /* Deletes all rx queues from pmd->poll_list and all the ports from
3469 dp_netdev_pmd_clear_ports(struct dp_netdev_pmd_thread
*pmd
)
3471 struct rxq_poll
*poll
;
3472 struct tx_port
*port
;
3474 ovs_mutex_lock(&pmd
->port_mutex
);
3475 LIST_FOR_EACH_POP (poll
, node
, &pmd
->poll_list
) {
3479 HMAP_FOR_EACH_POP (port
, node
, &pmd
->tx_ports
) {
3482 ovs_mutex_unlock(&pmd
->port_mutex
);
3485 static struct tx_port
*
3486 tx_port_lookup(const struct hmap
*hmap
, odp_port_t port_no
)
3490 HMAP_FOR_EACH_IN_BUCKET (tx
, node
, hash_port_no(port_no
), hmap
) {
3491 if (tx
->port
->port_no
== port_no
) {
3499 /* Deletes all rx queues of 'port' from 'poll_list', and the 'port' from
3500 * 'tx_ports' of 'pmd' thread. Returns true if 'port' was found in 'pmd'
3501 * (therefore a restart is required). */
3503 dp_netdev_del_port_from_pmd__(struct dp_netdev_port
*port
,
3504 struct dp_netdev_pmd_thread
*pmd
)
3506 struct rxq_poll
*poll
, *next
;
3510 ovs_mutex_lock(&pmd
->port_mutex
);
3511 LIST_FOR_EACH_SAFE (poll
, next
, node
, &pmd
->poll_list
) {
3512 if (poll
->port
== port
) {
3514 ovs_list_remove(&poll
->node
);
3520 tx
= tx_port_lookup(&pmd
->tx_ports
, port
->port_no
);
3522 hmap_remove(&pmd
->tx_ports
, &tx
->node
);
3526 ovs_mutex_unlock(&pmd
->port_mutex
);
3531 /* Deletes 'port' from the 'poll_list' and from the 'tx_ports' of all the pmd
3532 * threads. The pmd threads that need to be restarted are inserted in
3535 dp_netdev_del_port_from_all_pmds__(struct dp_netdev
*dp
,
3536 struct dp_netdev_port
*port
,
3537 struct hmapx
*to_reload
)
3539 struct dp_netdev_pmd_thread
*pmd
;
3541 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
3544 found
= dp_netdev_del_port_from_pmd__(port
, pmd
);
3547 hmapx_add(to_reload
, pmd
);
3552 /* Deletes 'port' from the 'poll_list' and from the 'tx_ports' of all the pmd
3553 * threads. Reloads the threads if needed. */
3555 dp_netdev_del_port_from_all_pmds(struct dp_netdev
*dp
,
3556 struct dp_netdev_port
*port
)
3558 struct dp_netdev_pmd_thread
*pmd
;
3559 struct hmapx to_reload
= HMAPX_INITIALIZER(&to_reload
);
3560 struct hmapx_node
*node
;
3562 dp_netdev_del_port_from_all_pmds__(dp
, port
, &to_reload
);
3564 HMAPX_FOR_EACH (node
, &to_reload
) {
3565 pmd
= (struct dp_netdev_pmd_thread
*) node
->data
;
3566 dp_netdev_reload_pmd__(pmd
);
3569 hmapx_destroy(&to_reload
);
/* Returns the non-isolated PMD thread from this numa node with the fewest
 * rx queues to poll. Returns NULL if there are no non-isolated PMD threads
 * on this numa node. Can be called safely only by the main thread. */
3576 static struct dp_netdev_pmd_thread
*
3577 dp_netdev_less_loaded_pmd_on_numa(struct dp_netdev
*dp
, int numa_id
)
3580 struct dp_netdev_pmd_thread
*pmd
, *res
= NULL
;
3582 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
3583 if (!pmd
->isolated
&& pmd
->numa_id
== numa_id
3584 && (min_cnt
> pmd
->poll_cnt
|| res
== NULL
)) {
3585 min_cnt
= pmd
->poll_cnt
;
3593 /* Adds rx queue to poll_list of PMD thread. */
3595 dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread
*pmd
,
3596 struct dp_netdev_port
*port
, struct netdev_rxq
*rx
)
3597 OVS_REQUIRES(pmd
->port_mutex
)
3599 struct rxq_poll
*poll
= xmalloc(sizeof *poll
);
3604 ovs_list_push_back(&pmd
->poll_list
, &poll
->node
);
3608 /* Add 'port' to the tx port cache of 'pmd', which must be reloaded for the
3609 * changes to take effect. */
3611 dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread
*pmd
,
3612 struct dp_netdev_port
*port
)
3614 struct tx_port
*tx
= xzalloc(sizeof *tx
);
3619 ovs_mutex_lock(&pmd
->port_mutex
);
3620 hmap_insert(&pmd
->tx_ports
, &tx
->node
, hash_port_no(tx
->port
->port_no
));
3621 ovs_mutex_unlock(&pmd
->port_mutex
);
3624 /* Distribute all {pinned|non-pinned} rx queues of 'port' between PMD
3625 * threads in 'dp'. The pmd threads that need to be restarted are inserted
3626 * in 'to_reload'. PMD threads with pinned queues marked as isolated. */
3628 dp_netdev_add_port_rx_to_pmds(struct dp_netdev
*dp
,
3629 struct dp_netdev_port
*port
,
3630 struct hmapx
*to_reload
, bool pinned
)
3632 int numa_id
= netdev_get_numa_id(port
->netdev
);
3633 struct dp_netdev_pmd_thread
*pmd
;
3636 if (!netdev_is_pmd(port
->netdev
)) {
3640 for (i
= 0; i
< port
->n_rxq
; i
++) {
3642 if (port
->rxqs
[i
].core_id
== OVS_CORE_UNSPEC
) {
3645 pmd
= dp_netdev_get_pmd(dp
, port
->rxqs
[i
].core_id
);
3647 VLOG_WARN("There is no PMD thread on core %d. "
3648 "Queue %d on port \'%s\' will not be polled.",
3649 port
->rxqs
[i
].core_id
, i
,
3650 netdev_get_name(port
->netdev
));
3653 pmd
->isolated
= true;
3654 dp_netdev_pmd_unref(pmd
);
3656 if (port
->rxqs
[i
].core_id
!= OVS_CORE_UNSPEC
) {
3659 pmd
= dp_netdev_less_loaded_pmd_on_numa(dp
, numa_id
);
3661 VLOG_WARN("There's no available pmd thread on numa node %d",
3667 ovs_mutex_lock(&pmd
->port_mutex
);
3668 dp_netdev_add_rxq_to_pmd(pmd
, port
, port
->rxqs
[i
].rxq
);
3669 ovs_mutex_unlock(&pmd
->port_mutex
);
3671 hmapx_add(to_reload
, pmd
);
3675 /* Distributes all non-pinned rx queues of 'port' between all PMD threads
3676 * in 'dp' and inserts 'port' in the PMD threads 'tx_ports'. The pmd threads
3677 * that need to be restarted are inserted in 'to_reload'. */
3679 dp_netdev_add_port_to_pmds__(struct dp_netdev
*dp
, struct dp_netdev_port
*port
,
3680 struct hmapx
*to_reload
)
3682 struct dp_netdev_pmd_thread
*pmd
;
3684 dp_netdev_add_port_rx_to_pmds(dp
, port
, to_reload
, false);
3686 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
3687 dp_netdev_add_port_tx_to_pmd(pmd
, port
);
3688 hmapx_add(to_reload
, pmd
);
3692 /* Distributes all non-pinned rx queues of 'port' between all PMD threads
3693 * in 'dp', inserts 'port' in the PMD threads 'tx_ports' and reloads them,
3696 dp_netdev_add_port_to_pmds(struct dp_netdev
*dp
, struct dp_netdev_port
*port
)
3698 struct dp_netdev_pmd_thread
*pmd
;
3699 struct hmapx to_reload
= HMAPX_INITIALIZER(&to_reload
);
3700 struct hmapx_node
*node
;
3702 dp_netdev_add_port_to_pmds__(dp
, port
, &to_reload
);
3704 HMAPX_FOR_EACH (node
, &to_reload
) {
3705 pmd
= (struct dp_netdev_pmd_thread
*) node
->data
;
3706 dp_netdev_reload_pmd__(pmd
);
3709 hmapx_destroy(&to_reload
);
3712 /* Starts pmd threads for the numa node 'numa_id', if not already started.
3713 * The function takes care of filling the threads tx port cache. */
3715 dp_netdev_set_pmds_on_numa(struct dp_netdev
*dp
, int numa_id
)
3716 OVS_REQUIRES(dp
->port_mutex
)
3720 if (!ovs_numa_numa_id_is_valid(numa_id
)) {
3721 VLOG_WARN("Cannot create pmd threads due to numa id (%d) invalid",
3726 n_pmds
= get_n_pmd_threads_on_numa(dp
, numa_id
);
3728 /* If there are already pmd threads created for the numa node
3729 * in which 'netdev' is on, do nothing. Else, creates the
3730 * pmd threads for the numa node. */
3732 int can_have
, n_unpinned
, i
;
3734 n_unpinned
= ovs_numa_get_n_unpinned_cores_on_numa(numa_id
);
3736 VLOG_WARN("Cannot create pmd threads due to out of unpinned "
3737 "cores on numa node %d", numa_id
);
3741 /* If cpu mask is specified, uses all unpinned cores, otherwise
3742 * tries creating NR_PMD_THREADS pmd threads. */
3743 can_have
= dp
->pmd_cmask
? n_unpinned
: MIN(n_unpinned
, NR_PMD_THREADS
);
3744 for (i
= 0; i
< can_have
; i
++) {
3745 unsigned core_id
= ovs_numa_get_unpinned_core_on_numa(numa_id
);
3746 struct dp_netdev_pmd_thread
*pmd
= xzalloc(sizeof *pmd
);
3747 struct dp_netdev_port
*port
;
3749 dp_netdev_configure_pmd(pmd
, dp
, core_id
, numa_id
);
3751 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
3752 dp_netdev_add_port_tx_to_pmd(pmd
, port
);
3755 pmd
->thread
= ovs_thread_create("pmd", pmd_thread_main
, pmd
);
3757 VLOG_INFO("Created %d pmd threads on numa node %d", can_have
, numa_id
);
3762 /* Called after pmd threads config change. Restarts pmd threads with
3763 * new configuration. */
3765 dp_netdev_reset_pmd_threads(struct dp_netdev
*dp
)
3766 OVS_REQUIRES(dp
->port_mutex
)
3768 struct hmapx to_reload
= HMAPX_INITIALIZER(&to_reload
);
3769 struct dp_netdev_pmd_thread
*pmd
;
3770 struct dp_netdev_port
*port
;
3771 struct hmapx_node
*node
;
3773 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
3774 if (netdev_is_pmd(port
->netdev
)) {
3775 struct hmapx numas
= HMAPX_INITIALIZER(&numas
);
3776 struct hmapx_node
*numa_node
;
3780 numa_id
= netdev_get_numa_id(port
->netdev
);
3781 hmapx_add(&numas
, (void *) numa_id
);
3782 for (i
= 0; i
< port
->n_rxq
; i
++) {
3783 unsigned core_id
= port
->rxqs
[i
].core_id
;
3785 if (core_id
!= OVS_CORE_UNSPEC
) {
3786 numa_id
= ovs_numa_get_numa_id(core_id
);
3787 hmapx_add(&numas
, (void *) numa_id
);
3791 HMAPX_FOR_EACH (numa_node
, &numas
) {
3792 dp_netdev_set_pmds_on_numa(dp
, (uintptr_t) numa_node
->data
);
3795 hmapx_destroy(&numas
);
3797 /* Distribute only pinned rx queues first to mark threads as isolated */
3798 dp_netdev_add_port_rx_to_pmds(dp
, port
, &to_reload
, true);
3801 /* Distribute remaining non-pinned rx queues to non-isolated PMD threads. */
3802 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
3803 dp_netdev_add_port_rx_to_pmds(dp
, port
, &to_reload
, false);
3806 HMAPX_FOR_EACH (node
, &to_reload
) {
3807 pmd
= (struct dp_netdev_pmd_thread
*) node
->data
;
3808 dp_netdev_reload_pmd__(pmd
);
3811 hmapx_destroy(&to_reload
);
3815 dpif_netdev_get_datapath_version(void)
3817 return xstrdup("<built-in>");
3821 dp_netdev_flow_used(struct dp_netdev_flow
*netdev_flow
, int cnt
, int size
,
3822 uint16_t tcp_flags
, long long now
)
3826 atomic_store_relaxed(&netdev_flow
->stats
.used
, now
);
3827 non_atomic_ullong_add(&netdev_flow
->stats
.packet_count
, cnt
);
3828 non_atomic_ullong_add(&netdev_flow
->stats
.byte_count
, size
);
3829 atomic_read_relaxed(&netdev_flow
->stats
.tcp_flags
, &flags
);
3831 atomic_store_relaxed(&netdev_flow
->stats
.tcp_flags
, flags
);
3835 dp_netdev_count_packet(struct dp_netdev_pmd_thread
*pmd
,
3836 enum dp_stat_type type
, int cnt
)
3838 non_atomic_ullong_add(&pmd
->stats
.n
[type
], cnt
);
3842 dp_netdev_upcall(struct dp_netdev_pmd_thread
*pmd
, struct dp_packet
*packet_
,
3843 struct flow
*flow
, struct flow_wildcards
*wc
, ovs_u128
*ufid
,
3844 enum dpif_upcall_type type
, const struct nlattr
*userdata
,
3845 struct ofpbuf
*actions
, struct ofpbuf
*put_actions
)
3847 struct dp_netdev
*dp
= pmd
->dp
;
3849 if (OVS_UNLIKELY(!dp
->upcall_cb
)) {
3853 if (OVS_UNLIKELY(!VLOG_DROP_DBG(&upcall_rl
))) {
3854 struct ds ds
= DS_EMPTY_INITIALIZER
;
3857 struct odp_flow_key_parms odp_parms
= {
3859 .mask
= wc
? &wc
->masks
: NULL
,
3860 .support
= dp_netdev_support
,
3863 ofpbuf_init(&key
, 0);
3864 odp_flow_key_from_flow(&odp_parms
, &key
);
3865 packet_str
= ofp_packet_to_string(dp_packet_data(packet_
),
3866 dp_packet_size(packet_
));
3868 odp_flow_key_format(key
.data
, key
.size
, &ds
);
3870 VLOG_DBG("%s: %s upcall:\n%s\n%s", dp
->name
,
3871 dpif_upcall_type_to_string(type
), ds_cstr(&ds
), packet_str
);
3873 ofpbuf_uninit(&key
);
3879 return dp
->upcall_cb(packet_
, flow
, ufid
, pmd
->core_id
, type
, userdata
,
3880 actions
, wc
, put_actions
, dp
->upcall_aux
);
static inline uint32_t
dpif_netdev_packet_get_rss_hash(struct dp_packet *packet,
                                const struct miniflow *mf)
    uint32_t hash, recirc_depth;

    if (OVS_LIKELY(dp_packet_rss_valid(packet))) {
        hash = dp_packet_get_rss_hash(packet);
    } else {
        hash = miniflow_hash_5tuple(mf, 0);
        dp_packet_set_rss_hash(packet, hash);

    /* The RSS hash must account for the recirculation depth to avoid
     * collisions in the exact match cache */
    recirc_depth = *recirc_depth_get_unsafe();
    if (OVS_UNLIKELY(recirc_depth)) {
        hash = hash_finish(hash, recirc_depth);
        dp_packet_set_rss_hash(packet, hash);
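/* Descriptive note (not part of the original source): without folding in the
 * recirculation depth, a packet seen before and after recirculation would
 * carry the same RSS hash and could collide with, or evict, its own EMC
 * entry; e.g. hash_finish(hash, 1) at the first recirculation yields a
 * distinct EMC signature for the same 5-tuple. */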
3906 struct packet_batch_per_flow
{
3907 unsigned int byte_count
;
3909 struct dp_netdev_flow
*flow
;
3911 struct dp_packet_batch array
;
3915 packet_batch_per_flow_update(struct packet_batch_per_flow
*batch
,
3916 struct dp_packet
*packet
,
3917 const struct miniflow
*mf
)
3919 batch
->byte_count
+= dp_packet_size(packet
);
3920 batch
->tcp_flags
|= miniflow_get_tcp_flags(mf
);
3921 batch
->array
.packets
[batch
->array
.count
++] = packet
;
3925 packet_batch_per_flow_init(struct packet_batch_per_flow
*batch
,
3926 struct dp_netdev_flow
*flow
)
3928 flow
->batch
= batch
;
3931 dp_packet_batch_init(&batch
->array
);
3932 batch
->byte_count
= 0;
3933 batch
->tcp_flags
= 0;
3937 packet_batch_per_flow_execute(struct packet_batch_per_flow
*batch
,
3938 struct dp_netdev_pmd_thread
*pmd
,
3941 struct dp_netdev_actions
*actions
;
3942 struct dp_netdev_flow
*flow
= batch
->flow
;
3944 dp_netdev_flow_used(flow
, batch
->array
.count
, batch
->byte_count
,
3945 batch
->tcp_flags
, now
);
3947 actions
= dp_netdev_flow_get_actions(flow
);
3949 dp_netdev_execute_actions(pmd
, &batch
->array
, true, &flow
->flow
,
3950 actions
->actions
, actions
->size
, now
);
3954 dp_netdev_queue_batches(struct dp_packet
*pkt
,
3955 struct dp_netdev_flow
*flow
, const struct miniflow
*mf
,
3956 struct packet_batch_per_flow
*batches
, size_t *n_batches
)
3958 struct packet_batch_per_flow
*batch
= flow
->batch
;
3960 if (OVS_UNLIKELY(!batch
)) {
3961 batch
= &batches
[(*n_batches
)++];
3962 packet_batch_per_flow_init(batch
, flow
);
3965 packet_batch_per_flow_update(batch
, pkt
, mf
);
/* Tries to process all ('cnt') of the 'packets' using only the exact match
 * cache 'pmd->flow_cache'. If a flow is not found for a packet 'packets[i]',
 * the miniflow is copied into 'keys' and the packet pointer is moved to the
 * beginning of the 'packets' array.
 *
 * The function returns the number of packets that need to be processed in the
 * 'packets' array (they have been moved to the beginning of the vector).
 *
 * If 'md_is_valid' is false, the metadata in 'packets' is not valid and must
 * be initialized by this function using 'port_no'.
 */
static inline size_t
emc_processing(struct dp_netdev_pmd_thread *pmd, struct dp_packet_batch *packets_,
               struct netdev_flow_key *keys,
               struct packet_batch_per_flow batches[], size_t *n_batches,
               bool md_is_valid, odp_port_t port_no)
3985 struct emc_cache
*flow_cache
= &pmd
->flow_cache
;
3986 struct netdev_flow_key
*key
= &keys
[0];
3987 size_t i
, n_missed
= 0, n_dropped
= 0;
3988 struct dp_packet
**packets
= packets_
->packets
;
3989 int cnt
= packets_
->count
;
3991 for (i
= 0; i
< cnt
; i
++) {
3992 struct dp_netdev_flow
*flow
;
3993 struct dp_packet
*packet
= packets
[i
];
3995 if (OVS_UNLIKELY(dp_packet_size(packet
) < ETH_HEADER_LEN
)) {
3996 dp_packet_delete(packet
);
4002 /* Prefetch next packet data and metadata. */
4003 OVS_PREFETCH(dp_packet_data(packets
[i
+1]));
4004 pkt_metadata_prefetch_init(&packets
[i
+1]->md
);
4008 pkt_metadata_init(&packet
->md
, port_no
);
4010 miniflow_extract(packet
, &key
->mf
);
4011 key
->len
= 0; /* Not computed yet. */
4012 key
->hash
= dpif_netdev_packet_get_rss_hash(packet
, &key
->mf
);
4014 flow
= emc_lookup(flow_cache
, key
);
4015 if (OVS_LIKELY(flow
)) {
4016 dp_netdev_queue_batches(packet
, flow
, &key
->mf
, batches
,
4019 /* Exact match cache missed. Group missed packets together at
4020 * the beginning of the 'packets' array. */
4021 packets
[n_missed
] = packet
;
4022 /* 'key[n_missed]' contains the key of the current packet and it
4023 * must be returned to the caller. The next key should be extracted
4024 * to 'keys[n_missed + 1]'. */
4025 key
= &keys
[++n_missed
];
4029 dp_netdev_count_packet(pmd
, DP_STAT_EXACT_HIT
, cnt
- n_dropped
- n_missed
);
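    /* Illustrative example (not part of the original source): with a batch of
     * four packets where packets 0 and 2 hit the EMC and packets 1 and 3
     * miss, the loop above batches the two hits, leaves the two misses at
     * packets[0] and packets[1] (originally packets 1 and 3) with their
     * miniflows in keys[0] and keys[1], and the function reports 2, so
     * fast_path_processing() only ever sees the misses. */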
4035 handle_packet_upcall(struct dp_netdev_pmd_thread
*pmd
, struct dp_packet
*packet
,
4036 const struct netdev_flow_key
*key
,
4037 struct ofpbuf
*actions
, struct ofpbuf
*put_actions
,
4038 int *lost_cnt
, long long now
)
4040 struct ofpbuf
*add_actions
;
4041 struct dp_packet_batch b
;
4046 match
.tun_md
.valid
= false;
4047 miniflow_expand(&key
->mf
, &match
.flow
);
4049 ofpbuf_clear(actions
);
4050 ofpbuf_clear(put_actions
);
4052 dpif_flow_hash(pmd
->dp
->dpif
, &match
.flow
, sizeof match
.flow
, &ufid
);
4053 error
= dp_netdev_upcall(pmd
, packet
, &match
.flow
, &match
.wc
,
4054 &ufid
, DPIF_UC_MISS
, NULL
, actions
,
4056 if (OVS_UNLIKELY(error
&& error
!= ENOSPC
)) {
4057 dp_packet_delete(packet
);
4062 /* The Netlink encoding of datapath flow keys cannot express
4063 * wildcarding the presence of a VLAN tag. Instead, a missing VLAN
4064 * tag is interpreted as exact match on the fact that there is no
4065 * VLAN. Unless we refactor a lot of code that translates between
4066 * Netlink and struct flow representations, we have to do the same
4068 if (!match
.wc
.masks
.vlan_tci
) {
4069 match
.wc
.masks
.vlan_tci
= htons(0xffff);
4072 /* We can't allow the packet batching in the next loop to execute
4073 * the actions. Otherwise, if there are any slow path actions,
4074 * we'll send the packet up twice. */
4075 packet_batch_init_packet(&b
, packet
);
4076 dp_netdev_execute_actions(pmd
, &b
, true, &match
.flow
,
4077 actions
->data
, actions
->size
, now
);
4079 add_actions
= put_actions
->size
? put_actions
: actions
;
4080 if (OVS_LIKELY(error
!= ENOSPC
)) {
4081 struct dp_netdev_flow
*netdev_flow
;
4083 /* XXX: There's a race window where a flow covering this packet
4084 * could have already been installed since we last did the flow
4085 * lookup before upcall. This could be solved by moving the
4086 * mutex lock outside the loop, but that's an awful long time
4087 * to be locking everyone out of making flow installs. If we
4088 * move to a per-core classifier, it would be reasonable. */
4089 ovs_mutex_lock(&pmd
->flow_mutex
);
4090 netdev_flow
= dp_netdev_pmd_lookup_flow(pmd
, key
, NULL
);
4091 if (OVS_LIKELY(!netdev_flow
)) {
4092 netdev_flow
= dp_netdev_flow_add(pmd
, &match
, &ufid
,
4096 ovs_mutex_unlock(&pmd
->flow_mutex
);
4098 emc_insert(&pmd
->flow_cache
, key
, netdev_flow
);
4103 fast_path_processing(struct dp_netdev_pmd_thread
*pmd
,
4104 struct dp_packet_batch
*packets_
,
4105 struct netdev_flow_key
*keys
,
4106 struct packet_batch_per_flow batches
[], size_t *n_batches
,
4110 int cnt
= packets_
->count
;
4111 #if !defined(__CHECKER__) && !defined(_WIN32)
4112 const size_t PKT_ARRAY_SIZE
= cnt
;
4114 /* Sparse or MSVC doesn't like variable length array. */
4115 enum { PKT_ARRAY_SIZE
= NETDEV_MAX_BURST
};
4117 struct dp_packet
**packets
= packets_
->packets
;
4119 struct dpcls_rule
*rules
[PKT_ARRAY_SIZE
];
4120 struct dp_netdev
*dp
= pmd
->dp
;
4121 struct emc_cache
*flow_cache
= &pmd
->flow_cache
;
4122 int miss_cnt
= 0, lost_cnt
= 0;
4123 int lookup_cnt
= 0, add_lookup_cnt
;
4127 for (i
= 0; i
< cnt
; i
++) {
4128 /* Key length is needed in all the cases, hash computed on demand. */
4129 keys
[i
].len
= netdev_flow_key_size(miniflow_n_values(&keys
[i
].mf
));
4131 /* Get the classifier for the in_port */
4132 cls
= dp_netdev_pmd_lookup_dpcls(pmd
, in_port
);
4133 if (OVS_LIKELY(cls
)) {
4134 any_miss
= !dpcls_lookup(cls
, keys
, rules
, cnt
, &lookup_cnt
);
4137 memset(rules
, 0, sizeof(rules
));
4139 if (OVS_UNLIKELY(any_miss
) && !fat_rwlock_tryrdlock(&dp
->upcall_rwlock
)) {
4140 uint64_t actions_stub
[512 / 8], slow_stub
[512 / 8];
4141 struct ofpbuf actions
, put_actions
;
4143 ofpbuf_use_stub(&actions
, actions_stub
, sizeof actions_stub
);
4144 ofpbuf_use_stub(&put_actions
, slow_stub
, sizeof slow_stub
);
4146 for (i
= 0; i
< cnt
; i
++) {
4147 struct dp_netdev_flow
*netdev_flow
;
4149 if (OVS_LIKELY(rules
[i
])) {
4153 /* It's possible that an earlier slow path execution installed
4154 * a rule covering this flow. In this case, it's a lot cheaper
4155 * to catch it here than execute a miss. */
4156 netdev_flow
= dp_netdev_pmd_lookup_flow(pmd
, &keys
[i
],
4159 lookup_cnt
+= add_lookup_cnt
;
4160 rules
[i
] = &netdev_flow
->cr
;
4165 handle_packet_upcall(pmd
, packets
[i
], &keys
[i
], &actions
,
4166 &put_actions
, &lost_cnt
, now
);
4169 ofpbuf_uninit(&actions
);
4170 ofpbuf_uninit(&put_actions
);
4171 fat_rwlock_unlock(&dp
->upcall_rwlock
);
4172 dp_netdev_count_packet(pmd
, DP_STAT_LOST
, lost_cnt
);
4173 } else if (OVS_UNLIKELY(any_miss
)) {
4174 for (i
= 0; i
< cnt
; i
++) {
4175 if (OVS_UNLIKELY(!rules
[i
])) {
4176 dp_packet_delete(packets
[i
]);
4183 for (i
= 0; i
< cnt
; i
++) {
4184 struct dp_packet
*packet
= packets
[i
];
4185 struct dp_netdev_flow
*flow
;
4187 if (OVS_UNLIKELY(!rules
[i
])) {
4191 flow
= dp_netdev_flow_cast(rules
[i
]);
4193 emc_insert(flow_cache
, &keys
[i
], flow
);
4194 dp_netdev_queue_batches(packet
, flow
, &keys
[i
].mf
, batches
, n_batches
);
4197 dp_netdev_count_packet(pmd
, DP_STAT_MASKED_HIT
, cnt
- miss_cnt
);
4198 dp_netdev_count_packet(pmd
, DP_STAT_LOOKUP_HIT
, lookup_cnt
);
4199 dp_netdev_count_packet(pmd
, DP_STAT_MISS
, miss_cnt
);
4200 dp_netdev_count_packet(pmd
, DP_STAT_LOST
, lost_cnt
);
4203 /* Packets enter the datapath from a port (or from recirculation) here.
4205 * For performance reasons a caller may choose not to initialize the metadata
4206 * in 'packets': in this case 'mdinit' is false and this function needs to
4207 * initialize it using 'port_no'. If the metadata in 'packets' is already
4208 * valid, 'md_is_valid' must be true and 'port_no' will be ignored. */
4210 dp_netdev_input__(struct dp_netdev_pmd_thread
*pmd
,
4211 struct dp_packet_batch
*packets
,
4212 bool md_is_valid
, odp_port_t port_no
)
4214 int cnt
= packets
->count
;
4215 #if !defined(__CHECKER__) && !defined(_WIN32)
4216 const size_t PKT_ARRAY_SIZE
= cnt
;
4218 /* Sparse or MSVC doesn't like variable length array. */
4219 enum { PKT_ARRAY_SIZE
= NETDEV_MAX_BURST
};
4221 OVS_ALIGNED_VAR(CACHE_LINE_SIZE
) struct netdev_flow_key keys
[PKT_ARRAY_SIZE
];
4222 struct packet_batch_per_flow batches
[PKT_ARRAY_SIZE
];
4223 long long now
= time_msec();
4224 size_t newcnt
, n_batches
, i
;
4228 newcnt
= emc_processing(pmd
, packets
, keys
, batches
, &n_batches
,
4229 md_is_valid
, port_no
);
4230 if (OVS_UNLIKELY(newcnt
)) {
4231 packets
->count
= newcnt
;
4232 /* Get ingress port from first packet's metadata. */
4233 in_port
= packets
->packets
[0]->md
.in_port
.odp_port
;
4234 fast_path_processing(pmd
, packets
, keys
, batches
, &n_batches
, in_port
, now
);
4237 /* All the flow batches need to be reset before any call to
4238 * packet_batch_per_flow_execute() as it could potentially trigger
4239 * recirculation. When a packet matching flow ‘j’ happens to be
4240 * recirculated, the nested call to dp_netdev_input__() could potentially
4241 * classify the packet as matching another flow - say 'k'. It could happen
4242 * that in the previous call to dp_netdev_input__() that same flow 'k' had
4243 * already its own batches[k] still waiting to be served. So if its
4244 * ‘batch’ member is not reset, the recirculated packet would be wrongly
4245 * appended to batches[k] of the 1st call to dp_netdev_input__(). */
4246 for (i
= 0; i
< n_batches
; i
++) {
4247 batches
[i
].flow
->batch
= NULL
;
4250 for (i
= 0; i
< n_batches
; i
++) {
4251 packet_batch_per_flow_execute(&batches
[i
], pmd
, now
);
4256 dp_netdev_input(struct dp_netdev_pmd_thread
*pmd
,
4257 struct dp_packet_batch
*packets
,
4260 dp_netdev_input__(pmd
, packets
, false, port_no
);
4264 dp_netdev_recirculate(struct dp_netdev_pmd_thread
*pmd
,
4265 struct dp_packet_batch
*packets
)
4267 dp_netdev_input__(pmd
, packets
, true, 0);
4270 struct dp_netdev_execute_aux
{
4271 struct dp_netdev_pmd_thread
*pmd
;
4273 const struct flow
*flow
;
4277 dpif_netdev_register_dp_purge_cb(struct dpif
*dpif
, dp_purge_callback
*cb
,
4280 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
4281 dp
->dp_purge_aux
= aux
;
4282 dp
->dp_purge_cb
= cb
;
static void
dpif_netdev_register_upcall_cb(struct dpif *dpif, upcall_callback *cb,
                               void *aux)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp->upcall_aux = aux;
    dp->upcall_cb = cb;
}

static void
dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
                               long long now, bool purge)
{
    struct tx_port *tx;
    struct dp_netdev_port *port;
    long long interval;

    HMAP_FOR_EACH (tx, node, &pmd->port_cache) {
        if (!tx->port->dynamic_txqs) {
            continue;
        }
        interval = now - tx->last_used;
        if (tx->qid >= 0 && (purge || interval >= XPS_TIMEOUT_MS)) {
            port = tx->port;
            ovs_mutex_lock(&port->txq_used_mutex);
            port->txq_used[tx->qid]--;
            ovs_mutex_unlock(&port->txq_used_mutex);
            tx->qid = -1;
        }
    }
}

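/* The revalidation above releases a dynamic tx queue id when it has not been
 * used for roughly XPS_TIMEOUT_MS (or unconditionally when 'purge' is set),
 * so that idle PMD/port pairs stop pinning a queue. */
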
static int
dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread *pmd,
                           struct tx_port *tx, long long now)
{
    struct dp_netdev_port *port;
    long long interval;
    int i, min_cnt, min_qid;

    if (OVS_UNLIKELY(!now)) {
        now = time_msec();
    }

    interval = now - tx->last_used;
    tx->last_used = now;

    if (OVS_LIKELY(tx->qid >= 0 && interval < XPS_TIMEOUT_MS)) {
        return tx->qid;
    }

    port = tx->port;

    ovs_mutex_lock(&port->txq_used_mutex);
    if (tx->qid >= 0) {
        port->txq_used[tx->qid]--;
        tx->qid = -1;
    }

    min_cnt = -1;
    min_qid = 0;
    for (i = 0; i < netdev_n_txq(port->netdev); i++) {
        if (port->txq_used[i] < min_cnt || min_cnt == -1) {
            min_cnt = port->txq_used[i];
            min_qid = i;
        }
    }

    port->txq_used[min_qid]++;
    tx->qid = min_qid;

    ovs_mutex_unlock(&port->txq_used_mutex);

    dpif_netdev_xps_revalidate_pmd(pmd, now, false);

    VLOG_DBG("Core %d: New TX queue ID %d for port \'%s\'.",
             pmd->core_id, tx->qid, netdev_get_name(tx->port->netdev));
    return min_qid;
}

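/* Example (illustrative numbers only): with netdev_n_txq() == 3 and
 * txq_used[] == {2, 0, 1}, the selection loop above picks min_qid == 1, bumps
 * its usage count and caches it in tx->qid until the id times out or is
 * revalidated away. */
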
static struct tx_port *
pmd_tx_port_cache_lookup(const struct dp_netdev_pmd_thread *pmd,
                         odp_port_t port_no)
{
    return tx_port_lookup(&pmd->port_cache, port_no);
}

static int
push_tnl_action(const struct dp_netdev_pmd_thread *pmd,
                const struct nlattr *attr,
                struct dp_packet_batch *batch)
{
    struct tx_port *tun_port;
    const struct ovs_action_push_tnl *data;
    int err;

    data = nl_attr_get(attr);

    tun_port = pmd_tx_port_cache_lookup(pmd, u32_to_odp(data->tnl_port));
    if (!tun_port) {
        err = -EINVAL;
        goto error;
    }
    err = netdev_push_header(tun_port->port->netdev, batch, data);
    if (!err) {
        return 0;
    }
error:
    dp_packet_delete_batch(batch, true);
    return err;
}

static void
dp_execute_userspace_action(struct dp_netdev_pmd_thread *pmd,
                            struct dp_packet *packet, bool may_steal,
                            struct flow *flow, ovs_u128 *ufid,
                            struct ofpbuf *actions,
                            const struct nlattr *userdata, long long now)
{
    struct dp_packet_batch b;
    int error;

    ofpbuf_clear(actions);

    error = dp_netdev_upcall(pmd, packet, flow, NULL, ufid,
                             DPIF_UC_ACTION, userdata, actions,
                             NULL);
    if (!error || error == ENOSPC) {
        packet_batch_init_packet(&b, packet);
        dp_netdev_execute_actions(pmd, &b, may_steal, flow,
                                  actions->data, actions->size, now);
    } else if (may_steal) {
        dp_packet_delete(packet);
    }
}

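/* Note: ENOSPC from dp_netdev_upcall() is treated like success above,
 * presumably because the upcall can still produce usable actions even when a
 * flow cannot be installed; any other error drops the packet when 'may_steal'
 * allows it. */
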
static void
dp_execute_cb(void *aux_, struct dp_packet_batch *packets_,
              const struct nlattr *a, bool may_steal)
{
    struct dp_netdev_execute_aux *aux = aux_;
    uint32_t *depth = recirc_depth_get();
    struct dp_netdev_pmd_thread *pmd = aux->pmd;
    struct dp_netdev *dp = pmd->dp;
    int type = nl_attr_type(a);
    long long now = aux->now;
    struct tx_port *p;

    switch ((enum ovs_action_attr)type) {
    case OVS_ACTION_ATTR_OUTPUT:
        p = pmd_tx_port_cache_lookup(pmd, nl_attr_get_odp_port(a));
        if (OVS_LIKELY(p)) {
            int tx_qid;
            bool dynamic_txqs;

            dynamic_txqs = p->port->dynamic_txqs;
            if (dynamic_txqs) {
                tx_qid = dpif_netdev_xps_get_tx_qid(pmd, p, now);
            } else {
                atomic_read_relaxed(&pmd->static_tx_qid, &tx_qid);
            }

            netdev_send(p->port->netdev, tx_qid, packets_, may_steal,
                        dynamic_txqs);
            return;
        }
        break;

    case OVS_ACTION_ATTR_TUNNEL_PUSH:
        if (*depth < MAX_RECIRC_DEPTH) {
            struct dp_packet_batch tnl_pkt;
            struct dp_packet_batch *orig_packets_ = packets_;
            int err;

            if (!may_steal) {
                dp_packet_batch_clone(&tnl_pkt, packets_);
                packets_ = &tnl_pkt;
                dp_packet_batch_reset_cutlen(orig_packets_);
            }

            dp_packet_batch_apply_cutlen(packets_);

            err = push_tnl_action(pmd, a, packets_);
            if (!err) {
                (*depth)++;
                dp_netdev_recirculate(pmd, packets_);
                (*depth)--;
            }
            return;
        }
        break;

    case OVS_ACTION_ATTR_TUNNEL_POP:
        if (*depth < MAX_RECIRC_DEPTH) {
            struct dp_packet_batch *orig_packets_ = packets_;
            odp_port_t portno = nl_attr_get_odp_port(a);

            p = pmd_tx_port_cache_lookup(pmd, portno);
            if (p) {
                struct dp_packet_batch tnl_pkt;
                int i;

                if (!may_steal) {
                    dp_packet_batch_clone(&tnl_pkt, packets_);
                    packets_ = &tnl_pkt;
                    dp_packet_batch_reset_cutlen(orig_packets_);
                }

                dp_packet_batch_apply_cutlen(packets_);

                netdev_pop_header(p->port->netdev, packets_);
                if (!packets_->count) {
                    return;
                }

                for (i = 0; i < packets_->count; i++) {
                    packets_->packets[i]->md.in_port.odp_port = portno;
                }

                (*depth)++;
                dp_netdev_recirculate(pmd, packets_);
                (*depth)--;
                return;
            }
        }
        break;

    case OVS_ACTION_ATTR_USERSPACE:
        if (!fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
            struct dp_packet_batch *orig_packets_ = packets_;
            struct dp_packet **packets = packets_->packets;
            const struct nlattr *userdata;
            struct dp_packet_batch usr_pkt;
            struct ofpbuf actions;
            struct flow flow;
            ovs_u128 ufid;
            bool clone = false;
            int i;

            userdata = nl_attr_find_nested(a, OVS_USERSPACE_ATTR_USERDATA);
            ofpbuf_init(&actions, 0);

            if (packets_->trunc) {
                if (!may_steal) {
                    dp_packet_batch_clone(&usr_pkt, packets_);
                    packets_ = &usr_pkt;
                    packets = packets_->packets;
                    clone = true;
                }

                dp_packet_batch_reset_cutlen(orig_packets_);
                dp_packet_batch_apply_cutlen(packets_);
            }

            for (i = 0; i < packets_->count; i++) {
                flow_extract(packets[i], &flow);
                dpif_flow_hash(dp->dpif, &flow, sizeof flow, &ufid);
                dp_execute_userspace_action(pmd, packets[i], may_steal, &flow,
                                            &ufid, &actions, userdata, now);
            }

            if (clone) {
                dp_packet_delete_batch(packets_, true);
            }

            ofpbuf_uninit(&actions);
            fat_rwlock_unlock(&dp->upcall_rwlock);

            return;
        }
        break;

    case OVS_ACTION_ATTR_RECIRC:
        if (*depth < MAX_RECIRC_DEPTH) {
            struct dp_packet_batch recirc_pkts;
            int i;

            if (!may_steal) {
                dp_packet_batch_clone(&recirc_pkts, packets_);
                packets_ = &recirc_pkts;
            }

            for (i = 0; i < packets_->count; i++) {
                packets_->packets[i]->md.recirc_id = nl_attr_get_u32(a);
            }

            (*depth)++;
            dp_netdev_recirculate(pmd, packets_);
            (*depth)--;

            return;
        }

        VLOG_WARN("Packet dropped. Max recirculation depth exceeded.");
        break;

    case OVS_ACTION_ATTR_CT: {
        const struct nlattr *b;
        bool commit = false;
        unsigned int left;
        uint16_t zone = 0;
        const char *helper = NULL;
        const uint32_t *setmark = NULL;
        const struct ovs_key_ct_labels *setlabel = NULL;

        NL_ATTR_FOR_EACH_UNSAFE (b, left, nl_attr_get(a),
                                 nl_attr_get_size(a)) {
            enum ovs_ct_attr sub_type = nl_attr_type(b);

            switch (sub_type) {
            case OVS_CT_ATTR_COMMIT:
                commit = true;
                break;
            case OVS_CT_ATTR_ZONE:
                zone = nl_attr_get_u16(b);
                break;
            case OVS_CT_ATTR_HELPER:
                helper = nl_attr_get_string(b);
                break;
            case OVS_CT_ATTR_MARK:
                setmark = nl_attr_get(b);
                break;
            case OVS_CT_ATTR_LABELS:
                setlabel = nl_attr_get(b);
                break;
            case OVS_CT_ATTR_NAT:
            case OVS_CT_ATTR_UNSPEC:
            case __OVS_CT_ATTR_MAX:
                OVS_NOT_REACHED();
            }
        }

        conntrack_execute(&dp->conntrack, packets_, aux->flow->dl_type, commit,
                          zone, setmark, setlabel, helper);
        break;
    }

    case OVS_ACTION_ATTR_PUSH_VLAN:
    case OVS_ACTION_ATTR_POP_VLAN:
    case OVS_ACTION_ATTR_PUSH_MPLS:
    case OVS_ACTION_ATTR_POP_MPLS:
    case OVS_ACTION_ATTR_SET:
    case OVS_ACTION_ATTR_SET_MASKED:
    case OVS_ACTION_ATTR_SAMPLE:
    case OVS_ACTION_ATTR_HASH:
    case OVS_ACTION_ATTR_UNSPEC:
    case OVS_ACTION_ATTR_TRUNC:
    case __OVS_ACTION_ATTR_MAX:
        OVS_NOT_REACHED();
    }

    dp_packet_delete_batch(packets_, may_steal);
}

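/* Actions such as push_vlan, set, sample, etc. normally never reach this
 * callback: odp_execute_actions() executes those itself and only hands the
 * datapath-specific ones (output, tunnel push/pop, userspace, recirc, ct)
 * back here, which is why the remaining cases are unreachable. */
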
static void
dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
                          struct dp_packet_batch *packets,
                          bool may_steal, const struct flow *flow,
                          const struct nlattr *actions, size_t actions_len,
                          long long now)
{
    struct dp_netdev_execute_aux aux = { pmd, now, flow };

    odp_execute_actions(&aux, packets, may_steal, actions,
                        actions_len, dp_execute_cb);
}

struct dp_netdev_ct_dump {
    struct ct_dpif_dump_state up;
    struct conntrack_dump dump;
    struct conntrack *ct;
    struct dp_netdev *dp;
};

static int
dpif_netdev_ct_dump_start(struct dpif *dpif, struct ct_dpif_dump_state **dump_,
                          const uint16_t *pzone)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_ct_dump *dump;

    dump = xzalloc(sizeof *dump);
    dump->dp = dp;
    dump->ct = &dp->conntrack;

    conntrack_dump_start(&dp->conntrack, &dump->dump, pzone);

    *dump_ = &dump->up;

    return 0;
}

static int
dpif_netdev_ct_dump_next(struct dpif *dpif OVS_UNUSED,
                         struct ct_dpif_dump_state *dump_,
                         struct ct_dpif_entry *entry)
{
    struct dp_netdev_ct_dump *dump;

    INIT_CONTAINER(dump, dump_, up);

    return conntrack_dump_next(&dump->dump, entry);
}

static int
dpif_netdev_ct_dump_done(struct dpif *dpif OVS_UNUSED,
                         struct ct_dpif_dump_state *dump_)
{
    struct dp_netdev_ct_dump *dump;
    int err;

    INIT_CONTAINER(dump, dump_, up);

    err = conntrack_dump_done(&dump->dump);

    free(dump);

    return err;
}

static int
dpif_netdev_ct_flush(struct dpif *dpif, const uint16_t *zone)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    return conntrack_flush(&dp->conntrack, zone);
}

const struct dpif_class dpif_netdev_class = {
    "netdev",
    dpif_netdev_init,
    dpif_netdev_enumerate,
    dpif_netdev_port_open_type,
    dpif_netdev_open,
    dpif_netdev_close,
    dpif_netdev_destroy,
    dpif_netdev_run,
    dpif_netdev_wait,
    dpif_netdev_get_stats,
    dpif_netdev_port_add,
    dpif_netdev_port_del,
    dpif_netdev_port_set_config,
    dpif_netdev_port_query_by_number,
    dpif_netdev_port_query_by_name,
    NULL,                       /* port_get_pid */
    dpif_netdev_port_dump_start,
    dpif_netdev_port_dump_next,
    dpif_netdev_port_dump_done,
    dpif_netdev_port_poll,
    dpif_netdev_port_poll_wait,
    dpif_netdev_flow_flush,
    dpif_netdev_flow_dump_create,
    dpif_netdev_flow_dump_destroy,
    dpif_netdev_flow_dump_thread_create,
    dpif_netdev_flow_dump_thread_destroy,
    dpif_netdev_flow_dump_next,
    dpif_netdev_operate,
    NULL,                       /* recv_set */
    NULL,                       /* handlers_set */
    dpif_netdev_pmd_set,
    dpif_netdev_queue_to_priority,
    NULL,                       /* recv */
    NULL,                       /* recv_wait */
    NULL,                       /* recv_purge */
    dpif_netdev_register_dp_purge_cb,
    dpif_netdev_register_upcall_cb,
    dpif_netdev_enable_upcall,
    dpif_netdev_disable_upcall,
    dpif_netdev_get_datapath_version,
    dpif_netdev_ct_dump_start,
    dpif_netdev_ct_dump_next,
    dpif_netdev_ct_dump_done,
    dpif_netdev_ct_flush,
};

static void
dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED,
                              const char *argv[], void *aux OVS_UNUSED)
{
    struct dp_netdev_port *port;
    struct dp_netdev *dp;
    odp_port_t port_no;

    ovs_mutex_lock(&dp_netdev_mutex);
    dp = shash_find_data(&dp_netdevs, argv[1]);
    if (!dp || !dpif_netdev_class_is_dummy(dp->class)) {
        ovs_mutex_unlock(&dp_netdev_mutex);
        unixctl_command_reply_error(conn, "unknown datapath or not a dummy");
        return;
    }
    ovs_refcount_ref(&dp->ref_cnt);
    ovs_mutex_unlock(&dp_netdev_mutex);

    ovs_mutex_lock(&dp->port_mutex);
    if (get_port_by_name(dp, argv[2], &port)) {
        unixctl_command_reply_error(conn, "unknown port");
        goto exit;
    }

    port_no = u32_to_odp(atoi(argv[3]));
    if (!port_no || port_no == ODPP_NONE) {
        unixctl_command_reply_error(conn, "bad port number");
        goto exit;
    }
    if (dp_netdev_lookup_port(dp, port_no)) {
        unixctl_command_reply_error(conn, "port number already in use");
        goto exit;
    }

    /* Remove port. */
    hmap_remove(&dp->ports, &port->node);
    dp_netdev_del_port_from_all_pmds(dp, port);

    /* Reinsert with new port number. */
    port->port_no = port_no;
    hmap_insert(&dp->ports, &port->node, hash_port_no(port_no));
    dp_netdev_add_port_to_pmds(dp, port);

    seq_change(dp->port_seq);
    unixctl_command_reply(conn, NULL);

exit:
    ovs_mutex_unlock(&dp->port_mutex);
    dp_netdev_unref(dp);
}

static void
dpif_dummy_register__(const char *type)
{
    struct dpif_class *class;

    class = xmalloc(sizeof *class);
    *class = dpif_netdev_class;
    class->type = xstrdup(type);
    dp_register_provider(class);
}

static void
dpif_dummy_override(const char *type)
{
    int error;

    /*
     * Ignore EAFNOSUPPORT to allow --enable-dummy=system with
     * a userland-only build.  It's useful for the testsuite.
     */
    error = dp_unregister_provider(type);
    if (error == 0 || error == EAFNOSUPPORT) {
        dpif_dummy_register__(type);
    }
}

void
dpif_dummy_register(enum dummy_level level)
{
    if (level == DUMMY_OVERRIDE_ALL) {
        struct sset types;
        const char *type;

        sset_init(&types);
        dp_enumerate_types(&types);
        SSET_FOR_EACH (type, &types) {
            dpif_dummy_override(type);
        }
        sset_destroy(&types);
    } else if (level == DUMMY_OVERRIDE_SYSTEM) {
        dpif_dummy_override("system");
    }
    dpif_dummy_register__("dummy");

    unixctl_command_register("dpif-dummy/change-port-number",
                             "dp port new-number",
                             3, 3, dpif_dummy_change_port_number, NULL);
}

/* Datapath Classifier. */

/* A set of rules that all have the same fields wildcarded. */
struct dpcls_subtable {
    /* The fields are only used by writers. */
    struct cmap_node cmap_node OVS_GUARDED; /* Within dpcls 'subtables_map'. */

    /* These fields are accessed by readers. */
    struct cmap rules;           /* Contains "struct dpcls_rule"s. */
    uint32_t hit_cnt;            /* Number of match hits in subtable in current
                                    optimization interval. */
    struct netdev_flow_key mask; /* Wildcards for fields (const). */
    /* 'mask' must be the last field, additional space is allocated here. */
};

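/* For instance, rules that match only on the IPv4 source address share one
 * subtable, while rules that also match on the TCP destination port go into a
 * different subtable with a wider mask. */
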
/* Initializes 'cls' as a classifier that initially contains no classification
 * rules. */
static void
dpcls_init(struct dpcls *cls)
{
    cmap_init(&cls->subtables_map);
    pvector_init(&cls->subtables);
}

static void
dpcls_destroy_subtable(struct dpcls *cls, struct dpcls_subtable *subtable)
{
    VLOG_DBG("Destroying subtable %p for in_port %d", subtable, cls->in_port);
    pvector_remove(&cls->subtables, subtable);
    cmap_remove(&cls->subtables_map, &subtable->cmap_node,
                subtable->mask.hash);
    cmap_destroy(&subtable->rules);
    ovsrcu_postpone(free, subtable);
}

/* Destroys 'cls'.  Rules within 'cls', if any, are not freed; this is the
 * caller's responsibility.
 * May only be called after all the readers have been terminated. */
static void
dpcls_destroy(struct dpcls *cls)
{
    if (cls) {
        struct dpcls_subtable *subtable;

        CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
            ovs_assert(cmap_count(&subtable->rules) == 0);
            dpcls_destroy_subtable(cls, subtable);
        }
        cmap_destroy(&cls->subtables_map);
        pvector_destroy(&cls->subtables);
    }
}

static struct dpcls_subtable *
dpcls_create_subtable(struct dpcls *cls, const struct netdev_flow_key *mask)
{
    struct dpcls_subtable *subtable;

    /* Need to add one. */
    subtable = xmalloc(sizeof *subtable
                       - sizeof subtable->mask.mf + mask->len);
    cmap_init(&subtable->rules);
    subtable->hit_cnt = 0;
    netdev_flow_key_clone(&subtable->mask, mask);
    cmap_insert(&cls->subtables_map, &subtable->cmap_node, mask->hash);
    /* Add the new subtable at the end of the pvector (with no hits yet) */
    pvector_insert(&cls->subtables, subtable, 0);
    VLOG_DBG("Creating %"PRIuSIZE". subtable %p for in_port %d",
             cmap_count(&cls->subtables_map), subtable, cls->in_port);
    pvector_publish(&cls->subtables);

    return subtable;
}

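/* New subtables start with priority 0 in the pvector; they only move ahead of
 * older subtables once dpcls_sort_subtable_vector() ranks them by hit count at
 * the next optimization interval. */
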
static inline struct dpcls_subtable *
dpcls_find_subtable(struct dpcls *cls, const struct netdev_flow_key *mask)
{
    struct dpcls_subtable *subtable;

    CMAP_FOR_EACH_WITH_HASH (subtable, cmap_node, mask->hash,
                             &cls->subtables_map) {
        if (netdev_flow_key_equal(&subtable->mask, mask)) {
            return subtable;
        }
    }
    return dpcls_create_subtable(cls, mask);
}

/* Periodically sort the dpcls subtable vectors according to hit counts */
static void
dpcls_sort_subtable_vector(struct dpcls *cls)
{
    struct pvector *pvec = &cls->subtables;
    struct dpcls_subtable *subtable;

    PVECTOR_FOR_EACH (subtable, pvec) {
        pvector_change_priority(pvec, subtable, subtable->hit_cnt);
        subtable->hit_cnt = 0;
    }
    pvector_publish(pvec);
}

static inline void
dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd)
{
    struct dpcls *cls;
    long long int now = time_msec();

    if (now > pmd->next_optimization) {
        /* Try to obtain the flow lock to block out revalidator threads.
         * If not possible, just try next time. */
        if (!ovs_mutex_trylock(&pmd->flow_mutex)) {
            /* Optimize each classifier */
            CMAP_FOR_EACH (cls, node, &pmd->classifiers) {
                dpcls_sort_subtable_vector(cls);
            }
            ovs_mutex_unlock(&pmd->flow_mutex);
            /* Start new measuring interval */
            pmd->next_optimization = now + DPCLS_OPTIMIZATION_INTERVAL;
        }
    }
}

/* Insert 'rule' into 'cls'. */
static void
dpcls_insert(struct dpcls *cls, struct dpcls_rule *rule,
             const struct netdev_flow_key *mask)
{
    struct dpcls_subtable *subtable = dpcls_find_subtable(cls, mask);

    /* Refer to subtable's mask, also for later removal. */
    rule->mask = &subtable->mask;
    cmap_insert(&subtable->rules, &rule->cmap_node, rule->flow.hash);
}

/* Removes 'rule' from 'cls', also destructing the 'rule'. */
static void
dpcls_remove(struct dpcls *cls, struct dpcls_rule *rule)
{
    struct dpcls_subtable *subtable;

    ovs_assert(rule->mask);

    /* Get subtable from reference in rule->mask. */
    INIT_CONTAINER(subtable, rule->mask, mask);
    if (cmap_remove(&subtable->rules, &rule->cmap_node, rule->flow.hash)
        == 0) {
        /* Delete empty subtable. */
        dpcls_destroy_subtable(cls, subtable);
        pvector_publish(&cls->subtables);
    }
}

/* Returns true if 'target' satisfies 'rule', that is, if for each 1-bit in
 * the rule's mask the corresponding bits in the rule's key and in 'target'
 * are the same. */
static inline bool
dpcls_rule_matches_key(const struct dpcls_rule *rule,
                       const struct netdev_flow_key *target)
{
    const uint64_t *keyp = miniflow_get_values(&rule->flow.mf);
    const uint64_t *maskp = miniflow_get_values(&rule->mask->mf);
    uint64_t value;

    NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, target, rule->flow.mf.map) {
        if (OVS_UNLIKELY((value & *maskp++) != *keyp++)) {
            return false;
        }
    }
    return true;
}

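/* The comparison above relies on the rule's key having been masked when the
 * rule was inserted, so a plain equality test works: e.g. with mask
 * 0xffff0000, key 0x12340000 and target value 0x1234abcd,
 * (0x1234abcd & 0xffff0000) == 0x12340000 matches. */
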
/* For each miniflow in 'keys' performs a classifier lookup writing the result
 * into the corresponding slot in 'rules'.  If a particular entry in 'keys' is
 * NULL it is skipped.
 *
 * This function is optimized for use in the userspace datapath and therefore
 * does not implement a lot of features available in the standard
 * classifier_lookup() function.  Specifically, it does not implement
 * priorities, instead returning any rule which matches the flow.
 *
 * Returns true if all miniflows found a corresponding rule. */
static bool
dpcls_lookup(struct dpcls *cls, const struct netdev_flow_key keys[],
             struct dpcls_rule **rules, const size_t cnt,
             int *num_lookups_p)
{
    /* The received 'cnt' miniflows are the search-keys that will be processed
     * to find a matching entry into the available subtables.
     * The number of bits in map_type is equal to NETDEV_MAX_BURST. */
    typedef uint32_t map_type;
#define MAP_BITS (sizeof(map_type) * CHAR_BIT)
    BUILD_ASSERT_DECL(MAP_BITS >= NETDEV_MAX_BURST);

    struct dpcls_subtable *subtable;

    map_type keys_map = TYPE_MAXIMUM(map_type); /* Set all bits. */
    map_type found_map;
    uint32_t hashes[MAP_BITS];
    const struct cmap_node *nodes[MAP_BITS];

    if (cnt != MAP_BITS) {
        keys_map >>= MAP_BITS - cnt; /* Clear extra bits. */
    }
    memset(rules, 0, cnt * sizeof *rules);

    int lookups_match = 0, subtable_pos = 1;

    /* The Datapath classifier - aka dpcls - is composed of subtables.
     * Subtables are dynamically created as needed when new rules are inserted.
     * Each subtable collects rules with matches on a specific subset of packet
     * fields as defined by the subtable's mask.  We proceed to process every
     * search-key against each subtable, but when a match is found for a
     * search-key, the search for that key can stop because the rules are
     * non-overlapping. */
    PVECTOR_FOR_EACH (subtable, &cls->subtables) {
        int i;

        /* Compute hashes for the remaining keys.  Each search-key is
         * masked with the subtable's mask to avoid hashing the wildcarded
         * bits. */
        ULLONG_FOR_EACH_1(i, keys_map) {
            hashes[i] = netdev_flow_key_hash_in_mask(&keys[i],
                                                     &subtable->mask);
        }
        /* Lookup. */
        found_map = cmap_find_batch(&subtable->rules, keys_map, hashes, nodes);
        /* Check results.  When the i-th bit of found_map is set, it means
         * that a set of nodes with a matching hash value was found for the
         * i-th search-key.  Due to possible hash collisions we need to check
         * which of the found rules, if any, really matches our masked
         * search-key. */
        ULLONG_FOR_EACH_1(i, found_map) {
            struct dpcls_rule *rule;

            CMAP_NODE_FOR_EACH (rule, cmap_node, nodes[i]) {
                if (OVS_LIKELY(dpcls_rule_matches_key(rule, &keys[i]))) {
                    rules[i] = rule;
                    /* Even at 20 Mpps the 32-bit hit_cnt cannot wrap
                     * within one second optimization interval. */
                    subtable->hit_cnt++;
                    lookups_match += subtable_pos;
                    goto next;
                }
            }
            /* None of the found rules was a match.  Reset the i-th bit to
             * keep searching this key in the next subtable. */
            ULLONG_SET0(found_map, i);  /* Did not match. */
        next:
            ;                     /* Keep Sparse happy. */
        }
        keys_map &= ~found_map;   /* Clear the found rules. */
        if (!keys_map) {
            if (num_lookups_p) {
                *num_lookups_p = lookups_match;
            }
            return true;          /* All found. */
        }
        subtable_pos++;
    }
    if (num_lookups_p) {
        *num_lookups_p = lookups_match;
    }
    return false;                 /* Some misses. */
}
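
/* A small worked example of the bitmap bookkeeping above (hypothetical
 * values): with cnt == 4, keys_map starts as 0b1111.  If the first subtable
 * resolves keys 0 and 2, found_map ends up 0b0101 and keys_map becomes
 * 0b1010, so only keys 1 and 3 are hashed and probed in the next subtable. */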