2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2016 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 #include "dpif-netdev.h"
25 #include <netinet/in.h>
29 #include <sys/ioctl.h>
30 #include <sys/socket.h>
35 #include <rte_cycles.h>
40 #include "conntrack.h"
44 #include "dp-packet.h"
46 #include "dpif-provider.h"
48 #include "fat-rwlock.h"
53 #include "netdev-vport.h"
55 #include "odp-execute.h"
57 #include "openvswitch/dynamic-string.h"
58 #include "openvswitch/list.h"
59 #include "openvswitch/match.h"
60 #include "openvswitch/ofp-print.h"
61 #include "openvswitch/ofp-util.h"
62 #include "openvswitch/ofpbuf.h"
63 #include "openvswitch/shash.h"
64 #include "openvswitch/vlog.h"
68 #include "poll-loop.h"
75 #include "tnl-neigh-cache.h"
76 #include "tnl-ports.h"
80 VLOG_DEFINE_THIS_MODULE(dpif_netdev
);
82 #define FLOW_DUMP_MAX_BATCH 50
83 /* Use per thread recirc_depth to prevent recirculation loop. */
84 #define MAX_RECIRC_DEPTH 5
85 DEFINE_STATIC_PER_THREAD_DATA(uint32_t, recirc_depth
, 0)
87 /* Configuration parameters. */
88 enum { MAX_FLOWS
= 65536 }; /* Maximum number of flows in flow table. */
90 /* Protects against changes to 'dp_netdevs'. */
91 static struct ovs_mutex dp_netdev_mutex
= OVS_MUTEX_INITIALIZER
;
93 /* Contains all 'struct dp_netdev's. */
94 static struct shash dp_netdevs
OVS_GUARDED_BY(dp_netdev_mutex
)
95 = SHASH_INITIALIZER(&dp_netdevs
);
97 static struct vlog_rate_limit upcall_rl
= VLOG_RATE_LIMIT_INIT(600, 600);
99 #define DP_NETDEV_CS_SUPPORTED_MASK (CS_NEW | CS_ESTABLISHED | CS_RELATED \
100 | CS_INVALID | CS_REPLY_DIR | CS_TRACKED)
101 #define DP_NETDEV_CS_UNSUPPORTED_MASK (~(uint32_t)DP_NETDEV_CS_SUPPORTED_MASK)
103 static struct odp_support dp_netdev_support
= {
104 .max_mpls_depth
= SIZE_MAX
,
112 /* Stores a miniflow with inline values */
114 struct netdev_flow_key
{
115 uint32_t hash
; /* Hash function differs for different users. */
116 uint32_t len
; /* Length of the following miniflow (incl. map). */
118 uint64_t buf
[FLOW_MAX_PACKET_U64S
];
121 /* Exact match cache for frequently used flows
123 * The cache uses a 32-bit hash of the packet (which can be the RSS hash) to
124 * search its entries for a miniflow that matches exactly the miniflow of the
125 * packet. It stores the 'dpcls_rule' (rule) that matches the miniflow.
127 * A cache entry holds a reference to its 'dp_netdev_flow'.
129 * A miniflow with a given hash can be in one of EM_FLOW_HASH_SEGS different
130 * entries. The 32-bit hash is split into EM_FLOW_HASH_SEGS values (each of
131 * them is EM_FLOW_HASH_SHIFT bits wide and the remainder is thrown away). Each
132 * value is the index of a cache entry where the miniflow could be.
138 * Each pmd_thread has its own private exact match cache.
139 * If dp_netdev_input is not called from a pmd thread, a mutex is used.
142 #define EM_FLOW_HASH_SHIFT 13
143 #define EM_FLOW_HASH_ENTRIES (1u << EM_FLOW_HASH_SHIFT)
144 #define EM_FLOW_HASH_MASK (EM_FLOW_HASH_ENTRIES - 1)
145 #define EM_FLOW_HASH_SEGS 2
148 struct dp_netdev_flow
*flow
;
149 struct netdev_flow_key key
; /* key.hash used for emc hash value. */
153 struct emc_entry entries
[EM_FLOW_HASH_ENTRIES
];
154 int sweep_idx
; /* For emc_cache_slow_sweep(). */
157 /* Iterate in the exact match cache through every entry that might contain a
158 * miniflow with hash 'HASH'. */
159 #define EMC_FOR_EACH_POS_WITH_HASH(EMC, CURRENT_ENTRY, HASH) \
160 for (uint32_t i__ = 0, srch_hash__ = (HASH); \
161 (CURRENT_ENTRY) = &(EMC)->entries[srch_hash__ & EM_FLOW_HASH_MASK], \
162 i__ < EM_FLOW_HASH_SEGS; \
163 i__++, srch_hash__ >>= EM_FLOW_HASH_SHIFT)
165 /* Simple non-wildcarding single-priority classifier. */
167 /* Time in ms between successive optimizations of the dpcls subtable vector */
168 #define DPCLS_OPTIMIZATION_INTERVAL 1000
171 struct cmap_node node
; /* Within dp_netdev_pmd_thread.classifiers */
173 struct cmap subtables_map
;
174 struct pvector subtables
;
177 /* A rule to be inserted to the classifier. */
179 struct cmap_node cmap_node
; /* Within struct dpcls_subtable 'rules'. */
180 struct netdev_flow_key
*mask
; /* Subtable's mask. */
181 struct netdev_flow_key flow
; /* Matching key. */
182 /* 'flow' must be the last field, additional space is allocated here. */
185 static void dpcls_init(struct dpcls
*);
186 static void dpcls_destroy(struct dpcls
*);
187 static void dpcls_sort_subtable_vector(struct dpcls
*);
188 static void dpcls_insert(struct dpcls
*, struct dpcls_rule
*,
189 const struct netdev_flow_key
*mask
);
190 static void dpcls_remove(struct dpcls
*, struct dpcls_rule
*);
191 static bool dpcls_lookup(struct dpcls
*cls
,
192 const struct netdev_flow_key keys
[],
193 struct dpcls_rule
**rules
, size_t cnt
,
196 /* Datapath based on the network device interface from netdev.h.
202 * Some members, marked 'const', are immutable. Accessing other members
203 * requires synchronization, as noted in more detail below.
205 * Acquisition order is, from outermost to innermost:
207 * dp_netdev_mutex (global)
212 const struct dpif_class
*const class;
213 const char *const name
;
215 struct ovs_refcount ref_cnt
;
216 atomic_flag destroyed
;
220 * Any lookup into 'ports' or any access to the dp_netdev_ports found
221 * through 'ports' requires taking 'port_mutex'. */
222 struct ovs_mutex port_mutex
;
224 struct seq
*port_seq
; /* Incremented whenever a port changes. */
226 /* Protects access to ofproto-dpif-upcall interface during revalidator
227 * thread synchronization. */
228 struct fat_rwlock upcall_rwlock
;
229 upcall_callback
*upcall_cb
; /* Callback function for executing upcalls. */
232 /* Callback function for notifying the purging of dp flows (during
233 * reseting pmd deletion). */
234 dp_purge_callback
*dp_purge_cb
;
237 /* Stores all 'struct dp_netdev_pmd_thread's. */
238 struct cmap poll_threads
;
240 /* Protects the access of the 'struct dp_netdev_pmd_thread'
241 * instance for non-pmd thread. */
242 struct ovs_mutex non_pmd_mutex
;
244 /* Each pmd thread will store its pointer to
245 * 'struct dp_netdev_pmd_thread' in 'per_pmd_key'. */
246 ovsthread_key_t per_pmd_key
;
248 struct seq
*reconfigure_seq
;
249 uint64_t last_reconfigure_seq
;
251 /* Cpu mask for pin of pmd threads. */
254 uint64_t last_tnl_conf_seq
;
256 struct conntrack conntrack
;
259 static struct dp_netdev_port
*dp_netdev_lookup_port(const struct dp_netdev
*dp
,
261 OVS_REQUIRES(dp
->port_mutex
);
264 DP_STAT_EXACT_HIT
, /* Packets that had an exact match (emc). */
265 DP_STAT_MASKED_HIT
, /* Packets that matched in the flow table. */
266 DP_STAT_MISS
, /* Packets that did not match. */
267 DP_STAT_LOST
, /* Packets not passed up to the client. */
268 DP_STAT_LOOKUP_HIT
, /* Number of subtable lookups for flow table
273 enum pmd_cycles_counter_type
{
274 PMD_CYCLES_POLLING
, /* Cycles spent polling NICs. */
275 PMD_CYCLES_PROCESSING
, /* Cycles spent processing packets */
279 #define XPS_TIMEOUT_MS 500LL
281 /* Contained by struct dp_netdev_port's 'rxqs' member. */
282 struct dp_netdev_rxq
{
283 struct netdev_rxq
*rxq
;
284 unsigned core_id
; /* Сore to which this queue is pinned. */
287 /* A port in a netdev-based datapath. */
288 struct dp_netdev_port
{
290 struct netdev
*netdev
;
291 struct hmap_node node
; /* Node in dp_netdev's 'ports'. */
292 struct netdev_saved_flags
*sf
;
293 struct dp_netdev_rxq
*rxqs
;
294 unsigned n_rxq
; /* Number of elements in 'rxq' */
295 bool dynamic_txqs
; /* If true XPS will be used. */
296 unsigned *txq_used
; /* Number of threads that uses each tx queue. */
297 struct ovs_mutex txq_used_mutex
;
298 char *type
; /* Port type as requested by user. */
299 char *rxq_affinity_list
; /* Requested affinity of rx queues. */
302 /* Contained by struct dp_netdev_flow's 'stats' member. */
303 struct dp_netdev_flow_stats
{
304 atomic_llong used
; /* Last used time, in monotonic msecs. */
305 atomic_ullong packet_count
; /* Number of packets matched. */
306 atomic_ullong byte_count
; /* Number of bytes matched. */
307 atomic_uint16_t tcp_flags
; /* Bitwise-OR of seen tcp_flags values. */
310 /* A flow in 'dp_netdev_pmd_thread's 'flow_table'.
316 * Except near the beginning or ending of its lifespan, rule 'rule' belongs to
317 * its pmd thread's classifier. The text below calls this classifier 'cls'.
322 * The thread safety rules described here for "struct dp_netdev_flow" are
323 * motivated by two goals:
325 * - Prevent threads that read members of "struct dp_netdev_flow" from
326 * reading bad data due to changes by some thread concurrently modifying
329 * - Prevent two threads making changes to members of a given "struct
330 * dp_netdev_flow" from interfering with each other.
336 * A flow 'flow' may be accessed without a risk of being freed during an RCU
337 * grace period. Code that needs to hold onto a flow for a while
338 * should try incrementing 'flow->ref_cnt' with dp_netdev_flow_ref().
340 * 'flow->ref_cnt' protects 'flow' from being freed. It doesn't protect the
341 * flow from being deleted from 'cls' and it doesn't protect members of 'flow'
344 * Some members, marked 'const', are immutable. Accessing other members
345 * requires synchronization, as noted in more detail below.
347 struct dp_netdev_flow
{
348 const struct flow flow
; /* Unmasked flow that created this entry. */
349 /* Hash table index by unmasked flow. */
350 const struct cmap_node node
; /* In owning dp_netdev_pmd_thread's */
352 const ovs_u128 ufid
; /* Unique flow identifier. */
353 const unsigned pmd_id
; /* The 'core_id' of pmd thread owning this */
356 /* Number of references.
357 * The classifier owns one reference.
358 * Any thread trying to keep a rule from being freed should hold its own
360 struct ovs_refcount ref_cnt
;
365 struct dp_netdev_flow_stats stats
;
368 OVSRCU_TYPE(struct dp_netdev_actions
*) actions
;
370 /* While processing a group of input packets, the datapath uses the next
371 * member to store a pointer to the output batch for the flow. It is
372 * reset after the batch has been sent out (See dp_netdev_queue_batches(),
373 * packet_batch_per_flow_init() and packet_batch_per_flow_execute()). */
374 struct packet_batch_per_flow
*batch
;
376 /* Packet classification. */
377 struct dpcls_rule cr
; /* In owning dp_netdev's 'cls'. */
378 /* 'cr' must be the last member. */
381 static void dp_netdev_flow_unref(struct dp_netdev_flow
*);
382 static bool dp_netdev_flow_ref(struct dp_netdev_flow
*);
383 static int dpif_netdev_flow_from_nlattrs(const struct nlattr
*, uint32_t,
386 /* A set of datapath actions within a "struct dp_netdev_flow".
392 * A struct dp_netdev_actions 'actions' is protected with RCU. */
393 struct dp_netdev_actions
{
394 /* These members are immutable: they do not change during the struct's
396 unsigned int size
; /* Size of 'actions', in bytes. */
397 struct nlattr actions
[]; /* Sequence of OVS_ACTION_ATTR_* attributes. */
400 struct dp_netdev_actions
*dp_netdev_actions_create(const struct nlattr
*,
402 struct dp_netdev_actions
*dp_netdev_flow_get_actions(
403 const struct dp_netdev_flow
*);
404 static void dp_netdev_actions_free(struct dp_netdev_actions
*);
406 /* Contained by struct dp_netdev_pmd_thread's 'stats' member. */
407 struct dp_netdev_pmd_stats
{
408 /* Indexed by DP_STAT_*. */
409 atomic_ullong n
[DP_N_STATS
];
412 /* Contained by struct dp_netdev_pmd_thread's 'cycle' member. */
413 struct dp_netdev_pmd_cycles
{
414 /* Indexed by PMD_CYCLES_*. */
415 atomic_ullong n
[PMD_N_CYCLES
];
418 /* Contained by struct dp_netdev_pmd_thread's 'poll_list' member. */
420 struct dp_netdev_port
*port
;
421 struct netdev_rxq
*rx
;
422 struct ovs_list node
;
425 /* Contained by struct dp_netdev_pmd_thread's 'send_port_cache',
426 * 'tnl_port_cache' or 'tx_ports'. */
428 struct dp_netdev_port
*port
;
431 struct hmap_node node
;
434 /* PMD: Poll modes drivers. PMD accesses devices via polling to eliminate
435 * the performance overhead of interrupt processing. Therefore netdev can
436 * not implement rx-wait for these devices. dpif-netdev needs to poll
437 * these device to check for recv buffer. pmd-thread does polling for
438 * devices assigned to itself.
440 * DPDK used PMD for accessing NIC.
442 * Note, instance with cpu core id NON_PMD_CORE_ID will be reserved for
443 * I/O of all non-pmd threads. There will be no actual thread created
446 * Each struct has its own flow table and classifier. Packets received
447 * from managed ports are looked up in the corresponding pmd thread's
448 * flow table, and are executed with the found actions.
450 struct dp_netdev_pmd_thread
{
451 struct dp_netdev
*dp
;
452 struct ovs_refcount ref_cnt
; /* Every reference must be refcount'ed. */
453 struct cmap_node node
; /* In 'dp->poll_threads'. */
455 pthread_cond_t cond
; /* For synchronizing pmd thread reload. */
456 struct ovs_mutex cond_mutex
; /* Mutex for condition variable. */
458 /* Per thread exact-match cache. Note, the instance for cpu core
459 * NON_PMD_CORE_ID can be accessed by multiple threads, and thusly
460 * need to be protected by 'non_pmd_mutex'. Every other instance
461 * will only be accessed by its own pmd thread. */
462 struct emc_cache flow_cache
;
464 /* Flow-Table and classifiers
466 * Writers of 'flow_table' must take the 'flow_mutex'. Corresponding
467 * changes to 'classifiers' must be made while still holding the
470 struct ovs_mutex flow_mutex
;
471 struct cmap flow_table OVS_GUARDED
; /* Flow table. */
473 /* One classifier per in_port polled by the pmd */
474 struct cmap classifiers
;
475 /* Periodically sort subtable vectors according to hit frequencies */
476 long long int next_optimization
;
479 struct dp_netdev_pmd_stats stats
;
481 /* Cycles counters */
482 struct dp_netdev_pmd_cycles cycles
;
484 /* Used to count cicles. See 'cycles_counter_end()' */
485 unsigned long long last_cycles
;
487 struct latch exit_latch
; /* For terminating the pmd thread. */
488 struct seq
*reload_seq
;
489 uint64_t last_reload_seq
;
490 atomic_bool reload
; /* Do we need to reload ports? */
492 unsigned core_id
; /* CPU core id of this pmd thread. */
493 int numa_id
; /* numa node id of this pmd thread. */
496 /* Queue id used by this pmd thread to send packets on all netdevs if
497 * XPS disabled for this netdev. All static_tx_qid's are unique and less
498 * than 'ovs_numa_get_n_cores() + 1'. */
499 atomic_int static_tx_qid
;
501 struct ovs_mutex port_mutex
; /* Mutex for 'poll_list' and 'tx_ports'. */
502 /* List of rx queues to poll. */
503 struct ovs_list poll_list OVS_GUARDED
;
504 /* Number of elements in 'poll_list' */
506 /* Map of 'tx_port's used for transmission. Written by the main thread,
507 * read by the pmd thread. */
508 struct hmap tx_ports OVS_GUARDED
;
510 /* These are thread-local copies of 'tx_ports'. One contains only tunnel
511 * ports (that support push_tunnel/pop_tunnel), the other contains ports
512 * with at least one txq (that support send). A port can be in both.
514 * There are two separate maps to make sure that we don't try to execute
515 * OUTPUT on a device which has 0 txqs or PUSH/POP on a non-tunnel device.
517 * The instances for cpu core NON_PMD_CORE_ID can be accessed by multiple
518 * threads, and thusly need to be protected by 'non_pmd_mutex'. Every
519 * other instance will only be accessed by its own pmd thread. */
520 struct hmap tnl_port_cache
;
521 struct hmap send_port_cache
;
523 /* Only a pmd thread can write on its own 'cycles' and 'stats'.
524 * The main thread keeps 'stats_zero' and 'cycles_zero' as base
525 * values and subtracts them from 'stats' and 'cycles' before
526 * reporting to the user */
527 unsigned long long stats_zero
[DP_N_STATS
];
528 uint64_t cycles_zero
[PMD_N_CYCLES
];
531 /* Interface to netdev-based datapath. */
534 struct dp_netdev
*dp
;
535 uint64_t last_port_seq
;
538 static int get_port_by_number(struct dp_netdev
*dp
, odp_port_t port_no
,
539 struct dp_netdev_port
**portp
)
540 OVS_REQUIRES(dp
->port_mutex
);
541 static int get_port_by_name(struct dp_netdev
*dp
, const char *devname
,
542 struct dp_netdev_port
**portp
)
543 OVS_REQUIRES(dp
->port_mutex
);
544 static void dp_netdev_free(struct dp_netdev
*)
545 OVS_REQUIRES(dp_netdev_mutex
);
546 static int do_add_port(struct dp_netdev
*dp
, const char *devname
,
547 const char *type
, odp_port_t port_no
)
548 OVS_REQUIRES(dp
->port_mutex
);
549 static void do_del_port(struct dp_netdev
*dp
, struct dp_netdev_port
*)
550 OVS_REQUIRES(dp
->port_mutex
);
551 static int dpif_netdev_open(const struct dpif_class
*, const char *name
,
552 bool create
, struct dpif
**);
553 static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread
*pmd
,
554 struct dp_packet_batch
*,
555 bool may_steal
, const struct flow
*flow
,
556 const struct nlattr
*actions
,
559 static void dp_netdev_input(struct dp_netdev_pmd_thread
*,
560 struct dp_packet_batch
*, odp_port_t port_no
);
561 static void dp_netdev_recirculate(struct dp_netdev_pmd_thread
*,
562 struct dp_packet_batch
*);
564 static void dp_netdev_disable_upcall(struct dp_netdev
*);
565 static void dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread
*pmd
);
566 static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread
*pmd
,
567 struct dp_netdev
*dp
, unsigned core_id
,
569 static void dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread
*pmd
);
570 static void dp_netdev_set_nonpmd(struct dp_netdev
*dp
)
571 OVS_REQUIRES(dp
->port_mutex
);
573 static struct dp_netdev_pmd_thread
*dp_netdev_get_pmd(struct dp_netdev
*dp
,
575 static struct dp_netdev_pmd_thread
*
576 dp_netdev_pmd_get_next(struct dp_netdev
*dp
, struct cmap_position
*pos
);
577 static void dp_netdev_destroy_all_pmds(struct dp_netdev
*dp
);
578 static void dp_netdev_del_pmds_on_numa(struct dp_netdev
*dp
, int numa_id
);
579 static void dp_netdev_set_pmds_on_numa(struct dp_netdev
*dp
, int numa_id
)
580 OVS_REQUIRES(dp
->port_mutex
);
581 static void dp_netdev_pmd_clear_ports(struct dp_netdev_pmd_thread
*pmd
);
582 static void dp_netdev_del_port_from_all_pmds(struct dp_netdev
*dp
,
583 struct dp_netdev_port
*port
);
584 static void dp_netdev_add_port_to_pmds(struct dp_netdev
*dp
,
585 struct dp_netdev_port
*port
);
586 static void dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread
*pmd
,
587 struct dp_netdev_port
*port
);
588 static void dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread
*pmd
,
589 struct dp_netdev_port
*port
,
590 struct netdev_rxq
*rx
);
591 static struct dp_netdev_pmd_thread
*
592 dp_netdev_less_loaded_pmd_on_numa(struct dp_netdev
*dp
, int numa_id
);
593 static void dp_netdev_reset_pmd_threads(struct dp_netdev
*dp
)
594 OVS_REQUIRES(dp
->port_mutex
);
595 static void reconfigure_pmd_threads(struct dp_netdev
*dp
)
596 OVS_REQUIRES(dp
->port_mutex
);
597 static bool dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread
*pmd
);
598 static void dp_netdev_pmd_unref(struct dp_netdev_pmd_thread
*pmd
);
599 static void dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread
*pmd
);
600 static void pmd_load_cached_ports(struct dp_netdev_pmd_thread
*pmd
)
601 OVS_REQUIRES(pmd
->port_mutex
);
603 dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread
*pmd
);
606 dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread
*pmd
,
607 long long now
, bool purge
);
608 static int dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread
*pmd
,
609 struct tx_port
*tx
, long long now
);
611 static inline bool emc_entry_alive(struct emc_entry
*ce
);
612 static void emc_clear_entry(struct emc_entry
*ce
);
615 emc_cache_init(struct emc_cache
*flow_cache
)
619 flow_cache
->sweep_idx
= 0;
620 for (i
= 0; i
< ARRAY_SIZE(flow_cache
->entries
); i
++) {
621 flow_cache
->entries
[i
].flow
= NULL
;
622 flow_cache
->entries
[i
].key
.hash
= 0;
623 flow_cache
->entries
[i
].key
.len
= sizeof(struct miniflow
);
624 flowmap_init(&flow_cache
->entries
[i
].key
.mf
.map
);
629 emc_cache_uninit(struct emc_cache
*flow_cache
)
633 for (i
= 0; i
< ARRAY_SIZE(flow_cache
->entries
); i
++) {
634 emc_clear_entry(&flow_cache
->entries
[i
]);
638 /* Check and clear dead flow references slowly (one entry at each
641 emc_cache_slow_sweep(struct emc_cache
*flow_cache
)
643 struct emc_entry
*entry
= &flow_cache
->entries
[flow_cache
->sweep_idx
];
645 if (!emc_entry_alive(entry
)) {
646 emc_clear_entry(entry
);
648 flow_cache
->sweep_idx
= (flow_cache
->sweep_idx
+ 1) & EM_FLOW_HASH_MASK
;
651 /* Returns true if 'dpif' is a netdev or dummy dpif, false otherwise. */
653 dpif_is_netdev(const struct dpif
*dpif
)
655 return dpif
->dpif_class
->open
== dpif_netdev_open
;
658 static struct dpif_netdev
*
659 dpif_netdev_cast(const struct dpif
*dpif
)
661 ovs_assert(dpif_is_netdev(dpif
));
662 return CONTAINER_OF(dpif
, struct dpif_netdev
, dpif
);
665 static struct dp_netdev
*
666 get_dp_netdev(const struct dpif
*dpif
)
668 return dpif_netdev_cast(dpif
)->dp
;
672 PMD_INFO_SHOW_STATS
, /* Show how cpu cycles are spent. */
673 PMD_INFO_CLEAR_STATS
, /* Set the cycles count to 0. */
674 PMD_INFO_SHOW_RXQ
/* Show poll-lists of pmd threads. */
678 pmd_info_show_stats(struct ds
*reply
,
679 struct dp_netdev_pmd_thread
*pmd
,
680 unsigned long long stats
[DP_N_STATS
],
681 uint64_t cycles
[PMD_N_CYCLES
])
683 unsigned long long total_packets
= 0;
684 uint64_t total_cycles
= 0;
687 /* These loops subtracts reference values ('*_zero') from the counters.
688 * Since loads and stores are relaxed, it might be possible for a '*_zero'
689 * value to be more recent than the current value we're reading from the
690 * counter. This is not a big problem, since these numbers are not
691 * supposed to be too accurate, but we should at least make sure that
692 * the result is not negative. */
693 for (i
= 0; i
< DP_N_STATS
; i
++) {
694 if (stats
[i
] > pmd
->stats_zero
[i
]) {
695 stats
[i
] -= pmd
->stats_zero
[i
];
700 if (i
!= DP_STAT_LOST
) {
701 /* Lost packets are already included in DP_STAT_MISS */
702 total_packets
+= stats
[i
];
706 for (i
= 0; i
< PMD_N_CYCLES
; i
++) {
707 if (cycles
[i
] > pmd
->cycles_zero
[i
]) {
708 cycles
[i
] -= pmd
->cycles_zero
[i
];
713 total_cycles
+= cycles
[i
];
716 ds_put_cstr(reply
, (pmd
->core_id
== NON_PMD_CORE_ID
)
717 ? "main thread" : "pmd thread");
719 if (pmd
->numa_id
!= OVS_NUMA_UNSPEC
) {
720 ds_put_format(reply
, " numa_id %d", pmd
->numa_id
);
722 if (pmd
->core_id
!= OVS_CORE_UNSPEC
&& pmd
->core_id
!= NON_PMD_CORE_ID
) {
723 ds_put_format(reply
, " core_id %u", pmd
->core_id
);
725 ds_put_cstr(reply
, ":\n");
728 "\temc hits:%llu\n\tmegaflow hits:%llu\n"
729 "\tavg. subtable lookups per hit:%.2f\n"
730 "\tmiss:%llu\n\tlost:%llu\n",
731 stats
[DP_STAT_EXACT_HIT
], stats
[DP_STAT_MASKED_HIT
],
732 stats
[DP_STAT_MASKED_HIT
] > 0
733 ? (1.0*stats
[DP_STAT_LOOKUP_HIT
])/stats
[DP_STAT_MASKED_HIT
]
735 stats
[DP_STAT_MISS
], stats
[DP_STAT_LOST
]);
737 if (total_cycles
== 0) {
742 "\tpolling cycles:%"PRIu64
" (%.02f%%)\n"
743 "\tprocessing cycles:%"PRIu64
" (%.02f%%)\n",
744 cycles
[PMD_CYCLES_POLLING
],
745 cycles
[PMD_CYCLES_POLLING
] / (double)total_cycles
* 100,
746 cycles
[PMD_CYCLES_PROCESSING
],
747 cycles
[PMD_CYCLES_PROCESSING
] / (double)total_cycles
* 100);
749 if (total_packets
== 0) {
754 "\tavg cycles per packet: %.02f (%"PRIu64
"/%llu)\n",
755 total_cycles
/ (double)total_packets
,
756 total_cycles
, total_packets
);
759 "\tavg processing cycles per packet: "
760 "%.02f (%"PRIu64
"/%llu)\n",
761 cycles
[PMD_CYCLES_PROCESSING
] / (double)total_packets
,
762 cycles
[PMD_CYCLES_PROCESSING
], total_packets
);
766 pmd_info_clear_stats(struct ds
*reply OVS_UNUSED
,
767 struct dp_netdev_pmd_thread
*pmd
,
768 unsigned long long stats
[DP_N_STATS
],
769 uint64_t cycles
[PMD_N_CYCLES
])
773 /* We cannot write 'stats' and 'cycles' (because they're written by other
774 * threads) and we shouldn't change 'stats' (because they're used to count
775 * datapath stats, which must not be cleared here). Instead, we save the
776 * current values and subtract them from the values to be displayed in the
778 for (i
= 0; i
< DP_N_STATS
; i
++) {
779 pmd
->stats_zero
[i
] = stats
[i
];
781 for (i
= 0; i
< PMD_N_CYCLES
; i
++) {
782 pmd
->cycles_zero
[i
] = cycles
[i
];
787 pmd_info_show_rxq(struct ds
*reply
, struct dp_netdev_pmd_thread
*pmd
)
789 if (pmd
->core_id
!= NON_PMD_CORE_ID
) {
790 struct rxq_poll
*poll
;
791 const char *prev_name
= NULL
;
794 "pmd thread numa_id %d core_id %u:\n\tisolated : %s\n",
795 pmd
->numa_id
, pmd
->core_id
, (pmd
->isolated
)
798 ovs_mutex_lock(&pmd
->port_mutex
);
799 LIST_FOR_EACH (poll
, node
, &pmd
->poll_list
) {
800 const char *name
= netdev_get_name(poll
->port
->netdev
);
802 if (!prev_name
|| strcmp(name
, prev_name
)) {
804 ds_put_cstr(reply
, "\n");
806 ds_put_format(reply
, "\tport: %s\tqueue-id:",
807 netdev_get_name(poll
->port
->netdev
));
809 ds_put_format(reply
, " %d", netdev_rxq_get_queue_id(poll
->rx
));
812 ovs_mutex_unlock(&pmd
->port_mutex
);
813 ds_put_cstr(reply
, "\n");
818 dpif_netdev_pmd_info(struct unixctl_conn
*conn
, int argc
, const char *argv
[],
821 struct ds reply
= DS_EMPTY_INITIALIZER
;
822 struct dp_netdev_pmd_thread
*pmd
;
823 struct dp_netdev
*dp
= NULL
;
824 enum pmd_info_type type
= *(enum pmd_info_type
*) aux
;
826 ovs_mutex_lock(&dp_netdev_mutex
);
829 dp
= shash_find_data(&dp_netdevs
, argv
[1]);
830 } else if (shash_count(&dp_netdevs
) == 1) {
831 /* There's only one datapath */
832 dp
= shash_first(&dp_netdevs
)->data
;
836 ovs_mutex_unlock(&dp_netdev_mutex
);
837 unixctl_command_reply_error(conn
,
838 "please specify an existing datapath");
842 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
843 if (type
== PMD_INFO_SHOW_RXQ
) {
844 pmd_info_show_rxq(&reply
, pmd
);
846 unsigned long long stats
[DP_N_STATS
];
847 uint64_t cycles
[PMD_N_CYCLES
];
850 /* Read current stats and cycle counters */
851 for (i
= 0; i
< ARRAY_SIZE(stats
); i
++) {
852 atomic_read_relaxed(&pmd
->stats
.n
[i
], &stats
[i
]);
854 for (i
= 0; i
< ARRAY_SIZE(cycles
); i
++) {
855 atomic_read_relaxed(&pmd
->cycles
.n
[i
], &cycles
[i
]);
858 if (type
== PMD_INFO_CLEAR_STATS
) {
859 pmd_info_clear_stats(&reply
, pmd
, stats
, cycles
);
860 } else if (type
== PMD_INFO_SHOW_STATS
) {
861 pmd_info_show_stats(&reply
, pmd
, stats
, cycles
);
866 ovs_mutex_unlock(&dp_netdev_mutex
);
868 unixctl_command_reply(conn
, ds_cstr(&reply
));
873 dpif_netdev_init(void)
875 static enum pmd_info_type show_aux
= PMD_INFO_SHOW_STATS
,
876 clear_aux
= PMD_INFO_CLEAR_STATS
,
877 poll_aux
= PMD_INFO_SHOW_RXQ
;
879 unixctl_command_register("dpif-netdev/pmd-stats-show", "[dp]",
880 0, 1, dpif_netdev_pmd_info
,
882 unixctl_command_register("dpif-netdev/pmd-stats-clear", "[dp]",
883 0, 1, dpif_netdev_pmd_info
,
885 unixctl_command_register("dpif-netdev/pmd-rxq-show", "[dp]",
886 0, 1, dpif_netdev_pmd_info
,
892 dpif_netdev_enumerate(struct sset
*all_dps
,
893 const struct dpif_class
*dpif_class
)
895 struct shash_node
*node
;
897 ovs_mutex_lock(&dp_netdev_mutex
);
898 SHASH_FOR_EACH(node
, &dp_netdevs
) {
899 struct dp_netdev
*dp
= node
->data
;
900 if (dpif_class
!= dp
->class) {
901 /* 'dp_netdevs' contains both "netdev" and "dummy" dpifs.
902 * If the class doesn't match, skip this dpif. */
905 sset_add(all_dps
, node
->name
);
907 ovs_mutex_unlock(&dp_netdev_mutex
);
913 dpif_netdev_class_is_dummy(const struct dpif_class
*class)
915 return class != &dpif_netdev_class
;
919 dpif_netdev_port_open_type(const struct dpif_class
*class, const char *type
)
921 return strcmp(type
, "internal") ? type
922 : dpif_netdev_class_is_dummy(class) ? "dummy-internal"
927 create_dpif_netdev(struct dp_netdev
*dp
)
929 uint16_t netflow_id
= hash_string(dp
->name
, 0);
930 struct dpif_netdev
*dpif
;
932 ovs_refcount_ref(&dp
->ref_cnt
);
934 dpif
= xmalloc(sizeof *dpif
);
935 dpif_init(&dpif
->dpif
, dp
->class, dp
->name
, netflow_id
>> 8, netflow_id
);
937 dpif
->last_port_seq
= seq_read(dp
->port_seq
);
942 /* Choose an unused, non-zero port number and return it on success.
943 * Return ODPP_NONE on failure. */
945 choose_port(struct dp_netdev
*dp
, const char *name
)
946 OVS_REQUIRES(dp
->port_mutex
)
950 if (dp
->class != &dpif_netdev_class
) {
954 /* If the port name begins with "br", start the number search at
955 * 100 to make writing tests easier. */
956 if (!strncmp(name
, "br", 2)) {
960 /* If the port name contains a number, try to assign that port number.
961 * This can make writing unit tests easier because port numbers are
963 for (p
= name
; *p
!= '\0'; p
++) {
964 if (isdigit((unsigned char) *p
)) {
965 port_no
= start_no
+ strtol(p
, NULL
, 10);
966 if (port_no
> 0 && port_no
!= odp_to_u32(ODPP_NONE
)
967 && !dp_netdev_lookup_port(dp
, u32_to_odp(port_no
))) {
968 return u32_to_odp(port_no
);
975 for (port_no
= 1; port_no
<= UINT16_MAX
; port_no
++) {
976 if (!dp_netdev_lookup_port(dp
, u32_to_odp(port_no
))) {
977 return u32_to_odp(port_no
);
985 create_dp_netdev(const char *name
, const struct dpif_class
*class,
986 struct dp_netdev
**dpp
)
987 OVS_REQUIRES(dp_netdev_mutex
)
989 struct dp_netdev
*dp
;
992 dp
= xzalloc(sizeof *dp
);
993 shash_add(&dp_netdevs
, name
, dp
);
995 *CONST_CAST(const struct dpif_class
**, &dp
->class) = class;
996 *CONST_CAST(const char **, &dp
->name
) = xstrdup(name
);
997 ovs_refcount_init(&dp
->ref_cnt
);
998 atomic_flag_clear(&dp
->destroyed
);
1000 ovs_mutex_init(&dp
->port_mutex
);
1001 hmap_init(&dp
->ports
);
1002 dp
->port_seq
= seq_create();
1003 fat_rwlock_init(&dp
->upcall_rwlock
);
1005 dp
->reconfigure_seq
= seq_create();
1006 dp
->last_reconfigure_seq
= seq_read(dp
->reconfigure_seq
);
1008 /* Disable upcalls by default. */
1009 dp_netdev_disable_upcall(dp
);
1010 dp
->upcall_aux
= NULL
;
1011 dp
->upcall_cb
= NULL
;
1013 conntrack_init(&dp
->conntrack
);
1015 cmap_init(&dp
->poll_threads
);
1016 ovs_mutex_init_recursive(&dp
->non_pmd_mutex
);
1017 ovsthread_key_create(&dp
->per_pmd_key
, NULL
);
1019 ovs_mutex_lock(&dp
->port_mutex
);
1020 dp_netdev_set_nonpmd(dp
);
1022 error
= do_add_port(dp
, name
, dpif_netdev_port_open_type(dp
->class,
1025 ovs_mutex_unlock(&dp
->port_mutex
);
1031 dp
->last_tnl_conf_seq
= seq_read(tnl_conf_seq
);
1037 dp_netdev_request_reconfigure(struct dp_netdev
*dp
)
1039 seq_change(dp
->reconfigure_seq
);
1043 dp_netdev_is_reconf_required(struct dp_netdev
*dp
)
1045 return seq_read(dp
->reconfigure_seq
) != dp
->last_reconfigure_seq
;
1049 dpif_netdev_open(const struct dpif_class
*class, const char *name
,
1050 bool create
, struct dpif
**dpifp
)
1052 struct dp_netdev
*dp
;
1055 ovs_mutex_lock(&dp_netdev_mutex
);
1056 dp
= shash_find_data(&dp_netdevs
, name
);
1058 error
= create
? create_dp_netdev(name
, class, &dp
) : ENODEV
;
1060 error
= (dp
->class != class ? EINVAL
1065 *dpifp
= create_dpif_netdev(dp
);
1068 ovs_mutex_unlock(&dp_netdev_mutex
);
1074 dp_netdev_destroy_upcall_lock(struct dp_netdev
*dp
)
1075 OVS_NO_THREAD_SAFETY_ANALYSIS
1077 /* Check that upcalls are disabled, i.e. that the rwlock is taken */
1078 ovs_assert(fat_rwlock_tryrdlock(&dp
->upcall_rwlock
));
1080 /* Before freeing a lock we should release it */
1081 fat_rwlock_unlock(&dp
->upcall_rwlock
);
1082 fat_rwlock_destroy(&dp
->upcall_rwlock
);
1085 /* Requires dp_netdev_mutex so that we can't get a new reference to 'dp'
1086 * through the 'dp_netdevs' shash while freeing 'dp'. */
1088 dp_netdev_free(struct dp_netdev
*dp
)
1089 OVS_REQUIRES(dp_netdev_mutex
)
1091 struct dp_netdev_port
*port
, *next
;
1093 shash_find_and_delete(&dp_netdevs
, dp
->name
);
1095 dp_netdev_destroy_all_pmds(dp
);
1096 ovs_mutex_destroy(&dp
->non_pmd_mutex
);
1097 ovsthread_key_delete(dp
->per_pmd_key
);
1099 conntrack_destroy(&dp
->conntrack
);
1101 ovs_mutex_lock(&dp
->port_mutex
);
1102 HMAP_FOR_EACH_SAFE (port
, next
, node
, &dp
->ports
) {
1103 do_del_port(dp
, port
);
1105 ovs_mutex_unlock(&dp
->port_mutex
);
1106 cmap_destroy(&dp
->poll_threads
);
1108 seq_destroy(dp
->reconfigure_seq
);
1110 seq_destroy(dp
->port_seq
);
1111 hmap_destroy(&dp
->ports
);
1112 ovs_mutex_destroy(&dp
->port_mutex
);
1114 /* Upcalls must be disabled at this point */
1115 dp_netdev_destroy_upcall_lock(dp
);
1117 free(dp
->pmd_cmask
);
1118 free(CONST_CAST(char *, dp
->name
));
1123 dp_netdev_unref(struct dp_netdev
*dp
)
1126 /* Take dp_netdev_mutex so that, if dp->ref_cnt falls to zero, we can't
1127 * get a new reference to 'dp' through the 'dp_netdevs' shash. */
1128 ovs_mutex_lock(&dp_netdev_mutex
);
1129 if (ovs_refcount_unref_relaxed(&dp
->ref_cnt
) == 1) {
1132 ovs_mutex_unlock(&dp_netdev_mutex
);
1137 dpif_netdev_close(struct dpif
*dpif
)
1139 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1141 dp_netdev_unref(dp
);
1146 dpif_netdev_destroy(struct dpif
*dpif
)
1148 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1150 if (!atomic_flag_test_and_set(&dp
->destroyed
)) {
1151 if (ovs_refcount_unref_relaxed(&dp
->ref_cnt
) == 1) {
1152 /* Can't happen: 'dpif' still owns a reference to 'dp'. */
1160 /* Add 'n' to the atomic variable 'var' non-atomically and using relaxed
1161 * load/store semantics. While the increment is not atomic, the load and
1162 * store operations are, making it impossible to read inconsistent values.
1164 * This is used to update thread local stats counters. */
1166 non_atomic_ullong_add(atomic_ullong
*var
, unsigned long long n
)
1168 unsigned long long tmp
;
1170 atomic_read_relaxed(var
, &tmp
);
1172 atomic_store_relaxed(var
, tmp
);
1176 dpif_netdev_get_stats(const struct dpif
*dpif
, struct dpif_dp_stats
*stats
)
1178 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1179 struct dp_netdev_pmd_thread
*pmd
;
1181 stats
->n_flows
= stats
->n_hit
= stats
->n_missed
= stats
->n_lost
= 0;
1182 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
1183 unsigned long long n
;
1184 stats
->n_flows
+= cmap_count(&pmd
->flow_table
);
1186 atomic_read_relaxed(&pmd
->stats
.n
[DP_STAT_MASKED_HIT
], &n
);
1188 atomic_read_relaxed(&pmd
->stats
.n
[DP_STAT_EXACT_HIT
], &n
);
1190 atomic_read_relaxed(&pmd
->stats
.n
[DP_STAT_MISS
], &n
);
1191 stats
->n_missed
+= n
;
1192 atomic_read_relaxed(&pmd
->stats
.n
[DP_STAT_LOST
], &n
);
1195 stats
->n_masks
= UINT32_MAX
;
1196 stats
->n_mask_hit
= UINT64_MAX
;
1202 dp_netdev_reload_pmd__(struct dp_netdev_pmd_thread
*pmd
)
1204 if (pmd
->core_id
== NON_PMD_CORE_ID
) {
1205 ovs_mutex_lock(&pmd
->dp
->non_pmd_mutex
);
1206 ovs_mutex_lock(&pmd
->port_mutex
);
1207 pmd_load_cached_ports(pmd
);
1208 ovs_mutex_unlock(&pmd
->port_mutex
);
1209 ovs_mutex_unlock(&pmd
->dp
->non_pmd_mutex
);
1213 ovs_mutex_lock(&pmd
->cond_mutex
);
1214 seq_change(pmd
->reload_seq
);
1215 atomic_store_relaxed(&pmd
->reload
, true);
1216 ovs_mutex_cond_wait(&pmd
->cond
, &pmd
->cond_mutex
);
1217 ovs_mutex_unlock(&pmd
->cond_mutex
);
1221 hash_port_no(odp_port_t port_no
)
1223 return hash_int(odp_to_u32(port_no
), 0);
1227 port_create(const char *devname
, const char *type
,
1228 odp_port_t port_no
, struct dp_netdev_port
**portp
)
1230 struct netdev_saved_flags
*sf
;
1231 struct dp_netdev_port
*port
;
1232 enum netdev_flags flags
;
1233 struct netdev
*netdev
;
1234 int n_open_rxqs
= 0;
1237 bool dynamic_txqs
= false;
1241 /* Open and validate network device. */
1242 error
= netdev_open(devname
, type
, &netdev
);
1246 /* XXX reject non-Ethernet devices */
1248 netdev_get_flags(netdev
, &flags
);
1249 if (flags
& NETDEV_LOOPBACK
) {
1250 VLOG_ERR("%s: cannot add a loopback device", devname
);
1255 if (netdev_is_pmd(netdev
)) {
1256 n_cores
= ovs_numa_get_n_cores();
1258 if (n_cores
== OVS_CORE_UNSPEC
) {
1259 VLOG_ERR("%s, cannot get cpu core info", devname
);
1263 /* There can only be ovs_numa_get_n_cores() pmd threads,
1264 * so creates a txq for each, and one extra for the non
1266 error
= netdev_set_tx_multiq(netdev
, n_cores
+ 1);
1267 if (error
&& (error
!= EOPNOTSUPP
)) {
1268 VLOG_ERR("%s, cannot set multiq", devname
);
1273 if (netdev_is_reconf_required(netdev
)) {
1274 error
= netdev_reconfigure(netdev
);
1280 if (netdev_is_pmd(netdev
)) {
1281 if (netdev_n_txq(netdev
) < n_cores
+ 1) {
1282 dynamic_txqs
= true;
1286 port
= xzalloc(sizeof *port
);
1287 port
->port_no
= port_no
;
1288 port
->netdev
= netdev
;
1289 port
->n_rxq
= netdev_n_rxq(netdev
);
1290 port
->rxqs
= xcalloc(port
->n_rxq
, sizeof *port
->rxqs
);
1291 port
->txq_used
= xcalloc(netdev_n_txq(netdev
), sizeof *port
->txq_used
);
1292 port
->type
= xstrdup(type
);
1293 ovs_mutex_init(&port
->txq_used_mutex
);
1294 port
->dynamic_txqs
= dynamic_txqs
;
1296 for (i
= 0; i
< port
->n_rxq
; i
++) {
1297 error
= netdev_rxq_open(netdev
, &port
->rxqs
[i
].rxq
, i
);
1299 VLOG_ERR("%s: cannot receive packets on this network device (%s)",
1300 devname
, ovs_strerror(errno
));
1303 port
->rxqs
[i
].core_id
= OVS_CORE_UNSPEC
;
1307 error
= netdev_turn_flags_on(netdev
, NETDEV_PROMISC
, &sf
);
1318 for (i
= 0; i
< n_open_rxqs
; i
++) {
1319 netdev_rxq_close(port
->rxqs
[i
].rxq
);
1321 ovs_mutex_destroy(&port
->txq_used_mutex
);
1323 free(port
->txq_used
);
1328 netdev_close(netdev
);
1333 do_add_port(struct dp_netdev
*dp
, const char *devname
, const char *type
,
1335 OVS_REQUIRES(dp
->port_mutex
)
1337 struct dp_netdev_port
*port
;
1340 /* Reject devices already in 'dp'. */
1341 if (!get_port_by_name(dp
, devname
, &port
)) {
1345 error
= port_create(devname
, type
, port_no
, &port
);
1350 if (netdev_is_pmd(port
->netdev
)) {
1351 int numa_id
= netdev_get_numa_id(port
->netdev
);
1353 ovs_assert(ovs_numa_numa_id_is_valid(numa_id
));
1354 dp_netdev_set_pmds_on_numa(dp
, numa_id
);
1357 dp_netdev_add_port_to_pmds(dp
, port
);
1359 hmap_insert(&dp
->ports
, &port
->node
, hash_port_no(port_no
));
1360 seq_change(dp
->port_seq
);
1366 dpif_netdev_port_add(struct dpif
*dpif
, struct netdev
*netdev
,
1367 odp_port_t
*port_nop
)
1369 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1370 char namebuf
[NETDEV_VPORT_NAME_BUFSIZE
];
1371 const char *dpif_port
;
1375 ovs_mutex_lock(&dp
->port_mutex
);
1376 dpif_port
= netdev_vport_get_dpif_port(netdev
, namebuf
, sizeof namebuf
);
1377 if (*port_nop
!= ODPP_NONE
) {
1378 port_no
= *port_nop
;
1379 error
= dp_netdev_lookup_port(dp
, *port_nop
) ? EBUSY
: 0;
1381 port_no
= choose_port(dp
, dpif_port
);
1382 error
= port_no
== ODPP_NONE
? EFBIG
: 0;
1385 *port_nop
= port_no
;
1386 error
= do_add_port(dp
, dpif_port
, netdev_get_type(netdev
), port_no
);
1388 ovs_mutex_unlock(&dp
->port_mutex
);
1394 dpif_netdev_port_del(struct dpif
*dpif
, odp_port_t port_no
)
1396 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1399 ovs_mutex_lock(&dp
->port_mutex
);
1400 if (port_no
== ODPP_LOCAL
) {
1403 struct dp_netdev_port
*port
;
1405 error
= get_port_by_number(dp
, port_no
, &port
);
1407 do_del_port(dp
, port
);
1410 ovs_mutex_unlock(&dp
->port_mutex
);
1416 is_valid_port_number(odp_port_t port_no
)
1418 return port_no
!= ODPP_NONE
;
1421 static struct dp_netdev_port
*
1422 dp_netdev_lookup_port(const struct dp_netdev
*dp
, odp_port_t port_no
)
1423 OVS_REQUIRES(dp
->port_mutex
)
1425 struct dp_netdev_port
*port
;
1427 HMAP_FOR_EACH_WITH_HASH (port
, node
, hash_port_no(port_no
), &dp
->ports
) {
1428 if (port
->port_no
== port_no
) {
1436 get_port_by_number(struct dp_netdev
*dp
,
1437 odp_port_t port_no
, struct dp_netdev_port
**portp
)
1438 OVS_REQUIRES(dp
->port_mutex
)
1440 if (!is_valid_port_number(port_no
)) {
1444 *portp
= dp_netdev_lookup_port(dp
, port_no
);
1445 return *portp
? 0 : ENODEV
;
1450 port_destroy(struct dp_netdev_port
*port
)
1456 netdev_close(port
->netdev
);
1457 netdev_restore_flags(port
->sf
);
1459 for (unsigned i
= 0; i
< port
->n_rxq
; i
++) {
1460 netdev_rxq_close(port
->rxqs
[i
].rxq
);
1462 ovs_mutex_destroy(&port
->txq_used_mutex
);
1463 free(port
->rxq_affinity_list
);
1464 free(port
->txq_used
);
1471 get_port_by_name(struct dp_netdev
*dp
,
1472 const char *devname
, struct dp_netdev_port
**portp
)
1473 OVS_REQUIRES(dp
->port_mutex
)
1475 struct dp_netdev_port
*port
;
1477 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
1478 if (!strcmp(netdev_get_name(port
->netdev
), devname
)) {
1484 /* Callers of dpif_netdev_port_query_by_name() expect ENODEV for a non
1490 get_n_pmd_threads(struct dp_netdev
*dp
)
1492 /* There is one non pmd thread in dp->poll_threads */
1493 return cmap_count(&dp
->poll_threads
) - 1;
1497 get_n_pmd_threads_on_numa(struct dp_netdev
*dp
, int numa_id
)
1499 struct dp_netdev_pmd_thread
*pmd
;
1502 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
1503 if (pmd
->numa_id
== numa_id
) {
1511 /* Returns 'true' if there is a port with pmd netdev and the netdev is on
1512 * numa node 'numa_id' or its rx queue assigned to core on that numa node . */
1514 has_pmd_rxq_for_numa(struct dp_netdev
*dp
, int numa_id
)
1515 OVS_REQUIRES(dp
->port_mutex
)
1517 struct dp_netdev_port
*port
;
1519 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
1520 if (netdev_is_pmd(port
->netdev
)) {
1523 if (netdev_get_numa_id(port
->netdev
) == numa_id
) {
1527 for (i
= 0; i
< port
->n_rxq
; i
++) {
1528 unsigned core_id
= port
->rxqs
[i
].core_id
;
1530 if (core_id
!= OVS_CORE_UNSPEC
1531 && ovs_numa_get_numa_id(core_id
) == numa_id
) {
1543 do_del_port(struct dp_netdev
*dp
, struct dp_netdev_port
*port
)
1544 OVS_REQUIRES(dp
->port_mutex
)
1546 hmap_remove(&dp
->ports
, &port
->node
);
1547 seq_change(dp
->port_seq
);
1549 dp_netdev_del_port_from_all_pmds(dp
, port
);
1551 if (netdev_is_pmd(port
->netdev
)) {
1552 int numa_id
= netdev_get_numa_id(port
->netdev
);
1554 /* PMD threads can not be on invalid numa node. */
1555 ovs_assert(ovs_numa_numa_id_is_valid(numa_id
));
1556 /* If there is no netdev on the numa node, deletes the pmd threads
1558 if (!has_pmd_rxq_for_numa(dp
, numa_id
)) {
1559 dp_netdev_del_pmds_on_numa(dp
, numa_id
);
1567 answer_port_query(const struct dp_netdev_port
*port
,
1568 struct dpif_port
*dpif_port
)
1570 dpif_port
->name
= xstrdup(netdev_get_name(port
->netdev
));
1571 dpif_port
->type
= xstrdup(port
->type
);
1572 dpif_port
->port_no
= port
->port_no
;
1576 dpif_netdev_port_query_by_number(const struct dpif
*dpif
, odp_port_t port_no
,
1577 struct dpif_port
*dpif_port
)
1579 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1580 struct dp_netdev_port
*port
;
1583 ovs_mutex_lock(&dp
->port_mutex
);
1584 error
= get_port_by_number(dp
, port_no
, &port
);
1585 if (!error
&& dpif_port
) {
1586 answer_port_query(port
, dpif_port
);
1588 ovs_mutex_unlock(&dp
->port_mutex
);
1594 dpif_netdev_port_query_by_name(const struct dpif
*dpif
, const char *devname
,
1595 struct dpif_port
*dpif_port
)
1597 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1598 struct dp_netdev_port
*port
;
1601 ovs_mutex_lock(&dp
->port_mutex
);
1602 error
= get_port_by_name(dp
, devname
, &port
);
1603 if (!error
&& dpif_port
) {
1604 answer_port_query(port
, dpif_port
);
1606 ovs_mutex_unlock(&dp
->port_mutex
);
1612 dp_netdev_flow_free(struct dp_netdev_flow
*flow
)
1614 dp_netdev_actions_free(dp_netdev_flow_get_actions(flow
));
1618 static void dp_netdev_flow_unref(struct dp_netdev_flow
*flow
)
1620 if (ovs_refcount_unref_relaxed(&flow
->ref_cnt
) == 1) {
1621 ovsrcu_postpone(dp_netdev_flow_free
, flow
);
1626 dp_netdev_flow_hash(const ovs_u128
*ufid
)
1628 return ufid
->u32
[0];
1631 static inline struct dpcls
*
1632 dp_netdev_pmd_lookup_dpcls(struct dp_netdev_pmd_thread
*pmd
,
1636 uint32_t hash
= hash_port_no(in_port
);
1637 CMAP_FOR_EACH_WITH_HASH (cls
, node
, hash
, &pmd
->classifiers
) {
1638 if (cls
->in_port
== in_port
) {
1639 /* Port classifier exists already */
1646 static inline struct dpcls
*
1647 dp_netdev_pmd_find_dpcls(struct dp_netdev_pmd_thread
*pmd
,
1649 OVS_REQUIRES(pmd
->flow_mutex
)
1651 struct dpcls
*cls
= dp_netdev_pmd_lookup_dpcls(pmd
, in_port
);
1652 uint32_t hash
= hash_port_no(in_port
);
1655 /* Create new classifier for in_port */
1656 cls
= xmalloc(sizeof(*cls
));
1658 cls
->in_port
= in_port
;
1659 cmap_insert(&pmd
->classifiers
, &cls
->node
, hash
);
1660 VLOG_DBG("Creating dpcls %p for in_port %d", cls
, in_port
);
1666 dp_netdev_pmd_remove_flow(struct dp_netdev_pmd_thread
*pmd
,
1667 struct dp_netdev_flow
*flow
)
1668 OVS_REQUIRES(pmd
->flow_mutex
)
1670 struct cmap_node
*node
= CONST_CAST(struct cmap_node
*, &flow
->node
);
1672 odp_port_t in_port
= flow
->flow
.in_port
.odp_port
;
1674 cls
= dp_netdev_pmd_lookup_dpcls(pmd
, in_port
);
1675 ovs_assert(cls
!= NULL
);
1676 dpcls_remove(cls
, &flow
->cr
);
1677 cmap_remove(&pmd
->flow_table
, node
, dp_netdev_flow_hash(&flow
->ufid
));
1680 dp_netdev_flow_unref(flow
);
1684 dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread
*pmd
)
1686 struct dp_netdev_flow
*netdev_flow
;
1688 ovs_mutex_lock(&pmd
->flow_mutex
);
1689 CMAP_FOR_EACH (netdev_flow
, node
, &pmd
->flow_table
) {
1690 dp_netdev_pmd_remove_flow(pmd
, netdev_flow
);
1692 ovs_mutex_unlock(&pmd
->flow_mutex
);
1696 dpif_netdev_flow_flush(struct dpif
*dpif
)
1698 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1699 struct dp_netdev_pmd_thread
*pmd
;
1701 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
1702 dp_netdev_pmd_flow_flush(pmd
);
1708 struct dp_netdev_port_state
{
1709 struct hmap_position position
;
1714 dpif_netdev_port_dump_start(const struct dpif
*dpif OVS_UNUSED
, void **statep
)
1716 *statep
= xzalloc(sizeof(struct dp_netdev_port_state
));
1721 dpif_netdev_port_dump_next(const struct dpif
*dpif
, void *state_
,
1722 struct dpif_port
*dpif_port
)
1724 struct dp_netdev_port_state
*state
= state_
;
1725 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1726 struct hmap_node
*node
;
1729 ovs_mutex_lock(&dp
->port_mutex
);
1730 node
= hmap_at_position(&dp
->ports
, &state
->position
);
1732 struct dp_netdev_port
*port
;
1734 port
= CONTAINER_OF(node
, struct dp_netdev_port
, node
);
1737 state
->name
= xstrdup(netdev_get_name(port
->netdev
));
1738 dpif_port
->name
= state
->name
;
1739 dpif_port
->type
= port
->type
;
1740 dpif_port
->port_no
= port
->port_no
;
1746 ovs_mutex_unlock(&dp
->port_mutex
);
1752 dpif_netdev_port_dump_done(const struct dpif
*dpif OVS_UNUSED
, void *state_
)
1754 struct dp_netdev_port_state
*state
= state_
;
1761 dpif_netdev_port_poll(const struct dpif
*dpif_
, char **devnamep OVS_UNUSED
)
1763 struct dpif_netdev
*dpif
= dpif_netdev_cast(dpif_
);
1764 uint64_t new_port_seq
;
1767 new_port_seq
= seq_read(dpif
->dp
->port_seq
);
1768 if (dpif
->last_port_seq
!= new_port_seq
) {
1769 dpif
->last_port_seq
= new_port_seq
;
1779 dpif_netdev_port_poll_wait(const struct dpif
*dpif_
)
1781 struct dpif_netdev
*dpif
= dpif_netdev_cast(dpif_
);
1783 seq_wait(dpif
->dp
->port_seq
, dpif
->last_port_seq
);
1786 static struct dp_netdev_flow
*
1787 dp_netdev_flow_cast(const struct dpcls_rule
*cr
)
1789 return cr
? CONTAINER_OF(cr
, struct dp_netdev_flow
, cr
) : NULL
;
1792 static bool dp_netdev_flow_ref(struct dp_netdev_flow
*flow
)
1794 return ovs_refcount_try_ref_rcu(&flow
->ref_cnt
);
1797 /* netdev_flow_key utilities.
1799 * netdev_flow_key is basically a miniflow. We use these functions
1800 * (netdev_flow_key_clone, netdev_flow_key_equal, ...) instead of the miniflow
1801 * functions (miniflow_clone_inline, miniflow_equal, ...), because:
1803 * - Since we are dealing exclusively with miniflows created by
1804 * miniflow_extract(), if the map is different the miniflow is different.
1805 * Therefore we can be faster by comparing the map and the miniflow in a
1807 * - These functions can be inlined by the compiler. */
1809 /* Given the number of bits set in miniflow's maps, returns the size of the
1810 * 'netdev_flow_key.mf' */
1811 static inline size_t
1812 netdev_flow_key_size(size_t flow_u64s
)
1814 return sizeof(struct miniflow
) + MINIFLOW_VALUES_SIZE(flow_u64s
);
1818 netdev_flow_key_equal(const struct netdev_flow_key
*a
,
1819 const struct netdev_flow_key
*b
)
1821 /* 'b->len' may be not set yet. */
1822 return a
->hash
== b
->hash
&& !memcmp(&a
->mf
, &b
->mf
, a
->len
);
1825 /* Used to compare 'netdev_flow_key' in the exact match cache to a miniflow.
1826 * The maps are compared bitwise, so both 'key->mf' and 'mf' must have been
1827 * generated by miniflow_extract. */
1829 netdev_flow_key_equal_mf(const struct netdev_flow_key
*key
,
1830 const struct miniflow
*mf
)
1832 return !memcmp(&key
->mf
, mf
, key
->len
);
1836 netdev_flow_key_clone(struct netdev_flow_key
*dst
,
1837 const struct netdev_flow_key
*src
)
1840 offsetof(struct netdev_flow_key
, mf
) + src
->len
);
1845 netdev_flow_key_from_flow(struct netdev_flow_key
*dst
,
1846 const struct flow
*src
)
1848 struct dp_packet packet
;
1849 uint64_t buf_stub
[512 / 8];
1851 dp_packet_use_stub(&packet
, buf_stub
, sizeof buf_stub
);
1852 pkt_metadata_from_flow(&packet
.md
, src
);
1853 flow_compose(&packet
, src
);
1854 miniflow_extract(&packet
, &dst
->mf
);
1855 dp_packet_uninit(&packet
);
1857 dst
->len
= netdev_flow_key_size(miniflow_n_values(&dst
->mf
));
1858 dst
->hash
= 0; /* Not computed yet. */
1861 /* Initialize a netdev_flow_key 'mask' from 'match'. */
1863 netdev_flow_mask_init(struct netdev_flow_key
*mask
,
1864 const struct match
*match
)
1866 uint64_t *dst
= miniflow_values(&mask
->mf
);
1867 struct flowmap fmap
;
1871 /* Only check masks that make sense for the flow. */
1872 flow_wc_map(&match
->flow
, &fmap
);
1873 flowmap_init(&mask
->mf
.map
);
1875 FLOWMAP_FOR_EACH_INDEX(idx
, fmap
) {
1876 uint64_t mask_u64
= flow_u64_value(&match
->wc
.masks
, idx
);
1879 flowmap_set(&mask
->mf
.map
, idx
, 1);
1881 hash
= hash_add64(hash
, mask_u64
);
1887 FLOWMAP_FOR_EACH_MAP (map
, mask
->mf
.map
) {
1888 hash
= hash_add64(hash
, map
);
1891 size_t n
= dst
- miniflow_get_values(&mask
->mf
);
1893 mask
->hash
= hash_finish(hash
, n
* 8);
1894 mask
->len
= netdev_flow_key_size(n
);
1897 /* Initializes 'dst' as a copy of 'flow' masked with 'mask'. */
1899 netdev_flow_key_init_masked(struct netdev_flow_key
*dst
,
1900 const struct flow
*flow
,
1901 const struct netdev_flow_key
*mask
)
1903 uint64_t *dst_u64
= miniflow_values(&dst
->mf
);
1904 const uint64_t *mask_u64
= miniflow_get_values(&mask
->mf
);
1908 dst
->len
= mask
->len
;
1909 dst
->mf
= mask
->mf
; /* Copy maps. */
1911 FLOW_FOR_EACH_IN_MAPS(value
, flow
, mask
->mf
.map
) {
1912 *dst_u64
= value
& *mask_u64
++;
1913 hash
= hash_add64(hash
, *dst_u64
++);
1915 dst
->hash
= hash_finish(hash
,
1916 (dst_u64
- miniflow_get_values(&dst
->mf
)) * 8);
1919 /* Iterate through netdev_flow_key TNL u64 values specified by 'FLOWMAP'. */
1920 #define NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(VALUE, KEY, FLOWMAP) \
1921 MINIFLOW_FOR_EACH_IN_FLOWMAP(VALUE, &(KEY)->mf, FLOWMAP)
1923 /* Returns a hash value for the bits of 'key' where there are 1-bits in
1925 static inline uint32_t
1926 netdev_flow_key_hash_in_mask(const struct netdev_flow_key
*key
,
1927 const struct netdev_flow_key
*mask
)
1929 const uint64_t *p
= miniflow_get_values(&mask
->mf
);
1933 NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value
, key
, mask
->mf
.map
) {
1934 hash
= hash_add64(hash
, value
& *p
++);
1937 return hash_finish(hash
, (p
- miniflow_get_values(&mask
->mf
)) * 8);
1941 emc_entry_alive(struct emc_entry
*ce
)
1943 return ce
->flow
&& !ce
->flow
->dead
;
1947 emc_clear_entry(struct emc_entry
*ce
)
1950 dp_netdev_flow_unref(ce
->flow
);
1956 emc_change_entry(struct emc_entry
*ce
, struct dp_netdev_flow
*flow
,
1957 const struct netdev_flow_key
*key
)
1959 if (ce
->flow
!= flow
) {
1961 dp_netdev_flow_unref(ce
->flow
);
1964 if (dp_netdev_flow_ref(flow
)) {
1971 netdev_flow_key_clone(&ce
->key
, key
);
1976 emc_insert(struct emc_cache
*cache
, const struct netdev_flow_key
*key
,
1977 struct dp_netdev_flow
*flow
)
1979 struct emc_entry
*to_be_replaced
= NULL
;
1980 struct emc_entry
*current_entry
;
1982 EMC_FOR_EACH_POS_WITH_HASH(cache
, current_entry
, key
->hash
) {
1983 if (netdev_flow_key_equal(¤t_entry
->key
, key
)) {
1984 /* We found the entry with the 'mf' miniflow */
1985 emc_change_entry(current_entry
, flow
, NULL
);
1989 /* Replacement policy: put the flow in an empty (not alive) entry, or
1990 * in the first entry where it can be */
1992 || (emc_entry_alive(to_be_replaced
)
1993 && !emc_entry_alive(current_entry
))
1994 || current_entry
->key
.hash
< to_be_replaced
->key
.hash
) {
1995 to_be_replaced
= current_entry
;
1998 /* We didn't find the miniflow in the cache.
1999 * The 'to_be_replaced' entry is where the new flow will be stored */
2001 emc_change_entry(to_be_replaced
, flow
, key
);
2004 static inline struct dp_netdev_flow
*
2005 emc_lookup(struct emc_cache
*cache
, const struct netdev_flow_key
*key
)
2007 struct emc_entry
*current_entry
;
2009 EMC_FOR_EACH_POS_WITH_HASH(cache
, current_entry
, key
->hash
) {
2010 if (current_entry
->key
.hash
== key
->hash
2011 && emc_entry_alive(current_entry
)
2012 && netdev_flow_key_equal_mf(¤t_entry
->key
, &key
->mf
)) {
2014 /* We found the entry with the 'key->mf' miniflow */
2015 return current_entry
->flow
;
2022 static struct dp_netdev_flow
*
2023 dp_netdev_pmd_lookup_flow(struct dp_netdev_pmd_thread
*pmd
,
2024 const struct netdev_flow_key
*key
,
2028 struct dpcls_rule
*rule
;
2029 odp_port_t in_port
= u32_to_odp(MINIFLOW_GET_U32(&key
->mf
, in_port
));
2030 struct dp_netdev_flow
*netdev_flow
= NULL
;
2032 cls
= dp_netdev_pmd_lookup_dpcls(pmd
, in_port
);
2033 if (OVS_LIKELY(cls
)) {
2034 dpcls_lookup(cls
, key
, &rule
, 1, lookup_num_p
);
2035 netdev_flow
= dp_netdev_flow_cast(rule
);
2040 static struct dp_netdev_flow
*
2041 dp_netdev_pmd_find_flow(const struct dp_netdev_pmd_thread
*pmd
,
2042 const ovs_u128
*ufidp
, const struct nlattr
*key
,
2045 struct dp_netdev_flow
*netdev_flow
;
2049 /* If a UFID is not provided, determine one based on the key. */
2050 if (!ufidp
&& key
&& key_len
2051 && !dpif_netdev_flow_from_nlattrs(key
, key_len
, &flow
)) {
2052 dpif_flow_hash(pmd
->dp
->dpif
, &flow
, sizeof flow
, &ufid
);
2057 CMAP_FOR_EACH_WITH_HASH (netdev_flow
, node
, dp_netdev_flow_hash(ufidp
),
2059 if (ovs_u128_equals(netdev_flow
->ufid
, *ufidp
)) {
2069 get_dpif_flow_stats(const struct dp_netdev_flow
*netdev_flow_
,
2070 struct dpif_flow_stats
*stats
)
2072 struct dp_netdev_flow
*netdev_flow
;
2073 unsigned long long n
;
2077 netdev_flow
= CONST_CAST(struct dp_netdev_flow
*, netdev_flow_
);
2079 atomic_read_relaxed(&netdev_flow
->stats
.packet_count
, &n
);
2080 stats
->n_packets
= n
;
2081 atomic_read_relaxed(&netdev_flow
->stats
.byte_count
, &n
);
2083 atomic_read_relaxed(&netdev_flow
->stats
.used
, &used
);
2085 atomic_read_relaxed(&netdev_flow
->stats
.tcp_flags
, &flags
);
2086 stats
->tcp_flags
= flags
;
2089 /* Converts to the dpif_flow format, using 'key_buf' and 'mask_buf' for
2090 * storing the netlink-formatted key/mask. 'key_buf' may be the same as
2091 * 'mask_buf'. Actions will be returned without copying, by relying on RCU to
2094 dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow
*netdev_flow
,
2095 struct ofpbuf
*key_buf
, struct ofpbuf
*mask_buf
,
2096 struct dpif_flow
*flow
, bool terse
)
2099 memset(flow
, 0, sizeof *flow
);
2101 struct flow_wildcards wc
;
2102 struct dp_netdev_actions
*actions
;
2104 struct odp_flow_key_parms odp_parms
= {
2105 .flow
= &netdev_flow
->flow
,
2107 .support
= dp_netdev_support
,
2110 miniflow_expand(&netdev_flow
->cr
.mask
->mf
, &wc
.masks
);
2111 /* in_port is exact matched, but we have left it out from the mask for
2112 * optimnization reasons. Add in_port back to the mask. */
2113 wc
.masks
.in_port
.odp_port
= ODPP_NONE
;
2116 offset
= key_buf
->size
;
2117 flow
->key
= ofpbuf_tail(key_buf
);
2118 odp_flow_key_from_flow(&odp_parms
, key_buf
);
2119 flow
->key_len
= key_buf
->size
- offset
;
2122 offset
= mask_buf
->size
;
2123 flow
->mask
= ofpbuf_tail(mask_buf
);
2124 odp_parms
.key_buf
= key_buf
;
2125 odp_flow_key_from_mask(&odp_parms
, mask_buf
);
2126 flow
->mask_len
= mask_buf
->size
- offset
;
2129 actions
= dp_netdev_flow_get_actions(netdev_flow
);
2130 flow
->actions
= actions
->actions
;
2131 flow
->actions_len
= actions
->size
;
2134 flow
->ufid
= netdev_flow
->ufid
;
2135 flow
->ufid_present
= true;
2136 flow
->pmd_id
= netdev_flow
->pmd_id
;
2137 get_dpif_flow_stats(netdev_flow
, &flow
->stats
);
static int
dpif_netdev_mask_from_nlattrs(const struct nlattr *key, uint32_t key_len,
                              const struct nlattr *mask_key,
                              uint32_t mask_key_len, const struct flow *flow,
                              struct flow_wildcards *wc)
{
    enum odp_key_fitness fitness;

    fitness = odp_flow_key_to_mask(mask_key, mask_key_len, wc, flow);
    if (fitness) {
        /* This should not happen: it indicates that
         * odp_flow_key_from_mask() and odp_flow_key_to_mask()
         * disagree on the acceptable form of a mask. Log the problem
         * as an error, with enough details to enable debugging. */
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        if (!VLOG_DROP_ERR(&rl)) {
            struct ds s;

            ds_init(&s);
            odp_flow_format(key, key_len, mask_key, mask_key_len, NULL, &s,
                            true);
            VLOG_ERR("internal error parsing flow mask %s (%s)",
                     ds_cstr(&s), odp_key_fitness_to_string(fitness));
            ds_destroy(&s);
        }

        return EINVAL;
    }

    return 0;
}

static int
dpif_netdev_flow_from_nlattrs(const struct nlattr *key, uint32_t key_len,
                              struct flow *flow)
{
    odp_port_t in_port;

    if (odp_flow_key_to_flow(key, key_len, flow)) {
        /* This should not happen: it indicates that odp_flow_key_from_flow()
         * and odp_flow_key_to_flow() disagree on the acceptable form of a
         * flow. Log the problem as an error, with enough details to enable
         * debugging. */
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        if (!VLOG_DROP_ERR(&rl)) {
            struct ds s;

            ds_init(&s);
            odp_flow_format(key, key_len, NULL, 0, NULL, &s, true);
            VLOG_ERR("internal error parsing flow key %s", ds_cstr(&s));
            ds_destroy(&s);
        }

        return EINVAL;
    }

    in_port = flow->in_port.odp_port;
    if (!is_valid_port_number(in_port) && in_port != ODPP_NONE) {
        return EINVAL;
    }

    if (flow->ct_state & DP_NETDEV_CS_UNSUPPORTED_MASK) {
        return EINVAL;
    }

    return 0;
}

static int
dpif_netdev_flow_get(const struct dpif *dpif, const struct dpif_flow_get *get)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_flow *netdev_flow;
    struct dp_netdev_pmd_thread *pmd;
    struct hmapx to_find = HMAPX_INITIALIZER(&to_find);
    struct hmapx_node *node;
    int error = EINVAL;

    if (get->pmd_id == PMD_ID_NULL) {
        CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
            if (dp_netdev_pmd_try_ref(pmd) && !hmapx_add(&to_find, pmd)) {
                dp_netdev_pmd_unref(pmd);
            }
        }
    } else {
        pmd = dp_netdev_get_pmd(dp, get->pmd_id);
        if (!pmd) {
            goto out;
        }
        hmapx_add(&to_find, pmd);
    }

    if (!hmapx_count(&to_find)) {
        goto out;
    }

    HMAPX_FOR_EACH (node, &to_find) {
        pmd = (struct dp_netdev_pmd_thread *) node->data;
        netdev_flow = dp_netdev_pmd_find_flow(pmd, get->ufid, get->key,
                                              get->key_len);
        if (netdev_flow) {
            dp_netdev_flow_to_dpif_flow(netdev_flow, get->buffer, get->buffer,
                                        get->flow, false);
            error = 0;
            break;
        } else {
            error = ENOENT;
        }
    }

    HMAPX_FOR_EACH (node, &to_find) {
        pmd = (struct dp_netdev_pmd_thread *) node->data;
        dp_netdev_pmd_unref(pmd);
    }
out:
    hmapx_destroy(&to_find);
    return error;
}

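/* Installs a new flow in 'pmd': the classifier rule goes into the dpcls
 * selected by the (exact-matched) in_port, and the flow itself goes into
 * 'pmd->flow_table' hashed by UFID.  Caller holds 'pmd->flow_mutex'. */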
static struct dp_netdev_flow *
dp_netdev_flow_add(struct dp_netdev_pmd_thread *pmd,
                   struct match *match, const ovs_u128 *ufid,
                   const struct nlattr *actions, size_t actions_len)
    OVS_REQUIRES(pmd->flow_mutex)
{
    struct dp_netdev_flow *flow;
    struct netdev_flow_key mask;
    struct dpcls *cls;

    /* Make sure in_port is exact matched before we read it. */
    ovs_assert(match->wc.masks.in_port.odp_port == ODPP_NONE);
    odp_port_t in_port = match->flow.in_port.odp_port;

    /* As we select the dpcls based on the port number, each netdev flow
     * belonging to the same dpcls will have the same odp_port value.
     * For performance reasons we wildcard odp_port here in the mask. In the
     * typical case dp_hash is also wildcarded, and the resulting 8-byte
     * chunk {dp_hash, in_port} will be ignored by netdev_flow_mask_init() and
     * will not be part of the subtable mask.
     * This will speed up the hash computation during dpcls_lookup() because
     * there is one less call to hash_add64() in this case. */
    match->wc.masks.in_port.odp_port = 0;
    netdev_flow_mask_init(&mask, match);
    match->wc.masks.in_port.odp_port = ODPP_NONE;

    /* Make sure wc does not have metadata. */
    ovs_assert(!FLOWMAP_HAS_FIELD(&mask.mf.map, metadata)
               && !FLOWMAP_HAS_FIELD(&mask.mf.map, regs));

    /* Do not allocate extra space. */
    flow = xmalloc(sizeof *flow - sizeof flow->cr.flow.mf + mask.len);
    memset(&flow->stats, 0, sizeof flow->stats);
    flow->dead = false;
    flow->batch = NULL;
    *CONST_CAST(unsigned *, &flow->pmd_id) = pmd->core_id;
    *CONST_CAST(struct flow *, &flow->flow) = match->flow;
    *CONST_CAST(ovs_u128 *, &flow->ufid) = *ufid;
    ovs_refcount_init(&flow->ref_cnt);
    ovsrcu_set(&flow->actions, dp_netdev_actions_create(actions, actions_len));

    netdev_flow_key_init_masked(&flow->cr.flow, &match->flow, &mask);

    /* Select dpcls for in_port. Relies on in_port to be exact match. */
    cls = dp_netdev_pmd_find_dpcls(pmd, in_port);
    dpcls_insert(cls, &flow->cr, &mask);

    cmap_insert(&pmd->flow_table, CONST_CAST(struct cmap_node *, &flow->node),
                dp_netdev_flow_hash(&flow->ufid));

    if (OVS_UNLIKELY(VLOG_IS_DBG_ENABLED())) {
        struct ds ds = DS_EMPTY_INITIALIZER;
        struct ofpbuf key_buf, mask_buf;
        struct odp_flow_key_parms odp_parms = {
            .flow = &match->flow,
            .mask = &match->wc.masks,
            .support = dp_netdev_support,
        };

        ofpbuf_init(&key_buf, 0);
        ofpbuf_init(&mask_buf, 0);

        odp_flow_key_from_flow(&odp_parms, &key_buf);
        odp_parms.key_buf = &key_buf;
        odp_flow_key_from_mask(&odp_parms, &mask_buf);

        ds_put_cstr(&ds, "flow_add: ");
        odp_format_ufid(ufid, &ds);
        ds_put_cstr(&ds, " ");
        odp_flow_format(key_buf.data, key_buf.size,
                        mask_buf.data, mask_buf.size,
                        NULL, &ds, false);
        ds_put_cstr(&ds, ", actions:");
        format_odp_actions(&ds, actions, actions_len);

        VLOG_DBG_RL(&upcall_rl, "%s", ds_cstr(&ds));

        ofpbuf_uninit(&key_buf);
        ofpbuf_uninit(&mask_buf);
        ds_destroy(&ds);
    }

    return flow;
}

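/* Implements the dpif 'flow_put' operation: validates the netlink key and
 * mask, then, under 'pmd->flow_mutex', either creates the flow or swaps in
 * new actions, depending on the DPIF_FP_* flags. */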
static int
dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_flow *netdev_flow;
    struct netdev_flow_key key;
    struct dp_netdev_pmd_thread *pmd;
    struct match match;
    ovs_u128 ufid;
    unsigned pmd_id = put->pmd_id == PMD_ID_NULL
                      ? NON_PMD_CORE_ID : put->pmd_id;
    int error;

    error = dpif_netdev_flow_from_nlattrs(put->key, put->key_len, &match.flow);
    if (error) {
        return error;
    }
    error = dpif_netdev_mask_from_nlattrs(put->key, put->key_len,
                                          put->mask, put->mask_len,
                                          &match.flow, &match.wc);
    if (error) {
        return error;
    }

    pmd = dp_netdev_get_pmd(dp, pmd_id);
    if (!pmd) {
        return EINVAL;
    }

    /* Must produce a netdev_flow_key for lookup.
     * This interface is no longer performance critical, since it is not used
     * for upcall processing any more. */
    netdev_flow_key_from_flow(&key, &match.flow);

    if (put->ufid) {
        ufid = *put->ufid;
    } else {
        dpif_flow_hash(dpif, &match.flow, sizeof match.flow, &ufid);
    }

    ovs_mutex_lock(&pmd->flow_mutex);
    netdev_flow = dp_netdev_pmd_lookup_flow(pmd, &key, NULL);
    if (!netdev_flow) {
        if (put->flags & DPIF_FP_CREATE) {
            if (cmap_count(&pmd->flow_table) < MAX_FLOWS) {
                if (put->stats) {
                    memset(put->stats, 0, sizeof *put->stats);
                }
                dp_netdev_flow_add(pmd, &match, &ufid, put->actions,
                                   put->actions_len);
                error = 0;
            } else {
                error = EFBIG;
            }
        } else {
            error = ENOENT;
        }
    } else {
        if (put->flags & DPIF_FP_MODIFY
            && flow_equal(&match.flow, &netdev_flow->flow)) {
            struct dp_netdev_actions *new_actions;
            struct dp_netdev_actions *old_actions;

            new_actions = dp_netdev_actions_create(put->actions,
                                                   put->actions_len);

            old_actions = dp_netdev_flow_get_actions(netdev_flow);
            ovsrcu_set(&netdev_flow->actions, new_actions);

            if (put->stats) {
                get_dpif_flow_stats(netdev_flow, put->stats);
            }
            if (put->flags & DPIF_FP_ZERO_STATS) {
                /* XXX: The userspace datapath uses thread local statistics
                 * (for flows), which should be updated only by the owning
                 * thread.  Since we cannot write on stats memory here,
                 * we choose not to support this flag.  Please note:
                 * - This feature is currently used only by dpctl commands with
                 *   option --clear.
                 * - Should the need arise, this operation can be implemented
                 *   by keeping a base value (to be updated here) for each
                 *   counter, and subtracting it before outputting the stats */
                error = EOPNOTSUPP;
            }

            ovsrcu_postpone(dp_netdev_actions_free, old_actions);
        } else if (put->flags & DPIF_FP_CREATE) {
            error = EEXIST;
        } else {
            /* Overlapping flow. */
            error = EINVAL;
        }
    }
    ovs_mutex_unlock(&pmd->flow_mutex);
    dp_netdev_pmd_unref(pmd);

    return error;
}

static int
dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_flow *netdev_flow;
    struct dp_netdev_pmd_thread *pmd;
    unsigned pmd_id = del->pmd_id == PMD_ID_NULL
                      ? NON_PMD_CORE_ID : del->pmd_id;
    int error = 0;

    pmd = dp_netdev_get_pmd(dp, pmd_id);
    if (!pmd) {
        return EINVAL;
    }

    ovs_mutex_lock(&pmd->flow_mutex);
    netdev_flow = dp_netdev_pmd_find_flow(pmd, del->ufid, del->key,
                                          del->key_len);
    if (netdev_flow) {
        if (del->stats) {
            get_dpif_flow_stats(netdev_flow, del->stats);
        }
        dp_netdev_pmd_remove_flow(pmd, netdev_flow);
    } else {
        error = ENOENT;
    }
    ovs_mutex_unlock(&pmd->flow_mutex);
    dp_netdev_pmd_unref(pmd);

    return error;
}

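/* Flow dump state.  'poll_thread_pos' and 'flow_pos' record how far the dump
 * has progressed across pmd threads and within the current pmd's flow table;
 * 'mutex' serializes dump_next() calls from concurrent dump threads. */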
struct dpif_netdev_flow_dump {
    struct dpif_flow_dump up;
    struct cmap_position poll_thread_pos;
    struct cmap_position flow_pos;
    struct dp_netdev_pmd_thread *cur_pmd;
    int status;
    struct ovs_mutex mutex;
};

static struct dpif_netdev_flow_dump *
dpif_netdev_flow_dump_cast(struct dpif_flow_dump *dump)
{
    return CONTAINER_OF(dump, struct dpif_netdev_flow_dump, up);
}

static struct dpif_flow_dump *
dpif_netdev_flow_dump_create(const struct dpif *dpif_, bool terse)
{
    struct dpif_netdev_flow_dump *dump;

    dump = xzalloc(sizeof *dump);
    dpif_flow_dump_init(&dump->up, dpif_);
    dump->up.terse = terse;
    ovs_mutex_init(&dump->mutex);

    return &dump->up;
}

static int
dpif_netdev_flow_dump_destroy(struct dpif_flow_dump *dump_)
{
    struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_);

    ovs_mutex_destroy(&dump->mutex);
    free(dump);
    return 0;
}

struct dpif_netdev_flow_dump_thread {
    struct dpif_flow_dump_thread up;
    struct dpif_netdev_flow_dump *dump;
    struct odputil_keybuf keybuf[FLOW_DUMP_MAX_BATCH];
    struct odputil_keybuf maskbuf[FLOW_DUMP_MAX_BATCH];
};

static struct dpif_netdev_flow_dump_thread *
dpif_netdev_flow_dump_thread_cast(struct dpif_flow_dump_thread *thread)
{
    return CONTAINER_OF(thread, struct dpif_netdev_flow_dump_thread, up);
}

static struct dpif_flow_dump_thread *
dpif_netdev_flow_dump_thread_create(struct dpif_flow_dump *dump_)
{
    struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_);
    struct dpif_netdev_flow_dump_thread *thread;

    thread = xmalloc(sizeof *thread);
    dpif_flow_dump_thread_init(&thread->up, &dump->up);
    thread->dump = dump;
    return &thread->up;
}

static void
dpif_netdev_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_)
{
    struct dpif_netdev_flow_dump_thread *thread
        = dpif_netdev_flow_dump_thread_cast(thread_);

    free(thread);
}

static int
dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
                           struct dpif_flow *flows, int max_flows)
{
    struct dpif_netdev_flow_dump_thread *thread
        = dpif_netdev_flow_dump_thread_cast(thread_);
    struct dpif_netdev_flow_dump *dump = thread->dump;
    struct dp_netdev_flow *netdev_flows[FLOW_DUMP_MAX_BATCH];
    int n_flows = 0;
    int i;

    ovs_mutex_lock(&dump->mutex);
    if (!dump->status) {
        struct dpif_netdev *dpif = dpif_netdev_cast(thread->up.dpif);
        struct dp_netdev *dp = get_dp_netdev(&dpif->dpif);
        struct dp_netdev_pmd_thread *pmd = dump->cur_pmd;
        int flow_limit = MIN(max_flows, FLOW_DUMP_MAX_BATCH);

        /* First call to dump_next(), extracts the first pmd thread.
         * If there is no pmd thread, returns immediately. */
        if (!pmd) {
            pmd = dp_netdev_pmd_get_next(dp, &dump->poll_thread_pos);
            if (!pmd) {
                ovs_mutex_unlock(&dump->mutex);
                return n_flows;
            }
        }

        do {
            for (n_flows = 0; n_flows < flow_limit; n_flows++) {
                struct cmap_node *node;

                node = cmap_next_position(&pmd->flow_table, &dump->flow_pos);
                if (!node) {
                    break;
                }
                netdev_flows[n_flows] = CONTAINER_OF(node,
                                                     struct dp_netdev_flow,
                                                     node);
            }
            /* When finishing dumping the current pmd thread, moves to
             * the next. */
            if (n_flows < flow_limit) {
                memset(&dump->flow_pos, 0, sizeof dump->flow_pos);
                dp_netdev_pmd_unref(pmd);
                pmd = dp_netdev_pmd_get_next(dp, &dump->poll_thread_pos);
                if (!pmd) {
                    dump->status = EOF;
                    break;
                }
            }
            /* Keeps the reference to next caller. */
            dump->cur_pmd = pmd;

            /* If the current dump is empty, do not exit the loop, since the
             * remaining pmds could have flows to be dumped.  Just dumps again
             * on the new 'pmd'. */
        } while (!n_flows);
    }
    ovs_mutex_unlock(&dump->mutex);

    for (i = 0; i < n_flows; i++) {
        struct odputil_keybuf *maskbuf = &thread->maskbuf[i];
        struct odputil_keybuf *keybuf = &thread->keybuf[i];
        struct dp_netdev_flow *netdev_flow = netdev_flows[i];
        struct dpif_flow *f = &flows[i];
        struct ofpbuf key, mask;

        ofpbuf_use_stack(&key, keybuf, sizeof *keybuf);
        ofpbuf_use_stack(&mask, maskbuf, sizeof *maskbuf);
        dp_netdev_flow_to_dpif_flow(netdev_flow, &key, &mask, f,
                                    dump->up.terse);
    }

    return n_flows;
}

static int
dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_pmd_thread *pmd;
    struct dp_packet_batch pp;

    if (dp_packet_size(execute->packet) < ETH_HEADER_LEN ||
        dp_packet_size(execute->packet) > UINT16_MAX) {
        return EINVAL;
    }

    /* Tries finding the 'pmd'.  If NULL is returned, that means
     * the current thread is a non-pmd thread and should use
     * dp_netdev_get_pmd(dp, NON_PMD_CORE_ID). */
    pmd = ovsthread_getspecific(dp->per_pmd_key);
    if (!pmd) {
        pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID);
        if (!pmd) {
            return EBUSY;
        }
    }

    /* If the current thread is non-pmd thread, acquires
     * the 'non_pmd_mutex'. */
    if (pmd->core_id == NON_PMD_CORE_ID) {
        ovs_mutex_lock(&dp->non_pmd_mutex);
    }

    /* The action processing expects the RSS hash to be valid, because
     * it's always initialized at the beginning of datapath processing.
     * In this case, though, 'execute->packet' may not have gone through
     * the datapath at all, it may have been generated by the upper layer
     * (OpenFlow packet-out, BFD frame, ...). */
    if (!dp_packet_rss_valid(execute->packet)) {
        dp_packet_set_rss_hash(execute->packet,
                               flow_hash_5tuple(execute->flow, 0));
    }

    packet_batch_init_packet(&pp, execute->packet);
    dp_netdev_execute_actions(pmd, &pp, false, execute->flow,
                              execute->actions, execute->actions_len,
                              time_msec());

    if (pmd->core_id == NON_PMD_CORE_ID) {
        ovs_mutex_unlock(&dp->non_pmd_mutex);
        dp_netdev_pmd_unref(pmd);
    }

    return 0;
}

static void
dpif_netdev_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops)
{
    size_t i;

    for (i = 0; i < n_ops; i++) {
        struct dpif_op *op = ops[i];

        switch (op->type) {
        case DPIF_OP_FLOW_PUT:
            op->error = dpif_netdev_flow_put(dpif, &op->u.flow_put);
            break;

        case DPIF_OP_FLOW_DEL:
            op->error = dpif_netdev_flow_del(dpif, &op->u.flow_del);
            break;

        case DPIF_OP_EXECUTE:
            op->error = dpif_netdev_execute(dpif, &op->u.execute);
            break;

        case DPIF_OP_FLOW_GET:
            op->error = dpif_netdev_flow_get(dpif, &op->u.flow_get);
            break;
        }
    }
}

/* Changes the number or the affinity of pmd threads.  The changes are
 * actually applied in dpif_netdev_run(). */
static int
dpif_netdev_pmd_set(struct dpif *dpif, const char *cmask)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    if (!nullable_string_is_equal(dp->pmd_cmask, cmask)) {
        free(dp->pmd_cmask);
        dp->pmd_cmask = nullable_xstrdup(cmask);
        dp_netdev_request_reconfigure(dp);
    }

    return 0;
}

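/* An affinity list is a comma-separated list of "<rxq-id>:<core-id>" pairs,
 * following the 'pmd-rxq-affinity' interface option syntax; for example,
 * "0:3,1:7" requests that rx queue 0 be polled by core 3 and rx queue 1 by
 * core 7. */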
/* Parses affinity list and returns result in 'core_ids'. */
static int
parse_affinity_list(const char *affinity_list, unsigned *core_ids, int n_rxq)
{
    char *list, *copy, *key, *value;
    int i;
    int error = 0;

    for (i = 0; i < n_rxq; i++) {
        core_ids[i] = OVS_CORE_UNSPEC;
    }

    if (!affinity_list) {
        return 0;
    }

    list = copy = xstrdup(affinity_list);

    while (ofputil_parse_key_value(&list, &key, &value)) {
        int rxq_id, core_id;

        if (!str_to_int(key, 0, &rxq_id) || rxq_id < 0
            || !str_to_int(value, 0, &core_id) || core_id < 0) {
            error = EINVAL;
            break;
        }

        if (rxq_id < n_rxq) {
            core_ids[rxq_id] = core_id;
        }
    }

    free(copy);
    return error;
}

/* Parses 'affinity_list' and applies configuration if it is valid. */
static int
dpif_netdev_port_set_rxq_affinity(struct dp_netdev_port *port,
                                  const char *affinity_list)
{
    unsigned *core_ids, i;
    int error = 0;

    core_ids = xmalloc(port->n_rxq * sizeof *core_ids);
    if (parse_affinity_list(affinity_list, core_ids, port->n_rxq)) {
        error = EINVAL;
        goto exit;
    }

    for (i = 0; i < port->n_rxq; i++) {
        port->rxqs[i].core_id = core_ids[i];
    }

exit:
    free(core_ids);
    return error;
}

/* Changes the affinity of port's rx queues.  The changes are actually applied
 * in dpif_netdev_run(). */
static int
dpif_netdev_port_set_config(struct dpif *dpif, odp_port_t port_no,
                            const struct smap *cfg)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_port *port;
    int error = 0;
    const char *affinity_list = smap_get(cfg, "pmd-rxq-affinity");

    ovs_mutex_lock(&dp->port_mutex);
    error = get_port_by_number(dp, port_no, &port);
    if (error || !netdev_is_pmd(port->netdev)
        || nullable_string_is_equal(affinity_list, port->rxq_affinity_list)) {
        goto unlock;
    }

    error = dpif_netdev_port_set_rxq_affinity(port, affinity_list);
    if (error) {
        goto unlock;
    }
    free(port->rxq_affinity_list);
    port->rxq_affinity_list = nullable_xstrdup(affinity_list);

    dp_netdev_request_reconfigure(dp);
unlock:
    ovs_mutex_unlock(&dp->port_mutex);
    return error;
}

static int
dpif_netdev_queue_to_priority(const struct dpif *dpif OVS_UNUSED,
                              uint32_t queue_id, uint32_t *priority)
{
    *priority = queue_id;
    return 0;
}

/* Creates and returns a new 'struct dp_netdev_actions', whose actions are
 * a copy of the 'size' bytes of 'actions'. */
struct dp_netdev_actions *
dp_netdev_actions_create(const struct nlattr *actions, size_t size)
{
    struct dp_netdev_actions *netdev_actions;

    netdev_actions = xmalloc(sizeof *netdev_actions + size);
    memcpy(netdev_actions->actions, actions, size);
    netdev_actions->size = size;

    return netdev_actions;
}

struct dp_netdev_actions *
dp_netdev_flow_get_actions(const struct dp_netdev_flow *flow)
{
    return ovsrcu_get(struct dp_netdev_actions *, &flow->actions);
}

static void
dp_netdev_actions_free(struct dp_netdev_actions *actions)
{
    free(actions);
}

static inline unsigned long long
cycles_counter(void)
{
#ifdef DPDK_NETDEV
    return rte_get_tsc_cycles();
#else
    return 0;
#endif
}

/* Fake mutex to make sure that the calls to cycles_count_* are balanced */
extern struct ovs_mutex cycles_counter_fake_mutex;

/* Start counting cycles.  Must be followed by 'cycles_count_end()' */
static inline void
cycles_count_start(struct dp_netdev_pmd_thread *pmd)
    OVS_ACQUIRES(&cycles_counter_fake_mutex)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    pmd->last_cycles = cycles_counter();
}

/* Stop counting cycles and add them to the counter 'type' */
static inline void
cycles_count_end(struct dp_netdev_pmd_thread *pmd,
                 enum pmd_cycles_counter_type type)
    OVS_RELEASES(&cycles_counter_fake_mutex)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    unsigned long long interval = cycles_counter() - pmd->last_cycles;

    non_atomic_ullong_add(&pmd->cycles.n[type], interval);
}

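/* Polls one rx queue and feeds any received batch into the datapath.  Time
 * spent in netdev_rxq_recv() is accounted as polling cycles and time spent
 * in dp_netdev_input() as processing cycles. */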
static void
dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
                           struct dp_netdev_port *port,
                           struct netdev_rxq *rxq)
{
    struct dp_packet_batch batch;
    int error;

    dp_packet_batch_init(&batch);
    cycles_count_start(pmd);
    error = netdev_rxq_recv(rxq, &batch);
    cycles_count_end(pmd, PMD_CYCLES_POLLING);
    if (!error) {
        *recirc_depth_get() = 0;

        cycles_count_start(pmd);
        dp_netdev_input(pmd, &batch, port->port_no);
        cycles_count_end(pmd, PMD_CYCLES_PROCESSING);
    } else if (error != EAGAIN && error != EOPNOTSUPP) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
                    netdev_get_name(port->netdev), ovs_strerror(error));
    }
}

static int
port_reconfigure(struct dp_netdev_port *port)
{
    struct netdev *netdev = port->netdev;
    int i, err;

    if (!netdev_is_reconf_required(netdev)) {
        return 0;
    }

    /* Closes the existing 'rxq's. */
    for (i = 0; i < port->n_rxq; i++) {
        netdev_rxq_close(port->rxqs[i].rxq);
        port->rxqs[i].rxq = NULL;
    }
    port->n_rxq = 0;

    /* Allows 'netdev' to apply the pending configuration changes. */
    err = netdev_reconfigure(netdev);
    if (err && (err != EOPNOTSUPP)) {
        VLOG_ERR("Failed to set interface %s new configuration",
                 netdev_get_name(netdev));
        return err;
    }
    /* If the netdev_reconfigure() above succeeds, reopens the 'rxq's. */
    port->rxqs = xrealloc(port->rxqs,
                          sizeof *port->rxqs * netdev_n_rxq(netdev));
    /* Realloc 'used' counters for tx queues. */
    free(port->txq_used);
    port->txq_used = xcalloc(netdev_n_txq(netdev), sizeof *port->txq_used);

    for (i = 0; i < netdev_n_rxq(netdev); i++) {
        err = netdev_rxq_open(netdev, &port->rxqs[i].rxq, i);
        if (err) {
            return err;
        }
        port->n_rxq++;
    }

    /* Parse affinity list to apply configuration for new queues. */
    dpif_netdev_port_set_rxq_affinity(port, port->rxq_affinity_list);

    return 0;
}

static void
reconfigure_pmd_threads(struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex)
{
    struct dp_netdev_port *port, *next;
    int n_cores;

    dp->last_reconfigure_seq = seq_read(dp->reconfigure_seq);

    dp_netdev_destroy_all_pmds(dp);

    /* Reconfigures the cpu mask. */
    ovs_numa_set_cpu_mask(dp->pmd_cmask);

    n_cores = ovs_numa_get_n_cores();
    if (n_cores == OVS_CORE_UNSPEC) {
        VLOG_ERR("Cannot get cpu core info");
        return;
    }

    HMAP_FOR_EACH_SAFE (port, next, node, &dp->ports) {
        int err;

        err = port_reconfigure(port);
        if (err) {
            hmap_remove(&dp->ports, &port->node);
            seq_change(dp->port_seq);
            port_destroy(port);
        } else {
            port->dynamic_txqs = netdev_n_txq(port->netdev) < n_cores + 1;
        }
    }
    /* Restores the non-pmd. */
    dp_netdev_set_nonpmd(dp);
    /* Restores all pmd threads. */
    dp_netdev_reset_pmd_threads(dp);
}

/* Returns true if one of the netdevs in 'dp' requires a reconfiguration */
static bool
ports_require_restart(const struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex)
{
    struct dp_netdev_port *port;

    HMAP_FOR_EACH (port, node, &dp->ports) {
        if (netdev_is_reconf_required(port->netdev)) {
            return true;
        }
    }

    return false;
}

/* Returns true if the datapath flows need to be revalidated. */
static bool
dpif_netdev_run(struct dpif *dpif)
{
    struct dp_netdev_port *port;
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_pmd_thread *non_pmd;
    uint64_t new_tnl_seq;

    ovs_mutex_lock(&dp->port_mutex);
    non_pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID);
    if (non_pmd) {
        ovs_mutex_lock(&dp->non_pmd_mutex);
        HMAP_FOR_EACH (port, node, &dp->ports) {
            if (!netdev_is_pmd(port->netdev)) {
                int i;

                for (i = 0; i < port->n_rxq; i++) {
                    dp_netdev_process_rxq_port(non_pmd, port,
                                               port->rxqs[i].rxq);
                }
            }
        }
        dpif_netdev_xps_revalidate_pmd(non_pmd, time_msec(), false);
        ovs_mutex_unlock(&dp->non_pmd_mutex);

        dp_netdev_pmd_unref(non_pmd);
    }

    if (dp_netdev_is_reconf_required(dp) || ports_require_restart(dp)) {
        reconfigure_pmd_threads(dp);
    }
    ovs_mutex_unlock(&dp->port_mutex);

    tnl_neigh_cache_run();
    tnl_port_map_run();
    new_tnl_seq = seq_read(tnl_conf_seq);

    if (dp->last_tnl_conf_seq != new_tnl_seq) {
        dp->last_tnl_conf_seq = new_tnl_seq;
        return true;
    }

    return false;
}

static void
dpif_netdev_wait(struct dpif *dpif)
{
    struct dp_netdev_port *port;
    struct dp_netdev *dp = get_dp_netdev(dpif);

    ovs_mutex_lock(&dp_netdev_mutex);
    ovs_mutex_lock(&dp->port_mutex);
    HMAP_FOR_EACH (port, node, &dp->ports) {
        netdev_wait_reconf_required(port->netdev);
        if (!netdev_is_pmd(port->netdev)) {
            int i;

            for (i = 0; i < port->n_rxq; i++) {
                netdev_rxq_wait(port->rxqs[i].rxq);
            }
        }
    }
    ovs_mutex_unlock(&dp->port_mutex);
    ovs_mutex_unlock(&dp_netdev_mutex);
    seq_wait(tnl_conf_seq, dp->last_tnl_conf_seq);
}

static void
pmd_free_cached_ports(struct dp_netdev_pmd_thread *pmd)
{
    struct tx_port *tx_port_cached;

    /* Free all used tx queue ids. */
    dpif_netdev_xps_revalidate_pmd(pmd, 0, true);

    HMAP_FOR_EACH_POP (tx_port_cached, node, &pmd->tnl_port_cache) {
        free(tx_port_cached);
    }
    HMAP_FOR_EACH_POP (tx_port_cached, node, &pmd->send_port_cache) {
        free(tx_port_cached);
    }
}

/* Copies ports from 'pmd->tx_ports' (shared with the main thread) to the
 * thread-local caches 'pmd->tnl_port_cache' and 'pmd->send_port_cache'. */
static void
pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd)
    OVS_REQUIRES(pmd->port_mutex)
{
    struct tx_port *tx_port, *tx_port_cached;

    pmd_free_cached_ports(pmd);
    hmap_shrink(&pmd->send_port_cache);
    hmap_shrink(&pmd->tnl_port_cache);

    HMAP_FOR_EACH (tx_port, node, &pmd->tx_ports) {
        if (netdev_has_tunnel_push_pop(tx_port->port->netdev)) {
            tx_port_cached = xmemdup(tx_port, sizeof *tx_port_cached);
            hmap_insert(&pmd->tnl_port_cache, &tx_port_cached->node,
                        hash_port_no(tx_port_cached->port->port_no));
        }

        if (netdev_n_txq(tx_port->port->netdev)) {
            tx_port_cached = xmemdup(tx_port, sizeof *tx_port_cached);
            hmap_insert(&pmd->send_port_cache, &tx_port_cached->node,
                        hash_port_no(tx_port_cached->port->port_no));
        }
    }
}

static int
pmd_load_queues_and_ports(struct dp_netdev_pmd_thread *pmd,
                          struct rxq_poll **ppoll_list)
{
    struct rxq_poll *poll_list = *ppoll_list;
    struct rxq_poll *poll;
    int i = 0;

    ovs_mutex_lock(&pmd->port_mutex);
    poll_list = xrealloc(poll_list, pmd->poll_cnt * sizeof *poll_list);

    LIST_FOR_EACH (poll, node, &pmd->poll_list) {
        poll_list[i++] = *poll;
    }

    pmd_load_cached_ports(pmd);

    ovs_mutex_unlock(&pmd->port_mutex);

    *ppoll_list = poll_list;
    return i;
}

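/* Main loop of a pmd thread: polls the assigned rx queues until a reload is
 * requested.  Roughly every 1024 iterations (tracked by 'lc' below) it does
 * slower maintenance work: clearing coverage counters, trying to quiesce for
 * RCU, and sweeping the exact match cache.  On reload it re-fetches its
 * queue and port lists; on exit it frees them. */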
static void *
pmd_thread_main(void *f_)
{
    struct dp_netdev_pmd_thread *pmd = f_;
    unsigned int lc = 0;
    struct rxq_poll *poll_list;
    bool exiting;
    int poll_cnt;
    int i;

    poll_list = NULL;

    /* Stores the pmd thread's 'pmd' to 'per_pmd_key'. */
    ovsthread_setspecific(pmd->dp->per_pmd_key, pmd);
    ovs_numa_thread_setaffinity_core(pmd->core_id);
    dpdk_set_lcore_id(pmd->core_id);
    poll_cnt = pmd_load_queues_and_ports(pmd, &poll_list);
reload:
    emc_cache_init(&pmd->flow_cache);

    /* List port/core affinity */
    for (i = 0; i < poll_cnt; i++) {
       VLOG_DBG("Core %d processing port \'%s\' with queue-id %d\n",
                pmd->core_id, netdev_get_name(poll_list[i].port->netdev),
                netdev_rxq_get_queue_id(poll_list[i].rx));
    }

    if (!poll_cnt) {
        while (seq_read(pmd->reload_seq) == pmd->last_reload_seq) {
            seq_wait(pmd->reload_seq, pmd->last_reload_seq);
            poll_block();
        }
        lc = UINT_MAX;
    }

    for (;;) {
        for (i = 0; i < poll_cnt; i++) {
            dp_netdev_process_rxq_port(pmd, poll_list[i].port, poll_list[i].rx);
        }

        if (lc++ > 1024) {
            bool reload;

            lc = 0;

            coverage_try_clear();
            dp_netdev_pmd_try_optimize(pmd);
            if (!ovsrcu_try_quiesce()) {
                emc_cache_slow_sweep(&pmd->flow_cache);
            }

            atomic_read_relaxed(&pmd->reload, &reload);
            if (reload) {
                break;
            }
        }
    }

    poll_cnt = pmd_load_queues_and_ports(pmd, &poll_list);
    exiting = latch_is_set(&pmd->exit_latch);
    /* Signal here to make sure the pmd finishes
     * reloading the updated configuration. */
    dp_netdev_pmd_reload_done(pmd);

    emc_cache_uninit(&pmd->flow_cache);

    if (!exiting) {
        goto reload;
    }

    pmd_free_cached_ports(pmd);
    free(poll_list);
    return NULL;
}

static void
dp_netdev_disable_upcall(struct dp_netdev *dp)
    OVS_ACQUIRES(dp->upcall_rwlock)
{
    fat_rwlock_wrlock(&dp->upcall_rwlock);
}

static void
dpif_netdev_disable_upcall(struct dpif *dpif)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp_netdev_disable_upcall(dp);
}

static void
dp_netdev_enable_upcall(struct dp_netdev *dp)
    OVS_RELEASES(dp->upcall_rwlock)
{
    fat_rwlock_unlock(&dp->upcall_rwlock);
}

static void
dpif_netdev_enable_upcall(struct dpif *dpif)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp_netdev_enable_upcall(dp);
}

static void
dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd)
{
    ovs_mutex_lock(&pmd->cond_mutex);
    atomic_store_relaxed(&pmd->reload, false);
    pmd->last_reload_seq = seq_read(pmd->reload_seq);
    xpthread_cond_signal(&pmd->cond);
    ovs_mutex_unlock(&pmd->cond_mutex);
}

/* Finds and refs the dp_netdev_pmd_thread on core 'core_id'.  Returns
 * the pointer if it succeeds, otherwise NULL (it can return NULL even if
 * 'core_id' is NON_PMD_CORE_ID).
 *
 * Caller must unref the returned reference.  */
static struct dp_netdev_pmd_thread *
dp_netdev_get_pmd(struct dp_netdev *dp, unsigned core_id)
{
    struct dp_netdev_pmd_thread *pmd;
    const struct cmap_node *pnode;

    pnode = cmap_find(&dp->poll_threads, hash_int(core_id, 0));
    if (!pnode) {
        return NULL;
    }
    pmd = CONTAINER_OF(pnode, struct dp_netdev_pmd_thread, node);

    return dp_netdev_pmd_try_ref(pmd) ? pmd : NULL;
}

/* Sets the 'struct dp_netdev_pmd_thread' for non-pmd threads. */
static void
dp_netdev_set_nonpmd(struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex)
{
    struct dp_netdev_pmd_thread *non_pmd;
    struct dp_netdev_port *port;

    non_pmd = xzalloc(sizeof *non_pmd);
    dp_netdev_configure_pmd(non_pmd, dp, NON_PMD_CORE_ID, OVS_NUMA_UNSPEC);

    HMAP_FOR_EACH (port, node, &dp->ports) {
        dp_netdev_add_port_tx_to_pmd(non_pmd, port);
    }

    dp_netdev_reload_pmd__(non_pmd);
}

/* Caller must have valid pointer to 'pmd'. */
static bool
dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd)
{
    return ovs_refcount_try_ref_rcu(&pmd->ref_cnt);
}

static void
dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd)
{
    if (pmd && ovs_refcount_unref(&pmd->ref_cnt) == 1) {
        ovsrcu_postpone(dp_netdev_destroy_pmd, pmd);
    }
}

/* Given cmap position 'pos', tries to ref the next node.  If try_ref()
 * fails, keeps checking for the next node until reaching the end of cmap.
 *
 * Caller must unref the returned reference. */
static struct dp_netdev_pmd_thread *
dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos)
{
    struct dp_netdev_pmd_thread *next;

    do {
        struct cmap_node *node;

        node = cmap_next_position(&dp->poll_threads, pos);
        next = node ? CONTAINER_OF(node, struct dp_netdev_pmd_thread, node)
                    : NULL;
    } while (next && !dp_netdev_pmd_try_ref(next));

    return next;
}

/* Configures the 'pmd' based on the input argument. */
static void
dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
                        unsigned core_id, int numa_id)
{
    pmd->dp = dp;
    pmd->core_id = core_id;
    pmd->numa_id = numa_id;
    pmd->poll_cnt = 0;

    atomic_init(&pmd->static_tx_qid,
                (core_id == NON_PMD_CORE_ID)
                ? ovs_numa_get_n_cores()
                : get_n_pmd_threads(dp));

    ovs_refcount_init(&pmd->ref_cnt);
    latch_init(&pmd->exit_latch);
    pmd->reload_seq = seq_create();
    pmd->last_reload_seq = seq_read(pmd->reload_seq);
    atomic_init(&pmd->reload, false);
    xpthread_cond_init(&pmd->cond, NULL);
    ovs_mutex_init(&pmd->cond_mutex);
    ovs_mutex_init(&pmd->flow_mutex);
    ovs_mutex_init(&pmd->port_mutex);
    cmap_init(&pmd->flow_table);
    cmap_init(&pmd->classifiers);
    pmd->next_optimization = time_msec() + DPCLS_OPTIMIZATION_INTERVAL;
    ovs_list_init(&pmd->poll_list);
    hmap_init(&pmd->tx_ports);
    hmap_init(&pmd->tnl_port_cache);
    hmap_init(&pmd->send_port_cache);
    /* init the 'flow_cache' since there is no
     * actual thread created for NON_PMD_CORE_ID. */
    if (core_id == NON_PMD_CORE_ID) {
        emc_cache_init(&pmd->flow_cache);
    }
    cmap_insert(&dp->poll_threads, CONST_CAST(struct cmap_node *, &pmd->node),
                hash_int(core_id, 0));
}

static void
dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd)
{
    struct dpcls *cls;

    dp_netdev_pmd_flow_flush(pmd);
    hmap_destroy(&pmd->send_port_cache);
    hmap_destroy(&pmd->tnl_port_cache);
    hmap_destroy(&pmd->tx_ports);
    /* All flows (including their dpcls_rules) have been deleted already */
    CMAP_FOR_EACH (cls, node, &pmd->classifiers) {
        dpcls_destroy(cls);
        ovsrcu_postpone(free, cls);
    }
    cmap_destroy(&pmd->classifiers);
    cmap_destroy(&pmd->flow_table);
    ovs_mutex_destroy(&pmd->flow_mutex);
    latch_destroy(&pmd->exit_latch);
    seq_destroy(pmd->reload_seq);
    xpthread_cond_destroy(&pmd->cond);
    ovs_mutex_destroy(&pmd->cond_mutex);
    ovs_mutex_destroy(&pmd->port_mutex);
    free(pmd);
}

/* Stops the pmd thread, removes it from the 'dp->poll_threads',
 * and unrefs the struct. */
static void
dp_netdev_del_pmd(struct dp_netdev *dp, struct dp_netdev_pmd_thread *pmd)
{
    /* NON_PMD_CORE_ID doesn't have a thread, so we don't have to synchronize,
     * but extra cleanup is necessary */
    if (pmd->core_id == NON_PMD_CORE_ID) {
        ovs_mutex_lock(&dp->non_pmd_mutex);
        emc_cache_uninit(&pmd->flow_cache);
        pmd_free_cached_ports(pmd);
        ovs_mutex_unlock(&dp->non_pmd_mutex);
    } else {
        latch_set(&pmd->exit_latch);
        dp_netdev_reload_pmd__(pmd);
        ovs_numa_unpin_core(pmd->core_id);
        xpthread_join(pmd->thread, NULL);
    }

    dp_netdev_pmd_clear_ports(pmd);

    /* Purges the 'pmd''s flows after stopping the thread, but before
     * destroying the flows, so that the flow stats can be collected. */
    if (dp->dp_purge_cb) {
        dp->dp_purge_cb(dp->dp_purge_aux, pmd->core_id);
    }
    cmap_remove(&pmd->dp->poll_threads, &pmd->node, hash_int(pmd->core_id, 0));
    dp_netdev_pmd_unref(pmd);
}

/* Destroys all pmd threads. */
static void
dp_netdev_destroy_all_pmds(struct dp_netdev *dp)
{
    struct dp_netdev_pmd_thread *pmd;
    struct dp_netdev_pmd_thread **pmd_list;
    size_t k = 0, n_pmds;

    n_pmds = cmap_count(&dp->poll_threads);
    pmd_list = xcalloc(n_pmds, sizeof *pmd_list);

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        /* We cannot call dp_netdev_del_pmd(), since it alters
         * 'dp->poll_threads' (while we're iterating it) and it
         * might quiesce. */
        ovs_assert(k < n_pmds);
        pmd_list[k++] = pmd;
    }

    for (size_t i = 0; i < k; i++) {
        dp_netdev_del_pmd(dp, pmd_list[i]);
    }
    free(pmd_list);
}

/* Deletes all pmd threads on numa node 'numa_id' and
 * fixes static_tx_qids of other threads to keep them sequential. */
static void
dp_netdev_del_pmds_on_numa(struct dp_netdev *dp, int numa_id)
{
    struct dp_netdev_pmd_thread *pmd;
    int n_pmds_on_numa, n_pmds;
    int *free_idx, k = 0;
    struct dp_netdev_pmd_thread **pmd_list;

    n_pmds_on_numa = get_n_pmd_threads_on_numa(dp, numa_id);
    free_idx = xcalloc(n_pmds_on_numa, sizeof *free_idx);
    pmd_list = xcalloc(n_pmds_on_numa, sizeof *pmd_list);

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        /* We cannot call dp_netdev_del_pmd(), since it alters
         * 'dp->poll_threads' (while we're iterating it) and it
         * might quiesce. */
        if (pmd->numa_id == numa_id && pmd->core_id != NON_PMD_CORE_ID) {
            atomic_read_relaxed(&pmd->static_tx_qid, &free_idx[k]);
            pmd_list[k] = pmd;
            ovs_assert(k < n_pmds_on_numa);
            k++;
        }
    }

    for (int i = 0; i < k; i++) {
        dp_netdev_del_pmd(dp, pmd_list[i]);
    }

    n_pmds = get_n_pmd_threads(dp);
    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        int old_tx_qid;

        atomic_read_relaxed(&pmd->static_tx_qid, &old_tx_qid);

        if (old_tx_qid >= n_pmds && pmd->core_id != NON_PMD_CORE_ID) {
            int new_tx_qid = free_idx[--k];

            atomic_store_relaxed(&pmd->static_tx_qid, new_tx_qid);
        }
    }

    free(pmd_list);
    free(free_idx);
}

/* Deletes all rx queues from pmd->poll_list and all the ports from
 * pmd->tx_ports. */
static void
dp_netdev_pmd_clear_ports(struct dp_netdev_pmd_thread *pmd)
{
    struct rxq_poll *poll;
    struct tx_port *port;

    ovs_mutex_lock(&pmd->port_mutex);
    LIST_FOR_EACH_POP (poll, node, &pmd->poll_list) {
        free(poll);
    }
    pmd->poll_cnt = 0;
    HMAP_FOR_EACH_POP (port, node, &pmd->tx_ports) {
        free(port);
    }
    ovs_mutex_unlock(&pmd->port_mutex);
}

static struct tx_port *
tx_port_lookup(const struct hmap *hmap, odp_port_t port_no)
{
    struct tx_port *tx;

    HMAP_FOR_EACH_IN_BUCKET (tx, node, hash_port_no(port_no), hmap) {
        if (tx->port->port_no == port_no) {
            return tx;
        }
    }

    return NULL;
}

/* Deletes all rx queues of 'port' from 'poll_list', and the 'port' from
 * 'tx_ports' of 'pmd' thread.  Returns true if 'port' was found in 'pmd'
 * (therefore a restart is required). */
static bool
dp_netdev_del_port_from_pmd__(struct dp_netdev_port *port,
                              struct dp_netdev_pmd_thread *pmd)
{
    struct rxq_poll *poll, *next;
    struct tx_port *tx;
    bool found = false;

    ovs_mutex_lock(&pmd->port_mutex);
    LIST_FOR_EACH_SAFE (poll, next, node, &pmd->poll_list) {
        if (poll->port == port) {
            found = true;
            ovs_list_remove(&poll->node);
            pmd->poll_cnt--;
            free(poll);
        }
    }

    tx = tx_port_lookup(&pmd->tx_ports, port->port_no);
    if (tx) {
        hmap_remove(&pmd->tx_ports, &tx->node);
        free(tx);
        found = true;
    }
    ovs_mutex_unlock(&pmd->port_mutex);

    return found;
}

/* Deletes 'port' from the 'poll_list' and from the 'tx_ports' of all the pmd
 * threads.  The pmd threads that need to be restarted are inserted in
 * 'to_reload'. */
static void
dp_netdev_del_port_from_all_pmds__(struct dp_netdev *dp,
                                   struct dp_netdev_port *port,
                                   struct hmapx *to_reload)
{
    struct dp_netdev_pmd_thread *pmd;

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        bool found;

        found = dp_netdev_del_port_from_pmd__(port, pmd);

        if (found) {
            hmapx_add(to_reload, pmd);
        }
    }
}

/* Deletes 'port' from the 'poll_list' and from the 'tx_ports' of all the pmd
 * threads.  Reloads the threads if needed. */
static void
dp_netdev_del_port_from_all_pmds(struct dp_netdev *dp,
                                 struct dp_netdev_port *port)
{
    struct dp_netdev_pmd_thread *pmd;
    struct hmapx to_reload = HMAPX_INITIALIZER(&to_reload);
    struct hmapx_node *node;

    dp_netdev_del_port_from_all_pmds__(dp, port, &to_reload);

    HMAPX_FOR_EACH (node, &to_reload) {
        pmd = (struct dp_netdev_pmd_thread *) node->data;
        dp_netdev_reload_pmd__(pmd);
    }

    hmapx_destroy(&to_reload);
}

/* Returns the non-isolated PMD thread on this numa node with the fewest
 * rx queues to poll.  Returns NULL if there are no non-isolated PMD threads
 * on this numa node.  Can be called safely only by the main thread. */
static struct dp_netdev_pmd_thread *
dp_netdev_less_loaded_pmd_on_numa(struct dp_netdev *dp, int numa_id)
{
    int min_cnt = -1;
    struct dp_netdev_pmd_thread *pmd, *res = NULL;

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        if (!pmd->isolated && pmd->numa_id == numa_id
            && (min_cnt > pmd->poll_cnt || res == NULL)) {
            min_cnt = pmd->poll_cnt;
            res = pmd;
        }
    }

    return res;
}

/* Adds rx queue to poll_list of PMD thread. */
static void
dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd,
                         struct dp_netdev_port *port, struct netdev_rxq *rx)
    OVS_REQUIRES(pmd->port_mutex)
{
    struct rxq_poll *poll = xmalloc(sizeof *poll);

    poll->port = port;
    poll->rx = rx;

    ovs_list_push_back(&pmd->poll_list, &poll->node);
    pmd->poll_cnt++;
}

/* Add 'port' to the tx port cache of 'pmd', which must be reloaded for the
 * changes to take effect. */
static void
dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread *pmd,
                             struct dp_netdev_port *port)
{
    struct tx_port *tx;

    tx = xzalloc(sizeof *tx);

    tx->port = port;
    tx->qid = -1;

    ovs_mutex_lock(&pmd->port_mutex);
    hmap_insert(&pmd->tx_ports, &tx->node, hash_port_no(tx->port->port_no));
    ovs_mutex_unlock(&pmd->port_mutex);
}

/* Distributes all {pinned|non-pinned} rx queues of 'port' between the PMD
 * threads in 'dp'.  The pmd threads that need to be restarted are inserted
 * in 'to_reload'.  PMD threads with pinned queues are marked as isolated. */
static void
dp_netdev_add_port_rx_to_pmds(struct dp_netdev *dp,
                              struct dp_netdev_port *port,
                              struct hmapx *to_reload, bool pinned)
{
    int numa_id = netdev_get_numa_id(port->netdev);
    struct dp_netdev_pmd_thread *pmd;
    int i;

    if (!netdev_is_pmd(port->netdev)) {
        return;
    }

    for (i = 0; i < port->n_rxq; i++) {
        if (pinned) {
            if (port->rxqs[i].core_id == OVS_CORE_UNSPEC) {
                continue;
            }
            pmd = dp_netdev_get_pmd(dp, port->rxqs[i].core_id);
            if (!pmd) {
                VLOG_WARN("There is no PMD thread on core %d. "
                          "Queue %d on port \'%s\' will not be polled.",
                          port->rxqs[i].core_id, i,
                          netdev_get_name(port->netdev));
                continue;
            }
            pmd->isolated = true;
            dp_netdev_pmd_unref(pmd);
        } else {
            if (port->rxqs[i].core_id != OVS_CORE_UNSPEC) {
                continue;
            }
            pmd = dp_netdev_less_loaded_pmd_on_numa(dp, numa_id);
            if (!pmd) {
                VLOG_WARN("There's no available pmd thread on numa node %d",
                          numa_id);
                break;
            }
        }

        ovs_mutex_lock(&pmd->port_mutex);
        dp_netdev_add_rxq_to_pmd(pmd, port, port->rxqs[i].rxq);
        ovs_mutex_unlock(&pmd->port_mutex);

        hmapx_add(to_reload, pmd);
    }
}

/* Distributes all non-pinned rx queues of 'port' between all PMD threads
 * in 'dp' and inserts 'port' in the PMD threads 'tx_ports'.  The pmd threads
 * that need to be restarted are inserted in 'to_reload'. */
static void
dp_netdev_add_port_to_pmds__(struct dp_netdev *dp, struct dp_netdev_port *port,
                             struct hmapx *to_reload)
{
    struct dp_netdev_pmd_thread *pmd;

    dp_netdev_add_port_rx_to_pmds(dp, port, to_reload, false);

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        dp_netdev_add_port_tx_to_pmd(pmd, port);
        hmapx_add(to_reload, pmd);
    }
}

/* Distributes all non-pinned rx queues of 'port' between all PMD threads
 * in 'dp', inserts 'port' in the PMD threads 'tx_ports' and reloads them,
 * if needed. */
static void
dp_netdev_add_port_to_pmds(struct dp_netdev *dp, struct dp_netdev_port *port)
{
    struct dp_netdev_pmd_thread *pmd;
    struct hmapx to_reload = HMAPX_INITIALIZER(&to_reload);
    struct hmapx_node *node;

    dp_netdev_add_port_to_pmds__(dp, port, &to_reload);

    HMAPX_FOR_EACH (node, &to_reload) {
        pmd = (struct dp_netdev_pmd_thread *) node->data;
        dp_netdev_reload_pmd__(pmd);
    }

    hmapx_destroy(&to_reload);
}

/* Starts pmd threads for the numa node 'numa_id', if not already started.
 * The function takes care of filling the threads tx port cache. */
static void
dp_netdev_set_pmds_on_numa(struct dp_netdev *dp, int numa_id)
    OVS_REQUIRES(dp->port_mutex)
{
    int n_pmds;

    if (!ovs_numa_numa_id_is_valid(numa_id)) {
        VLOG_WARN("Cannot create pmd threads due to numa id (%d) invalid",
                  numa_id);
        return;
    }

    n_pmds = get_n_pmd_threads_on_numa(dp, numa_id);

    /* If there are already pmd threads created for the numa node
     * in which 'netdev' is on, do nothing.  Else, creates the
     * pmd threads for the numa node. */
    if (!n_pmds) {
        int can_have, n_unpinned, i;

        n_unpinned = ovs_numa_get_n_unpinned_cores_on_numa(numa_id);
        if (!n_unpinned) {
            VLOG_WARN("Cannot create pmd threads due to out of unpinned "
                      "cores on numa node %d", numa_id);
            return;
        }

        /* If cpu mask is specified, uses all unpinned cores, otherwise
         * tries creating NR_PMD_THREADS pmd threads. */
        can_have = dp->pmd_cmask ? n_unpinned : MIN(n_unpinned, NR_PMD_THREADS);
        for (i = 0; i < can_have; i++) {
            unsigned core_id = ovs_numa_get_unpinned_core_on_numa(numa_id);
            struct dp_netdev_pmd_thread *pmd = xzalloc(sizeof *pmd);
            struct dp_netdev_port *port;

            dp_netdev_configure_pmd(pmd, dp, core_id, numa_id);

            HMAP_FOR_EACH (port, node, &dp->ports) {
                dp_netdev_add_port_tx_to_pmd(pmd, port);
            }

            pmd->thread = ovs_thread_create("pmd", pmd_thread_main, pmd);
        }
        VLOG_INFO("Created %d pmd threads on numa node %d", can_have, numa_id);
    }
}

/* Called after pmd threads config change.  Restarts pmd threads with
 * new configuration. */
static void
dp_netdev_reset_pmd_threads(struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex)
{
    struct hmapx to_reload = HMAPX_INITIALIZER(&to_reload);
    struct dp_netdev_pmd_thread *pmd;
    struct dp_netdev_port *port;
    struct hmapx_node *node;

    HMAP_FOR_EACH (port, node, &dp->ports) {
        if (netdev_is_pmd(port->netdev)) {
            struct hmapx numas = HMAPX_INITIALIZER(&numas);
            struct hmapx_node *numa_node;
            uintptr_t numa_id;
            int i;

            numa_id = netdev_get_numa_id(port->netdev);
            hmapx_add(&numas, (void *) numa_id);
            for (i = 0; i < port->n_rxq; i++) {
                unsigned core_id = port->rxqs[i].core_id;

                if (core_id != OVS_CORE_UNSPEC) {
                    numa_id = ovs_numa_get_numa_id(core_id);
                    hmapx_add(&numas, (void *) numa_id);
                }
            }
            HMAPX_FOR_EACH (numa_node, &numas) {
                dp_netdev_set_pmds_on_numa(dp, (uintptr_t) numa_node->data);
            }
            hmapx_destroy(&numas);
        }
        /* Distribute only pinned rx queues first to mark threads as isolated */
        dp_netdev_add_port_rx_to_pmds(dp, port, &to_reload, true);
    }

    /* Distribute remaining non-pinned rx queues to non-isolated PMD threads. */
    HMAP_FOR_EACH (port, node, &dp->ports) {
        dp_netdev_add_port_rx_to_pmds(dp, port, &to_reload, false);
    }

    HMAPX_FOR_EACH (node, &to_reload) {
        pmd = (struct dp_netdev_pmd_thread *) node->data;
        dp_netdev_reload_pmd__(pmd);
    }

    hmapx_destroy(&to_reload);
}

static char *
dpif_netdev_get_datapath_version(void)
{
    return xstrdup("<built-in>");
}

static void
dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow, int cnt, int size,
                    uint16_t tcp_flags, long long now)
{
    uint16_t flags;

    atomic_store_relaxed(&netdev_flow->stats.used, now);
    non_atomic_ullong_add(&netdev_flow->stats.packet_count, cnt);
    non_atomic_ullong_add(&netdev_flow->stats.byte_count, size);
    atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags);
    flags |= tcp_flags;
    atomic_store_relaxed(&netdev_flow->stats.tcp_flags, flags);
}

static void
dp_netdev_count_packet(struct dp_netdev_pmd_thread *pmd,
                       enum dp_stat_type type, int cnt)
{
    non_atomic_ullong_add(&pmd->stats.n[type], cnt);
}

static int
dp_netdev_upcall(struct dp_netdev_pmd_thread *pmd, struct dp_packet *packet_,
                 struct flow *flow, struct flow_wildcards *wc, ovs_u128 *ufid,
                 enum dpif_upcall_type type, const struct nlattr *userdata,
                 struct ofpbuf *actions, struct ofpbuf *put_actions)
{
    struct dp_netdev *dp = pmd->dp;

    if (OVS_UNLIKELY(!dp->upcall_cb)) {
        return ENODEV;
    }

    if (OVS_UNLIKELY(!VLOG_DROP_DBG(&upcall_rl))) {
        struct ds ds = DS_EMPTY_INITIALIZER;
        struct ofpbuf key;
        char *packet_str;
        struct odp_flow_key_parms odp_parms = {
            .flow = flow,
            .mask = wc ? &wc->masks : NULL,
            .support = dp_netdev_support,
        };

        ofpbuf_init(&key, 0);
        odp_flow_key_from_flow(&odp_parms, &key);
        packet_str = ofp_packet_to_string(dp_packet_data(packet_),
                                          dp_packet_size(packet_));

        odp_flow_key_format(key.data, key.size, &ds);

        VLOG_DBG("%s: %s upcall:\n%s\n%s", dp->name,
                 dpif_upcall_type_to_string(type), ds_cstr(&ds), packet_str);

        ofpbuf_uninit(&key);
        free(packet_str);

        ds_destroy(&ds);
    }

    return dp->upcall_cb(packet_, flow, ufid, pmd->core_id, type, userdata,
                         actions, wc, put_actions, dp->upcall_aux);
}

static inline uint32_t
dpif_netdev_packet_get_rss_hash(struct dp_packet *packet,
                                const struct miniflow *mf)
{
    uint32_t hash, recirc_depth;

    if (OVS_LIKELY(dp_packet_rss_valid(packet))) {
        hash = dp_packet_get_rss_hash(packet);
    } else {
        hash = miniflow_hash_5tuple(mf, 0);
        dp_packet_set_rss_hash(packet, hash);
    }

    /* The RSS hash must account for the recirculation depth to avoid
     * collisions in the exact match cache */
    recirc_depth = *recirc_depth_get_unsafe();
    if (OVS_UNLIKELY(recirc_depth)) {
        hash = hash_finish(hash, recirc_depth);
        dp_packet_set_rss_hash(packet, hash);
    }
    return hash;
}

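/* Output batching: packets that hit the same flow within a single input
 * batch are queued together so that the flow's actions are executed once per
 * flow rather than once per packet, amortizing the per-action overhead. */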
struct packet_batch_per_flow {
    unsigned int byte_count;
    uint16_t tcp_flags;
    struct dp_netdev_flow *flow;

    struct dp_packet_batch array;
};

static inline void
packet_batch_per_flow_update(struct packet_batch_per_flow *batch,
                             struct dp_packet *packet,
                             const struct miniflow *mf)
{
    batch->byte_count += dp_packet_size(packet);
    batch->tcp_flags |= miniflow_get_tcp_flags(mf);
    batch->array.packets[batch->array.count++] = packet;
}

static inline void
packet_batch_per_flow_init(struct packet_batch_per_flow *batch,
                           struct dp_netdev_flow *flow)
{
    flow->batch = batch;

    batch->flow = flow;
    dp_packet_batch_init(&batch->array);
    batch->byte_count = 0;
    batch->tcp_flags = 0;
}

static inline void
packet_batch_per_flow_execute(struct packet_batch_per_flow *batch,
                              struct dp_netdev_pmd_thread *pmd,
                              long long now)
{
    struct dp_netdev_actions *actions;
    struct dp_netdev_flow *flow = batch->flow;

    dp_netdev_flow_used(flow, batch->array.count, batch->byte_count,
                        batch->tcp_flags, now);

    actions = dp_netdev_flow_get_actions(flow);

    dp_netdev_execute_actions(pmd, &batch->array, true, &flow->flow,
                              actions->actions, actions->size, now);
}

static inline void
dp_netdev_queue_batches(struct dp_packet *pkt,
                        struct dp_netdev_flow *flow, const struct miniflow *mf,
                        struct packet_batch_per_flow *batches, size_t *n_batches)
{
    struct packet_batch_per_flow *batch = flow->batch;

    if (OVS_UNLIKELY(!batch)) {
        batch = &batches[(*n_batches)++];
        packet_batch_per_flow_init(batch, flow);
    }

    packet_batch_per_flow_update(batch, pkt, mf);
}

/* Try to process all ('cnt') the 'packets' using only the exact match cache
 * 'pmd->flow_cache'. If a flow is not found for a packet 'packets[i]', the
 * miniflow is copied into 'keys' and the packet pointer is moved at the
 * beginning of the 'packets' array.
 *
 * The function returns the number of packets that need to be processed in
 * the 'packets' array (they have been moved to the beginning of the vector).
 *
 * If 'md_is_valid' is false, the metadata in 'packets' is not valid and must
 * be initialized by this function using 'port_no'.
 */
static inline size_t
emc_processing(struct dp_netdev_pmd_thread *pmd, struct dp_packet_batch *packets_,
               struct netdev_flow_key *keys,
               struct packet_batch_per_flow batches[], size_t *n_batches,
               bool md_is_valid, odp_port_t port_no)
{
    struct emc_cache *flow_cache = &pmd->flow_cache;
    struct netdev_flow_key *key = &keys[0];
    size_t i, n_missed = 0, n_dropped = 0;
    struct dp_packet **packets = packets_->packets;
    int cnt = packets_->count;

    for (i = 0; i < cnt; i++) {
        struct dp_netdev_flow *flow;
        struct dp_packet *packet = packets[i];

        if (OVS_UNLIKELY(dp_packet_size(packet) < ETH_HEADER_LEN)) {
            dp_packet_delete(packet);
            n_dropped++;
            continue;
        }

        if (i != cnt - 1) {
            /* Prefetch next packet data and metadata. */
            OVS_PREFETCH(dp_packet_data(packets[i+1]));
            pkt_metadata_prefetch_init(&packets[i+1]->md);
        }

        if (!md_is_valid) {
            pkt_metadata_init(&packet->md, port_no);
        }
        miniflow_extract(packet, &key->mf);
        key->len = 0; /* Not computed yet. */
        key->hash = dpif_netdev_packet_get_rss_hash(packet, &key->mf);

        flow = emc_lookup(flow_cache, key);
        if (OVS_LIKELY(flow)) {
            dp_netdev_queue_batches(packet, flow, &key->mf, batches,
                                    n_batches);
        } else {
            /* Exact match cache missed. Group missed packets together at
             * the beginning of the 'packets' array. */
            packets[n_missed] = packet;
            /* 'key[n_missed]' contains the key of the current packet and it
             * must be returned to the caller. The next key should be extracted
             * to 'keys[n_missed + 1]'. */
            key = &keys[++n_missed];
        }
    }

    dp_netdev_count_packet(pmd, DP_STAT_EXACT_HIT, cnt - n_dropped - n_missed);

    return n_missed;
}

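/* Slow path for a single packet that missed in both the exact match cache
 * and the dpcls: performs a miss upcall, executes the returned actions, and
 * installs the resulting flow unless ofproto signalled, via ENOSPC, that the
 * flow should not be installed. */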
static inline void
handle_packet_upcall(struct dp_netdev_pmd_thread *pmd, struct dp_packet *packet,
                     const struct netdev_flow_key *key,
                     struct ofpbuf *actions, struct ofpbuf *put_actions,
                     int *lost_cnt, long long now)
{
    struct ofpbuf *add_actions;
    struct dp_packet_batch b;
    struct match match;
    ovs_u128 ufid;
    int error;

    match.tun_md.valid = false;
    miniflow_expand(&key->mf, &match.flow);

    ofpbuf_clear(actions);
    ofpbuf_clear(put_actions);

    dpif_flow_hash(pmd->dp->dpif, &match.flow, sizeof match.flow, &ufid);
    error = dp_netdev_upcall(pmd, packet, &match.flow, &match.wc,
                             &ufid, DPIF_UC_MISS, NULL, actions,
                             put_actions);
    if (OVS_UNLIKELY(error && error != ENOSPC)) {
        dp_packet_delete(packet);
        (*lost_cnt)++;
        return;
    }

    /* The Netlink encoding of datapath flow keys cannot express
     * wildcarding the presence of a VLAN tag. Instead, a missing VLAN
     * tag is interpreted as exact match on the fact that there is no
     * VLAN. Unless we refactor a lot of code that translates between
     * Netlink and struct flow representations, we have to do the same
     * here. */
    if (!match.wc.masks.vlan_tci) {
        match.wc.masks.vlan_tci = htons(0xffff);
    }

    /* We can't allow the packet batching in the next loop to execute
     * the actions. Otherwise, if there are any slow path actions,
     * we'll send the packet up twice. */
    packet_batch_init_packet(&b, packet);
    dp_netdev_execute_actions(pmd, &b, true, &match.flow,
                              actions->data, actions->size, now);

    add_actions = put_actions->size ? put_actions : actions;
    if (OVS_LIKELY(error != ENOSPC)) {
        struct dp_netdev_flow *netdev_flow;

        /* XXX: There's a race window where a flow covering this packet
         * could have already been installed since we last did the flow
         * lookup before upcall. This could be solved by moving the
         * mutex lock outside the loop, but that's an awful long time
         * to be locking everyone out of making flow installs. If we
         * move to a per-core classifier, it would be reasonable. */
        ovs_mutex_lock(&pmd->flow_mutex);
        netdev_flow = dp_netdev_pmd_lookup_flow(pmd, key, NULL);
        if (OVS_LIKELY(!netdev_flow)) {
            netdev_flow = dp_netdev_flow_add(pmd, &match, &ufid,
                                             add_actions->data,
                                             add_actions->size);
        }
        ovs_mutex_unlock(&pmd->flow_mutex);

        emc_insert(&pmd->flow_cache, key, netdev_flow);
    }
}

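/* Second classification stage: packets that missed the exact match cache are
 * looked up in the in_port's dpcls (the megaflow classifier); remaining
 * misses are handed to the upcall slow path, and all matched packets are
 * batched per flow for action execution. */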
static inline void
fast_path_processing(struct dp_netdev_pmd_thread *pmd,
                     struct dp_packet_batch *packets_,
                     struct netdev_flow_key *keys,
                     struct packet_batch_per_flow batches[], size_t *n_batches,
                     odp_port_t in_port,
                     long long now)
{
    int cnt = packets_->count;
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = cnt;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct dp_packet **packets = packets_->packets;
    struct dpcls *cls;
    struct dpcls_rule *rules[PKT_ARRAY_SIZE];
    struct dp_netdev *dp = pmd->dp;
    struct emc_cache *flow_cache = &pmd->flow_cache;
    int miss_cnt = 0, lost_cnt = 0;
    int lookup_cnt = 0, add_lookup_cnt;
    bool any_miss;
    size_t i;

    for (i = 0; i < cnt; i++) {
        /* Key length is needed in all the cases, hash computed on demand. */
        keys[i].len = netdev_flow_key_size(miniflow_n_values(&keys[i].mf));
    }
    /* Get the classifier for the in_port */
    cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port);
    if (OVS_LIKELY(cls)) {
        any_miss = !dpcls_lookup(cls, keys, rules, cnt, &lookup_cnt);
    } else {
        any_miss = true;
        memset(rules, 0, sizeof(rules));
    }
    if (OVS_UNLIKELY(any_miss) && !fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
        uint64_t actions_stub[512 / 8], slow_stub[512 / 8];
        struct ofpbuf actions, put_actions;

        ofpbuf_use_stub(&actions, actions_stub, sizeof actions_stub);
        ofpbuf_use_stub(&put_actions, slow_stub, sizeof slow_stub);

        for (i = 0; i < cnt; i++) {
            struct dp_netdev_flow *netdev_flow;

            if (OVS_LIKELY(rules[i])) {
                continue;
            }

            /* It's possible that an earlier slow path execution installed
             * a rule covering this flow. In this case, it's a lot cheaper
             * to catch it here than execute a miss. */
            netdev_flow = dp_netdev_pmd_lookup_flow(pmd, &keys[i],
                                                    &add_lookup_cnt);
            if (netdev_flow) {
                lookup_cnt += add_lookup_cnt;
                rules[i] = &netdev_flow->cr;
                continue;
            }

            miss_cnt++;
            handle_packet_upcall(pmd, packets[i], &keys[i], &actions,
                                 &put_actions, &lost_cnt, now);
        }

        ofpbuf_uninit(&actions);
        ofpbuf_uninit(&put_actions);
        fat_rwlock_unlock(&dp->upcall_rwlock);
        dp_netdev_count_packet(pmd, DP_STAT_LOST, lost_cnt);
    } else if (OVS_UNLIKELY(any_miss)) {
        for (i = 0; i < cnt; i++) {
            if (OVS_UNLIKELY(!rules[i])) {
                dp_packet_delete(packets[i]);
                lost_cnt++;
                miss_cnt++;
            }
        }
    }

    for (i = 0; i < cnt; i++) {
        struct dp_packet *packet = packets[i];
        struct dp_netdev_flow *flow;

        if (OVS_UNLIKELY(!rules[i])) {
            continue;
        }

        flow = dp_netdev_flow_cast(rules[i]);

        emc_insert(flow_cache, &keys[i], flow);
        dp_netdev_queue_batches(packet, flow, &keys[i].mf, batches, n_batches);
    }

    dp_netdev_count_packet(pmd, DP_STAT_MASKED_HIT, cnt - miss_cnt);
    dp_netdev_count_packet(pmd, DP_STAT_LOOKUP_HIT, lookup_cnt);
    dp_netdev_count_packet(pmd, DP_STAT_MISS, miss_cnt);
    dp_netdev_count_packet(pmd, DP_STAT_LOST, lost_cnt);
}

/* Packets enter the datapath from a port (or from recirculation) here.
 *
 * For performance reasons a caller may choose not to initialize the metadata
 * in 'packets': in this case 'md_is_valid' is false and this function needs
 * to initialize it using 'port_no'.  If the metadata in 'packets' is already
 * valid, 'md_is_valid' must be true and 'port_no' will be ignored. */
static void
dp_netdev_input__(struct dp_netdev_pmd_thread *pmd,
                  struct dp_packet_batch *packets,
                  bool md_is_valid, odp_port_t port_no)
{
    int cnt = packets->count;
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = cnt;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    OVS_ALIGNED_VAR(CACHE_LINE_SIZE)
        struct netdev_flow_key keys[PKT_ARRAY_SIZE];
    struct packet_batch_per_flow batches[PKT_ARRAY_SIZE];
    long long now = time_msec();
    size_t newcnt, n_batches, i;
    odp_port_t in_port;

    n_batches = 0;
    newcnt = emc_processing(pmd, packets, keys, batches, &n_batches,
                            md_is_valid, port_no);
    if (OVS_UNLIKELY(newcnt)) {
        packets->count = newcnt;
        /* Get ingress port from first packet's metadata. */
        in_port = packets->packets[0]->md.in_port.odp_port;
        fast_path_processing(pmd, packets, keys, batches, &n_batches,
                             in_port, now);
    }

    /* All the flow batches need to be reset before any call to
     * packet_batch_per_flow_execute() as it could potentially trigger
     * recirculation.  When a packet matching flow 'j' happens to be
     * recirculated, the nested call to dp_netdev_input__() could potentially
     * classify the packet as matching another flow - say 'k'.  It could
     * happen that in a previous call to dp_netdev_input__() that same flow
     * 'k' already had its own batches[k] entry still waiting to be served.
     * So if its 'batch' member is not reset, the recirculated packet would
     * be wrongly appended to batches[k] of the first call to
     * dp_netdev_input__(). */
    for (i = 0; i < n_batches; i++) {
        batches[i].flow->batch = NULL;
    }

    for (i = 0; i < n_batches; i++) {
        packet_batch_per_flow_execute(&batches[i], pmd, now);
    }
}

static void
dp_netdev_input(struct dp_netdev_pmd_thread *pmd,
                struct dp_packet_batch *packets,
                odp_port_t port_no)
{
    dp_netdev_input__(pmd, packets, false, port_no);
}

static void
dp_netdev_recirculate(struct dp_netdev_pmd_thread *pmd,
                      struct dp_packet_batch *packets)
{
    dp_netdev_input__(pmd, packets, true, 0);
}
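/* Usage sketch (editorial; modeled on dp_netdev_process_rxq_port() elsewhere
 * in this file): a pmd thread feeds a freshly received burst into the
 * datapath without touching the metadata, so dp_netdev_input() initializes
 * it from 'port_no'.  'rx' is a hypothetical rx queue handle:
 *
 *     struct dp_packet_batch batch;
 *
 *     dp_packet_batch_init(&batch);
 *     if (!netdev_rxq_recv(rx, &batch)) {
 *         *recirc_depth_get() = 0;
 *         dp_netdev_input(pmd, &batch, port_no);
 *     }
 */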
struct dp_netdev_execute_aux {
    struct dp_netdev_pmd_thread *pmd;
    long long now;
    const struct flow *flow;
};

static void
dpif_netdev_register_dp_purge_cb(struct dpif *dpif, dp_purge_callback *cb,
                                 void *aux)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp->dp_purge_aux = aux;
    dp->dp_purge_cb = cb;
}

static void
dpif_netdev_register_upcall_cb(struct dpif *dpif, upcall_callback *cb,
                               void *aux)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp->upcall_aux = aux;
    dp->upcall_cb = cb;
}
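/* Minimal usage sketch (editorial; 'my_upcall_cb' and 'my_aux' are
 * hypothetical names, the real registration lives in ofproto-dpif-upcall.c):
 *
 *     dpif_register_upcall_cb(dpif, my_upcall_cb, my_aux);
 *
 * After this, every flow miss taken while 'upcall_rwlock' is available is
 * delivered to 'my_upcall_cb' with 'my_aux' as its final argument. */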
static void
dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
                               long long now, bool purge)
{
    struct tx_port *tx;
    struct dp_netdev_port *port;
    long long interval;

    HMAP_FOR_EACH (tx, node, &pmd->send_port_cache) {
        if (!tx->port->dynamic_txqs) {
            continue;
        }
        interval = now - tx->last_used;
        if (tx->qid >= 0 && (purge || interval >= XPS_TIMEOUT_MS)) {
            port = tx->port;
            ovs_mutex_lock(&port->txq_used_mutex);
            port->txq_used[tx->qid]--;
            ovs_mutex_unlock(&port->txq_used_mutex);
            tx->qid = -1;
        }
    }
}

static int
dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread *pmd,
                           struct tx_port *tx, long long now)
{
    struct dp_netdev_port *port;
    long long interval;
    int i, min_cnt, min_qid;

    if (OVS_UNLIKELY(!now)) {
        now = time_msec();
    }

    interval = now - tx->last_used;
    tx->last_used = now;

    if (OVS_LIKELY(tx->qid >= 0 && interval < XPS_TIMEOUT_MS)) {
        return tx->qid;
    }

    port = tx->port;

    ovs_mutex_lock(&port->txq_used_mutex);
    if (tx->qid >= 0) {
        port->txq_used[tx->qid]--;
        tx->qid = -1;
    }

    min_cnt = -1;
    min_qid = 0;
    for (i = 0; i < netdev_n_txq(port->netdev); i++) {
        if (port->txq_used[i] < min_cnt || min_cnt == -1) {
            min_cnt = port->txq_used[i];
            min_qid = i;
        }
    }

    port->txq_used[min_qid]++;
    tx->qid = min_qid;

    ovs_mutex_unlock(&port->txq_used_mutex);

    dpif_netdev_xps_revalidate_pmd(pmd, now, false);

    VLOG_DBG("Core %d: New TX queue ID %d for port \'%s\'.",
             pmd->core_id, tx->qid, netdev_get_name(tx->port->netdev));
    return min_qid;
}
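/* Worked example (editorial, not in the original source): with four tx
 * queues and txq_used == {2, 0, 1, 3}, the scan above selects min_qid == 1;
 * txq_used becomes {2, 1, 1, 3}, and this pmd keeps using queue 1 until the
 * queue sits idle for XPS_TIMEOUT_MS, at which point the qid is released and
 * re-selected on the next transmission. */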
static struct tx_port *
pmd_tnl_port_cache_lookup(const struct dp_netdev_pmd_thread *pmd,
                          odp_port_t port_no)
{
    return tx_port_lookup(&pmd->tnl_port_cache, port_no);
}

static struct tx_port *
pmd_send_port_cache_lookup(const struct dp_netdev_pmd_thread *pmd,
                           odp_port_t port_no)
{
    return tx_port_lookup(&pmd->send_port_cache, port_no);
}

static int
push_tnl_action(const struct dp_netdev_pmd_thread *pmd,
                const struct nlattr *attr,
                struct dp_packet_batch *batch)
{
    struct tx_port *tun_port;
    const struct ovs_action_push_tnl *data;
    int err;

    data = nl_attr_get(attr);

    tun_port = pmd_tnl_port_cache_lookup(pmd, u32_to_odp(data->tnl_port));
    if (!tun_port) {
        err = -EINVAL;
        goto error;
    }
    err = netdev_push_header(tun_port->port->netdev, batch, data);
    if (!err) {
        return 0;
    }
error:
    dp_packet_delete_batch(batch, true);
    return err;
}
static void
dp_execute_userspace_action(struct dp_netdev_pmd_thread *pmd,
                            struct dp_packet *packet, bool may_steal,
                            struct flow *flow, ovs_u128 *ufid,
                            struct ofpbuf *actions,
                            const struct nlattr *userdata, long long now)
{
    struct dp_packet_batch b;
    int error;

    ofpbuf_clear(actions);

    error = dp_netdev_upcall(pmd, packet, flow, NULL, ufid,
                             DPIF_UC_ACTION, userdata, actions,
                             NULL);
    if (!error || error == ENOSPC) {
        packet_batch_init_packet(&b, packet);
        dp_netdev_execute_actions(pmd, &b, may_steal, flow,
                                  actions->data, actions->size, now);
    } else if (may_steal) {
        dp_packet_delete(packet);
    }
}
static void
dp_execute_cb(void *aux_, struct dp_packet_batch *packets_,
              const struct nlattr *a, bool may_steal)
{
    struct dp_netdev_execute_aux *aux = aux_;
    uint32_t *depth = recirc_depth_get();
    struct dp_netdev_pmd_thread *pmd = aux->pmd;
    struct dp_netdev *dp = pmd->dp;
    int type = nl_attr_type(a);
    long long now = aux->now;
    struct tx_port *p;

    switch ((enum ovs_action_attr)type) {
    case OVS_ACTION_ATTR_OUTPUT:
        p = pmd_send_port_cache_lookup(pmd, nl_attr_get_odp_port(a));
        if (OVS_LIKELY(p)) {
            int tx_qid;
            bool dynamic_txqs;

            dynamic_txqs = p->port->dynamic_txqs;
            if (dynamic_txqs) {
                tx_qid = dpif_netdev_xps_get_tx_qid(pmd, p, now);
            } else {
                atomic_read_relaxed(&pmd->static_tx_qid, &tx_qid);
            }

            netdev_send(p->port->netdev, tx_qid, packets_, may_steal,
                        dynamic_txqs);
            return;
        }
        break;

    case OVS_ACTION_ATTR_TUNNEL_PUSH:
        if (*depth < MAX_RECIRC_DEPTH) {
            struct dp_packet_batch tnl_pkt;
            struct dp_packet_batch *orig_packets_ = packets_;
            int err;

            if (!may_steal) {
                dp_packet_batch_clone(&tnl_pkt, packets_);
                packets_ = &tnl_pkt;
                dp_packet_batch_reset_cutlen(orig_packets_);
            }

            dp_packet_batch_apply_cutlen(packets_);

            err = push_tnl_action(pmd, a, packets_);
            if (!err) {
                (*depth)++;
                dp_netdev_recirculate(pmd, packets_);
                (*depth)--;
            }
            return;
        }
        break;

    case OVS_ACTION_ATTR_TUNNEL_POP:
        if (*depth < MAX_RECIRC_DEPTH) {
            struct dp_packet_batch *orig_packets_ = packets_;
            odp_port_t portno = nl_attr_get_odp_port(a);

            p = pmd_tnl_port_cache_lookup(pmd, portno);
            if (p) {
                struct dp_packet_batch tnl_pkt;
                int i;

                if (!may_steal) {
                    dp_packet_batch_clone(&tnl_pkt, packets_);
                    packets_ = &tnl_pkt;
                    dp_packet_batch_reset_cutlen(orig_packets_);
                }

                dp_packet_batch_apply_cutlen(packets_);

                netdev_pop_header(p->port->netdev, packets_);
                if (!packets_->count) {
                    return;
                }

                for (i = 0; i < packets_->count; i++) {
                    packets_->packets[i]->md.in_port.odp_port = portno;
                }

                (*depth)++;
                dp_netdev_recirculate(pmd, packets_);
                (*depth)--;
                return;
            }
        }
        break;

    case OVS_ACTION_ATTR_USERSPACE:
        if (!fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
            struct dp_packet_batch *orig_packets_ = packets_;
            struct dp_packet **packets = packets_->packets;
            const struct nlattr *userdata;
            struct dp_packet_batch usr_pkt;
            struct ofpbuf actions;
            struct flow flow;
            ovs_u128 ufid;
            bool clone = false;
            int i;

            userdata = nl_attr_find_nested(a, OVS_USERSPACE_ATTR_USERDATA);
            ofpbuf_init(&actions, 0);

            if (packets_->trunc) {
                if (!may_steal) {
                    dp_packet_batch_clone(&usr_pkt, packets_);
                    packets_ = &usr_pkt;
                    packets = packets_->packets;
                    clone = true;
                    dp_packet_batch_reset_cutlen(orig_packets_);
                }

                dp_packet_batch_apply_cutlen(packets_);
            }

            for (i = 0; i < packets_->count; i++) {
                flow_extract(packets[i], &flow);
                dpif_flow_hash(dp->dpif, &flow, sizeof flow, &ufid);
                dp_execute_userspace_action(pmd, packets[i], may_steal, &flow,
                                            &ufid, &actions, userdata, now);
            }

            if (clone) {
                dp_packet_delete_batch(packets_, true);
            }

            ofpbuf_uninit(&actions);
            fat_rwlock_unlock(&dp->upcall_rwlock);

            return;
        }
        break;

    case OVS_ACTION_ATTR_RECIRC:
        if (*depth < MAX_RECIRC_DEPTH) {
            struct dp_packet_batch recirc_pkts;
            int i;

            if (!may_steal) {
                dp_packet_batch_clone(&recirc_pkts, packets_);
                packets_ = &recirc_pkts;
            }

            for (i = 0; i < packets_->count; i++) {
                packets_->packets[i]->md.recirc_id = nl_attr_get_u32(a);
            }

            (*depth)++;
            dp_netdev_recirculate(pmd, packets_);
            (*depth)--;

            return;
        }

        VLOG_WARN("Packet dropped. Max recirculation depth exceeded.");
        break;

    case OVS_ACTION_ATTR_CT: {
        const struct nlattr *b;
        bool commit = false;
        unsigned int left;
        uint16_t zone = 0;
        const char *helper = NULL;
        const uint32_t *setmark = NULL;
        const struct ovs_key_ct_labels *setlabel = NULL;

        NL_ATTR_FOR_EACH_UNSAFE (b, left, nl_attr_get(a),
                                 nl_attr_get_size(a)) {
            enum ovs_ct_attr sub_type = nl_attr_type(b);

            switch (sub_type) {
            case OVS_CT_ATTR_COMMIT:
                commit = true;
                break;
            case OVS_CT_ATTR_ZONE:
                zone = nl_attr_get_u16(b);
                break;
            case OVS_CT_ATTR_HELPER:
                helper = nl_attr_get_string(b);
                break;
            case OVS_CT_ATTR_MARK:
                setmark = nl_attr_get(b);
                break;
            case OVS_CT_ATTR_LABELS:
                setlabel = nl_attr_get(b);
                break;
            case OVS_CT_ATTR_NAT:
            case OVS_CT_ATTR_UNSPEC:
            case __OVS_CT_ATTR_MAX:
                OVS_NOT_REACHED();
            }
        }

        conntrack_execute(&dp->conntrack, packets_, aux->flow->dl_type, commit,
                          zone, setmark, setlabel, helper);
        break;
    }

    case OVS_ACTION_ATTR_PUSH_VLAN:
    case OVS_ACTION_ATTR_POP_VLAN:
    case OVS_ACTION_ATTR_PUSH_MPLS:
    case OVS_ACTION_ATTR_POP_MPLS:
    case OVS_ACTION_ATTR_SET:
    case OVS_ACTION_ATTR_SET_MASKED:
    case OVS_ACTION_ATTR_SAMPLE:
    case OVS_ACTION_ATTR_HASH:
    case OVS_ACTION_ATTR_UNSPEC:
    case OVS_ACTION_ATTR_TRUNC:
    case __OVS_ACTION_ATTR_MAX:
        OVS_NOT_REACHED();
    }

    dp_packet_delete_batch(packets_, may_steal);
}
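/* Editorial note on the ownership convention in the switch above (an assumed
 * reading of the code, not in the original source): when 'may_steal' is
 * false the caller still owns 'packets_', so every consuming action (output,
 * tunnel push/pop, userspace, recirculation) first redirects itself to a
 * private clone:
 *
 *     if (!may_steal) {
 *         dp_packet_batch_clone(&tnl_pkt, packets_);    // private copy
 *         packets_ = &tnl_pkt;                          // consume the copy
 *         dp_packet_batch_reset_cutlen(orig_packets_);  // caller keeps its
 *     }                                                 // batch intact
 */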
static void
dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
                          struct dp_packet_batch *packets,
                          bool may_steal, const struct flow *flow,
                          const struct nlattr *actions, size_t actions_len,
                          long long now)
{
    struct dp_netdev_execute_aux aux = { pmd, now, flow };

    odp_execute_actions(&aux, packets, may_steal, actions,
                        actions_len, dp_execute_cb);
}
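/* Usage sketch (mirrors the call made from dp_execute_userspace_action()
 * above; 'b' holds a single packet and 'actions' holds the odp actions
 * returned by an upcall):
 *
 *     packet_batch_init_packet(&b, packet);
 *     dp_netdev_execute_actions(pmd, &b, may_steal, flow,
 *                               actions->data, actions->size, now);
 */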
struct dp_netdev_ct_dump {
    struct ct_dpif_dump_state up;
    struct conntrack_dump dump;
    struct conntrack *ct;
    struct dp_netdev *dp;
};

static int
dpif_netdev_ct_dump_start(struct dpif *dpif, struct ct_dpif_dump_state **dump_,
                          const uint16_t *pzone)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_ct_dump *dump;

    dump = xzalloc(sizeof *dump);
    dump->dp = dp;
    dump->ct = &dp->conntrack;

    conntrack_dump_start(&dp->conntrack, &dump->dump, pzone);

    *dump_ = &dump->up;

    return 0;
}

static int
dpif_netdev_ct_dump_next(struct dpif *dpif OVS_UNUSED,
                         struct ct_dpif_dump_state *dump_,
                         struct ct_dpif_entry *entry)
{
    struct dp_netdev_ct_dump *dump;

    INIT_CONTAINER(dump, dump_, up);

    return conntrack_dump_next(&dump->dump, entry);
}

static int
dpif_netdev_ct_dump_done(struct dpif *dpif OVS_UNUSED,
                         struct ct_dpif_dump_state *dump_)
{
    struct dp_netdev_ct_dump *dump;
    int err;

    INIT_CONTAINER(dump, dump_, up);

    err = conntrack_dump_done(&dump->dump);

    free(dump);

    return err;
}

static int
dpif_netdev_ct_flush(struct dpif *dpif, const uint16_t *zone)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    return conntrack_flush(&dp->conntrack, zone);
}
const struct dpif_class dpif_netdev_class = {
    "netdev",
    dpif_netdev_init,
    dpif_netdev_enumerate,
    dpif_netdev_port_open_type,
    dpif_netdev_open,
    dpif_netdev_close,
    dpif_netdev_destroy,
    dpif_netdev_run,
    dpif_netdev_wait,
    dpif_netdev_get_stats,
    dpif_netdev_port_add,
    dpif_netdev_port_del,
    dpif_netdev_port_set_config,
    dpif_netdev_port_query_by_number,
    dpif_netdev_port_query_by_name,
    NULL,                       /* port_get_pid */
    dpif_netdev_port_dump_start,
    dpif_netdev_port_dump_next,
    dpif_netdev_port_dump_done,
    dpif_netdev_port_poll,
    dpif_netdev_port_poll_wait,
    dpif_netdev_flow_flush,
    dpif_netdev_flow_dump_create,
    dpif_netdev_flow_dump_destroy,
    dpif_netdev_flow_dump_thread_create,
    dpif_netdev_flow_dump_thread_destroy,
    dpif_netdev_flow_dump_next,
    dpif_netdev_operate,
    NULL,                       /* recv_set */
    NULL,                       /* handlers_set */
    dpif_netdev_pmd_set,
    dpif_netdev_queue_to_priority,
    NULL,                       /* recv */
    NULL,                       /* recv_wait */
    NULL,                       /* recv_purge */
    dpif_netdev_register_dp_purge_cb,
    dpif_netdev_register_upcall_cb,
    dpif_netdev_enable_upcall,
    dpif_netdev_disable_upcall,
    dpif_netdev_get_datapath_version,
    dpif_netdev_ct_dump_start,
    dpif_netdev_ct_dump_next,
    dpif_netdev_ct_dump_done,
    dpif_netdev_ct_flush,
};
static void
dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED,
                              const char *argv[], void *aux OVS_UNUSED)
{
    struct dp_netdev_port *port;
    struct dp_netdev *dp;
    odp_port_t port_no;

    ovs_mutex_lock(&dp_netdev_mutex);
    dp = shash_find_data(&dp_netdevs, argv[1]);
    if (!dp || !dpif_netdev_class_is_dummy(dp->class)) {
        ovs_mutex_unlock(&dp_netdev_mutex);
        unixctl_command_reply_error(conn, "unknown datapath or not a dummy");
        return;
    }
    ovs_refcount_ref(&dp->ref_cnt);
    ovs_mutex_unlock(&dp_netdev_mutex);

    ovs_mutex_lock(&dp->port_mutex);
    if (get_port_by_name(dp, argv[2], &port)) {
        unixctl_command_reply_error(conn, "unknown port");
        goto exit;
    }

    port_no = u32_to_odp(atoi(argv[3]));
    if (!port_no || port_no == ODPP_NONE) {
        unixctl_command_reply_error(conn, "bad port number");
        goto exit;
    }
    if (dp_netdev_lookup_port(dp, port_no)) {
        unixctl_command_reply_error(conn, "port number already in use");
        goto exit;
    }

    /* Remove the port. */
    hmap_remove(&dp->ports, &port->node);
    dp_netdev_del_port_from_all_pmds(dp, port);

    /* Reinsert with the new port number. */
    port->port_no = port_no;
    hmap_insert(&dp->ports, &port->node, hash_port_no(port_no));
    dp_netdev_add_port_to_pmds(dp, port);

    seq_change(dp->port_seq);
    unixctl_command_reply(conn, NULL);

exit:
    ovs_mutex_unlock(&dp->port_mutex);
    dp_netdev_unref(dp);
}
static void
dpif_dummy_register__(const char *type)
{
    struct dpif_class *class;

    class = xmalloc(sizeof *class);
    *class = dpif_netdev_class;
    class->type = xstrdup(type);
    dp_register_provider(class);
}

static void
dpif_dummy_override(const char *type)
{
    int error;

    /*
     * Ignore EAFNOSUPPORT to allow --enable-dummy=system with
     * a userland-only build.  This is useful for the testsuite.
     */
    error = dp_unregister_provider(type);
    if (error == 0 || error == EAFNOSUPPORT) {
        dpif_dummy_register__(type);
    }
}

void
dpif_dummy_register(enum dummy_level level)
{
    if (level == DUMMY_OVERRIDE_ALL) {
        struct sset types;
        const char *type;

        sset_init(&types);
        dp_enumerate_types(&types);
        SSET_FOR_EACH (type, &types) {
            dpif_dummy_override(type);
        }
        sset_destroy(&types);
    } else if (level == DUMMY_OVERRIDE_SYSTEM) {
        dpif_dummy_override("system");
    }
    dpif_dummy_register__("dummy");

    unixctl_command_register("dpif-dummy/change-port-number",
                             "dp port new-number",
                             3, 3, dpif_dummy_change_port_number, NULL);
}
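/* Example invocation of the unixctl command registered above (dummy
 * datapaths only; 'dp0', 'p1' and '100' are placeholder datapath, port and
 * port-number arguments):
 *
 *     ovs-appctl dpif-dummy/change-port-number dp0 p1 100
 */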
/* Datapath Classifier. */

/* A set of rules that all have the same fields wildcarded. */
struct dpcls_subtable {
    /* The fields are only used by writers. */
    struct cmap_node cmap_node OVS_GUARDED; /* Within dpcls 'subtables_map'. */

    /* These fields are accessed by readers. */
    struct cmap rules;           /* Contains "struct dpcls_rule"s. */
    uint32_t hit_cnt;            /* Number of match hits in subtable in the
                                  * current optimization interval. */
    struct netdev_flow_key mask; /* Wildcards for fields (const). */
    /* 'mask' must be the last field, additional space is allocated here. */
};

/* Initializes 'cls' as a classifier that initially contains no classification
 * rules. */
static void
dpcls_init(struct dpcls *cls)
{
    cmap_init(&cls->subtables_map);
    pvector_init(&cls->subtables);
}

static void
dpcls_destroy_subtable(struct dpcls *cls, struct dpcls_subtable *subtable)
{
    VLOG_DBG("Destroying subtable %p for in_port %d", subtable, cls->in_port);
    pvector_remove(&cls->subtables, subtable);
    cmap_remove(&cls->subtables_map, &subtable->cmap_node,
                subtable->mask.hash);
    cmap_destroy(&subtable->rules);
    ovsrcu_postpone(free, subtable);
}

/* Destroys 'cls'.  Rules within 'cls', if any, are not freed; this is the
 * caller's responsibility.
 * May only be called after all the readers have been terminated. */
static void
dpcls_destroy(struct dpcls *cls)
{
    if (cls) {
        struct dpcls_subtable *subtable;

        CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
            ovs_assert(cmap_count(&subtable->rules) == 0);
            dpcls_destroy_subtable(cls, subtable);
        }
        cmap_destroy(&cls->subtables_map);
        pvector_destroy(&cls->subtables);
    }
}

static struct dpcls_subtable *
dpcls_create_subtable(struct dpcls *cls, const struct netdev_flow_key *mask)
{
    struct dpcls_subtable *subtable;

    /* Need to add one. */
    subtable = xmalloc(sizeof *subtable
                       - sizeof subtable->mask.mf + mask->len);
    cmap_init(&subtable->rules);
    subtable->hit_cnt = 0;
    netdev_flow_key_clone(&subtable->mask, mask);
    cmap_insert(&cls->subtables_map, &subtable->cmap_node, mask->hash);
    /* Add the new subtable at the end of the pvector (with no hits yet). */
    pvector_insert(&cls->subtables, subtable, 0);
    VLOG_DBG("Creating %"PRIuSIZE". subtable %p for in_port %d",
             cmap_count(&cls->subtables_map), subtable, cls->in_port);
    pvector_publish(&cls->subtables);

    return subtable;
}
static inline struct dpcls_subtable *
dpcls_find_subtable(struct dpcls *cls, const struct netdev_flow_key *mask)
{
    struct dpcls_subtable *subtable;

    CMAP_FOR_EACH_WITH_HASH (subtable, cmap_node, mask->hash,
                             &cls->subtables_map) {
        if (netdev_flow_key_equal(&subtable->mask, mask)) {
            return subtable;
        }
    }
    return dpcls_create_subtable(cls, mask);
}

/* Periodically sort the dpcls subtable vectors according to hit counts. */
static void
dpcls_sort_subtable_vector(struct dpcls *cls)
{
    struct pvector *pvec = &cls->subtables;
    struct dpcls_subtable *subtable;

    PVECTOR_FOR_EACH (subtable, pvec) {
        pvector_change_priority(pvec, subtable, subtable->hit_cnt);
        subtable->hit_cnt = 0;
    }
    pvector_publish(pvec);
}
static inline void
dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd)
{
    struct dpcls *cls;
    long long int now = time_msec();

    if (now > pmd->next_optimization) {
        /* Try to obtain the flow lock to block out revalidator threads.
         * If not possible, just try next time. */
        if (!ovs_mutex_trylock(&pmd->flow_mutex)) {
            /* Optimize each classifier. */
            CMAP_FOR_EACH (cls, node, &pmd->classifiers) {
                dpcls_sort_subtable_vector(cls);
            }
            ovs_mutex_unlock(&pmd->flow_mutex);
            /* Start a new measuring interval. */
            pmd->next_optimization = now + DPCLS_OPTIMIZATION_INTERVAL;
        }
    }
}
/* Inserts 'rule' into 'cls'. */
static void
dpcls_insert(struct dpcls *cls, struct dpcls_rule *rule,
             const struct netdev_flow_key *mask)
{
    struct dpcls_subtable *subtable = dpcls_find_subtable(cls, mask);

    /* Refer to subtable's mask, also for later removal. */
    rule->mask = &subtable->mask;
    cmap_insert(&subtable->rules, &rule->cmap_node, rule->flow.hash);
}

/* Removes 'rule' from 'cls', also destructing the 'rule'. */
static void
dpcls_remove(struct dpcls *cls, struct dpcls_rule *rule)
{
    struct dpcls_subtable *subtable;

    ovs_assert(rule->mask);

    /* Get the subtable from the reference in rule->mask. */
    INIT_CONTAINER(subtable, rule->mask, mask);
    if (cmap_remove(&subtable->rules, &rule->cmap_node, rule->flow.hash)
        == 0) {
        /* Delete the now-empty subtable. */
        dpcls_destroy_subtable(cls, subtable);
        pvector_publish(&cls->subtables);
    }
}
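/* Usage sketch (editorial; mirrors how the flow installation and removal
 * paths elsewhere in this file drive the classifier, with 'flow' a
 * struct dp_netdev_flow and 'mask' a netdev_flow_key):
 *
 *     dpcls_insert(cls, &flow->cr, &mask);    // publish the rule
 *     ...
 *     dpcls_remove(cls, &flow->cr);           // unpublish it; an emptied
 *                                             // subtable is freed via RCU
 */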
/* Returns true if 'target' satisfies 'rule', that is, if for each 1-bit in
 * the rule's mask the corresponding bits in the rule's (pre-masked) key and
 * in 'target' are the same. */
static inline bool
dpcls_rule_matches_key(const struct dpcls_rule *rule,
                       const struct netdev_flow_key *target)
{
    const uint64_t *keyp = miniflow_get_values(&rule->flow.mf);
    const uint64_t *maskp = miniflow_get_values(&rule->mask->mf);
    uint64_t value;

    NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, target, rule->flow.mf.map) {
        if (OVS_UNLIKELY((value & *maskp++) != *keyp++)) {
            return false;
        }
    }
    return true;
}
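/* Worked example (editorial): for a single 64-bit miniflow word with
 * mask = 0x0000ffff and key = 0x00001234 (keys are stored pre-masked), a
 * target word of 0xabcd1234 matches because
 * (0xabcd1234 & 0x0000ffff) == 0x00001234, while 0xabcd5678 does not. */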
/* For each miniflow in 'keys' performs a classifier lookup writing the result
 * into the corresponding slot in 'rules'.  If a particular entry in 'keys' is
 * NULL it is skipped.
 *
 * This function is optimized for use in the userspace datapath and therefore
 * does not implement a lot of features available in the standard
 * classifier_lookup() function.  Specifically, it does not implement
 * priorities, instead returning any rule which matches the flow.
 *
 * Returns true if all miniflows found a corresponding rule. */
static bool
dpcls_lookup(struct dpcls *cls, const struct netdev_flow_key keys[],
             struct dpcls_rule **rules, const size_t cnt,
             int *num_lookups_p)
{
    /* The received 'cnt' miniflows are the search-keys that will be processed
     * to find a matching entry into the available subtables.
     * The number of bits in map_type is equal to NETDEV_MAX_BURST. */
    typedef uint32_t map_type;
#define MAP_BITS (sizeof(map_type) * CHAR_BIT)
    BUILD_ASSERT_DECL(MAP_BITS >= NETDEV_MAX_BURST);

    struct dpcls_subtable *subtable;

    map_type keys_map = TYPE_MAXIMUM(map_type); /* Set all bits. */
    map_type found_map;
    uint32_t hashes[MAP_BITS];
    const struct cmap_node *nodes[MAP_BITS];

    if (cnt != MAP_BITS) {
        keys_map >>= MAP_BITS - cnt; /* Clear extra bits. */
    }
    memset(rules, 0, cnt * sizeof *rules);

    int lookups_match = 0, subtable_pos = 1;

    /* The Datapath classifier - aka dpcls - is composed of subtables.
     * Subtables are dynamically created as needed when new rules are
     * inserted.  Each subtable collects rules with matches on a specific
     * subset of packet fields as defined by the subtable's mask.  We proceed
     * to process every search-key against each subtable, but when a match is
     * found for a search-key, the search for that key can stop because the
     * rules are non-overlapping. */
    PVECTOR_FOR_EACH (subtable, &cls->subtables) {
        int i;

        /* Compute hashes for the remaining keys.  Each search-key is
         * masked with the subtable's mask to avoid hashing the wildcarded
         * bits. */
        ULLONG_FOR_EACH_1(i, keys_map) {
            hashes[i] = netdev_flow_key_hash_in_mask(&keys[i],
                                                     &subtable->mask);
        }
        /* Lookup. */
        found_map = cmap_find_batch(&subtable->rules, keys_map, hashes, nodes);
        /* Check results.  When the i-th bit of found_map is set, it means
         * that a set of nodes with a matching hash value was found for the
         * i-th search-key.  Due to possible hash collisions we need to check
         * which of the found rules, if any, really matches our masked
         * search-key. */
        ULLONG_FOR_EACH_1(i, found_map) {
            struct dpcls_rule *rule;

            CMAP_NODE_FOR_EACH (rule, cmap_node, nodes[i]) {
                if (OVS_LIKELY(dpcls_rule_matches_key(rule, &keys[i]))) {
                    rules[i] = rule;
                    /* Even at 20 Mpps the 32-bit hit_cnt cannot wrap
                     * within one second optimization interval. */
                    subtable->hit_cnt++;
                    lookups_match += subtable_pos;
                    goto next;
                }
            }
            /* None of the found rules was a match.  Reset the i-th bit to
             * keep searching this key in the next subtable. */
            ULLONG_SET0(found_map, i);  /* Did not match. */
        next:
            ;                           /* Keep Sparse happy. */
        }
        keys_map &= ~found_map;         /* Clear the found rules. */
        if (!keys_map) {
            if (num_lookups_p) {
                *num_lookups_p = lookups_match;
            }
            return true;                /* All found. */
        }
        subtable_pos++;
    }
    if (num_lookups_p) {
        *num_lookups_p = lookups_match;
    }
    return false;                       /* Some misses. */
}
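/* Worked example (editorial, not in the original source): for cnt = 4,
 * keys_map starts as 0b1111.  If subtable 1 matches keys 0 and 2, then after
 * the collision check found_map == 0b0101, keys_map &= ~found_map leaves
 * 0b1010, and only keys 1 and 3 are hashed and probed against subtable 2.
 * A hit in the k-th subtable adds k to 'lookups_match', so *num_lookups_p
 * reports the total number of subtable probes that ended in a hit. */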