/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2016, 2017 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "dpif-netdev.h"

#include <sys/types.h>
#include <netinet/in.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

#include "conntrack.h"
#include "dp-packet.h"
#include "dpif-netdev-perf.h"
#include "dpif-provider.h"
#include "fat-rwlock.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "odp-execute.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/match.h"
#include "openvswitch/ofp-parse.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/ofpbuf.h"
#include "openvswitch/shash.h"
#include "openvswitch/vlog.h"
#include "openvswitch/poll-loop.h"
#include "tnl-neigh-cache.h"
#include "tnl-ports.h"
VLOG_DEFINE_THIS_MODULE(dpif_netdev);
#define FLOW_DUMP_MAX_BATCH 50
/* Use a per-thread recirc_depth to prevent recirculation loops. */
#define MAX_RECIRC_DEPTH 6
DEFINE_STATIC_PER_THREAD_DATA(uint32_t, recirc_depth, 0)
/* Use instant packet send by default. */
#define DEFAULT_TX_FLUSH_INTERVAL 0
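
/* Note (added for clarity, not in the original sources): 'tx_flush_interval'
 * in struct dp_netdev below bounds how long a packet may sit in an output
 * batch before it is flushed (the OVS 'tx-flush-interval' option, in
 * microseconds).  The default of 0 therefore disables output batching delay:
 * packets are sent as soon as they have been processed. */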
/* Configuration parameters. */
enum { MAX_FLOWS = 65536 };     /* Maximum number of flows in flow table. */
enum { MAX_METERS = 65536 };    /* Maximum number of meters. */
enum { MAX_BANDS = 8 };         /* Maximum number of bands / meter. */
enum { N_METER_LOCKS = 64 };    /* Number of locks striped across the meters. */
/* Protects against changes to 'dp_netdevs'. */
static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dp_netdev's. */
static struct shash dp_netdevs OVS_GUARDED_BY(dp_netdev_mutex)
    = SHASH_INITIALIZER(&dp_netdevs);
static struct vlog_rate_limit upcall_rl = VLOG_RATE_LIMIT_INIT(600, 600);
#define DP_NETDEV_CS_SUPPORTED_MASK (CS_NEW | CS_ESTABLISHED | CS_RELATED \
                                     | CS_INVALID | CS_REPLY_DIR | CS_TRACKED \
                                     | CS_SRC_NAT | CS_DST_NAT)
#define DP_NETDEV_CS_UNSUPPORTED_MASK (~(uint32_t)DP_NETDEV_CS_SUPPORTED_MASK)
static struct odp_support dp_netdev_support = {
    .max_vlan_headers = SIZE_MAX,
    .max_mpls_depth = SIZE_MAX,
    .ct_state_nat = true,
    .ct_orig_tuple = true,
    .ct_orig_tuple6 = true,
};
/* Stores a miniflow with inline values */
struct netdev_flow_key {
    uint32_t hash;      /* Hash function differs for different users. */
    uint32_t len;       /* Length of the following miniflow (incl. map). */
    struct miniflow mf;
    uint64_t buf[FLOW_MAX_PACKET_U64S];
};
/* Exact match cache for frequently used flows
 *
 * The cache uses a 32-bit hash of the packet (which can be the RSS hash) to
 * search its entries for a miniflow that matches exactly the miniflow of the
 * packet.  It stores the 'dpcls_rule' (rule) that matches the miniflow.
 *
 * A cache entry holds a reference to its 'dp_netdev_flow'.
 *
 * A miniflow with a given hash can be in one of EM_FLOW_HASH_SEGS different
 * entries.  The 32-bit hash is split into EM_FLOW_HASH_SEGS values (each of
 * them is EM_FLOW_HASH_SHIFT bits wide and the remainder is thrown away).
 * Each value is the index of a cache entry where the miniflow could be.
 *
 * Each pmd_thread has its own private exact match cache.
 * If dp_netdev_input is not called from a pmd thread, a mutex is used.
 */
#define EM_FLOW_HASH_SHIFT 13
#define EM_FLOW_HASH_ENTRIES (1u << EM_FLOW_HASH_SHIFT)
#define EM_FLOW_HASH_MASK (EM_FLOW_HASH_ENTRIES - 1)
#define EM_FLOW_HASH_SEGS 2
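
/* Worked example (illustrative only, not from the original sources): with
 * EM_FLOW_HASH_SHIFT == 13 and EM_FLOW_HASH_SEGS == 2, a packet hash of
 * 0xA1B2C3D4 selects two candidate cache slots:
 *
 *     slot 0:  0xA1B2C3D4         & EM_FLOW_HASH_MASK == 0x03D4
 *     slot 1: (0xA1B2C3D4 >> 13)  & EM_FLOW_HASH_MASK == 0x0D96
 *
 * Only 2 * 13 = 26 of the 32 hash bits are consumed; the remaining 6 bits
 * are thrown away, as described in the comment above. */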
/* Default EMC insert probability is 1 / DEFAULT_EM_FLOW_INSERT_INV_PROB */
#define DEFAULT_EM_FLOW_INSERT_INV_PROB 100
#define DEFAULT_EM_FLOW_INSERT_MIN (UINT32_MAX / \
                                    DEFAULT_EM_FLOW_INSERT_INV_PROB)
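
/* Worked example (illustrative only): with the default inverse probability
 * of 100, DEFAULT_EM_FLOW_INSERT_MIN is UINT32_MAX / 100, i.e. roughly
 * 42,949,672.  Comparing a uniformly distributed 32-bit value against this
 * threshold succeeds for about 1 in every 100 packets, which yields the
 * 1 / DEFAULT_EM_FLOW_INSERT_INV_PROB insertion probability named above. */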
struct emc_entry {
    struct dp_netdev_flow *flow;
    struct netdev_flow_key key;   /* key.hash used for emc hash value. */
};

struct emc_cache {
    struct emc_entry entries[EM_FLOW_HASH_ENTRIES];
    int sweep_idx;                /* For emc_cache_slow_sweep(). */
};
/* Iterate in the exact match cache through every entry that might contain a
 * miniflow with hash 'HASH'. */
#define EMC_FOR_EACH_POS_WITH_HASH(EMC, CURRENT_ENTRY, HASH)                 \
    for (uint32_t i__ = 0, srch_hash__ = (HASH);                             \
         (CURRENT_ENTRY) = &(EMC)->entries[srch_hash__ & EM_FLOW_HASH_MASK], \
         i__ < EM_FLOW_HASH_SEGS;                                            \
         i__++, srch_hash__ >>= EM_FLOW_HASH_SHIFT)
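
/* Usage sketch (illustrative only, not part of the original sources): a
 * lookup probes each of the EM_FLOW_HASH_SEGS candidate slots in turn and
 * stops at the first live entry whose miniflow matches:
 *
 *     struct emc_entry *current_entry;
 *
 *     EMC_FOR_EACH_POS_WITH_HASH (cache, current_entry, key->hash) {
 *         if (current_entry->key.hash == key->hash
 *             && emc_entry_alive(current_entry)
 *             && netdev_flow_key_equal_mf(&current_entry->key, &key->mf)) {
 *             return current_entry->flow;   // EMC hit
 *         }
 *     }
 *     return NULL;                          // miss: fall back to the dpcls
 */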
/* Simple non-wildcarding single-priority classifier. */

/* Time in microseconds between successive optimizations of the dpcls
 * subtable vector. */
#define DPCLS_OPTIMIZATION_INTERVAL 1000000LL

/* Time in microseconds of the interval in which rxq processing cycles used
 * in rxq to pmd assignments is measured and stored. */
#define PMD_RXQ_INTERVAL_LEN 10000000LL

/* Number of intervals for which cycles are stored
 * and used during rxq to pmd assignment. */
#define PMD_RXQ_INTERVAL_MAX 6
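
/* Note (added for clarity, not in the original sources): with
 * PMD_RXQ_INTERVAL_LEN == 10,000,000 us (10 s) and PMD_RXQ_INTERVAL_MAX == 6,
 * rxq-to-pmd assignment is based on roughly the last 6 * 10 s = 60 s of
 * measured rxq processing cycles. */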
struct dpcls {
    struct cmap_node node;      /* Within dp_netdev_pmd_thread.classifiers */
    odp_port_t in_port;
    struct cmap subtables_map;
    struct pvector subtables;
};
/* A rule to be inserted to the classifier. */
struct dpcls_rule {
    struct cmap_node cmap_node;   /* Within struct dpcls_subtable 'rules'. */
    struct netdev_flow_key *mask; /* Subtable's mask. */
    struct netdev_flow_key flow;  /* Matching key. */
    /* 'flow' must be the last field, additional space is allocated here. */
};
static void dpcls_init(struct dpcls *);
static void dpcls_destroy(struct dpcls *);
static void dpcls_sort_subtable_vector(struct dpcls *);
static void dpcls_insert(struct dpcls *, struct dpcls_rule *,
                         const struct netdev_flow_key *mask);
static void dpcls_remove(struct dpcls *, struct dpcls_rule *);
static bool dpcls_lookup(struct dpcls *cls,
                         const struct netdev_flow_key keys[],
                         struct dpcls_rule **rules, size_t cnt,
                         int *num_lookups_p);
/* Set of supported meter flags */
#define DP_SUPPORTED_METER_FLAGS_MASK \
    (OFPMF13_STATS | OFPMF13_PKTPS | OFPMF13_KBPS | OFPMF13_BURST)

/* Set of supported meter band types */
#define DP_SUPPORTED_METER_BAND_TYPES \
    ( 1 << OFPMBT13_DROP )
struct dp_meter_band {
    struct ofputil_meter_band up; /* type, prec_level, pad, rate, burst_size */
    uint32_t bucket; /* In 1/1000 packets (for PKTPS), or in bits (for KBPS) */
    uint64_t packet_count;
};

struct dp_meter {
    uint32_t max_delta_t;
    uint64_t packet_count;
    struct dp_meter_band bands[];
};
/* Datapath based on the network device interface from netdev.h.
 *
 * Some members, marked 'const', are immutable.  Accessing other members
 * requires synchronization, as noted in more detail below.
 *
 * Acquisition order is, from outermost to innermost:
 *
 *    dp_netdev_mutex (global)
 */
struct dp_netdev {
    const struct dpif_class *const class;
    const char *const name;
    struct ovs_refcount ref_cnt;
    atomic_flag destroyed;

    /* Any lookup into 'ports' or any access to the dp_netdev_ports found
     * through 'ports' requires taking 'port_mutex'. */
    struct ovs_mutex port_mutex;
    struct hmap ports OVS_GUARDED;
    struct seq *port_seq;       /* Incremented whenever a port changes. */

    /* The time that a packet can wait in output batch for sending. */
    atomic_uint32_t tx_flush_interval;

    struct ovs_mutex meter_locks[N_METER_LOCKS];
    struct dp_meter *meters[MAX_METERS]; /* Meter bands. */

    /* Probability of EMC insertions is a factor of 'emc_insert_min'. */
    OVS_ALIGNED_VAR(CACHE_LINE_SIZE) atomic_uint32_t emc_insert_min;
    /* Enable collection of PMD performance metrics. */
    atomic_bool pmd_perf_metrics;

    /* Protects access to ofproto-dpif-upcall interface during revalidator
     * thread synchronization. */
    struct fat_rwlock upcall_rwlock;
    upcall_callback *upcall_cb;  /* Callback function for executing upcalls. */
    void *upcall_aux;

    /* Callback function for notifying the purging of dp flows (during
     * resetting pmd deletion). */
    dp_purge_callback *dp_purge_cb;

    /* Stores all 'struct dp_netdev_pmd_thread's. */
    struct cmap poll_threads;
    /* id pool for per thread static_tx_qid. */
    struct id_pool *tx_qid_pool;
    struct ovs_mutex tx_qid_pool_mutex;

    /* Protects the access of the 'struct dp_netdev_pmd_thread'
     * instance for non-pmd thread. */
    struct ovs_mutex non_pmd_mutex;

    /* Each pmd thread will store its pointer to
     * 'struct dp_netdev_pmd_thread' in 'per_pmd_key'. */
    ovsthread_key_t per_pmd_key;

    struct seq *reconfigure_seq;
    uint64_t last_reconfigure_seq;

    /* Cpu mask for pin of pmd threads. */
    char *pmd_cmask;

    uint64_t last_tnl_conf_seq;

    struct conntrack conntrack;
};
static void meter_lock(const struct dp_netdev *dp, uint32_t meter_id)
    OVS_ACQUIRES(dp->meter_locks[meter_id % N_METER_LOCKS])
{
    ovs_mutex_lock(&dp->meter_locks[meter_id % N_METER_LOCKS]);
}

static void meter_unlock(const struct dp_netdev *dp, uint32_t meter_id)
    OVS_RELEASES(dp->meter_locks[meter_id % N_METER_LOCKS])
{
    ovs_mutex_unlock(&dp->meter_locks[meter_id % N_METER_LOCKS]);
}
static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp,
                                                    odp_port_t port_no)
    OVS_REQUIRES(dp->port_mutex);
enum rxq_cycles_counter_type {
    RXQ_CYCLES_PROC_CURR,       /* Cycles spent successfully polling and
                                   processing packets during the current
                                   interval. */
    RXQ_CYCLES_PROC_HIST,       /* Total cycles of all intervals that are used
                                   during rxq to pmd assignment. */
    RXQ_N_CYCLES
};

#define XPS_TIMEOUT 500000LL    /* In microseconds. */
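
/* Note (added for clarity, not in the original sources): XPS_TIMEOUT is
 * roughly the idle time, 500,000 us (0.5 s), after which a dynamically
 * assigned tx queue id is treated as stale and revalidated; see
 * dpif_netdev_xps_get_tx_qid() and dpif_netdev_xps_revalidate_pmd(),
 * declared below. */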
/* Contained by struct dp_netdev_port's 'rxqs' member. */
struct dp_netdev_rxq {
    struct dp_netdev_port *port;
    struct netdev_rxq *rx;
    unsigned core_id;                  /* Core to which this queue should be
                                          pinned.  OVS_CORE_UNSPEC if the
                                          queue doesn't need to be pinned to a
                                          particular core. */
    unsigned intrvl_idx;               /* Write index for 'cycles_intrvl'. */
    struct dp_netdev_pmd_thread *pmd;  /* pmd thread that polls this queue. */
    bool is_vhost;                     /* Is rxq of a vhost port. */

    /* Counters of cycles spent successfully polling and processing pkts. */
    atomic_ullong cycles[RXQ_N_CYCLES];
    /* We store PMD_RXQ_INTERVAL_MAX intervals of data for an rxq and then
       sum them to yield the cycles used for an rxq. */
    atomic_ullong cycles_intrvl[PMD_RXQ_INTERVAL_MAX];
};
/* A port in a netdev-based datapath. */
struct dp_netdev_port {
    odp_port_t port_no;
    bool dynamic_txqs;          /* If true XPS will be used. */
    bool need_reconfigure;      /* True if we should reconfigure netdev. */
    struct netdev *netdev;
    struct hmap_node node;      /* Node in dp_netdev's 'ports'. */
    struct netdev_saved_flags *sf;
    struct dp_netdev_rxq *rxqs;
    unsigned n_rxq;             /* Number of elements in 'rxqs' */
    unsigned *txq_used;         /* Number of threads that use each tx queue. */
    struct ovs_mutex txq_used_mutex;
    char *type;                 /* Port type as requested by user. */
    char *rxq_affinity_list;    /* Requested affinity of rx queues. */
};
/* Contained by struct dp_netdev_flow's 'stats' member.  */
struct dp_netdev_flow_stats {
    atomic_llong used;             /* Last used time, in monotonic msecs. */
    atomic_ullong packet_count;    /* Number of packets matched. */
    atomic_ullong byte_count;      /* Number of bytes matched. */
    atomic_uint16_t tcp_flags;     /* Bitwise-OR of seen tcp_flags values. */
};
/* A flow in 'dp_netdev_pmd_thread's 'flow_table'.
 *
 * Except near the beginning or ending of its lifespan, rule 'rule' belongs to
 * its pmd thread's classifier.  The text below calls this classifier 'cls'.
 *
 * The thread safety rules described here for "struct dp_netdev_flow" are
 * motivated by two goals:
 *
 *    - Prevent threads that read members of "struct dp_netdev_flow" from
 *      reading bad data due to changes by some thread concurrently modifying
 *      those members.
 *
 *    - Prevent two threads making changes to members of a given "struct
 *      dp_netdev_flow" from interfering with each other.
 *
 * A flow 'flow' may be accessed without a risk of being freed during an RCU
 * grace period.  Code that needs to hold onto a flow for a while
 * should try incrementing 'flow->ref_cnt' with dp_netdev_flow_ref().
 *
 * 'flow->ref_cnt' protects 'flow' from being freed.  It doesn't protect the
 * flow from being deleted from 'cls' and it doesn't protect members of 'flow'
 * from modification.
 *
 * Some members, marked 'const', are immutable.  Accessing other members
 * requires synchronization, as noted in more detail below.
 */
struct dp_netdev_flow {
    const struct flow flow;      /* Unmasked flow that created this entry. */
    /* Hash table index by unmasked flow. */
    const struct cmap_node node; /* In owning dp_netdev_pmd_thread's */
                                 /* 'flow_table'. */
    const ovs_u128 ufid;         /* Unique flow identifier. */
    const unsigned pmd_id;       /* The 'core_id' of pmd thread owning this */
                                 /* flow. */

    /* Number of references.
     * The classifier owns one reference.
     * Any thread trying to keep a rule from being freed should hold its own
     * reference. */
    struct ovs_refcount ref_cnt;

    bool dead;

    struct dp_netdev_flow_stats stats;

    OVSRCU_TYPE(struct dp_netdev_actions *) actions;

    /* While processing a group of input packets, the datapath uses the next
     * member to store a pointer to the output batch for the flow.  It is
     * reset after the batch has been sent out (See dp_netdev_queue_batches(),
     * packet_batch_per_flow_init() and packet_batch_per_flow_execute()). */
    struct packet_batch_per_flow *batch;

    /* Packet classification. */
    struct dpcls_rule cr;        /* In owning dp_netdev's 'cls'. */
    /* 'cr' must be the last member. */
};
static void dp_netdev_flow_unref(struct dp_netdev_flow *);
static bool dp_netdev_flow_ref(struct dp_netdev_flow *);
static int dpif_netdev_flow_from_nlattrs(const struct nlattr *, uint32_t,
                                         struct flow *, bool);
/* A set of datapath actions within a "struct dp_netdev_flow".
 *
 * A struct dp_netdev_actions 'actions' is protected with RCU. */
struct dp_netdev_actions {
    /* These members are immutable: they do not change during the struct's
     * lifetime. */
    unsigned int size;          /* Size of 'actions', in bytes. */
    struct nlattr actions[];    /* Sequence of OVS_ACTION_ATTR_* attributes. */
};

struct dp_netdev_actions *dp_netdev_actions_create(const struct nlattr *,
                                                   size_t);
struct dp_netdev_actions *dp_netdev_flow_get_actions(
    const struct dp_netdev_flow *);
static void dp_netdev_actions_free(struct dp_netdev_actions *);
struct polled_queue {
    struct dp_netdev_rxq *rxq;
};

/* Contained by struct dp_netdev_pmd_thread's 'poll_list' member. */
struct rxq_poll {
    struct dp_netdev_rxq *rxq;
    struct hmap_node node;
};

/* Contained by struct dp_netdev_pmd_thread's 'send_port_cache',
 * 'tnl_port_cache' or 'tx_ports'. */
struct tx_port {
    struct dp_netdev_port *port;
    struct hmap_node node;
    long long flush_time;
    struct dp_packet_batch output_pkts;
    struct dp_netdev_rxq *output_pkts_rxqs[NETDEV_MAX_BURST];
};
/* A set of properties for the current processing loop that is not directly
 * associated with the pmd thread itself, but with the packets being
 * processed or the short-term system configuration (for example, time).
 * Contained by struct dp_netdev_pmd_thread's 'ctx' member. */
struct dp_netdev_pmd_thread_ctx {
    /* Latest measured time.  See 'pmd_thread_ctx_time_update()'. */
    long long now;
    /* RX queue from which last packet was received. */
    struct dp_netdev_rxq *last_rxq;
};
/* PMD: Poll mode drivers.  PMD accesses devices via polling to eliminate
 * the performance overhead of interrupt processing.  Therefore netdev can
 * not implement rx-wait for these devices.  dpif-netdev needs to poll
 * these devices to check for recv buffer.  pmd-thread does polling for
 * devices assigned to itself.
 *
 * DPDK uses PMDs for accessing the NIC.
 *
 * Note, the instance with cpu core id NON_PMD_CORE_ID is reserved for
 * I/O of all non-pmd threads.  There will be no actual thread created
 * for the instance.
 *
 * Each struct has its own flow cache and classifier per managed ingress port.
 * For packets received on an ingress port, a lookup is done in the
 * corresponding PMD thread's flow cache and, in case of a miss, in the
 * corresponding classifier of the port.  Packets are executed with the found
 * actions in either case. */
struct dp_netdev_pmd_thread {
    struct dp_netdev *dp;
    struct ovs_refcount ref_cnt;    /* Every reference must be refcount'ed. */
    struct cmap_node node;          /* In 'dp->poll_threads'. */

    pthread_cond_t cond;            /* For synchronizing pmd thread reload. */
    struct ovs_mutex cond_mutex;    /* Mutex for condition variable. */

    /* Per thread exact-match cache.  Note, the instance for cpu core
     * NON_PMD_CORE_ID can be accessed by multiple threads, and thus
     * needs to be protected by 'non_pmd_mutex'.  Every other instance
     * will only be accessed by its own pmd thread. */
    struct emc_cache flow_cache;

    /* Flow-Table and classifiers
     *
     * Writers of 'flow_table' must take the 'flow_mutex'.  Corresponding
     * changes to 'classifiers' must be made while still holding the
     * 'flow_mutex'. */
    struct ovs_mutex flow_mutex;
    struct cmap flow_table OVS_GUARDED; /* Flow table. */

    /* One classifier per in_port polled by the pmd */
    struct cmap classifiers;
    /* Periodically sort subtable vectors according to hit frequencies */
    long long int next_optimization;
    /* End of the next time interval for which processing cycles
       are stored for each polled rxq. */
    long long int rxq_next_cycle_store;

    /* Last interval timestamp. */
    uint64_t intrvl_tsc_prev;
    /* Last interval cycles. */
    atomic_ullong intrvl_cycles;

    /* Current context of the PMD thread. */
    struct dp_netdev_pmd_thread_ctx ctx;

    struct latch exit_latch;        /* For terminating the pmd thread. */
    struct seq *reload_seq;
    uint64_t last_reload_seq;
    atomic_bool reload;             /* Do we need to reload ports? */

    unsigned core_id;               /* CPU core id of this pmd thread. */
    int numa_id;                    /* numa node id of this pmd thread. */
    bool isolated;

    /* Queue id used by this pmd thread to send packets on all netdevs if
     * XPS disabled for this netdev.  All static_tx_qid's are unique and less
     * than 'cmap_count(dp->poll_threads)'. */
    uint32_t static_tx_qid;

    /* Number of filled output batches. */
    int n_output_batches;

    struct ovs_mutex port_mutex;    /* Mutex for 'poll_list' and 'tx_ports'. */
    /* List of rx queues to poll. */
    struct hmap poll_list OVS_GUARDED;
    /* Map of 'tx_port's used for transmission.  Written by the main thread,
     * read by the pmd thread. */
    struct hmap tx_ports OVS_GUARDED;

    /* These are thread-local copies of 'tx_ports'.  One contains only tunnel
     * ports (that support push_tunnel/pop_tunnel), the other contains ports
     * with at least one txq (that support send).  A port can be in both.
     *
     * There are two separate maps to make sure that we don't try to execute
     * OUTPUT on a device which has 0 txqs or PUSH/POP on a non-tunnel device.
     *
     * The instances for cpu core NON_PMD_CORE_ID can be accessed by multiple
     * threads, and thus need to be protected by 'non_pmd_mutex'.  Every
     * other instance will only be accessed by its own pmd thread. */
    struct hmap tnl_port_cache;
    struct hmap send_port_cache;

    /* Keep track of detailed PMD performance statistics. */
    struct pmd_perf_stats perf_stats;

    /* Set to true if the pmd thread needs to be reloaded. */
    bool need_reload;
};
/* Interface to netdev-based datapath. */
struct dpif_netdev {
    struct dpif dpif;
    struct dp_netdev *dp;
    uint64_t last_port_seq;
};
static int get_port_by_number(struct dp_netdev *dp, odp_port_t port_no,
                              struct dp_netdev_port **portp)
    OVS_REQUIRES(dp->port_mutex);
static int get_port_by_name(struct dp_netdev *dp, const char *devname,
                            struct dp_netdev_port **portp)
    OVS_REQUIRES(dp->port_mutex);
static void dp_netdev_free(struct dp_netdev *)
    OVS_REQUIRES(dp_netdev_mutex);
static int do_add_port(struct dp_netdev *dp, const char *devname,
                       const char *type, odp_port_t port_no)
    OVS_REQUIRES(dp->port_mutex);
static void do_del_port(struct dp_netdev *dp, struct dp_netdev_port *)
    OVS_REQUIRES(dp->port_mutex);
static int dpif_netdev_open(const struct dpif_class *, const char *name,
                            bool create, struct dpif **);
static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
                                      struct dp_packet_batch *,
                                      bool may_steal,
                                      const struct flow *flow,
                                      const struct nlattr *actions,
                                      size_t actions_len);
static void dp_netdev_input(struct dp_netdev_pmd_thread *,
                            struct dp_packet_batch *, odp_port_t port_no);
static void dp_netdev_recirculate(struct dp_netdev_pmd_thread *,
                                  struct dp_packet_batch *);

static void dp_netdev_disable_upcall(struct dp_netdev *);
static void dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd,
                                    struct dp_netdev *dp, unsigned core_id,
                                    int numa_id);
static void dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_set_nonpmd(struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex);

static void *pmd_thread_main(void *);
static struct dp_netdev_pmd_thread *dp_netdev_get_pmd(struct dp_netdev *dp,
                                                      unsigned core_id);
static struct dp_netdev_pmd_thread *
dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos);
static void dp_netdev_del_pmd(struct dp_netdev *dp,
                              struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_destroy_all_pmds(struct dp_netdev *dp, bool non_pmd);
static void dp_netdev_pmd_clear_ports(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread *pmd,
                                         struct dp_netdev_port *port)
    OVS_REQUIRES(pmd->port_mutex);
static void dp_netdev_del_port_tx_from_pmd(struct dp_netdev_pmd_thread *pmd,
                                           struct tx_port *tx)
    OVS_REQUIRES(pmd->port_mutex);
static void dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd,
                                     struct dp_netdev_rxq *rxq)
    OVS_REQUIRES(pmd->port_mutex);
static void dp_netdev_del_rxq_from_pmd(struct dp_netdev_pmd_thread *pmd,
                                       struct rxq_poll *poll)
    OVS_REQUIRES(pmd->port_mutex);
static int
dp_netdev_pmd_flush_output_packets(struct dp_netdev_pmd_thread *pmd,
                                   bool force);

static void reconfigure_datapath(struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex);
static bool dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd);
static void pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd)
    OVS_REQUIRES(pmd->port_mutex);
static inline void
dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
                           struct polled_queue *poll_list, int poll_cnt);
static void
dp_netdev_rxq_set_cycles(struct dp_netdev_rxq *rx,
                         enum rxq_cycles_counter_type type,
                         unsigned long long cycles);
static uint64_t
dp_netdev_rxq_get_cycles(struct dp_netdev_rxq *rx,
                         enum rxq_cycles_counter_type type);
static void
dp_netdev_rxq_set_intrvl_cycles(struct dp_netdev_rxq *rx,
                                unsigned long long cycles);
static uint64_t
dp_netdev_rxq_get_intrvl_cycles(struct dp_netdev_rxq *rx, unsigned idx);
static void
dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
                               bool purge);
static int dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread *pmd,
                                      struct tx_port *tx);

static inline bool emc_entry_alive(struct emc_entry *ce);
static void emc_clear_entry(struct emc_entry *ce);

static void dp_netdev_request_reconfigure(struct dp_netdev *dp);

static inline bool
pmd_perf_metrics_enabled(const struct dp_netdev_pmd_thread *pmd);
static void
emc_cache_init(struct emc_cache *flow_cache)
{
    int i;

    flow_cache->sweep_idx = 0;
    for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
        flow_cache->entries[i].flow = NULL;
        flow_cache->entries[i].key.hash = 0;
        flow_cache->entries[i].key.len = sizeof(struct miniflow);
        flowmap_init(&flow_cache->entries[i].key.mf.map);
    }
}
static void
emc_cache_uninit(struct emc_cache *flow_cache)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
        emc_clear_entry(&flow_cache->entries[i]);
    }
}
/* Check and clear dead flow references slowly (one entry at each
 * invocation). */
static void
emc_cache_slow_sweep(struct emc_cache *flow_cache)
{
    struct emc_entry *entry = &flow_cache->entries[flow_cache->sweep_idx];

    if (!emc_entry_alive(entry)) {
        emc_clear_entry(entry);
    }
    flow_cache->sweep_idx = (flow_cache->sweep_idx + 1) & EM_FLOW_HASH_MASK;
}
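
/* Note (added for clarity, not in the original sources): each call advances
 * 'sweep_idx' by one, so with EM_FLOW_HASH_ENTRIES == 8192 slots a complete
 * pass over the cache takes 8192 invocations; dead entries are therefore
 * reclaimed gradually instead of in one expensive scan. */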
/* Updates the time in PMD threads context and should be called in three cases:
 *
 *     1. PMD structure initialization:
 *         - dp_netdev_configure_pmd()
 *
 *     2. Before processing of the new packet batch:
 *         - dpif_netdev_execute()
 *         - dp_netdev_process_rxq_port()
 *
 *     3. At least once per polling iteration in main polling threads if no
 *        packets received on current iteration:
 *         - dpif_netdev_run()
 *         - pmd_thread_main()
 *
 * 'pmd->ctx.now' should be used without update in all other cases if possible.
 */
static inline void
pmd_thread_ctx_time_update(struct dp_netdev_pmd_thread *pmd)
{
    pmd->ctx.now = time_usec();
}
/* Returns true if 'dpif' is a netdev or dummy dpif, false otherwise. */
bool
dpif_is_netdev(const struct dpif *dpif)
{
    return dpif->dpif_class->open == dpif_netdev_open;
}

static struct dpif_netdev *
dpif_netdev_cast(const struct dpif *dpif)
{
    ovs_assert(dpif_is_netdev(dpif));
    return CONTAINER_OF(dpif, struct dpif_netdev, dpif);
}
static struct dp_netdev *
get_dp_netdev(const struct dpif *dpif)
{
    return dpif_netdev_cast(dpif)->dp;
}
enum pmd_info_type {
    PMD_INFO_SHOW_STATS,  /* Show how cpu cycles are spent. */
    PMD_INFO_CLEAR_STATS, /* Set the cycles count to 0. */
    PMD_INFO_SHOW_RXQ,    /* Show poll lists of pmd threads. */
    PMD_INFO_PERF_SHOW,   /* Show pmd performance details. */
};
815 format_pmd_thread(struct ds
*reply
, struct dp_netdev_pmd_thread
*pmd
)
817 ds_put_cstr(reply
, (pmd
->core_id
== NON_PMD_CORE_ID
)
818 ? "main thread" : "pmd thread");
819 if (pmd
->numa_id
!= OVS_NUMA_UNSPEC
) {
820 ds_put_format(reply
, " numa_id %d", pmd
->numa_id
);
822 if (pmd
->core_id
!= OVS_CORE_UNSPEC
&& pmd
->core_id
!= NON_PMD_CORE_ID
) {
823 ds_put_format(reply
, " core_id %u", pmd
->core_id
);
825 ds_put_cstr(reply
, ":\n");
829 pmd_info_show_stats(struct ds
*reply
,
830 struct dp_netdev_pmd_thread
*pmd
)
832 uint64_t stats
[PMD_N_STATS
];
833 uint64_t total_cycles
, total_packets
;
834 double passes_per_pkt
= 0;
835 double lookups_per_hit
= 0;
836 double packets_per_batch
= 0;
838 pmd_perf_read_counters(&pmd
->perf_stats
, stats
);
839 total_cycles
= stats
[PMD_CYCLES_ITER_IDLE
]
840 + stats
[PMD_CYCLES_ITER_BUSY
];
841 total_packets
= stats
[PMD_STAT_RECV
];
843 format_pmd_thread(reply
, pmd
);
845 if (total_packets
> 0) {
846 passes_per_pkt
= (total_packets
+ stats
[PMD_STAT_RECIRC
])
847 / (double) total_packets
;
849 if (stats
[PMD_STAT_MASKED_HIT
] > 0) {
850 lookups_per_hit
= stats
[PMD_STAT_MASKED_LOOKUP
]
851 / (double) stats
[PMD_STAT_MASKED_HIT
];
853 if (stats
[PMD_STAT_SENT_BATCHES
] > 0) {
854 packets_per_batch
= stats
[PMD_STAT_SENT_PKTS
]
855 / (double) stats
[PMD_STAT_SENT_BATCHES
];
859 " packets received: %"PRIu64
"\n"
860 " packet recirculations: %"PRIu64
"\n"
861 " avg. datapath passes per packet: %.02f\n"
862 " emc hits: %"PRIu64
"\n"
863 " megaflow hits: %"PRIu64
"\n"
864 " avg. subtable lookups per megaflow hit: %.02f\n"
865 " miss with success upcall: %"PRIu64
"\n"
866 " miss with failed upcall: %"PRIu64
"\n"
867 " avg. packets per output batch: %.02f\n",
868 total_packets
, stats
[PMD_STAT_RECIRC
],
869 passes_per_pkt
, stats
[PMD_STAT_EXACT_HIT
],
870 stats
[PMD_STAT_MASKED_HIT
], lookups_per_hit
,
871 stats
[PMD_STAT_MISS
], stats
[PMD_STAT_LOST
],
874 if (total_cycles
== 0) {
879 " idle cycles: %"PRIu64
" (%.02f%%)\n"
880 " processing cycles: %"PRIu64
" (%.02f%%)\n",
881 stats
[PMD_CYCLES_ITER_IDLE
],
882 stats
[PMD_CYCLES_ITER_IDLE
] / (double) total_cycles
* 100,
883 stats
[PMD_CYCLES_ITER_BUSY
],
884 stats
[PMD_CYCLES_ITER_BUSY
] / (double) total_cycles
* 100);
886 if (total_packets
== 0) {
891 " avg cycles per packet: %.02f (%"PRIu64
"/%"PRIu64
")\n",
892 total_cycles
/ (double) total_packets
,
893 total_cycles
, total_packets
);
896 " avg processing cycles per packet: "
897 "%.02f (%"PRIu64
"/%"PRIu64
")\n",
898 stats
[PMD_CYCLES_ITER_BUSY
] / (double) total_packets
,
899 stats
[PMD_CYCLES_ITER_BUSY
], total_packets
);
903 pmd_info_show_perf(struct ds
*reply
,
904 struct dp_netdev_pmd_thread
*pmd
,
905 struct pmd_perf_params
*par
)
907 if (pmd
->core_id
!= NON_PMD_CORE_ID
) {
909 xastrftime_msec("%H:%M:%S.###", time_wall_msec(), true);
910 long long now
= time_msec();
911 double duration
= (now
- pmd
->perf_stats
.start_ms
) / 1000.0;
913 ds_put_cstr(reply
, "\n");
914 ds_put_format(reply
, "Time: %s\n", time_str
);
915 ds_put_format(reply
, "Measurement duration: %.3f s\n", duration
);
916 ds_put_cstr(reply
, "\n");
917 format_pmd_thread(reply
, pmd
);
918 ds_put_cstr(reply
, "\n");
919 pmd_perf_format_overall_stats(reply
, &pmd
->perf_stats
, duration
);
920 if (pmd_perf_metrics_enabled(pmd
)) {
921 /* Prevent parallel clearing of perf metrics. */
922 ovs_mutex_lock(&pmd
->perf_stats
.clear_mutex
);
923 if (par
->histograms
) {
924 ds_put_cstr(reply
, "\n");
925 pmd_perf_format_histograms(reply
, &pmd
->perf_stats
);
927 if (par
->iter_hist_len
> 0) {
928 ds_put_cstr(reply
, "\n");
929 pmd_perf_format_iteration_history(reply
, &pmd
->perf_stats
,
932 if (par
->ms_hist_len
> 0) {
933 ds_put_cstr(reply
, "\n");
934 pmd_perf_format_ms_history(reply
, &pmd
->perf_stats
,
937 ovs_mutex_unlock(&pmd
->perf_stats
.clear_mutex
);
944 compare_poll_list(const void *a_
, const void *b_
)
946 const struct rxq_poll
*a
= a_
;
947 const struct rxq_poll
*b
= b_
;
949 const char *namea
= netdev_rxq_get_name(a
->rxq
->rx
);
950 const char *nameb
= netdev_rxq_get_name(b
->rxq
->rx
);
952 int cmp
= strcmp(namea
, nameb
);
954 return netdev_rxq_get_queue_id(a
->rxq
->rx
)
955 - netdev_rxq_get_queue_id(b
->rxq
->rx
);
962 sorted_poll_list(struct dp_netdev_pmd_thread
*pmd
, struct rxq_poll
**list
,
965 struct rxq_poll
*ret
, *poll
;
968 *n
= hmap_count(&pmd
->poll_list
);
972 ret
= xcalloc(*n
, sizeof *ret
);
974 HMAP_FOR_EACH (poll
, node
, &pmd
->poll_list
) {
979 qsort(ret
, *n
, sizeof *ret
, compare_poll_list
);
986 pmd_info_show_rxq(struct ds
*reply
, struct dp_netdev_pmd_thread
*pmd
)
988 if (pmd
->core_id
!= NON_PMD_CORE_ID
) {
989 struct rxq_poll
*list
;
991 uint64_t total_cycles
= 0;
994 "pmd thread numa_id %d core_id %u:\n isolated : %s\n",
995 pmd
->numa_id
, pmd
->core_id
, (pmd
->isolated
)
998 ovs_mutex_lock(&pmd
->port_mutex
);
999 sorted_poll_list(pmd
, &list
, &n_rxq
);
1001 /* Get the total pmd cycles for an interval. */
1002 atomic_read_relaxed(&pmd
->intrvl_cycles
, &total_cycles
);
1003 /* Estimate the cycles to cover all intervals. */
1004 total_cycles
*= PMD_RXQ_INTERVAL_MAX
;
1006 for (int i
= 0; i
< n_rxq
; i
++) {
1007 struct dp_netdev_rxq
*rxq
= list
[i
].rxq
;
1008 const char *name
= netdev_rxq_get_name(rxq
->rx
);
1009 uint64_t proc_cycles
= 0;
1011 for (int j
= 0; j
< PMD_RXQ_INTERVAL_MAX
; j
++) {
1012 proc_cycles
+= dp_netdev_rxq_get_intrvl_cycles(rxq
, j
);
1014 ds_put_format(reply
, " port: %-16s queue-id: %2d", name
,
1015 netdev_rxq_get_queue_id(list
[i
].rxq
->rx
));
1016 ds_put_format(reply
, " pmd usage: ");
1018 ds_put_format(reply
, "%2"PRIu64
"",
1019 proc_cycles
* 100 / total_cycles
);
1020 ds_put_cstr(reply
, " %");
1022 ds_put_format(reply
, "%s", "NOT AVAIL");
1024 ds_put_cstr(reply
, "\n");
1026 ovs_mutex_unlock(&pmd
->port_mutex
);
1032 compare_poll_thread_list(const void *a_
, const void *b_
)
1034 const struct dp_netdev_pmd_thread
*a
, *b
;
1036 a
= *(struct dp_netdev_pmd_thread
**)a_
;
1037 b
= *(struct dp_netdev_pmd_thread
**)b_
;
1039 if (a
->core_id
< b
->core_id
) {
1042 if (a
->core_id
> b
->core_id
) {
1048 /* Create a sorted list of pmd's from the dp->poll_threads cmap. We can use
1049 * this list, as long as we do not go to quiescent state. */
1051 sorted_poll_thread_list(struct dp_netdev
*dp
,
1052 struct dp_netdev_pmd_thread
***list
,
1055 struct dp_netdev_pmd_thread
*pmd
;
1056 struct dp_netdev_pmd_thread
**pmd_list
;
1057 size_t k
= 0, n_pmds
;
1059 n_pmds
= cmap_count(&dp
->poll_threads
);
1060 pmd_list
= xcalloc(n_pmds
, sizeof *pmd_list
);
1062 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
1066 pmd_list
[k
++] = pmd
;
1069 qsort(pmd_list
, k
, sizeof *pmd_list
, compare_poll_thread_list
);
1076 dpif_netdev_pmd_rebalance(struct unixctl_conn
*conn
, int argc
,
1077 const char *argv
[], void *aux OVS_UNUSED
)
1079 struct ds reply
= DS_EMPTY_INITIALIZER
;
1080 struct dp_netdev
*dp
= NULL
;
1082 ovs_mutex_lock(&dp_netdev_mutex
);
1085 dp
= shash_find_data(&dp_netdevs
, argv
[1]);
1086 } else if (shash_count(&dp_netdevs
) == 1) {
1087 /* There's only one datapath */
1088 dp
= shash_first(&dp_netdevs
)->data
;
1092 ovs_mutex_unlock(&dp_netdev_mutex
);
1093 unixctl_command_reply_error(conn
,
1094 "please specify an existing datapath");
1098 dp_netdev_request_reconfigure(dp
);
1099 ovs_mutex_unlock(&dp_netdev_mutex
);
1100 ds_put_cstr(&reply
, "pmd rxq rebalance requested.\n");
1101 unixctl_command_reply(conn
, ds_cstr(&reply
));
1106 dpif_netdev_pmd_info(struct unixctl_conn
*conn
, int argc
, const char *argv
[],
1109 struct ds reply
= DS_EMPTY_INITIALIZER
;
1110 struct dp_netdev_pmd_thread
**pmd_list
;
1111 struct dp_netdev
*dp
= NULL
;
1112 enum pmd_info_type type
= *(enum pmd_info_type
*) aux
;
1113 unsigned int core_id
;
1114 bool filter_on_pmd
= false;
1117 ovs_mutex_lock(&dp_netdev_mutex
);
1120 if (!strcmp(argv
[1], "-pmd") && argc
> 2) {
1121 if (str_to_uint(argv
[2], 10, &core_id
)) {
1122 filter_on_pmd
= true;
1127 dp
= shash_find_data(&dp_netdevs
, argv
[1]);
1134 if (shash_count(&dp_netdevs
) == 1) {
1135 /* There's only one datapath */
1136 dp
= shash_first(&dp_netdevs
)->data
;
1138 ovs_mutex_unlock(&dp_netdev_mutex
);
1139 unixctl_command_reply_error(conn
,
1140 "please specify an existing datapath");
1145 sorted_poll_thread_list(dp
, &pmd_list
, &n
);
1146 for (size_t i
= 0; i
< n
; i
++) {
1147 struct dp_netdev_pmd_thread
*pmd
= pmd_list
[i
];
1151 if (filter_on_pmd
&& pmd
->core_id
!= core_id
) {
1154 if (type
== PMD_INFO_SHOW_RXQ
) {
1155 pmd_info_show_rxq(&reply
, pmd
);
1156 } else if (type
== PMD_INFO_CLEAR_STATS
) {
1157 pmd_perf_stats_clear(&pmd
->perf_stats
);
1158 } else if (type
== PMD_INFO_SHOW_STATS
) {
1159 pmd_info_show_stats(&reply
, pmd
);
1160 } else if (type
== PMD_INFO_PERF_SHOW
) {
1161 pmd_info_show_perf(&reply
, pmd
, (struct pmd_perf_params
*)aux
);
1166 ovs_mutex_unlock(&dp_netdev_mutex
);
1168 unixctl_command_reply(conn
, ds_cstr(&reply
));
1173 pmd_perf_show_cmd(struct unixctl_conn
*conn
, int argc
,
1175 void *aux OVS_UNUSED
)
1177 struct pmd_perf_params par
;
1178 long int it_hist
= 0, ms_hist
= 0;
1179 par
.histograms
= true;
1182 if (!strcmp(argv
[1], "-nh")) {
1183 par
.histograms
= false;
1186 } else if (!strcmp(argv
[1], "-it") && argc
> 2) {
1187 it_hist
= strtol(argv
[2], NULL
, 10);
1190 } else if (it_hist
> HISTORY_LEN
) {
1191 it_hist
= HISTORY_LEN
;
1195 } else if (!strcmp(argv
[1], "-ms") && argc
> 2) {
1196 ms_hist
= strtol(argv
[2], NULL
, 10);
1199 } else if (ms_hist
> HISTORY_LEN
) {
1200 ms_hist
= HISTORY_LEN
;
1208 par
.iter_hist_len
= it_hist
;
1209 par
.ms_hist_len
= ms_hist
;
1210 par
.command_type
= PMD_INFO_PERF_SHOW
;
1211 dpif_netdev_pmd_info(conn
, argc
, argv
, &par
);
1215 dpif_netdev_init(void)
1217 static enum pmd_info_type show_aux
= PMD_INFO_SHOW_STATS
,
1218 clear_aux
= PMD_INFO_CLEAR_STATS
,
1219 poll_aux
= PMD_INFO_SHOW_RXQ
;
1221 unixctl_command_register("dpif-netdev/pmd-stats-show", "[-pmd core] [dp]",
1222 0, 3, dpif_netdev_pmd_info
,
1224 unixctl_command_register("dpif-netdev/pmd-stats-clear", "[-pmd core] [dp]",
1225 0, 3, dpif_netdev_pmd_info
,
1226 (void *)&clear_aux
);
1227 unixctl_command_register("dpif-netdev/pmd-rxq-show", "[-pmd core] [dp]",
1228 0, 3, dpif_netdev_pmd_info
,
1230 unixctl_command_register("dpif-netdev/pmd-perf-show",
1231 "[-nh] [-it iter-history-len]"
1232 " [-ms ms-history-len]"
1233 " [-pmd core] [dp]",
1234 0, 8, pmd_perf_show_cmd
,
1236 unixctl_command_register("dpif-netdev/pmd-rxq-rebalance", "[dp]",
1237 0, 1, dpif_netdev_pmd_rebalance
,
1239 unixctl_command_register("dpif-netdev/pmd-perf-log-set",
1240 "on|off [-b before] [-a after] [-e|-ne] "
1241 "[-us usec] [-q qlen]",
1242 0, 10, pmd_perf_log_set_cmd
,
1248 dpif_netdev_enumerate(struct sset
*all_dps
,
1249 const struct dpif_class
*dpif_class
)
1251 struct shash_node
*node
;
1253 ovs_mutex_lock(&dp_netdev_mutex
);
1254 SHASH_FOR_EACH(node
, &dp_netdevs
) {
1255 struct dp_netdev
*dp
= node
->data
;
1256 if (dpif_class
!= dp
->class) {
1257 /* 'dp_netdevs' contains both "netdev" and "dummy" dpifs.
1258 * If the class doesn't match, skip this dpif. */
1261 sset_add(all_dps
, node
->name
);
1263 ovs_mutex_unlock(&dp_netdev_mutex
);
1269 dpif_netdev_class_is_dummy(const struct dpif_class
*class)
1271 return class != &dpif_netdev_class
;
1275 dpif_netdev_port_open_type(const struct dpif_class
*class, const char *type
)
1277 return strcmp(type
, "internal") ? type
1278 : dpif_netdev_class_is_dummy(class) ? "dummy-internal"
1282 static struct dpif
*
1283 create_dpif_netdev(struct dp_netdev
*dp
)
1285 uint16_t netflow_id
= hash_string(dp
->name
, 0);
1286 struct dpif_netdev
*dpif
;
1288 ovs_refcount_ref(&dp
->ref_cnt
);
1290 dpif
= xmalloc(sizeof *dpif
);
1291 dpif_init(&dpif
->dpif
, dp
->class, dp
->name
, netflow_id
>> 8, netflow_id
);
1293 dpif
->last_port_seq
= seq_read(dp
->port_seq
);
1298 /* Choose an unused, non-zero port number and return it on success.
1299 * Return ODPP_NONE on failure. */
1301 choose_port(struct dp_netdev
*dp
, const char *name
)
1302 OVS_REQUIRES(dp
->port_mutex
)
1306 if (dp
->class != &dpif_netdev_class
) {
1310 /* If the port name begins with "br", start the number search at
1311 * 100 to make writing tests easier. */
1312 if (!strncmp(name
, "br", 2)) {
1316 /* If the port name contains a number, try to assign that port number.
1317 * This can make writing unit tests easier because port numbers are
1319 for (p
= name
; *p
!= '\0'; p
++) {
1320 if (isdigit((unsigned char) *p
)) {
1321 port_no
= start_no
+ strtol(p
, NULL
, 10);
1322 if (port_no
> 0 && port_no
!= odp_to_u32(ODPP_NONE
)
1323 && !dp_netdev_lookup_port(dp
, u32_to_odp(port_no
))) {
1324 return u32_to_odp(port_no
);
1331 for (port_no
= 1; port_no
<= UINT16_MAX
; port_no
++) {
1332 if (!dp_netdev_lookup_port(dp
, u32_to_odp(port_no
))) {
1333 return u32_to_odp(port_no
);
1341 create_dp_netdev(const char *name
, const struct dpif_class
*class,
1342 struct dp_netdev
**dpp
)
1343 OVS_REQUIRES(dp_netdev_mutex
)
1345 struct dp_netdev
*dp
;
1348 dp
= xzalloc(sizeof *dp
);
1349 shash_add(&dp_netdevs
, name
, dp
);
1351 *CONST_CAST(const struct dpif_class
**, &dp
->class) = class;
1352 *CONST_CAST(const char **, &dp
->name
) = xstrdup(name
);
1353 ovs_refcount_init(&dp
->ref_cnt
);
1354 atomic_flag_clear(&dp
->destroyed
);
1356 ovs_mutex_init(&dp
->port_mutex
);
1357 hmap_init(&dp
->ports
);
1358 dp
->port_seq
= seq_create();
1359 fat_rwlock_init(&dp
->upcall_rwlock
);
1361 dp
->reconfigure_seq
= seq_create();
1362 dp
->last_reconfigure_seq
= seq_read(dp
->reconfigure_seq
);
1364 for (int i
= 0; i
< N_METER_LOCKS
; ++i
) {
1365 ovs_mutex_init_adaptive(&dp
->meter_locks
[i
]);
1368 /* Disable upcalls by default. */
1369 dp_netdev_disable_upcall(dp
);
1370 dp
->upcall_aux
= NULL
;
1371 dp
->upcall_cb
= NULL
;
1373 conntrack_init(&dp
->conntrack
);
1375 atomic_init(&dp
->emc_insert_min
, DEFAULT_EM_FLOW_INSERT_MIN
);
1376 atomic_init(&dp
->tx_flush_interval
, DEFAULT_TX_FLUSH_INTERVAL
);
1378 cmap_init(&dp
->poll_threads
);
1380 ovs_mutex_init(&dp
->tx_qid_pool_mutex
);
1381 /* We need 1 Tx queue for each possible core + 1 for non-PMD threads. */
1382 dp
->tx_qid_pool
= id_pool_create(0, ovs_numa_get_n_cores() + 1);
1384 ovs_mutex_init_recursive(&dp
->non_pmd_mutex
);
1385 ovsthread_key_create(&dp
->per_pmd_key
, NULL
);
1387 ovs_mutex_lock(&dp
->port_mutex
);
1388 /* non-PMD will be created before all other threads and will
1389 * allocate static_tx_qid = 0. */
1390 dp_netdev_set_nonpmd(dp
);
1392 error
= do_add_port(dp
, name
, dpif_netdev_port_open_type(dp
->class,
1395 ovs_mutex_unlock(&dp
->port_mutex
);
1401 dp
->last_tnl_conf_seq
= seq_read(tnl_conf_seq
);
1407 dp_netdev_request_reconfigure(struct dp_netdev
*dp
)
1409 seq_change(dp
->reconfigure_seq
);
1413 dp_netdev_is_reconf_required(struct dp_netdev
*dp
)
1415 return seq_read(dp
->reconfigure_seq
) != dp
->last_reconfigure_seq
;
1419 dpif_netdev_open(const struct dpif_class
*class, const char *name
,
1420 bool create
, struct dpif
**dpifp
)
1422 struct dp_netdev
*dp
;
1425 ovs_mutex_lock(&dp_netdev_mutex
);
1426 dp
= shash_find_data(&dp_netdevs
, name
);
1428 error
= create
? create_dp_netdev(name
, class, &dp
) : ENODEV
;
1430 error
= (dp
->class != class ? EINVAL
1435 *dpifp
= create_dpif_netdev(dp
);
1438 ovs_mutex_unlock(&dp_netdev_mutex
);
1444 dp_netdev_destroy_upcall_lock(struct dp_netdev
*dp
)
1445 OVS_NO_THREAD_SAFETY_ANALYSIS
1447 /* Check that upcalls are disabled, i.e. that the rwlock is taken */
1448 ovs_assert(fat_rwlock_tryrdlock(&dp
->upcall_rwlock
));
1450 /* Before freeing a lock we should release it */
1451 fat_rwlock_unlock(&dp
->upcall_rwlock
);
1452 fat_rwlock_destroy(&dp
->upcall_rwlock
);
1456 dp_delete_meter(struct dp_netdev
*dp
, uint32_t meter_id
)
1457 OVS_REQUIRES(dp
->meter_locks
[meter_id
% N_METER_LOCKS
])
1459 if (dp
->meters
[meter_id
]) {
1460 free(dp
->meters
[meter_id
]);
1461 dp
->meters
[meter_id
] = NULL
;
1465 /* Requires dp_netdev_mutex so that we can't get a new reference to 'dp'
1466 * through the 'dp_netdevs' shash while freeing 'dp'. */
1468 dp_netdev_free(struct dp_netdev
*dp
)
1469 OVS_REQUIRES(dp_netdev_mutex
)
1471 struct dp_netdev_port
*port
, *next
;
1473 shash_find_and_delete(&dp_netdevs
, dp
->name
);
1475 ovs_mutex_lock(&dp
->port_mutex
);
1476 HMAP_FOR_EACH_SAFE (port
, next
, node
, &dp
->ports
) {
1477 do_del_port(dp
, port
);
1479 ovs_mutex_unlock(&dp
->port_mutex
);
1481 dp_netdev_destroy_all_pmds(dp
, true);
1482 cmap_destroy(&dp
->poll_threads
);
1484 ovs_mutex_destroy(&dp
->tx_qid_pool_mutex
);
1485 id_pool_destroy(dp
->tx_qid_pool
);
1487 ovs_mutex_destroy(&dp
->non_pmd_mutex
);
1488 ovsthread_key_delete(dp
->per_pmd_key
);
1490 conntrack_destroy(&dp
->conntrack
);
1493 seq_destroy(dp
->reconfigure_seq
);
1495 seq_destroy(dp
->port_seq
);
1496 hmap_destroy(&dp
->ports
);
1497 ovs_mutex_destroy(&dp
->port_mutex
);
1499 /* Upcalls must be disabled at this point */
1500 dp_netdev_destroy_upcall_lock(dp
);
1504 for (i
= 0; i
< MAX_METERS
; ++i
) {
1506 dp_delete_meter(dp
, i
);
1507 meter_unlock(dp
, i
);
1509 for (i
= 0; i
< N_METER_LOCKS
; ++i
) {
1510 ovs_mutex_destroy(&dp
->meter_locks
[i
]);
1513 free(dp
->pmd_cmask
);
1514 free(CONST_CAST(char *, dp
->name
));
1519 dp_netdev_unref(struct dp_netdev
*dp
)
1522 /* Take dp_netdev_mutex so that, if dp->ref_cnt falls to zero, we can't
1523 * get a new reference to 'dp' through the 'dp_netdevs' shash. */
1524 ovs_mutex_lock(&dp_netdev_mutex
);
1525 if (ovs_refcount_unref_relaxed(&dp
->ref_cnt
) == 1) {
1528 ovs_mutex_unlock(&dp_netdev_mutex
);
1533 dpif_netdev_close(struct dpif
*dpif
)
1535 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1537 dp_netdev_unref(dp
);
1542 dpif_netdev_destroy(struct dpif
*dpif
)
1544 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1546 if (!atomic_flag_test_and_set(&dp
->destroyed
)) {
1547 if (ovs_refcount_unref_relaxed(&dp
->ref_cnt
) == 1) {
1548 /* Can't happen: 'dpif' still owns a reference to 'dp'. */
/* Add 'n' to the atomic variable 'var' non-atomically and using relaxed
 * load/store semantics.  While the increment is not atomic, the load and
 * store operations are, making it impossible to read inconsistent values.
 *
 * This is used to update thread local stats counters. */
1562 non_atomic_ullong_add(atomic_ullong
*var
, unsigned long long n
)
1564 unsigned long long tmp
;
1566 atomic_read_relaxed(var
, &tmp
);
1568 atomic_store_relaxed(var
, tmp
);
1572 dpif_netdev_get_stats(const struct dpif
*dpif
, struct dpif_dp_stats
*stats
)
1574 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1575 struct dp_netdev_pmd_thread
*pmd
;
1576 uint64_t pmd_stats
[PMD_N_STATS
];
1578 stats
->n_flows
= stats
->n_hit
= stats
->n_missed
= stats
->n_lost
= 0;
1579 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
1580 stats
->n_flows
+= cmap_count(&pmd
->flow_table
);
1581 pmd_perf_read_counters(&pmd
->perf_stats
, pmd_stats
);
1582 stats
->n_hit
+= pmd_stats
[PMD_STAT_EXACT_HIT
];
1583 stats
->n_hit
+= pmd_stats
[PMD_STAT_MASKED_HIT
];
1584 stats
->n_missed
+= pmd_stats
[PMD_STAT_MISS
];
1585 stats
->n_lost
+= pmd_stats
[PMD_STAT_LOST
];
1587 stats
->n_masks
= UINT32_MAX
;
1588 stats
->n_mask_hit
= UINT64_MAX
;
1594 dp_netdev_reload_pmd__(struct dp_netdev_pmd_thread
*pmd
)
1596 if (pmd
->core_id
== NON_PMD_CORE_ID
) {
1597 ovs_mutex_lock(&pmd
->dp
->non_pmd_mutex
);
1598 ovs_mutex_lock(&pmd
->port_mutex
);
1599 pmd_load_cached_ports(pmd
);
1600 ovs_mutex_unlock(&pmd
->port_mutex
);
1601 ovs_mutex_unlock(&pmd
->dp
->non_pmd_mutex
);
1605 ovs_mutex_lock(&pmd
->cond_mutex
);
1606 seq_change(pmd
->reload_seq
);
1607 atomic_store_relaxed(&pmd
->reload
, true);
1608 ovs_mutex_cond_wait(&pmd
->cond
, &pmd
->cond_mutex
);
1609 ovs_mutex_unlock(&pmd
->cond_mutex
);
1613 hash_port_no(odp_port_t port_no
)
1615 return hash_int(odp_to_u32(port_no
), 0);
1619 port_create(const char *devname
, const char *type
,
1620 odp_port_t port_no
, struct dp_netdev_port
**portp
)
1622 struct netdev_saved_flags
*sf
;
1623 struct dp_netdev_port
*port
;
1624 enum netdev_flags flags
;
1625 struct netdev
*netdev
;
1630 /* Open and validate network device. */
1631 error
= netdev_open(devname
, type
, &netdev
);
1635 /* XXX reject non-Ethernet devices */
1637 netdev_get_flags(netdev
, &flags
);
1638 if (flags
& NETDEV_LOOPBACK
) {
1639 VLOG_ERR("%s: cannot add a loopback device", devname
);
1644 error
= netdev_turn_flags_on(netdev
, NETDEV_PROMISC
, &sf
);
1646 VLOG_ERR("%s: cannot set promisc flag", devname
);
1650 port
= xzalloc(sizeof *port
);
1651 port
->port_no
= port_no
;
1652 port
->netdev
= netdev
;
1653 port
->type
= xstrdup(type
);
1655 port
->need_reconfigure
= true;
1656 ovs_mutex_init(&port
->txq_used_mutex
);
1663 netdev_close(netdev
);
1668 do_add_port(struct dp_netdev
*dp
, const char *devname
, const char *type
,
1670 OVS_REQUIRES(dp
->port_mutex
)
1672 struct dp_netdev_port
*port
;
1675 /* Reject devices already in 'dp'. */
1676 if (!get_port_by_name(dp
, devname
, &port
)) {
1680 error
= port_create(devname
, type
, port_no
, &port
);
1685 hmap_insert(&dp
->ports
, &port
->node
, hash_port_no(port_no
));
1686 seq_change(dp
->port_seq
);
1688 reconfigure_datapath(dp
);
1694 dpif_netdev_port_add(struct dpif
*dpif
, struct netdev
*netdev
,
1695 odp_port_t
*port_nop
)
1697 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1698 char namebuf
[NETDEV_VPORT_NAME_BUFSIZE
];
1699 const char *dpif_port
;
1703 ovs_mutex_lock(&dp
->port_mutex
);
1704 dpif_port
= netdev_vport_get_dpif_port(netdev
, namebuf
, sizeof namebuf
);
1705 if (*port_nop
!= ODPP_NONE
) {
1706 port_no
= *port_nop
;
1707 error
= dp_netdev_lookup_port(dp
, *port_nop
) ? EBUSY
: 0;
1709 port_no
= choose_port(dp
, dpif_port
);
1710 error
= port_no
== ODPP_NONE
? EFBIG
: 0;
1713 *port_nop
= port_no
;
1714 error
= do_add_port(dp
, dpif_port
, netdev_get_type(netdev
), port_no
);
1716 ovs_mutex_unlock(&dp
->port_mutex
);
1722 dpif_netdev_port_del(struct dpif
*dpif
, odp_port_t port_no
)
1724 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1727 ovs_mutex_lock(&dp
->port_mutex
);
1728 if (port_no
== ODPP_LOCAL
) {
1731 struct dp_netdev_port
*port
;
1733 error
= get_port_by_number(dp
, port_no
, &port
);
1735 do_del_port(dp
, port
);
1738 ovs_mutex_unlock(&dp
->port_mutex
);
1744 is_valid_port_number(odp_port_t port_no
)
1746 return port_no
!= ODPP_NONE
;
1749 static struct dp_netdev_port
*
1750 dp_netdev_lookup_port(const struct dp_netdev
*dp
, odp_port_t port_no
)
1751 OVS_REQUIRES(dp
->port_mutex
)
1753 struct dp_netdev_port
*port
;
1755 HMAP_FOR_EACH_WITH_HASH (port
, node
, hash_port_no(port_no
), &dp
->ports
) {
1756 if (port
->port_no
== port_no
) {
1764 get_port_by_number(struct dp_netdev
*dp
,
1765 odp_port_t port_no
, struct dp_netdev_port
**portp
)
1766 OVS_REQUIRES(dp
->port_mutex
)
1768 if (!is_valid_port_number(port_no
)) {
1772 *portp
= dp_netdev_lookup_port(dp
, port_no
);
1773 return *portp
? 0 : ENODEV
;
1778 port_destroy(struct dp_netdev_port
*port
)
1784 netdev_close(port
->netdev
);
1785 netdev_restore_flags(port
->sf
);
1787 for (unsigned i
= 0; i
< port
->n_rxq
; i
++) {
1788 netdev_rxq_close(port
->rxqs
[i
].rx
);
1790 ovs_mutex_destroy(&port
->txq_used_mutex
);
1791 free(port
->rxq_affinity_list
);
1792 free(port
->txq_used
);
1799 get_port_by_name(struct dp_netdev
*dp
,
1800 const char *devname
, struct dp_netdev_port
**portp
)
1801 OVS_REQUIRES(dp
->port_mutex
)
1803 struct dp_netdev_port
*port
;
1805 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
1806 if (!strcmp(netdev_get_name(port
->netdev
), devname
)) {
    /* Callers of dpif_netdev_port_query_by_name() expect ENODEV for a
     * non-existent port. */
1817 /* Returns 'true' if there is a port with pmd netdev. */
1819 has_pmd_port(struct dp_netdev
*dp
)
1820 OVS_REQUIRES(dp
->port_mutex
)
1822 struct dp_netdev_port
*port
;
1824 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
1825 if (netdev_is_pmd(port
->netdev
)) {
1834 do_del_port(struct dp_netdev
*dp
, struct dp_netdev_port
*port
)
1835 OVS_REQUIRES(dp
->port_mutex
)
1837 hmap_remove(&dp
->ports
, &port
->node
);
1838 seq_change(dp
->port_seq
);
1840 reconfigure_datapath(dp
);
1846 answer_port_query(const struct dp_netdev_port
*port
,
1847 struct dpif_port
*dpif_port
)
1849 dpif_port
->name
= xstrdup(netdev_get_name(port
->netdev
));
1850 dpif_port
->type
= xstrdup(port
->type
);
1851 dpif_port
->port_no
= port
->port_no
;
1855 dpif_netdev_port_query_by_number(const struct dpif
*dpif
, odp_port_t port_no
,
1856 struct dpif_port
*dpif_port
)
1858 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1859 struct dp_netdev_port
*port
;
1862 ovs_mutex_lock(&dp
->port_mutex
);
1863 error
= get_port_by_number(dp
, port_no
, &port
);
1864 if (!error
&& dpif_port
) {
1865 answer_port_query(port
, dpif_port
);
1867 ovs_mutex_unlock(&dp
->port_mutex
);
1873 dpif_netdev_port_query_by_name(const struct dpif
*dpif
, const char *devname
,
1874 struct dpif_port
*dpif_port
)
1876 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1877 struct dp_netdev_port
*port
;
1880 ovs_mutex_lock(&dp
->port_mutex
);
1881 error
= get_port_by_name(dp
, devname
, &port
);
1882 if (!error
&& dpif_port
) {
1883 answer_port_query(port
, dpif_port
);
1885 ovs_mutex_unlock(&dp
->port_mutex
);
1891 dp_netdev_flow_free(struct dp_netdev_flow
*flow
)
1893 dp_netdev_actions_free(dp_netdev_flow_get_actions(flow
));
1897 static void dp_netdev_flow_unref(struct dp_netdev_flow
*flow
)
1899 if (ovs_refcount_unref_relaxed(&flow
->ref_cnt
) == 1) {
1900 ovsrcu_postpone(dp_netdev_flow_free
, flow
);
1905 dp_netdev_flow_hash(const ovs_u128
*ufid
)
1907 return ufid
->u32
[0];
1910 static inline struct dpcls
*
1911 dp_netdev_pmd_lookup_dpcls(struct dp_netdev_pmd_thread
*pmd
,
1915 uint32_t hash
= hash_port_no(in_port
);
1916 CMAP_FOR_EACH_WITH_HASH (cls
, node
, hash
, &pmd
->classifiers
) {
1917 if (cls
->in_port
== in_port
) {
1918 /* Port classifier exists already */
1925 static inline struct dpcls
*
1926 dp_netdev_pmd_find_dpcls(struct dp_netdev_pmd_thread
*pmd
,
1928 OVS_REQUIRES(pmd
->flow_mutex
)
1930 struct dpcls
*cls
= dp_netdev_pmd_lookup_dpcls(pmd
, in_port
);
1931 uint32_t hash
= hash_port_no(in_port
);
1934 /* Create new classifier for in_port */
1935 cls
= xmalloc(sizeof(*cls
));
1937 cls
->in_port
= in_port
;
1938 cmap_insert(&pmd
->classifiers
, &cls
->node
, hash
);
1939 VLOG_DBG("Creating dpcls %p for in_port %d", cls
, in_port
);
1945 dp_netdev_pmd_remove_flow(struct dp_netdev_pmd_thread
*pmd
,
1946 struct dp_netdev_flow
*flow
)
1947 OVS_REQUIRES(pmd
->flow_mutex
)
1949 struct cmap_node
*node
= CONST_CAST(struct cmap_node
*, &flow
->node
);
1951 odp_port_t in_port
= flow
->flow
.in_port
.odp_port
;
1953 cls
= dp_netdev_pmd_lookup_dpcls(pmd
, in_port
);
1954 ovs_assert(cls
!= NULL
);
1955 dpcls_remove(cls
, &flow
->cr
);
1956 cmap_remove(&pmd
->flow_table
, node
, dp_netdev_flow_hash(&flow
->ufid
));
1959 dp_netdev_flow_unref(flow
);
1963 dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread
*pmd
)
1965 struct dp_netdev_flow
*netdev_flow
;
1967 ovs_mutex_lock(&pmd
->flow_mutex
);
1968 CMAP_FOR_EACH (netdev_flow
, node
, &pmd
->flow_table
) {
1969 dp_netdev_pmd_remove_flow(pmd
, netdev_flow
);
1971 ovs_mutex_unlock(&pmd
->flow_mutex
);
1975 dpif_netdev_flow_flush(struct dpif
*dpif
)
1977 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1978 struct dp_netdev_pmd_thread
*pmd
;
1980 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
1981 dp_netdev_pmd_flow_flush(pmd
);
1987 struct dp_netdev_port_state
{
1988 struct hmap_position position
;
1993 dpif_netdev_port_dump_start(const struct dpif
*dpif OVS_UNUSED
, void **statep
)
1995 *statep
= xzalloc(sizeof(struct dp_netdev_port_state
));
2000 dpif_netdev_port_dump_next(const struct dpif
*dpif
, void *state_
,
2001 struct dpif_port
*dpif_port
)
2003 struct dp_netdev_port_state
*state
= state_
;
2004 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
2005 struct hmap_node
*node
;
2008 ovs_mutex_lock(&dp
->port_mutex
);
2009 node
= hmap_at_position(&dp
->ports
, &state
->position
);
2011 struct dp_netdev_port
*port
;
2013 port
= CONTAINER_OF(node
, struct dp_netdev_port
, node
);
2016 state
->name
= xstrdup(netdev_get_name(port
->netdev
));
2017 dpif_port
->name
= state
->name
;
2018 dpif_port
->type
= port
->type
;
2019 dpif_port
->port_no
= port
->port_no
;
2025 ovs_mutex_unlock(&dp
->port_mutex
);
2031 dpif_netdev_port_dump_done(const struct dpif
*dpif OVS_UNUSED
, void *state_
)
2033 struct dp_netdev_port_state
*state
= state_
;
2040 dpif_netdev_port_poll(const struct dpif
*dpif_
, char **devnamep OVS_UNUSED
)
2042 struct dpif_netdev
*dpif
= dpif_netdev_cast(dpif_
);
2043 uint64_t new_port_seq
;
2046 new_port_seq
= seq_read(dpif
->dp
->port_seq
);
2047 if (dpif
->last_port_seq
!= new_port_seq
) {
2048 dpif
->last_port_seq
= new_port_seq
;
2058 dpif_netdev_port_poll_wait(const struct dpif
*dpif_
)
2060 struct dpif_netdev
*dpif
= dpif_netdev_cast(dpif_
);
2062 seq_wait(dpif
->dp
->port_seq
, dpif
->last_port_seq
);
2065 static struct dp_netdev_flow
*
2066 dp_netdev_flow_cast(const struct dpcls_rule
*cr
)
2068 return cr
? CONTAINER_OF(cr
, struct dp_netdev_flow
, cr
) : NULL
;
2071 static bool dp_netdev_flow_ref(struct dp_netdev_flow
*flow
)
2073 return ovs_refcount_try_ref_rcu(&flow
->ref_cnt
);
/* netdev_flow_key utilities.
 *
 * netdev_flow_key is basically a miniflow.  We use these functions
 * (netdev_flow_key_clone, netdev_flow_key_equal, ...) instead of the miniflow
 * functions (miniflow_clone_inline, miniflow_equal, ...), because:
 *
 * - Since we are dealing exclusively with miniflows created by
 *   miniflow_extract(), if the map is different the miniflow is different.
 *   Therefore we can be faster by comparing the map and the miniflow in a
 *   single memcmp().
 * - These functions can be inlined by the compiler. */
2088 /* Given the number of bits set in miniflow's maps, returns the size of the
2089 * 'netdev_flow_key.mf' */
2090 static inline size_t
2091 netdev_flow_key_size(size_t flow_u64s
)
2093 return sizeof(struct miniflow
) + MINIFLOW_VALUES_SIZE(flow_u64s
);
2097 netdev_flow_key_equal(const struct netdev_flow_key
*a
,
2098 const struct netdev_flow_key
*b
)
2100 /* 'b->len' may be not set yet. */
2101 return a
->hash
== b
->hash
&& !memcmp(&a
->mf
, &b
->mf
, a
->len
);
/* Used to compare 'netdev_flow_key' in the exact match cache to a miniflow.
 * The maps are compared bitwise, so both 'key->mf' and 'mf' must have been
 * generated by miniflow_extract. */
static inline bool
netdev_flow_key_equal_mf(const struct netdev_flow_key *key,
                         const struct miniflow *mf)
{
    return !memcmp(&key->mf, mf, key->len);
}

static inline void
netdev_flow_key_clone(struct netdev_flow_key *dst,
                      const struct netdev_flow_key *src)
{
    memcpy(dst, src,
           offsetof(struct netdev_flow_key, mf) + src->len);
}
/* Initialize a netdev_flow_key 'mask' from 'match'. */
static inline void
netdev_flow_mask_init(struct netdev_flow_key *mask,
                      const struct match *match)
{
    uint64_t *dst = miniflow_values(&mask->mf);
    struct flowmap fmap;
    uint32_t hash = 0;
    size_t idx;

    /* Only check masks that make sense for the flow. */
    flow_wc_map(&match->flow, &fmap);
    flowmap_init(&mask->mf.map);

    FLOWMAP_FOR_EACH_INDEX(idx, fmap) {
        uint64_t mask_u64 = flow_u64_value(&match->wc.masks, idx);

        if (mask_u64) {
            flowmap_set(&mask->mf.map, idx, 1);
            *dst++ = mask_u64;
            hash = hash_add64(hash, mask_u64);
        }
    }

    map_t map;

    FLOWMAP_FOR_EACH_MAP (map, mask->mf.map) {
        hash = hash_add64(hash, map);
    }

    size_t n = dst - miniflow_get_values(&mask->mf);

    mask->hash = hash_finish(hash, n * 8);
    mask->len = netdev_flow_key_size(n);
}
/* Initializes 'dst' as a copy of 'flow' masked with 'mask'. */
static inline void
netdev_flow_key_init_masked(struct netdev_flow_key *dst,
                            const struct flow *flow,
                            const struct netdev_flow_key *mask)
{
    uint64_t *dst_u64 = miniflow_values(&dst->mf);
    const uint64_t *mask_u64 = miniflow_get_values(&mask->mf);
    uint32_t hash = 0;
    uint64_t value;

    dst->len = mask->len;
    dst->mf = mask->mf;   /* Copy maps. */

    FLOW_FOR_EACH_IN_MAPS(value, flow, mask->mf.map) {
        *dst_u64 = value & *mask_u64++;
        hash = hash_add64(hash, *dst_u64++);
    }
    dst->hash = hash_finish(hash,
                            (dst_u64 - miniflow_get_values(&dst->mf)) * 8);
}
/* Iterate through netdev_flow_key TNL u64 values specified by 'FLOWMAP'. */
#define NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(VALUE, KEY, FLOWMAP)   \
    MINIFLOW_FOR_EACH_IN_FLOWMAP(VALUE, &(KEY)->mf, FLOWMAP)

/* Returns a hash value for the bits of 'key' where there are 1-bits in
 * 'mask'. */
static inline uint32_t
netdev_flow_key_hash_in_mask(const struct netdev_flow_key *key,
                             const struct netdev_flow_key *mask)
{
    const uint64_t *p = miniflow_get_values(&mask->mf);
    uint32_t hash = 0;
    uint64_t value;

    NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, key, mask->mf.map) {
        hash = hash_add64(hash, value & *p++);
    }

    return hash_finish(hash, (p - miniflow_get_values(&mask->mf)) * 8);
}
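
/* Added note: this hash deliberately mirrors the one computed by
 * netdev_flow_key_init_masked() above.  Both feed the masked 64-bit words
 * into hash_add64() and finish over the number of bytes hashed, so the
 * hash of a packet key taken "in mask" can be compared against the hash
 * stored for a flow that was masked with the same subtable mask. */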
static bool
emc_entry_alive(struct emc_entry *ce)
{
    return ce->flow && !ce->flow->dead;
}

static void
emc_clear_entry(struct emc_entry *ce)
{
    if (ce->flow) {
        dp_netdev_flow_unref(ce->flow);
        ce->flow = NULL;
    }
}

static inline void
emc_change_entry(struct emc_entry *ce, struct dp_netdev_flow *flow,
                 const struct netdev_flow_key *key)
{
    if (ce->flow != flow) {
        if (ce->flow) {
            dp_netdev_flow_unref(ce->flow);
        }

        if (dp_netdev_flow_ref(flow)) {
            ce->flow = flow;
        } else {
            ce->flow = NULL;
        }
    }
    if (key) {
        netdev_flow_key_clone(&ce->key, key);
    }
}
static inline void
emc_insert(struct emc_cache *cache, const struct netdev_flow_key *key,
           struct dp_netdev_flow *flow)
{
    struct emc_entry *to_be_replaced = NULL;
    struct emc_entry *current_entry;

    EMC_FOR_EACH_POS_WITH_HASH(cache, current_entry, key->hash) {
        if (netdev_flow_key_equal(&current_entry->key, key)) {
            /* We found the entry with the 'mf' miniflow */
            emc_change_entry(current_entry, flow, NULL);
            return;
        }

        /* Replacement policy: put the flow in an empty (not alive) entry, or
         * in the first entry where it can be */
        if (!to_be_replaced
            || (emc_entry_alive(to_be_replaced)
                && !emc_entry_alive(current_entry))
            || current_entry->key.hash < to_be_replaced->key.hash) {
            to_be_replaced = current_entry;
        }
    }
    /* We didn't find the miniflow in the cache.
     * The 'to_be_replaced' entry is where the new flow will be stored */

    emc_change_entry(to_be_replaced, flow, key);
}
static inline void
emc_probabilistic_insert(struct dp_netdev_pmd_thread *pmd,
                         const struct netdev_flow_key *key,
                         struct dp_netdev_flow *flow)
{
    /* Insert an entry into the EMC based on probability value 'min'. By
     * default the value is UINT32_MAX / 100 which yields an insertion
     * probability of 1/100 ie. 1% */

    uint32_t min;

    atomic_read_relaxed(&pmd->dp->emc_insert_min, &min);

    if (min && random_uint32() <= min) {
        emc_insert(&pmd->flow_cache, key, flow);
    }
}
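
/* Added note (worked example): with the default emc-insert-inv-prob of 100,
 * 'min' is UINT32_MAX / 100, so 'random_uint32() <= min' succeeds for roughly
 * one packet in a hundred.  Setting the inverse probability to 0 makes
 * dpif_netdev_set_config() store 0 in 'emc_insert_min', the 'min &&' test
 * then always fails and EMC insertion is disabled altogether. */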
static inline struct dp_netdev_flow *
emc_lookup(struct emc_cache *cache, const struct netdev_flow_key *key)
{
    struct emc_entry *current_entry;

    EMC_FOR_EACH_POS_WITH_HASH(cache, current_entry, key->hash) {
        if (current_entry->key.hash == key->hash
            && emc_entry_alive(current_entry)
            && netdev_flow_key_equal_mf(&current_entry->key, &key->mf)) {

            /* We found the entry with the 'key->mf' miniflow */
            return current_entry->flow;
        }
    }

    return NULL;
}
static struct dp_netdev_flow *
dp_netdev_pmd_lookup_flow(struct dp_netdev_pmd_thread *pmd,
                          const struct netdev_flow_key *key,
                          int *lookup_num_p)
{
    struct dpcls *cls;
    struct dpcls_rule *rule;
    odp_port_t in_port = u32_to_odp(MINIFLOW_GET_U32(&key->mf,
                                                     in_port.odp_port));
    struct dp_netdev_flow *netdev_flow = NULL;

    cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port);
    if (OVS_LIKELY(cls)) {
        dpcls_lookup(cls, key, &rule, 1, lookup_num_p);
        netdev_flow = dp_netdev_flow_cast(rule);
    }
    return netdev_flow;
}
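
/* Added note: each pmd thread keeps a dpcls per input port, so this lookup
 * only has to consult the classifier that belongs to the packet's in_port;
 * dp_netdev_flow_add() installs flows into the same per-port classifier via
 * dp_netdev_pmd_find_dpcls(). */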
static struct dp_netdev_flow *
dp_netdev_pmd_find_flow(const struct dp_netdev_pmd_thread *pmd,
                        const ovs_u128 *ufidp, const struct nlattr *key,
                        size_t key_len)
{
    struct dp_netdev_flow *netdev_flow;
    struct flow flow;
    ovs_u128 ufid;

    /* If a UFID is not provided, determine one based on the key. */
    if (!ufidp && key && key_len
        && !dpif_netdev_flow_from_nlattrs(key, key_len, &flow, false)) {
        dpif_flow_hash(pmd->dp->dpif, &flow, sizeof flow, &ufid);
        ufidp = &ufid;
    }

    if (ufidp) {
        CMAP_FOR_EACH_WITH_HASH (netdev_flow, node, dp_netdev_flow_hash(ufidp),
                                 &pmd->flow_table) {
            if (ovs_u128_equals(netdev_flow->ufid, *ufidp)) {
                return netdev_flow;
            }
        }
    }

    return NULL;
}
static void
get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow_,
                    struct dpif_flow_stats *stats)
{
    struct dp_netdev_flow *netdev_flow;
    unsigned long long n;
    long long used;
    uint16_t flags;

    netdev_flow = CONST_CAST(struct dp_netdev_flow *, netdev_flow_);

    atomic_read_relaxed(&netdev_flow->stats.packet_count, &n);
    stats->n_packets = n;
    atomic_read_relaxed(&netdev_flow->stats.byte_count, &n);
    stats->n_bytes = n;
    atomic_read_relaxed(&netdev_flow->stats.used, &used);
    stats->used = used;
    atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags);
    stats->tcp_flags = flags;
}
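
/* Added note: the per-flow counters are updated only by the pmd thread that
 * owns the flow and are read here with relaxed atomics, so a dump can observe
 * packet, byte and tcp_flags values taken at slightly different instants. */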
/* Converts to the dpif_flow format, using 'key_buf' and 'mask_buf' for
 * storing the netlink-formatted key/mask. 'key_buf' may be the same as
 * 'mask_buf'. Actions will be returned without copying, by relying on RCU to
 * protect them. */
static void
dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow *netdev_flow,
                            struct ofpbuf *key_buf, struct ofpbuf *mask_buf,
                            struct dpif_flow *flow, bool terse)
{
    if (terse) {
        memset(flow, 0, sizeof *flow);
    } else {
        struct flow_wildcards wc;
        struct dp_netdev_actions *actions;
        size_t offset;
        struct odp_flow_key_parms odp_parms = {
            .flow = &netdev_flow->flow,
            .mask = &wc.masks,
            .support = dp_netdev_support,
        };

        miniflow_expand(&netdev_flow->cr.mask->mf, &wc.masks);
        /* in_port is exact matched, but we have left it out from the mask for
         * optimization reasons. Add in_port back to the mask. */
        wc.masks.in_port.odp_port = ODPP_NONE;

        /* Key */
        offset = key_buf->size;
        flow->key = ofpbuf_tail(key_buf);
        odp_flow_key_from_flow(&odp_parms, key_buf);
        flow->key_len = key_buf->size - offset;

        /* Mask */
        offset = mask_buf->size;
        flow->mask = ofpbuf_tail(mask_buf);
        odp_parms.key_buf = key_buf;
        odp_flow_key_from_mask(&odp_parms, mask_buf);
        flow->mask_len = mask_buf->size - offset;

        /* Actions */
        actions = dp_netdev_flow_get_actions(netdev_flow);
        flow->actions = actions->actions;
        flow->actions_len = actions->size;
    }

    flow->ufid = netdev_flow->ufid;
    flow->ufid_present = true;
    flow->pmd_id = netdev_flow->pmd_id;
    get_dpif_flow_stats(netdev_flow, &flow->stats);
}
static int
dpif_netdev_mask_from_nlattrs(const struct nlattr *key, uint32_t key_len,
                              const struct nlattr *mask_key,
                              uint32_t mask_key_len, const struct flow *flow,
                              struct flow_wildcards *wc, bool probe)
{
    enum odp_key_fitness fitness;

    fitness = odp_flow_key_to_mask(mask_key, mask_key_len, wc, flow);
    if (fitness) {
        if (!probe) {
            /* This should not happen: it indicates that
             * odp_flow_key_from_mask() and odp_flow_key_to_mask()
             * disagree on the acceptable form of a mask.  Log the problem
             * as an error, with enough details to enable debugging. */
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

            if (!VLOG_DROP_ERR(&rl)) {
                struct ds s;

                ds_init(&s);
                odp_flow_format(key, key_len, mask_key, mask_key_len, NULL, &s,
                                true);
                VLOG_ERR("internal error parsing flow mask %s (%s)",
                         ds_cstr(&s), odp_key_fitness_to_string(fitness));
                ds_destroy(&s);
            }
        }

        return EINVAL;
    }

    return 0;
}

static int
dpif_netdev_flow_from_nlattrs(const struct nlattr *key, uint32_t key_len,
                              struct flow *flow, bool probe)
{
    if (odp_flow_key_to_flow(key, key_len, flow)) {
        if (!probe) {
            /* This should not happen: it indicates that
             * odp_flow_key_from_flow() and odp_flow_key_to_flow() disagree on
             * the acceptable form of a flow.  Log the problem as an error,
             * with enough details to enable debugging. */
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

            if (!VLOG_DROP_ERR(&rl)) {
                struct ds s;

                ds_init(&s);
                odp_flow_format(key, key_len, NULL, 0, NULL, &s, true);
                VLOG_ERR("internal error parsing flow key %s", ds_cstr(&s));
                ds_destroy(&s);
            }
        }

        return EINVAL;
    }

    if (flow->ct_state & DP_NETDEV_CS_UNSUPPORTED_MASK) {
        return EINVAL;
    }

    return 0;
}
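
/* Added note: DP_NETDEV_CS_UNSUPPORTED_MASK covers the ct_state bits that the
 * userspace datapath cannot report, so a flow key requesting any of them is
 * rejected with EINVAL here instead of being silently installed. */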
2487 dpif_netdev_flow_get(const struct dpif
*dpif
, const struct dpif_flow_get
*get
)
2489 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
2490 struct dp_netdev_flow
*netdev_flow
;
2491 struct dp_netdev_pmd_thread
*pmd
;
2492 struct hmapx to_find
= HMAPX_INITIALIZER(&to_find
);
2493 struct hmapx_node
*node
;
2496 if (get
->pmd_id
== PMD_ID_NULL
) {
2497 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
2498 if (dp_netdev_pmd_try_ref(pmd
) && !hmapx_add(&to_find
, pmd
)) {
2499 dp_netdev_pmd_unref(pmd
);
2503 pmd
= dp_netdev_get_pmd(dp
, get
->pmd_id
);
2507 hmapx_add(&to_find
, pmd
);
2510 if (!hmapx_count(&to_find
)) {
2514 HMAPX_FOR_EACH (node
, &to_find
) {
2515 pmd
= (struct dp_netdev_pmd_thread
*) node
->data
;
2516 netdev_flow
= dp_netdev_pmd_find_flow(pmd
, get
->ufid
, get
->key
,
2519 dp_netdev_flow_to_dpif_flow(netdev_flow
, get
->buffer
, get
->buffer
,
2528 HMAPX_FOR_EACH (node
, &to_find
) {
2529 pmd
= (struct dp_netdev_pmd_thread
*) node
->data
;
2530 dp_netdev_pmd_unref(pmd
);
2533 hmapx_destroy(&to_find
);
2537 static struct dp_netdev_flow
*
2538 dp_netdev_flow_add(struct dp_netdev_pmd_thread
*pmd
,
2539 struct match
*match
, const ovs_u128
*ufid
,
2540 const struct nlattr
*actions
, size_t actions_len
)
2541 OVS_REQUIRES(pmd
->flow_mutex
)
2543 struct dp_netdev_flow
*flow
;
2544 struct netdev_flow_key mask
;
2547 /* Make sure in_port is exact matched before we read it. */
2548 ovs_assert(match
->wc
.masks
.in_port
.odp_port
== ODPP_NONE
);
2549 odp_port_t in_port
= match
->flow
.in_port
.odp_port
;
2551 /* As we select the dpcls based on the port number, each netdev flow
2552 * belonging to the same dpcls will have the same odp_port value.
2553 * For performance reasons we wildcard odp_port here in the mask. In the
2554 * typical case dp_hash is also wildcarded, and the resulting 8-byte
2555 * chunk {dp_hash, in_port} will be ignored by netdev_flow_mask_init() and
2556 * will not be part of the subtable mask.
2557 * This will speed up the hash computation during dpcls_lookup() because
2558 * there is one less call to hash_add64() in this case. */
2559 match
->wc
.masks
.in_port
.odp_port
= 0;
2560 netdev_flow_mask_init(&mask
, match
);
2561 match
->wc
.masks
.in_port
.odp_port
= ODPP_NONE
;
2563 /* Make sure wc does not have metadata. */
2564 ovs_assert(!FLOWMAP_HAS_FIELD(&mask
.mf
.map
, metadata
)
2565 && !FLOWMAP_HAS_FIELD(&mask
.mf
.map
, regs
));
2567 /* Do not allocate extra space. */
2568 flow
= xmalloc(sizeof *flow
- sizeof flow
->cr
.flow
.mf
+ mask
.len
);
2569 memset(&flow
->stats
, 0, sizeof flow
->stats
);
2572 *CONST_CAST(unsigned *, &flow
->pmd_id
) = pmd
->core_id
;
2573 *CONST_CAST(struct flow
*, &flow
->flow
) = match
->flow
;
2574 *CONST_CAST(ovs_u128
*, &flow
->ufid
) = *ufid
;
2575 ovs_refcount_init(&flow
->ref_cnt
);
2576 ovsrcu_set(&flow
->actions
, dp_netdev_actions_create(actions
, actions_len
));
2578 netdev_flow_key_init_masked(&flow
->cr
.flow
, &match
->flow
, &mask
);
2580 /* Select dpcls for in_port. Relies on in_port to be exact match. */
2581 cls
= dp_netdev_pmd_find_dpcls(pmd
, in_port
);
2582 dpcls_insert(cls
, &flow
->cr
, &mask
);
2584 cmap_insert(&pmd
->flow_table
, CONST_CAST(struct cmap_node
*, &flow
->node
),
2585 dp_netdev_flow_hash(&flow
->ufid
));
2587 if (OVS_UNLIKELY(!VLOG_DROP_DBG((&upcall_rl
)))) {
2588 struct ds ds
= DS_EMPTY_INITIALIZER
;
2589 struct ofpbuf key_buf
, mask_buf
;
2590 struct odp_flow_key_parms odp_parms
= {
2591 .flow
= &match
->flow
,
2592 .mask
= &match
->wc
.masks
,
2593 .support
= dp_netdev_support
,
2596 ofpbuf_init(&key_buf
, 0);
2597 ofpbuf_init(&mask_buf
, 0);
2599 odp_flow_key_from_flow(&odp_parms
, &key_buf
);
2600 odp_parms
.key_buf
= &key_buf
;
2601 odp_flow_key_from_mask(&odp_parms
, &mask_buf
);
2603 ds_put_cstr(&ds
, "flow_add: ");
2604 odp_format_ufid(ufid
, &ds
);
2605 ds_put_cstr(&ds
, " ");
2606 odp_flow_format(key_buf
.data
, key_buf
.size
,
2607 mask_buf
.data
, mask_buf
.size
,
2609 ds_put_cstr(&ds
, ", actions:");
2610 format_odp_actions(&ds
, actions
, actions_len
, NULL
);
2612 VLOG_DBG("%s", ds_cstr(&ds
));
2614 ofpbuf_uninit(&key_buf
);
2615 ofpbuf_uninit(&mask_buf
);
2617 /* Add a printout of the actual match installed. */
2620 ds_put_cstr(&ds
, "flow match: ");
2621 miniflow_expand(&flow
->cr
.flow
.mf
, &m
.flow
);
2622 miniflow_expand(&flow
->cr
.mask
->mf
, &m
.wc
.masks
);
2623 memset(&m
.tun_md
, 0, sizeof m
.tun_md
);
2624 match_format(&m
, NULL
, &ds
, OFP_DEFAULT_PRIORITY
);
2626 VLOG_DBG("%s", ds_cstr(&ds
));
2635 flow_put_on_pmd(struct dp_netdev_pmd_thread
*pmd
,
2636 struct netdev_flow_key
*key
,
2637 struct match
*match
,
2639 const struct dpif_flow_put
*put
,
2640 struct dpif_flow_stats
*stats
)
2642 struct dp_netdev_flow
*netdev_flow
;
2646 memset(stats
, 0, sizeof *stats
);
2649 ovs_mutex_lock(&pmd
->flow_mutex
);
2650 netdev_flow
= dp_netdev_pmd_lookup_flow(pmd
, key
, NULL
);
2652 if (put
->flags
& DPIF_FP_CREATE
) {
2653 if (cmap_count(&pmd
->flow_table
) < MAX_FLOWS
) {
2654 dp_netdev_flow_add(pmd
, match
, ufid
, put
->actions
,
2664 if (put
->flags
& DPIF_FP_MODIFY
) {
2665 struct dp_netdev_actions
*new_actions
;
2666 struct dp_netdev_actions
*old_actions
;
2668 new_actions
= dp_netdev_actions_create(put
->actions
,
2671 old_actions
= dp_netdev_flow_get_actions(netdev_flow
);
2672 ovsrcu_set(&netdev_flow
->actions
, new_actions
);
2675 get_dpif_flow_stats(netdev_flow
, stats
);
2677 if (put
->flags
& DPIF_FP_ZERO_STATS
) {
2678 /* XXX: The userspace datapath uses thread local statistics
2679 * (for flows), which should be updated only by the owning
2680 * thread. Since we cannot write on stats memory here,
2681 * we choose not to support this flag. Please note:
2682 * - This feature is currently used only by dpctl commands with
2684 * - Should the need arise, this operation can be implemented
2685 * by keeping a base value (to be update here) for each
2686 * counter, and subtracting it before outputting the stats */
2690 ovsrcu_postpone(dp_netdev_actions_free
, old_actions
);
2691 } else if (put
->flags
& DPIF_FP_CREATE
) {
2694 /* Overlapping flow. */
2698 ovs_mutex_unlock(&pmd
->flow_mutex
);
2703 dpif_netdev_flow_put(struct dpif
*dpif
, const struct dpif_flow_put
*put
)
2705 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
2706 struct netdev_flow_key key
, mask
;
2707 struct dp_netdev_pmd_thread
*pmd
;
2711 bool probe
= put
->flags
& DPIF_FP_PROBE
;
2714 memset(put
->stats
, 0, sizeof *put
->stats
);
2716 error
= dpif_netdev_flow_from_nlattrs(put
->key
, put
->key_len
, &match
.flow
,
2721 error
= dpif_netdev_mask_from_nlattrs(put
->key
, put
->key_len
,
2722 put
->mask
, put
->mask_len
,
2723 &match
.flow
, &match
.wc
, probe
);
2731 dpif_flow_hash(dpif
, &match
.flow
, sizeof match
.flow
, &ufid
);
    /* Must produce a netdev_flow_key for lookup.
     * Use the same method as employed to create the key when adding
     * the flow to the dpcls to make sure they match. */
2737 netdev_flow_mask_init(&mask
, &match
);
2738 netdev_flow_key_init_masked(&key
, &match
.flow
, &mask
);
2740 if (put
->pmd_id
== PMD_ID_NULL
) {
2741 if (cmap_count(&dp
->poll_threads
) == 0) {
2744 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
2745 struct dpif_flow_stats pmd_stats
;
2748 pmd_error
= flow_put_on_pmd(pmd
, &key
, &match
, &ufid
, put
,
2752 } else if (put
->stats
) {
2753 put
->stats
->n_packets
+= pmd_stats
.n_packets
;
2754 put
->stats
->n_bytes
+= pmd_stats
.n_bytes
;
2755 put
->stats
->used
= MAX(put
->stats
->used
, pmd_stats
.used
);
2756 put
->stats
->tcp_flags
|= pmd_stats
.tcp_flags
;
2760 pmd
= dp_netdev_get_pmd(dp
, put
->pmd_id
);
2764 error
= flow_put_on_pmd(pmd
, &key
, &match
, &ufid
, put
, put
->stats
);
2765 dp_netdev_pmd_unref(pmd
);
2772 flow_del_on_pmd(struct dp_netdev_pmd_thread
*pmd
,
2773 struct dpif_flow_stats
*stats
,
2774 const struct dpif_flow_del
*del
)
2776 struct dp_netdev_flow
*netdev_flow
;
2779 ovs_mutex_lock(&pmd
->flow_mutex
);
2780 netdev_flow
= dp_netdev_pmd_find_flow(pmd
, del
->ufid
, del
->key
,
2784 get_dpif_flow_stats(netdev_flow
, stats
);
2786 dp_netdev_pmd_remove_flow(pmd
, netdev_flow
);
2790 ovs_mutex_unlock(&pmd
->flow_mutex
);
2796 dpif_netdev_flow_del(struct dpif
*dpif
, const struct dpif_flow_del
*del
)
2798 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
2799 struct dp_netdev_pmd_thread
*pmd
;
2803 memset(del
->stats
, 0, sizeof *del
->stats
);
2806 if (del
->pmd_id
== PMD_ID_NULL
) {
2807 if (cmap_count(&dp
->poll_threads
) == 0) {
2810 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
2811 struct dpif_flow_stats pmd_stats
;
2814 pmd_error
= flow_del_on_pmd(pmd
, &pmd_stats
, del
);
2817 } else if (del
->stats
) {
2818 del
->stats
->n_packets
+= pmd_stats
.n_packets
;
2819 del
->stats
->n_bytes
+= pmd_stats
.n_bytes
;
2820 del
->stats
->used
= MAX(del
->stats
->used
, pmd_stats
.used
);
2821 del
->stats
->tcp_flags
|= pmd_stats
.tcp_flags
;
2825 pmd
= dp_netdev_get_pmd(dp
, del
->pmd_id
);
2829 error
= flow_del_on_pmd(pmd
, del
->stats
, del
);
2830 dp_netdev_pmd_unref(pmd
);
2837 struct dpif_netdev_flow_dump
{
2838 struct dpif_flow_dump up
;
2839 struct cmap_position poll_thread_pos
;
2840 struct cmap_position flow_pos
;
2841 struct dp_netdev_pmd_thread
*cur_pmd
;
2843 struct ovs_mutex mutex
;
2846 static struct dpif_netdev_flow_dump
*
2847 dpif_netdev_flow_dump_cast(struct dpif_flow_dump
*dump
)
2849 return CONTAINER_OF(dump
, struct dpif_netdev_flow_dump
, up
);
2852 static struct dpif_flow_dump
*
2853 dpif_netdev_flow_dump_create(const struct dpif
*dpif_
, bool terse
,
2854 char *type OVS_UNUSED
)
2856 struct dpif_netdev_flow_dump
*dump
;
2858 dump
= xzalloc(sizeof *dump
);
2859 dpif_flow_dump_init(&dump
->up
, dpif_
);
2860 dump
->up
.terse
= terse
;
2861 ovs_mutex_init(&dump
->mutex
);
2867 dpif_netdev_flow_dump_destroy(struct dpif_flow_dump
*dump_
)
2869 struct dpif_netdev_flow_dump
*dump
= dpif_netdev_flow_dump_cast(dump_
);
2871 ovs_mutex_destroy(&dump
->mutex
);
2876 struct dpif_netdev_flow_dump_thread
{
2877 struct dpif_flow_dump_thread up
;
2878 struct dpif_netdev_flow_dump
*dump
;
2879 struct odputil_keybuf keybuf
[FLOW_DUMP_MAX_BATCH
];
2880 struct odputil_keybuf maskbuf
[FLOW_DUMP_MAX_BATCH
];
2883 static struct dpif_netdev_flow_dump_thread
*
2884 dpif_netdev_flow_dump_thread_cast(struct dpif_flow_dump_thread
*thread
)
2886 return CONTAINER_OF(thread
, struct dpif_netdev_flow_dump_thread
, up
);
2889 static struct dpif_flow_dump_thread
*
2890 dpif_netdev_flow_dump_thread_create(struct dpif_flow_dump
*dump_
)
2892 struct dpif_netdev_flow_dump
*dump
= dpif_netdev_flow_dump_cast(dump_
);
2893 struct dpif_netdev_flow_dump_thread
*thread
;
2895 thread
= xmalloc(sizeof *thread
);
2896 dpif_flow_dump_thread_init(&thread
->up
, &dump
->up
);
2897 thread
->dump
= dump
;
2902 dpif_netdev_flow_dump_thread_destroy(struct dpif_flow_dump_thread
*thread_
)
2904 struct dpif_netdev_flow_dump_thread
*thread
2905 = dpif_netdev_flow_dump_thread_cast(thread_
);
2911 dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread
*thread_
,
2912 struct dpif_flow
*flows
, int max_flows
)
2914 struct dpif_netdev_flow_dump_thread
*thread
2915 = dpif_netdev_flow_dump_thread_cast(thread_
);
2916 struct dpif_netdev_flow_dump
*dump
= thread
->dump
;
2917 struct dp_netdev_flow
*netdev_flows
[FLOW_DUMP_MAX_BATCH
];
2921 ovs_mutex_lock(&dump
->mutex
);
2922 if (!dump
->status
) {
2923 struct dpif_netdev
*dpif
= dpif_netdev_cast(thread
->up
.dpif
);
2924 struct dp_netdev
*dp
= get_dp_netdev(&dpif
->dpif
);
2925 struct dp_netdev_pmd_thread
*pmd
= dump
->cur_pmd
;
2926 int flow_limit
= MIN(max_flows
, FLOW_DUMP_MAX_BATCH
);
2928 /* First call to dump_next(), extracts the first pmd thread.
2929 * If there is no pmd thread, returns immediately. */
2931 pmd
= dp_netdev_pmd_get_next(dp
, &dump
->poll_thread_pos
);
2933 ovs_mutex_unlock(&dump
->mutex
);
2940 for (n_flows
= 0; n_flows
< flow_limit
; n_flows
++) {
2941 struct cmap_node
*node
;
2943 node
= cmap_next_position(&pmd
->flow_table
, &dump
->flow_pos
);
2947 netdev_flows
[n_flows
] = CONTAINER_OF(node
,
2948 struct dp_netdev_flow
,
2951 /* When finishing dumping the current pmd thread, moves to
2953 if (n_flows
< flow_limit
) {
2954 memset(&dump
->flow_pos
, 0, sizeof dump
->flow_pos
);
2955 dp_netdev_pmd_unref(pmd
);
2956 pmd
= dp_netdev_pmd_get_next(dp
, &dump
->poll_thread_pos
);
2962 /* Keeps the reference to next caller. */
2963 dump
->cur_pmd
= pmd
;
2965 /* If the current dump is empty, do not exit the loop, since the
2966 * remaining pmds could have flows to be dumped. Just dumps again
2967 * on the new 'pmd'. */
2970 ovs_mutex_unlock(&dump
->mutex
);
2972 for (i
= 0; i
< n_flows
; i
++) {
2973 struct odputil_keybuf
*maskbuf
= &thread
->maskbuf
[i
];
2974 struct odputil_keybuf
*keybuf
= &thread
->keybuf
[i
];
2975 struct dp_netdev_flow
*netdev_flow
= netdev_flows
[i
];
2976 struct dpif_flow
*f
= &flows
[i
];
2977 struct ofpbuf key
, mask
;
2979 ofpbuf_use_stack(&key
, keybuf
, sizeof *keybuf
);
2980 ofpbuf_use_stack(&mask
, maskbuf
, sizeof *maskbuf
);
2981 dp_netdev_flow_to_dpif_flow(netdev_flow
, &key
, &mask
, f
,
2989 dpif_netdev_execute(struct dpif
*dpif
, struct dpif_execute
*execute
)
2990 OVS_NO_THREAD_SAFETY_ANALYSIS
2992 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
2993 struct dp_netdev_pmd_thread
*pmd
;
2994 struct dp_packet_batch pp
;
2996 if (dp_packet_size(execute
->packet
) < ETH_HEADER_LEN
||
2997 dp_packet_size(execute
->packet
) > UINT16_MAX
) {
3001 /* Tries finding the 'pmd'. If NULL is returned, that means
3002 * the current thread is a non-pmd thread and should use
3003 * dp_netdev_get_pmd(dp, NON_PMD_CORE_ID). */
3004 pmd
= ovsthread_getspecific(dp
->per_pmd_key
);
3006 pmd
= dp_netdev_get_pmd(dp
, NON_PMD_CORE_ID
);
3012 if (execute
->probe
) {
3013 /* If this is part of a probe, Drop the packet, since executing
3014 * the action may actually cause spurious packets be sent into
3016 if (pmd
->core_id
== NON_PMD_CORE_ID
) {
3017 dp_netdev_pmd_unref(pmd
);
3022 /* If the current thread is non-pmd thread, acquires
3023 * the 'non_pmd_mutex'. */
3024 if (pmd
->core_id
== NON_PMD_CORE_ID
) {
3025 ovs_mutex_lock(&dp
->non_pmd_mutex
);
3028 /* Update current time in PMD context. */
3029 pmd_thread_ctx_time_update(pmd
);
3031 /* The action processing expects the RSS hash to be valid, because
3032 * it's always initialized at the beginning of datapath processing.
3033 * In this case, though, 'execute->packet' may not have gone through
3034 * the datapath at all, it may have been generated by the upper layer
3035 * (OpenFlow packet-out, BFD frame, ...). */
3036 if (!dp_packet_rss_valid(execute
->packet
)) {
3037 dp_packet_set_rss_hash(execute
->packet
,
3038 flow_hash_5tuple(execute
->flow
, 0));
3041 dp_packet_batch_init_packet(&pp
, execute
->packet
);
3042 dp_netdev_execute_actions(pmd
, &pp
, false, execute
->flow
,
3043 execute
->actions
, execute
->actions_len
);
3044 dp_netdev_pmd_flush_output_packets(pmd
, true);
3046 if (pmd
->core_id
== NON_PMD_CORE_ID
) {
3047 ovs_mutex_unlock(&dp
->non_pmd_mutex
);
3048 dp_netdev_pmd_unref(pmd
);
3055 dpif_netdev_operate(struct dpif
*dpif
, struct dpif_op
**ops
, size_t n_ops
)
3059 for (i
= 0; i
< n_ops
; i
++) {
3060 struct dpif_op
*op
= ops
[i
];
3063 case DPIF_OP_FLOW_PUT
:
3064 op
->error
= dpif_netdev_flow_put(dpif
, &op
->flow_put
);
3067 case DPIF_OP_FLOW_DEL
:
3068 op
->error
= dpif_netdev_flow_del(dpif
, &op
->flow_del
);
3071 case DPIF_OP_EXECUTE
:
3072 op
->error
= dpif_netdev_execute(dpif
, &op
->execute
);
3075 case DPIF_OP_FLOW_GET
:
3076 op
->error
= dpif_netdev_flow_get(dpif
, &op
->flow_get
);
3082 /* Applies datapath configuration from the database. Some of the changes are
3083 * actually applied in dpif_netdev_run(). */
3085 dpif_netdev_set_config(struct dpif
*dpif
, const struct smap
*other_config
)
3087 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
3088 const char *cmask
= smap_get(other_config
, "pmd-cpu-mask");
3089 unsigned long long insert_prob
=
3090 smap_get_ullong(other_config
, "emc-insert-inv-prob",
3091 DEFAULT_EM_FLOW_INSERT_INV_PROB
);
3092 uint32_t insert_min
, cur_min
;
3093 uint32_t tx_flush_interval
, cur_tx_flush_interval
;
3095 tx_flush_interval
= smap_get_int(other_config
, "tx-flush-interval",
3096 DEFAULT_TX_FLUSH_INTERVAL
);
3097 atomic_read_relaxed(&dp
->tx_flush_interval
, &cur_tx_flush_interval
);
3098 if (tx_flush_interval
!= cur_tx_flush_interval
) {
3099 atomic_store_relaxed(&dp
->tx_flush_interval
, tx_flush_interval
);
3100 VLOG_INFO("Flushing interval for tx queues set to %"PRIu32
" us",
3104 if (!nullable_string_is_equal(dp
->pmd_cmask
, cmask
)) {
3105 free(dp
->pmd_cmask
);
3106 dp
->pmd_cmask
= nullable_xstrdup(cmask
);
3107 dp_netdev_request_reconfigure(dp
);
3110 atomic_read_relaxed(&dp
->emc_insert_min
, &cur_min
);
3111 if (insert_prob
<= UINT32_MAX
) {
3112 insert_min
= insert_prob
== 0 ? 0 : UINT32_MAX
/ insert_prob
;
3114 insert_min
= DEFAULT_EM_FLOW_INSERT_MIN
;
3115 insert_prob
= DEFAULT_EM_FLOW_INSERT_INV_PROB
;
3118 if (insert_min
!= cur_min
) {
3119 atomic_store_relaxed(&dp
->emc_insert_min
, insert_min
);
3120 if (insert_min
== 0) {
3121 VLOG_INFO("EMC has been disabled");
3123 VLOG_INFO("EMC insertion probability changed to 1/%llu (~%.2f%%)",
3124 insert_prob
, (100 / (float)insert_prob
));
3128 bool perf_enabled
= smap_get_bool(other_config
, "pmd-perf-metrics", false);
3129 bool cur_perf_enabled
;
3130 atomic_read_relaxed(&dp
->pmd_perf_metrics
, &cur_perf_enabled
);
3131 if (perf_enabled
!= cur_perf_enabled
) {
3132 atomic_store_relaxed(&dp
->pmd_perf_metrics
, perf_enabled
);
3134 VLOG_INFO("PMD performance metrics collection enabled");
3136 VLOG_INFO("PMD performance metrics collection disabled");
/* Parses affinity list and returns result in 'core_ids'. */
static int
parse_affinity_list(const char *affinity_list, unsigned *core_ids, int n_rxq)
{
    int i;
    char *list, *copy, *key, *value;
    int error = 0;

    for (i = 0; i < n_rxq; i++) {
        core_ids[i] = OVS_CORE_UNSPEC;
    }

    if (!affinity_list) {
        return 0;
    }

    list = copy = xstrdup(affinity_list);

    while (ofputil_parse_key_value(&list, &key, &value)) {
        int rxq_id, core_id;

        if (!str_to_int(key, 0, &rxq_id) || rxq_id < 0
            || !str_to_int(value, 0, &core_id) || core_id < 0) {
            error = EINVAL;
            break;
        }

        if (rxq_id < n_rxq) {
            core_ids[rxq_id] = core_id;
        }
    }

    free(copy);
    return error;
}
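
/* Added note (illustrative example): an affinity list is a comma separated
 * set of <rxq-id>:<core-id> pairs, e.g. "0:3,1:7" pins rx queue 0 to core 3
 * and rx queue 1 to core 7.  Queues that are not mentioned keep
 * OVS_CORE_UNSPEC and are scheduled automatically by rxq_scheduling(). */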
3179 /* Parses 'affinity_list' and applies configuration if it is valid. */
3181 dpif_netdev_port_set_rxq_affinity(struct dp_netdev_port
*port
,
3182 const char *affinity_list
)
3184 unsigned *core_ids
, i
;
3187 core_ids
= xmalloc(port
->n_rxq
* sizeof *core_ids
);
3188 if (parse_affinity_list(affinity_list
, core_ids
, port
->n_rxq
)) {
3193 for (i
= 0; i
< port
->n_rxq
; i
++) {
3194 port
->rxqs
[i
].core_id
= core_ids
[i
];
3202 /* Changes the affinity of port's rx queues. The changes are actually applied
3203 * in dpif_netdev_run(). */
3205 dpif_netdev_port_set_config(struct dpif
*dpif
, odp_port_t port_no
,
3206 const struct smap
*cfg
)
3208 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
3209 struct dp_netdev_port
*port
;
3211 const char *affinity_list
= smap_get(cfg
, "pmd-rxq-affinity");
3213 ovs_mutex_lock(&dp
->port_mutex
);
3214 error
= get_port_by_number(dp
, port_no
, &port
);
3215 if (error
|| !netdev_is_pmd(port
->netdev
)
3216 || nullable_string_is_equal(affinity_list
, port
->rxq_affinity_list
)) {
3220 error
= dpif_netdev_port_set_rxq_affinity(port
, affinity_list
);
3224 free(port
->rxq_affinity_list
);
3225 port
->rxq_affinity_list
= nullable_xstrdup(affinity_list
);
3227 dp_netdev_request_reconfigure(dp
);
3229 ovs_mutex_unlock(&dp
->port_mutex
);
3234 dpif_netdev_queue_to_priority(const struct dpif
*dpif OVS_UNUSED
,
3235 uint32_t queue_id
, uint32_t *priority
)
3237 *priority
= queue_id
;
/* Creates and returns a new 'struct dp_netdev_actions', whose actions are
 * a copy of the 'size' bytes of 'actions' input parameters. */
struct dp_netdev_actions *
dp_netdev_actions_create(const struct nlattr *actions, size_t size)
{
    struct dp_netdev_actions *netdev_actions;

    netdev_actions = xmalloc(sizeof *netdev_actions + size);
    memcpy(netdev_actions->actions, actions, size);
    netdev_actions->size = size;

    return netdev_actions;
}

struct dp_netdev_actions *
dp_netdev_flow_get_actions(const struct dp_netdev_flow *flow)
{
    return ovsrcu_get(struct dp_netdev_actions *, &flow->actions);
}

static void
dp_netdev_actions_free(struct dp_netdev_actions *actions)
{
    free(actions);
}
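
/* Added note: a flow's actions are RCU protected.  Readers fetch them with
 * dp_netdev_flow_get_actions() while flow_put_on_pmd() swaps the pointer with
 * ovsrcu_set() and defers the free of the old actions through
 * ovsrcu_postpone(dp_netdev_actions_free, old_actions). */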
static void
dp_netdev_rxq_set_cycles(struct dp_netdev_rxq *rx,
                         enum rxq_cycles_counter_type type,
                         unsigned long long cycles)
{
    atomic_store_relaxed(&rx->cycles[type], cycles);
}

static void
dp_netdev_rxq_add_cycles(struct dp_netdev_rxq *rx,
                         enum rxq_cycles_counter_type type,
                         unsigned long long cycles)
{
    non_atomic_ullong_add(&rx->cycles[type], cycles);
}

static uint64_t
dp_netdev_rxq_get_cycles(struct dp_netdev_rxq *rx,
                         enum rxq_cycles_counter_type type)
{
    unsigned long long processing_cycles;

    atomic_read_relaxed(&rx->cycles[type], &processing_cycles);
    return processing_cycles;
}

static void
dp_netdev_rxq_set_intrvl_cycles(struct dp_netdev_rxq *rx,
                                unsigned long long cycles)
{
    unsigned int idx = rx->intrvl_idx++ % PMD_RXQ_INTERVAL_MAX;

    atomic_store_relaxed(&rx->cycles_intrvl[idx], cycles);
}

static uint64_t
dp_netdev_rxq_get_intrvl_cycles(struct dp_netdev_rxq *rx, unsigned idx)
{
    unsigned long long processing_cycles;

    atomic_read_relaxed(&rx->cycles_intrvl[idx], &processing_cycles);
    return processing_cycles;
}
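
/* Added note: RXQ_CYCLES_PROC_CURR accumulates the cycles of the interval in
 * progress via dp_netdev_rxq_add_cycles(); the pmd's periodic optimization
 * pass is assumed to push each completed interval into the 'cycles_intrvl'
 * ring (PMD_RXQ_INTERVAL_MAX slots) with dp_netdev_rxq_set_intrvl_cycles(),
 * and rxq_scheduling() later sums those slots into RXQ_CYCLES_PROC_HIST to
 * rank the queues. */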
3309 #if ATOMIC_ALWAYS_LOCK_FREE_8B
3311 pmd_perf_metrics_enabled(const struct dp_netdev_pmd_thread
*pmd
)
3313 bool pmd_perf_enabled
;
3314 atomic_read_relaxed(&pmd
->dp
->pmd_perf_metrics
, &pmd_perf_enabled
);
3315 return pmd_perf_enabled
;
3318 /* If stores and reads of 64-bit integers are not atomic, the full PMD
3319 * performance metrics are not available as locked access to 64 bit
3320 * integers would be prohibitively expensive. */
3322 pmd_perf_metrics_enabled(const struct dp_netdev_pmd_thread
*pmd OVS_UNUSED
)
3329 dp_netdev_pmd_flush_output_on_port(struct dp_netdev_pmd_thread
*pmd
,
3336 struct cycle_timer timer
;
3338 uint32_t tx_flush_interval
;
3340 cycle_timer_start(&pmd
->perf_stats
, &timer
);
3342 dynamic_txqs
= p
->port
->dynamic_txqs
;
3344 tx_qid
= dpif_netdev_xps_get_tx_qid(pmd
, p
);
3346 tx_qid
= pmd
->static_tx_qid
;
3349 output_cnt
= dp_packet_batch_size(&p
->output_pkts
);
3350 ovs_assert(output_cnt
> 0);
3352 netdev_send(p
->port
->netdev
, tx_qid
, &p
->output_pkts
, dynamic_txqs
);
3353 dp_packet_batch_init(&p
->output_pkts
);
3355 /* Update time of the next flush. */
3356 atomic_read_relaxed(&pmd
->dp
->tx_flush_interval
, &tx_flush_interval
);
3357 p
->flush_time
= pmd
->ctx
.now
+ tx_flush_interval
;
3359 ovs_assert(pmd
->n_output_batches
> 0);
3360 pmd
->n_output_batches
--;
3362 pmd_perf_update_counter(&pmd
->perf_stats
, PMD_STAT_SENT_PKTS
, output_cnt
);
3363 pmd_perf_update_counter(&pmd
->perf_stats
, PMD_STAT_SENT_BATCHES
, 1);
3365 /* Distribute send cycles evenly among transmitted packets and assign to
3366 * their respective rx queues. */
3367 cycles
= cycle_timer_stop(&pmd
->perf_stats
, &timer
) / output_cnt
;
3368 for (i
= 0; i
< output_cnt
; i
++) {
3369 if (p
->output_pkts_rxqs
[i
]) {
3370 dp_netdev_rxq_add_cycles(p
->output_pkts_rxqs
[i
],
3371 RXQ_CYCLES_PROC_CURR
, cycles
);
3379 dp_netdev_pmd_flush_output_packets(struct dp_netdev_pmd_thread
*pmd
,
3385 if (!pmd
->n_output_batches
) {
3389 HMAP_FOR_EACH (p
, node
, &pmd
->send_port_cache
) {
3390 if (!dp_packet_batch_is_empty(&p
->output_pkts
)
3391 && (force
|| pmd
->ctx
.now
>= p
->flush_time
)) {
3392 output_cnt
+= dp_netdev_pmd_flush_output_on_port(pmd
, p
);
3399 dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread
*pmd
,
3400 struct dp_netdev_rxq
*rxq
,
3403 struct pmd_perf_stats
*s
= &pmd
->perf_stats
;
3404 struct dp_packet_batch batch
;
3405 struct cycle_timer timer
;
3408 int rem_qlen
= 0, *qlen_p
= NULL
;
3411 /* Measure duration for polling and processing rx burst. */
3412 cycle_timer_start(&pmd
->perf_stats
, &timer
);
3414 pmd
->ctx
.last_rxq
= rxq
;
3415 dp_packet_batch_init(&batch
);
3417 /* Fetch the rx queue length only for vhostuser ports. */
3418 if (pmd_perf_metrics_enabled(pmd
) && rxq
->is_vhost
) {
3422 error
= netdev_rxq_recv(rxq
->rx
, &batch
, qlen_p
);
3424 /* At least one packet received. */
3425 *recirc_depth_get() = 0;
3426 pmd_thread_ctx_time_update(pmd
);
3427 batch_cnt
= batch
.count
;
3428 if (pmd_perf_metrics_enabled(pmd
)) {
3429 /* Update batch histogram. */
3430 s
->current
.batches
++;
3431 histogram_add_sample(&s
->pkts_per_batch
, batch_cnt
);
3432 /* Update the maximum vhost rx queue fill level. */
3433 if (rxq
->is_vhost
&& rem_qlen
>= 0) {
3434 uint32_t qfill
= batch_cnt
+ rem_qlen
;
3435 if (qfill
> s
->current
.max_vhost_qfill
) {
3436 s
->current
.max_vhost_qfill
= qfill
;
3440 /* Process packet batch. */
3441 dp_netdev_input(pmd
, &batch
, port_no
);
3443 /* Assign processing cycles to rx queue. */
3444 cycles
= cycle_timer_stop(&pmd
->perf_stats
, &timer
);
3445 dp_netdev_rxq_add_cycles(rxq
, RXQ_CYCLES_PROC_CURR
, cycles
);
3447 dp_netdev_pmd_flush_output_packets(pmd
, false);
3449 /* Discard cycles. */
3450 cycle_timer_stop(&pmd
->perf_stats
, &timer
);
3451 if (error
!= EAGAIN
&& error
!= EOPNOTSUPP
) {
3452 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
3454 VLOG_ERR_RL(&rl
, "error receiving data from %s: %s",
3455 netdev_rxq_get_name(rxq
->rx
), ovs_strerror(error
));
3459 pmd
->ctx
.last_rxq
= NULL
;
3464 static struct tx_port
*
3465 tx_port_lookup(const struct hmap
*hmap
, odp_port_t port_no
)
3469 HMAP_FOR_EACH_IN_BUCKET (tx
, node
, hash_port_no(port_no
), hmap
) {
3470 if (tx
->port
->port_no
== port_no
) {
3479 port_reconfigure(struct dp_netdev_port
*port
)
3481 struct netdev
*netdev
= port
->netdev
;
3484 /* Closes the existing 'rxq's. */
3485 for (i
= 0; i
< port
->n_rxq
; i
++) {
3486 netdev_rxq_close(port
->rxqs
[i
].rx
);
3487 port
->rxqs
[i
].rx
= NULL
;
3489 unsigned last_nrxq
= port
->n_rxq
;
3492 /* Allows 'netdev' to apply the pending configuration changes. */
3493 if (netdev_is_reconf_required(netdev
) || port
->need_reconfigure
) {
3494 err
= netdev_reconfigure(netdev
);
3495 if (err
&& (err
!= EOPNOTSUPP
)) {
3496 VLOG_ERR("Failed to set interface %s new configuration",
3497 netdev_get_name(netdev
));
3501 /* If the netdev_reconfigure() above succeeds, reopens the 'rxq's. */
3502 port
->rxqs
= xrealloc(port
->rxqs
,
3503 sizeof *port
->rxqs
* netdev_n_rxq(netdev
));
3504 /* Realloc 'used' counters for tx queues. */
3505 free(port
->txq_used
);
3506 port
->txq_used
= xcalloc(netdev_n_txq(netdev
), sizeof *port
->txq_used
);
3508 for (i
= 0; i
< netdev_n_rxq(netdev
); i
++) {
3509 bool new_queue
= i
>= last_nrxq
;
3511 memset(&port
->rxqs
[i
], 0, sizeof port
->rxqs
[i
]);
3514 port
->rxqs
[i
].port
= port
;
3515 port
->rxqs
[i
].is_vhost
= !strncmp(port
->type
, "dpdkvhost", 9);
3517 err
= netdev_rxq_open(netdev
, &port
->rxqs
[i
].rx
, i
);
3524 /* Parse affinity list to apply configuration for new queues. */
3525 dpif_netdev_port_set_rxq_affinity(port
, port
->rxq_affinity_list
);
3527 /* If reconfiguration was successful mark it as such, so we can use it */
3528 port
->need_reconfigure
= false;
3533 struct rr_numa_list
{
3534 struct hmap numas
; /* Contains 'struct rr_numa' */
3538 struct hmap_node node
;
3542 /* Non isolated pmds on numa node 'numa_id' */
3543 struct dp_netdev_pmd_thread
**pmds
;
3550 static struct rr_numa
*
3551 rr_numa_list_lookup(struct rr_numa_list
*rr
, int numa_id
)
3553 struct rr_numa
*numa
;
3555 HMAP_FOR_EACH_WITH_HASH (numa
, node
, hash_int(numa_id
, 0), &rr
->numas
) {
3556 if (numa
->numa_id
== numa_id
) {
3564 /* Returns the next node in numa list following 'numa' in round-robin fashion.
3565 * Returns first node if 'numa' is a null pointer or the last node in 'rr'.
3566 * Returns NULL if 'rr' numa list is empty. */
3567 static struct rr_numa
*
3568 rr_numa_list_next(struct rr_numa_list
*rr
, const struct rr_numa
*numa
)
3570 struct hmap_node
*node
= NULL
;
3573 node
= hmap_next(&rr
->numas
, &numa
->node
);
3576 node
= hmap_first(&rr
->numas
);
3579 return (node
) ? CONTAINER_OF(node
, struct rr_numa
, node
) : NULL
;
3583 rr_numa_list_populate(struct dp_netdev
*dp
, struct rr_numa_list
*rr
)
3585 struct dp_netdev_pmd_thread
*pmd
;
3586 struct rr_numa
*numa
;
3588 hmap_init(&rr
->numas
);
3590 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
3591 if (pmd
->core_id
== NON_PMD_CORE_ID
|| pmd
->isolated
) {
3595 numa
= rr_numa_list_lookup(rr
, pmd
->numa_id
);
3597 numa
= xzalloc(sizeof *numa
);
3598 numa
->numa_id
= pmd
->numa_id
;
3599 hmap_insert(&rr
->numas
, &numa
->node
, hash_int(pmd
->numa_id
, 0));
3602 numa
->pmds
= xrealloc(numa
->pmds
, numa
->n_pmds
* sizeof *numa
->pmds
);
3603 numa
->pmds
[numa
->n_pmds
- 1] = pmd
;
3604 /* At least one pmd so initialise curr_idx and idx_inc. */
3605 numa
->cur_index
= 0;
3606 numa
->idx_inc
= true;
/* Returns the next pmd from the numa node in
 * incrementing or decrementing order. */
static struct dp_netdev_pmd_thread *
rr_numa_get_pmd(struct rr_numa *numa)
{
    int numa_idx = numa->cur_index;

    if (numa->idx_inc == true) {
        /* Incrementing through list of pmds. */
        if (numa->cur_index == numa->n_pmds-1) {
            /* Reached the last pmd. */
            numa->idx_inc = false;
        } else {
            numa->cur_index++;
        }
    } else {
        /* Decrementing through list of pmds. */
        if (numa->cur_index == 0) {
            /* Reached the first pmd. */
            numa->idx_inc = true;
        } else {
            numa->cur_index--;
        }
    }
    return numa->pmds[numa_idx];
}
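
/* Added note: the index walks the pmd array forward to the last entry and
 * then back again, so with e.g. three pmds the assignment order is
 * 0,1,2,2,1,0,... rather than a plain modulo cycle, which spreads the
 * busiest-first sorted queues more evenly across the pmds. */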
3638 rr_numa_list_destroy(struct rr_numa_list
*rr
)
3640 struct rr_numa
*numa
;
3642 HMAP_FOR_EACH_POP (numa
, node
, &rr
->numas
) {
3646 hmap_destroy(&rr
->numas
);
/* Sort Rx Queues by the processing cycles they are consuming. */
static int
compare_rxq_cycles(const void *a, const void *b)
{
    struct dp_netdev_rxq *qa;
    struct dp_netdev_rxq *qb;
    uint64_t cycles_qa, cycles_qb;

    qa = *(struct dp_netdev_rxq **) a;
    qb = *(struct dp_netdev_rxq **) b;

    cycles_qa = dp_netdev_rxq_get_cycles(qa, RXQ_CYCLES_PROC_HIST);
    cycles_qb = dp_netdev_rxq_get_cycles(qb, RXQ_CYCLES_PROC_HIST);

    if (cycles_qa != cycles_qb) {
        return (cycles_qa < cycles_qb) ? 1 : -1;
    } else {
        /* Cycles are the same so tiebreak on port/queue id.
         * Tiebreaking (as opposed to return 0) ensures consistent
         * sort results across multiple OS's. */
        uint32_t port_qa = odp_to_u32(qa->port->port_no);
        uint32_t port_qb = odp_to_u32(qb->port->port_no);

        if (port_qa != port_qb) {
            return port_qa > port_qb ? 1 : -1;
        } else {
            return netdev_rxq_get_queue_id(qa->rx)
                   - netdev_rxq_get_queue_id(qb->rx);
        }
    }
}
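
/* Added note: returning 1 when 'qa' consumed fewer cycles makes qsort() order
 * the array from busiest to idlest queue, so rxq_scheduling() hands out the
 * most expensive queues first when round-robining them over the pmds. */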
3680 /* Assign pmds to queues. If 'pinned' is true, assign pmds to pinned
3681 * queues and marks the pmds as isolated. Otherwise, assign non isolated
3682 * pmds to unpinned queues.
3684 * If 'pinned' is false queues will be sorted by processing cycles they are
3685 * consuming and then assigned to pmds in round robin order.
3687 * The function doesn't touch the pmd threads, it just stores the assignment
3688 * in the 'pmd' member of each rxq. */
3690 rxq_scheduling(struct dp_netdev
*dp
, bool pinned
) OVS_REQUIRES(dp
->port_mutex
)
3692 struct dp_netdev_port
*port
;
3693 struct rr_numa_list rr
;
3694 struct rr_numa
*non_local_numa
= NULL
;
3695 struct dp_netdev_rxq
** rxqs
= NULL
;
3697 struct rr_numa
*numa
= NULL
;
3700 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
3701 if (!netdev_is_pmd(port
->netdev
)) {
3705 for (int qid
= 0; qid
< port
->n_rxq
; qid
++) {
3706 struct dp_netdev_rxq
*q
= &port
->rxqs
[qid
];
3708 if (pinned
&& q
->core_id
!= OVS_CORE_UNSPEC
) {
3709 struct dp_netdev_pmd_thread
*pmd
;
3711 pmd
= dp_netdev_get_pmd(dp
, q
->core_id
);
3713 VLOG_WARN("There is no PMD thread on core %d. Queue "
3714 "%d on port \'%s\' will not be polled.",
3715 q
->core_id
, qid
, netdev_get_name(port
->netdev
));
3718 pmd
->isolated
= true;
3719 dp_netdev_pmd_unref(pmd
);
3721 } else if (!pinned
&& q
->core_id
== OVS_CORE_UNSPEC
) {
3722 uint64_t cycle_hist
= 0;
3725 rxqs
= xmalloc(sizeof *rxqs
);
3727 rxqs
= xrealloc(rxqs
, sizeof *rxqs
* (n_rxqs
+ 1));
3729 /* Sum the queue intervals and store the cycle history. */
3730 for (unsigned i
= 0; i
< PMD_RXQ_INTERVAL_MAX
; i
++) {
3731 cycle_hist
+= dp_netdev_rxq_get_intrvl_cycles(q
, i
);
3733 dp_netdev_rxq_set_cycles(q
, RXQ_CYCLES_PROC_HIST
, cycle_hist
);
3735 /* Store the queue. */
3742 /* Sort the queues in order of the processing cycles
3743 * they consumed during their last pmd interval. */
3744 qsort(rxqs
, n_rxqs
, sizeof *rxqs
, compare_rxq_cycles
);
3747 rr_numa_list_populate(dp
, &rr
);
3748 /* Assign the sorted queues to pmds in round robin. */
3749 for (int i
= 0; i
< n_rxqs
; i
++) {
3750 numa_id
= netdev_get_numa_id(rxqs
[i
]->port
->netdev
);
3751 numa
= rr_numa_list_lookup(&rr
, numa_id
);
3753 /* There are no pmds on the queue's local NUMA node.
3754 Round robin on the NUMA nodes that do have pmds. */
3755 non_local_numa
= rr_numa_list_next(&rr
, non_local_numa
);
3756 if (!non_local_numa
) {
3757 VLOG_ERR("There is no available (non-isolated) pmd "
3758 "thread for port \'%s\' queue %d. This queue "
3759 "will not be polled. Is pmd-cpu-mask set to "
3760 "zero? Or are all PMDs isolated to other "
3761 "queues?", netdev_rxq_get_name(rxqs
[i
]->rx
),
3762 netdev_rxq_get_queue_id(rxqs
[i
]->rx
));
3765 rxqs
[i
]->pmd
= rr_numa_get_pmd(non_local_numa
);
3766 VLOG_WARN("There's no available (non-isolated) pmd thread "
3767 "on numa node %d. Queue %d on port \'%s\' will "
3768 "be assigned to the pmd on core %d "
3769 "(numa node %d). Expect reduced performance.",
3770 numa_id
, netdev_rxq_get_queue_id(rxqs
[i
]->rx
),
3771 netdev_rxq_get_name(rxqs
[i
]->rx
),
3772 rxqs
[i
]->pmd
->core_id
, rxqs
[i
]->pmd
->numa_id
);
3774 rxqs
[i
]->pmd
= rr_numa_get_pmd(numa
);
3775 VLOG_INFO("Core %d on numa node %d assigned port \'%s\' "
3776 "rx queue %d (measured processing cycles %"PRIu64
").",
3777 rxqs
[i
]->pmd
->core_id
, numa_id
,
3778 netdev_rxq_get_name(rxqs
[i
]->rx
),
3779 netdev_rxq_get_queue_id(rxqs
[i
]->rx
),
3780 dp_netdev_rxq_get_cycles(rxqs
[i
], RXQ_CYCLES_PROC_HIST
));
3784 rr_numa_list_destroy(&rr
);
3789 reload_affected_pmds(struct dp_netdev
*dp
)
3791 struct dp_netdev_pmd_thread
*pmd
;
3793 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
3794 if (pmd
->need_reload
) {
3795 dp_netdev_reload_pmd__(pmd
);
3796 pmd
->need_reload
= false;
3802 reconfigure_pmd_threads(struct dp_netdev
*dp
)
3803 OVS_REQUIRES(dp
->port_mutex
)
3805 struct dp_netdev_pmd_thread
*pmd
;
3806 struct ovs_numa_dump
*pmd_cores
;
3807 struct ovs_numa_info_core
*core
;
3808 struct hmapx to_delete
= HMAPX_INITIALIZER(&to_delete
);
3809 struct hmapx_node
*node
;
3810 bool changed
= false;
3811 bool need_to_adjust_static_tx_qids
= false;
3813 /* The pmd threads should be started only if there's a pmd port in the
3814 * datapath. If the user didn't provide any "pmd-cpu-mask", we start
3815 * NR_PMD_THREADS per numa node. */
3816 if (!has_pmd_port(dp
)) {
3817 pmd_cores
= ovs_numa_dump_n_cores_per_numa(0);
3818 } else if (dp
->pmd_cmask
&& dp
->pmd_cmask
[0]) {
3819 pmd_cores
= ovs_numa_dump_cores_with_cmask(dp
->pmd_cmask
);
3821 pmd_cores
= ovs_numa_dump_n_cores_per_numa(NR_PMD_THREADS
);
3824 /* We need to adjust 'static_tx_qid's only if we're reducing number of
3825 * PMD threads. Otherwise, new threads will allocate all the freed ids. */
3826 if (ovs_numa_dump_count(pmd_cores
) < cmap_count(&dp
->poll_threads
) - 1) {
3827 /* Adjustment is required to keep 'static_tx_qid's sequential and
3828 * avoid possible issues, for example, imbalanced tx queue usage
3829 * and unnecessary locking caused by remapping on netdev level. */
3830 need_to_adjust_static_tx_qids
= true;
3833 /* Check for unwanted pmd threads */
3834 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
3835 if (pmd
->core_id
== NON_PMD_CORE_ID
) {
3838 if (!ovs_numa_dump_contains_core(pmd_cores
, pmd
->numa_id
,
3840 hmapx_add(&to_delete
, pmd
);
3841 } else if (need_to_adjust_static_tx_qids
) {
3842 pmd
->need_reload
= true;
3846 HMAPX_FOR_EACH (node
, &to_delete
) {
3847 pmd
= (struct dp_netdev_pmd_thread
*) node
->data
;
3848 VLOG_INFO("PMD thread on numa_id: %d, core id: %2d destroyed.",
3849 pmd
->numa_id
, pmd
->core_id
);
3850 dp_netdev_del_pmd(dp
, pmd
);
3852 changed
= !hmapx_is_empty(&to_delete
);
3853 hmapx_destroy(&to_delete
);
3855 if (need_to_adjust_static_tx_qids
) {
3856 /* 'static_tx_qid's are not sequential now.
3857 * Reload remaining threads to fix this. */
3858 reload_affected_pmds(dp
);
3861 /* Check for required new pmd threads */
3862 FOR_EACH_CORE_ON_DUMP(core
, pmd_cores
) {
3863 pmd
= dp_netdev_get_pmd(dp
, core
->core_id
);
3865 pmd
= xzalloc(sizeof *pmd
);
3866 dp_netdev_configure_pmd(pmd
, dp
, core
->core_id
, core
->numa_id
);
3867 pmd
->thread
= ovs_thread_create("pmd", pmd_thread_main
, pmd
);
3868 VLOG_INFO("PMD thread on numa_id: %d, core id: %2d created.",
3869 pmd
->numa_id
, pmd
->core_id
);
3872 dp_netdev_pmd_unref(pmd
);
3877 struct ovs_numa_info_numa
*numa
;
3879 /* Log the number of pmd threads per numa node. */
3880 FOR_EACH_NUMA_ON_DUMP (numa
, pmd_cores
) {
3881 VLOG_INFO("There are %"PRIuSIZE
" pmd threads on numa node %d",
3882 numa
->n_cores
, numa
->numa_id
);
3886 ovs_numa_dump_destroy(pmd_cores
);
3890 pmd_remove_stale_ports(struct dp_netdev
*dp
,
3891 struct dp_netdev_pmd_thread
*pmd
)
3892 OVS_EXCLUDED(pmd
->port_mutex
)
3893 OVS_REQUIRES(dp
->port_mutex
)
3895 struct rxq_poll
*poll
, *poll_next
;
3896 struct tx_port
*tx
, *tx_next
;
3898 ovs_mutex_lock(&pmd
->port_mutex
);
3899 HMAP_FOR_EACH_SAFE (poll
, poll_next
, node
, &pmd
->poll_list
) {
3900 struct dp_netdev_port
*port
= poll
->rxq
->port
;
3902 if (port
->need_reconfigure
3903 || !hmap_contains(&dp
->ports
, &port
->node
)) {
3904 dp_netdev_del_rxq_from_pmd(pmd
, poll
);
3907 HMAP_FOR_EACH_SAFE (tx
, tx_next
, node
, &pmd
->tx_ports
) {
3908 struct dp_netdev_port
*port
= tx
->port
;
3910 if (port
->need_reconfigure
3911 || !hmap_contains(&dp
->ports
, &port
->node
)) {
3912 dp_netdev_del_port_tx_from_pmd(pmd
, tx
);
3915 ovs_mutex_unlock(&pmd
->port_mutex
);
3918 /* Must be called each time a port is added/removed or the cmask changes.
3919 * This creates and destroys pmd threads, reconfigures ports, opens their
3920 * rxqs and assigns all rxqs/txqs to pmd threads. */
3922 reconfigure_datapath(struct dp_netdev
*dp
)
3923 OVS_REQUIRES(dp
->port_mutex
)
3925 struct dp_netdev_pmd_thread
*pmd
;
3926 struct dp_netdev_port
*port
;
3929 dp
->last_reconfigure_seq
= seq_read(dp
->reconfigure_seq
);
3931 /* Step 1: Adjust the pmd threads based on the datapath ports, the cores
3932 * on the system and the user configuration. */
3933 reconfigure_pmd_threads(dp
);
3935 wanted_txqs
= cmap_count(&dp
->poll_threads
);
3937 /* The number of pmd threads might have changed, or a port can be new:
3938 * adjust the txqs. */
3939 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
3940 netdev_set_tx_multiq(port
->netdev
, wanted_txqs
);
3943 /* Step 2: Remove from the pmd threads ports that have been removed or
3944 * need reconfiguration. */
3946 /* Check for all the ports that need reconfiguration. We cache this in
3947 * 'port->need_reconfigure', because netdev_is_reconf_required() can
3948 * change at any time. */
3949 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
3950 if (netdev_is_reconf_required(port
->netdev
)) {
3951 port
->need_reconfigure
= true;
3955 /* Remove from the pmd threads all the ports that have been deleted or
3956 * need reconfiguration. */
3957 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
3958 pmd_remove_stale_ports(dp
, pmd
);
3961 /* Reload affected pmd threads. We must wait for the pmd threads before
3962 * reconfiguring the ports, because a port cannot be reconfigured while
3963 * it's being used. */
3964 reload_affected_pmds(dp
);
3966 /* Step 3: Reconfigure ports. */
3968 /* We only reconfigure the ports that we determined above, because they're
3969 * not being used by any pmd thread at the moment. If a port fails to
3970 * reconfigure we remove it from the datapath. */
3971 struct dp_netdev_port
*next_port
;
3972 HMAP_FOR_EACH_SAFE (port
, next_port
, node
, &dp
->ports
) {
3975 if (!port
->need_reconfigure
) {
3979 err
= port_reconfigure(port
);
3981 hmap_remove(&dp
->ports
, &port
->node
);
3982 seq_change(dp
->port_seq
);
3985 port
->dynamic_txqs
= netdev_n_txq(port
->netdev
) < wanted_txqs
;
3989 /* Step 4: Compute new rxq scheduling. We don't touch the pmd threads
3990 * for now, we just update the 'pmd' pointer in each rxq to point to the
3991 * wanted thread according to the scheduling policy. */
3993 /* Reset all the pmd threads to non isolated. */
3994 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
3995 pmd
->isolated
= false;
3998 /* Reset all the queues to unassigned */
3999 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
4000 for (int i
= 0; i
< port
->n_rxq
; i
++) {
4001 port
->rxqs
[i
].pmd
= NULL
;
4005 /* Add pinned queues and mark pmd threads isolated. */
4006 rxq_scheduling(dp
, true);
4008 /* Add non-pinned queues. */
4009 rxq_scheduling(dp
, false);
4011 /* Step 5: Remove queues not compliant with new scheduling. */
4012 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
4013 struct rxq_poll
*poll
, *poll_next
;
4015 ovs_mutex_lock(&pmd
->port_mutex
);
4016 HMAP_FOR_EACH_SAFE (poll
, poll_next
, node
, &pmd
->poll_list
) {
4017 if (poll
->rxq
->pmd
!= pmd
) {
4018 dp_netdev_del_rxq_from_pmd(pmd
, poll
);
4021 ovs_mutex_unlock(&pmd
->port_mutex
);
4024 /* Reload affected pmd threads. We must wait for the pmd threads to remove
4025 * the old queues before readding them, otherwise a queue can be polled by
4026 * two threads at the same time. */
4027 reload_affected_pmds(dp
);
4029 /* Step 6: Add queues from scheduling, if they're not there already. */
4030 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
4031 if (!netdev_is_pmd(port
->netdev
)) {
4035 for (int qid
= 0; qid
< port
->n_rxq
; qid
++) {
4036 struct dp_netdev_rxq
*q
= &port
->rxqs
[qid
];
4039 ovs_mutex_lock(&q
->pmd
->port_mutex
);
4040 dp_netdev_add_rxq_to_pmd(q
->pmd
, q
);
4041 ovs_mutex_unlock(&q
->pmd
->port_mutex
);
4046 /* Add every port to the tx cache of every pmd thread, if it's not
4047 * there already and if this pmd has at least one rxq to poll. */
4048 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
4049 ovs_mutex_lock(&pmd
->port_mutex
);
4050 if (hmap_count(&pmd
->poll_list
) || pmd
->core_id
== NON_PMD_CORE_ID
) {
4051 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
4052 dp_netdev_add_port_tx_to_pmd(pmd
, port
);
4055 ovs_mutex_unlock(&pmd
->port_mutex
);
4058 /* Reload affected pmd threads. */
4059 reload_affected_pmds(dp
);
4062 /* Returns true if one of the netdevs in 'dp' requires a reconfiguration */
4064 ports_require_restart(const struct dp_netdev
*dp
)
4065 OVS_REQUIRES(dp
->port_mutex
)
4067 struct dp_netdev_port
*port
;
4069 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
4070 if (netdev_is_reconf_required(port
->netdev
)) {
4078 /* Return true if needs to revalidate datapath flows. */
4080 dpif_netdev_run(struct dpif
*dpif
)
4082 struct dp_netdev_port
*port
;
4083 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
4084 struct dp_netdev_pmd_thread
*non_pmd
;
4085 uint64_t new_tnl_seq
;
4086 bool need_to_flush
= true;
4088 ovs_mutex_lock(&dp
->port_mutex
);
4089 non_pmd
= dp_netdev_get_pmd(dp
, NON_PMD_CORE_ID
);
4091 ovs_mutex_lock(&dp
->non_pmd_mutex
);
4092 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
4093 if (!netdev_is_pmd(port
->netdev
)) {
4096 for (i
= 0; i
< port
->n_rxq
; i
++) {
4097 if (dp_netdev_process_rxq_port(non_pmd
,
4100 need_to_flush
= false;
4105 if (need_to_flush
) {
4106 /* We didn't receive anything in the process loop.
4107 * Check if we need to send something.
4108 * There was no time updates on current iteration. */
4109 pmd_thread_ctx_time_update(non_pmd
);
4110 dp_netdev_pmd_flush_output_packets(non_pmd
, false);
4113 dpif_netdev_xps_revalidate_pmd(non_pmd
, false);
4114 ovs_mutex_unlock(&dp
->non_pmd_mutex
);
4116 dp_netdev_pmd_unref(non_pmd
);
4119 if (dp_netdev_is_reconf_required(dp
) || ports_require_restart(dp
)) {
4120 reconfigure_datapath(dp
);
4122 ovs_mutex_unlock(&dp
->port_mutex
);
4124 tnl_neigh_cache_run();
4126 new_tnl_seq
= seq_read(tnl_conf_seq
);
4128 if (dp
->last_tnl_conf_seq
!= new_tnl_seq
) {
4129 dp
->last_tnl_conf_seq
= new_tnl_seq
;
4136 dpif_netdev_wait(struct dpif
*dpif
)
4138 struct dp_netdev_port
*port
;
4139 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
4141 ovs_mutex_lock(&dp_netdev_mutex
);
4142 ovs_mutex_lock(&dp
->port_mutex
);
4143 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
4144 netdev_wait_reconf_required(port
->netdev
);
4145 if (!netdev_is_pmd(port
->netdev
)) {
4148 for (i
= 0; i
< port
->n_rxq
; i
++) {
4149 netdev_rxq_wait(port
->rxqs
[i
].rx
);
4153 ovs_mutex_unlock(&dp
->port_mutex
);
4154 ovs_mutex_unlock(&dp_netdev_mutex
);
4155 seq_wait(tnl_conf_seq
, dp
->last_tnl_conf_seq
);
4159 pmd_free_cached_ports(struct dp_netdev_pmd_thread
*pmd
)
4161 struct tx_port
*tx_port_cached
;
4163 /* Flush all the queued packets. */
4164 dp_netdev_pmd_flush_output_packets(pmd
, true);
4165 /* Free all used tx queue ids. */
4166 dpif_netdev_xps_revalidate_pmd(pmd
, true);
4168 HMAP_FOR_EACH_POP (tx_port_cached
, node
, &pmd
->tnl_port_cache
) {
4169 free(tx_port_cached
);
4171 HMAP_FOR_EACH_POP (tx_port_cached
, node
, &pmd
->send_port_cache
) {
4172 free(tx_port_cached
);
4176 /* Copies ports from 'pmd->tx_ports' (shared with the main thread) to
4177 * thread-local copies. Copy to 'pmd->tnl_port_cache' if it is a tunnel
4178 * device, otherwise to 'pmd->send_port_cache' if the port has at least
4181 pmd_load_cached_ports(struct dp_netdev_pmd_thread
*pmd
)
4182 OVS_REQUIRES(pmd
->port_mutex
)
4184 struct tx_port
*tx_port
, *tx_port_cached
;
4186 pmd_free_cached_ports(pmd
);
4187 hmap_shrink(&pmd
->send_port_cache
);
4188 hmap_shrink(&pmd
->tnl_port_cache
);
4190 HMAP_FOR_EACH (tx_port
, node
, &pmd
->tx_ports
) {
4191 if (netdev_has_tunnel_push_pop(tx_port
->port
->netdev
)) {
4192 tx_port_cached
= xmemdup(tx_port
, sizeof *tx_port_cached
);
4193 hmap_insert(&pmd
->tnl_port_cache
, &tx_port_cached
->node
,
4194 hash_port_no(tx_port_cached
->port
->port_no
));
4197 if (netdev_n_txq(tx_port
->port
->netdev
)) {
4198 tx_port_cached
= xmemdup(tx_port
, sizeof *tx_port_cached
);
4199 hmap_insert(&pmd
->send_port_cache
, &tx_port_cached
->node
,
4200 hash_port_no(tx_port_cached
->port
->port_no
));
4206 pmd_alloc_static_tx_qid(struct dp_netdev_pmd_thread
*pmd
)
4208 ovs_mutex_lock(&pmd
->dp
->tx_qid_pool_mutex
);
4209 if (!id_pool_alloc_id(pmd
->dp
->tx_qid_pool
, &pmd
->static_tx_qid
)) {
4210 VLOG_ABORT("static_tx_qid allocation failed for PMD on core %2d"
4211 ", numa_id %d.", pmd
->core_id
, pmd
->numa_id
);
4213 ovs_mutex_unlock(&pmd
->dp
->tx_qid_pool_mutex
);
4215 VLOG_DBG("static_tx_qid = %d allocated for PMD thread on core %2d"
4216 ", numa_id %d.", pmd
->static_tx_qid
, pmd
->core_id
, pmd
->numa_id
);
4220 pmd_free_static_tx_qid(struct dp_netdev_pmd_thread
*pmd
)
4222 ovs_mutex_lock(&pmd
->dp
->tx_qid_pool_mutex
);
4223 id_pool_free_id(pmd
->dp
->tx_qid_pool
, pmd
->static_tx_qid
);
4224 ovs_mutex_unlock(&pmd
->dp
->tx_qid_pool_mutex
);
static int
pmd_load_queues_and_ports(struct dp_netdev_pmd_thread *pmd,
                          struct polled_queue **ppoll_list)
{
    struct polled_queue *poll_list = *ppoll_list;
    struct rxq_poll *poll;
    int i;

    ovs_mutex_lock(&pmd->port_mutex);
    poll_list = xrealloc(poll_list, hmap_count(&pmd->poll_list)
                                    * sizeof *poll_list);

    i = 0;
    HMAP_FOR_EACH (poll, node, &pmd->poll_list) {
        poll_list[i].rxq = poll->rxq;
        poll_list[i].port_no = poll->rxq->port->port_no;
        i++;
    }

    pmd_load_cached_ports(pmd);

    ovs_mutex_unlock(&pmd->port_mutex);

    *ppoll_list = poll_list;
    return i;
}
4255 pmd_thread_main(void *f_
)
4257 struct dp_netdev_pmd_thread
*pmd
= f_
;
4258 struct pmd_perf_stats
*s
= &pmd
->perf_stats
;
4259 unsigned int lc
= 0;
4260 struct polled_queue
*poll_list
;
4264 int process_packets
= 0;
4268 /* Stores the pmd thread's 'pmd' to 'per_pmd_key'. */
4269 ovsthread_setspecific(pmd
->dp
->per_pmd_key
, pmd
);
4270 ovs_numa_thread_setaffinity_core(pmd
->core_id
);
4271 dpdk_set_lcore_id(pmd
->core_id
);
4272 poll_cnt
= pmd_load_queues_and_ports(pmd
, &poll_list
);
4273 emc_cache_init(&pmd
->flow_cache
);
4275 pmd_alloc_static_tx_qid(pmd
);
4277 /* List port/core affinity */
4278 for (i
= 0; i
< poll_cnt
; i
++) {
4279 VLOG_DBG("Core %d processing port \'%s\' with queue-id %d\n",
4280 pmd
->core_id
, netdev_rxq_get_name(poll_list
[i
].rxq
->rx
),
4281 netdev_rxq_get_queue_id(poll_list
[i
].rxq
->rx
));
4282 /* Reset the rxq current cycles counter. */
4283 dp_netdev_rxq_set_cycles(poll_list
[i
].rxq
, RXQ_CYCLES_PROC_CURR
, 0);
4287 while (seq_read(pmd
->reload_seq
) == pmd
->last_reload_seq
) {
4288 seq_wait(pmd
->reload_seq
, pmd
->last_reload_seq
);
4294 pmd
->intrvl_tsc_prev
= 0;
4295 atomic_store_relaxed(&pmd
->intrvl_cycles
, 0);
4296 cycles_counter_update(s
);
4297 /* Protect pmd stats from external clearing while polling. */
4298 ovs_mutex_lock(&pmd
->perf_stats
.stats_mutex
);
4300 uint64_t rx_packets
= 0, tx_packets
= 0;
4302 pmd_perf_start_iteration(s
);
4304 for (i
= 0; i
< poll_cnt
; i
++) {
4306 dp_netdev_process_rxq_port(pmd
, poll_list
[i
].rxq
,
4307 poll_list
[i
].port_no
);
4308 rx_packets
+= process_packets
;
4312 /* We didn't receive anything in the process loop.
4313 * Check if we need to send something.
4314 * There was no time updates on current iteration. */
4315 pmd_thread_ctx_time_update(pmd
);
4316 tx_packets
= dp_netdev_pmd_flush_output_packets(pmd
, false);
4324 coverage_try_clear();
4325 dp_netdev_pmd_try_optimize(pmd
, poll_list
, poll_cnt
);
4326 if (!ovsrcu_try_quiesce()) {
4327 emc_cache_slow_sweep(&pmd
->flow_cache
);
4330 atomic_read_relaxed(&pmd
->reload
, &reload
);
4335 pmd_perf_end_iteration(s
, rx_packets
, tx_packets
,
4336 pmd_perf_metrics_enabled(pmd
));
4338 ovs_mutex_unlock(&pmd
->perf_stats
.stats_mutex
);
4340 poll_cnt
= pmd_load_queues_and_ports(pmd
, &poll_list
);
4341 exiting
= latch_is_set(&pmd
->exit_latch
);
4342 /* Signal here to make sure the pmd finishes
4343 * reloading the updated configuration. */
4344 dp_netdev_pmd_reload_done(pmd
);
4346 pmd_free_static_tx_qid(pmd
);
4352 emc_cache_uninit(&pmd
->flow_cache
);
4354 pmd_free_cached_ports(pmd
);
static void
dp_netdev_disable_upcall(struct dp_netdev *dp)
    OVS_ACQUIRES(dp->upcall_rwlock)
{
    fat_rwlock_wrlock(&dp->upcall_rwlock);
}
static void
dpif_netdev_meter_get_features(const struct dpif * dpif OVS_UNUSED,
                               struct ofputil_meter_features *features)
{
    features->max_meters = MAX_METERS;
    features->band_types = DP_SUPPORTED_METER_BAND_TYPES;
    features->capabilities = DP_SUPPORTED_METER_FLAGS_MASK;
    features->max_bands = MAX_BANDS;
    features->max_color = 0;
}
4378 /* Returns false when packet needs to be dropped. */
4380 dp_netdev_run_meter(struct dp_netdev
*dp
, struct dp_packet_batch
*packets_
,
4381 uint32_t meter_id
, long long int now
)
4383 struct dp_meter
*meter
;
4384 struct dp_meter_band
*band
;
4385 struct dp_packet
*packet
;
4386 long long int long_delta_t
; /* msec */
4387 uint32_t delta_t
; /* msec */
4388 const size_t cnt
= dp_packet_batch_size(packets_
);
4389 uint32_t bytes
, volume
;
4390 int exceeded_band
[NETDEV_MAX_BURST
];
4391 uint32_t exceeded_rate
[NETDEV_MAX_BURST
];
4392 int exceeded_pkt
= cnt
; /* First packet that exceeded a band rate. */
4394 if (meter_id
>= MAX_METERS
) {
4398 meter_lock(dp
, meter_id
);
4399 meter
= dp
->meters
[meter_id
];
4404 /* Initialize as negative values. */
4405 memset(exceeded_band
, 0xff, cnt
* sizeof *exceeded_band
);
4406 /* Initialize as zeroes. */
4407 memset(exceeded_rate
, 0, cnt
* sizeof *exceeded_rate
);
4409 /* All packets will hit the meter at the same time. */
4410 long_delta_t
= (now
- meter
->used
) / 1000; /* msec */
4412 /* Make sure delta_t will not be too large, so that bucket will not
4413 * wrap around below. */
4414 delta_t
= (long_delta_t
> (long long int)meter
->max_delta_t
)
4415 ? meter
->max_delta_t
: (uint32_t)long_delta_t
;
4417 /* Update meter stats. */
4419 meter
->packet_count
+= cnt
;
4421 DP_PACKET_BATCH_FOR_EACH (i
, packet
, packets_
) {
4422 bytes
+= dp_packet_size(packet
);
4424 meter
->byte_count
+= bytes
;
4426 /* Meters can operate in terms of packets per second or kilobits per
4428 if (meter
->flags
& OFPMF13_PKTPS
) {
4429 /* Rate in packets/second, bucket 1/1000 packets. */
4430 /* msec * packets/sec = 1/1000 packets. */
4431 volume
= cnt
* 1000; /* Take 'cnt' packets from the bucket. */
4433 /* Rate in kbps, bucket in bits. */
4434 /* msec * kbps = bits */
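        /* Worked example (added comment, numbers are hypothetical): for a
         * kbps meter, a band with rate 1000 kbps gains delta_t * rate bits,
         * e.g. 10 ms * 1000 kbps = 10000 bits, while a 1500 byte packet
         * drains 1500 * 8 = 12000 bits from the bucket. */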
4438 /* Update all bands and find the one hit with the highest rate for each
4439 * packet (if any). */
4440 for (int m
= 0; m
< meter
->n_bands
; ++m
) {
4441 band
= &meter
->bands
[m
];
4443 /* Update band's bucket. */
4444 band
->bucket
+= delta_t
* band
->up
.rate
;
4445 if (band
->bucket
> band
->up
.burst_size
) {
4446 band
->bucket
= band
->up
.burst_size
;
4449 /* Drain the bucket for all the packets, if possible. */
4450 if (band
->bucket
>= volume
) {
4451 band
->bucket
-= volume
;
4453 int band_exceeded_pkt
;
4455 /* Band limit hit, must process packet-by-packet. */
4456 if (meter
->flags
& OFPMF13_PKTPS
) {
4457 band_exceeded_pkt
= band
->bucket
/ 1000;
4458 band
->bucket
%= 1000; /* Remainder stays in bucket. */
4460 /* Update the exceeding band for each exceeding packet.
4461 * (Only one band will be fired by a packet, and that
4462 * can be different for each packet.) */
4463 for (int i
= band_exceeded_pkt
; i
< cnt
; i
++) {
4464 if (band
->up
.rate
> exceeded_rate
[i
]) {
4465 exceeded_rate
[i
] = band
->up
.rate
;
4466 exceeded_band
[i
] = m
;
4470 /* Packet sizes differ, must process one-by-one. */
4471 band_exceeded_pkt
= cnt
;
4472 DP_PACKET_BATCH_FOR_EACH (i
, packet
, packets_
) {
4473 uint32_t bits
= dp_packet_size(packet
) * 8;
4475 if (band
->bucket
>= bits
) {
4476 band
->bucket
-= bits
;
4478 if (i
< band_exceeded_pkt
) {
4479 band_exceeded_pkt
= i
;
4481 /* Update the exceeding band for the exceeding packet.
4482 * (Only one band will be fired by a packet, and that
4483 * can be different for each packet.) */
4484 if (band
->up
.rate
> exceeded_rate
[i
]) {
4485 exceeded_rate
[i
] = band
->up
.rate
;
4486 exceeded_band
[i
] = m
;
4491 /* Remember the first exceeding packet. */
4492 if (exceeded_pkt
> band_exceeded_pkt
) {
4493 exceeded_pkt
= band_exceeded_pkt
;
4498 /* Fire the highest rate band exceeded by each packet.
4499 * Drop packets if needed, by swapping packet to the end that will be
4502 DP_PACKET_BATCH_REFILL_FOR_EACH (j
, cnt
, packet
, packets_
) {
4503 if (exceeded_band
[j
] >= 0) {
4504 /* Meter drop packet. */
4505 band
= &meter
->bands
[exceeded_band
[j
]];
4506 band
->packet_count
+= 1;
4507 band
->byte_count
+= dp_packet_size(packet
);
4509 dp_packet_delete(packet
);
4511 /* Meter accepts packet. */
4512 dp_packet_batch_refill(packets_
, packet
, j
);
4516 meter_unlock(dp
, meter_id
);
4519 /* Meter set/get/del processing is still single-threaded. */
4521 dpif_netdev_meter_set(struct dpif
*dpif
, ofproto_meter_id
*meter_id
,
4522 struct ofputil_meter_config
*config
)
4524 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
4525 uint32_t mid
= meter_id
->uint32
;
4526 struct dp_meter
*meter
;
4529 if (mid
>= MAX_METERS
) {
4530 return EFBIG
; /* Meter_id out of range. */
4533 if (config
->flags
& ~DP_SUPPORTED_METER_FLAGS_MASK
||
4534 !(config
->flags
& (OFPMF13_KBPS
| OFPMF13_PKTPS
))) {
4535 return EBADF
; /* Unsupported flags set */
4538 /* Validate bands */
4539 if (config
->n_bands
== 0 || config
->n_bands
> MAX_BANDS
) {
4540 return EINVAL
; /* Too many bands */
4543 /* Validate rates */
4544 for (i
= 0; i
< config
->n_bands
; i
++) {
4545 if (config
->bands
[i
].rate
== 0) {
4546 return EDOM
; /* rate must be non-zero */
4550 for (i
= 0; i
< config
->n_bands
; ++i
) {
4551 switch (config
->bands
[i
].type
) {
4555 return ENODEV
; /* Unsupported band type */
4559 /* Allocate meter */
4560 meter
= xzalloc(sizeof *meter
4561 + config
->n_bands
* sizeof(struct dp_meter_band
));
4563 meter
->flags
= config
->flags
;
4564 meter
->n_bands
= config
->n_bands
;
4565 meter
->max_delta_t
= 0;
4566 meter
->used
= time_usec();
4569 for (i
= 0; i
< config
->n_bands
; ++i
) {
4570 uint32_t band_max_delta_t
;
4572 /* Set burst size to a workable value if none specified. */
4573 if (config
->bands
[i
].burst_size
== 0) {
4574 config
->bands
[i
].burst_size
= config
->bands
[i
].rate
;
4577 meter
->bands
[i
].up
= config
->bands
[i
];
4578 /* Convert burst size to the bucket units: */
4579 /* pkts => 1/1000 packets, kilobits => bits. */
4580 meter
->bands
[i
].up
.burst_size
*= 1000;
4581 /* Initialize bucket to empty. */
4582 meter
->bands
[i
].bucket
= 0;
4584 /* Figure out max delta_t that is enough to fill any bucket. */
4586 = meter
->bands
[i
].up
.burst_size
/ meter
->bands
[i
].up
.rate
;
4587 if (band_max_delta_t
> meter
->max_delta_t
) {
4588 meter
->max_delta_t
= band_max_delta_t
;
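        /* Example for clarity (added comment, numbers are hypothetical): a
         * 1000 kbps band configured with a 500 kb burst has a 500000 bit
         * bucket after the conversion above; refilling it from empty takes
         * 500000 / 1000 = 500 ms, so that band caps delta_t at 500 ms and
         * 'max_delta_t' keeps the largest such value over all bands. */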
4592 meter_lock(dp
, mid
);
4593 dp_delete_meter(dp
, mid
); /* Free existing meter, if any */
4594 dp
->meters
[mid
] = meter
;
4595 meter_unlock(dp
, mid
);
4603 dpif_netdev_meter_get(const struct dpif
*dpif
,
4604 ofproto_meter_id meter_id_
,
4605 struct ofputil_meter_stats
*stats
, uint16_t n_bands
)
4607 const struct dp_netdev
*dp
= get_dp_netdev(dpif
);
4608 const struct dp_meter
*meter
;
4609 uint32_t meter_id
= meter_id_
.uint32
;
4611 if (meter_id
>= MAX_METERS
) {
4614 meter
= dp
->meters
[meter_id
];
4621 meter_lock(dp
, meter_id
);
4622 stats
->packet_in_count
= meter
->packet_count
;
4623 stats
->byte_in_count
= meter
->byte_count
;
4625 for (i
= 0; i
< n_bands
&& i
< meter
->n_bands
; ++i
) {
4626 stats
->bands
[i
].packet_count
= meter
->bands
[i
].packet_count
;
4627 stats
->bands
[i
].byte_count
= meter
->bands
[i
].byte_count
;
4629 meter_unlock(dp
, meter_id
);
4637 dpif_netdev_meter_del(struct dpif
*dpif
,
4638 ofproto_meter_id meter_id_
,
4639 struct ofputil_meter_stats
*stats
, uint16_t n_bands
)
4641 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
4644 error
= dpif_netdev_meter_get(dpif
, meter_id_
, stats
, n_bands
);
4646 uint32_t meter_id
= meter_id_
.uint32
;
4648 meter_lock(dp
, meter_id
);
4649 dp_delete_meter(dp
, meter_id
);
4650 meter_unlock(dp
, meter_id
);
static void
dpif_netdev_disable_upcall(struct dpif *dpif)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp_netdev_disable_upcall(dp);
}

static void
dp_netdev_enable_upcall(struct dp_netdev *dp)
    OVS_RELEASES(dp->upcall_rwlock)
{
    fat_rwlock_unlock(&dp->upcall_rwlock);
}

static void
dpif_netdev_enable_upcall(struct dpif *dpif)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp_netdev_enable_upcall(dp);
}
static void
dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd)
{
    ovs_mutex_lock(&pmd->cond_mutex);
    atomic_store_relaxed(&pmd->reload, false);
    pmd->last_reload_seq = seq_read(pmd->reload_seq);
    xpthread_cond_signal(&pmd->cond);
    ovs_mutex_unlock(&pmd->cond_mutex);
}
/* Finds and refs the dp_netdev_pmd_thread on core 'core_id'.  Returns
 * the pointer if it succeeds, otherwise NULL (it can return NULL even if
 * 'core_id' is NON_PMD_CORE_ID).
 *
 * Caller must unref the returned reference. */
static struct dp_netdev_pmd_thread *
dp_netdev_get_pmd(struct dp_netdev *dp, unsigned core_id)
{
    struct dp_netdev_pmd_thread *pmd;
    const struct cmap_node *pnode;

    pnode = cmap_find(&dp->poll_threads, hash_int(core_id, 0));
    if (!pnode) {
        return NULL;
    }
    pmd = CONTAINER_OF(pnode, struct dp_netdev_pmd_thread, node);

    return dp_netdev_pmd_try_ref(pmd) ? pmd : NULL;
}
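
/* Sketch of the intended usage of dp_netdev_get_pmd() (added comment, not in
 * the original file):
 *
 *     struct dp_netdev_pmd_thread *pmd = dp_netdev_get_pmd(dp, core_id);
 *     if (pmd) {
 *         ... the thread cannot be destroyed while we hold the reference ...
 *         dp_netdev_pmd_unref(pmd);
 *     }
 */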
/* Sets the 'struct dp_netdev_pmd_thread' for non-pmd threads. */
static void
dp_netdev_set_nonpmd(struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex)
{
    struct dp_netdev_pmd_thread *non_pmd;

    non_pmd = xzalloc(sizeof *non_pmd);
    dp_netdev_configure_pmd(non_pmd, dp, NON_PMD_CORE_ID, OVS_NUMA_UNSPEC);
}

/* Caller must have valid pointer to 'pmd'. */
static bool
dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd)
{
    return ovs_refcount_try_ref_rcu(&pmd->ref_cnt);
}

static void
dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd)
{
    if (pmd && ovs_refcount_unref(&pmd->ref_cnt) == 1) {
        ovsrcu_postpone(dp_netdev_destroy_pmd, pmd);
    }
}
/* Given cmap position 'pos', tries to ref the next node.  If try_ref()
 * fails, keeps checking for the next node until reaching the end of cmap.
 *
 * Caller must unref the returned reference. */
static struct dp_netdev_pmd_thread *
dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos)
{
    struct dp_netdev_pmd_thread *next;

    do {
        struct cmap_node *node;

        node = cmap_next_position(&dp->poll_threads, pos);
        next = node ? CONTAINER_OF(node, struct dp_netdev_pmd_thread, node)
                    : NULL;
    } while (next && !dp_netdev_pmd_try_ref(next));

    return next;
}
/* Configures the 'pmd' based on the input argument. */
static void
dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
                        unsigned core_id, int numa_id)
{
    pmd->dp = dp;
    pmd->core_id = core_id;
    pmd->numa_id = numa_id;
    pmd->need_reload = false;
    pmd->n_output_batches = 0;

    ovs_refcount_init(&pmd->ref_cnt);
    latch_init(&pmd->exit_latch);
    pmd->reload_seq = seq_create();
    pmd->last_reload_seq = seq_read(pmd->reload_seq);
    atomic_init(&pmd->reload, false);
    xpthread_cond_init(&pmd->cond, NULL);
    ovs_mutex_init(&pmd->cond_mutex);
    ovs_mutex_init(&pmd->flow_mutex);
    ovs_mutex_init(&pmd->port_mutex);
    cmap_init(&pmd->flow_table);
    cmap_init(&pmd->classifiers);
    pmd->ctx.last_rxq = NULL;
    pmd_thread_ctx_time_update(pmd);
    pmd->next_optimization = pmd->ctx.now + DPCLS_OPTIMIZATION_INTERVAL;
    pmd->rxq_next_cycle_store = pmd->ctx.now + PMD_RXQ_INTERVAL_LEN;
    hmap_init(&pmd->poll_list);
    hmap_init(&pmd->tx_ports);
    hmap_init(&pmd->tnl_port_cache);
    hmap_init(&pmd->send_port_cache);
    /* init the 'flow_cache' since there is no
     * actual thread created for NON_PMD_CORE_ID. */
    if (core_id == NON_PMD_CORE_ID) {
        emc_cache_init(&pmd->flow_cache);
        pmd_alloc_static_tx_qid(pmd);
    }
    pmd_perf_stats_init(&pmd->perf_stats);
    cmap_insert(&dp->poll_threads, CONST_CAST(struct cmap_node *, &pmd->node),
                hash_int(core_id, 0));
}
static void
dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd)
{
    struct dpcls *cls;

    dp_netdev_pmd_flow_flush(pmd);
    hmap_destroy(&pmd->send_port_cache);
    hmap_destroy(&pmd->tnl_port_cache);
    hmap_destroy(&pmd->tx_ports);
    hmap_destroy(&pmd->poll_list);
    /* All flows (including their dpcls_rules) have been deleted already */
    CMAP_FOR_EACH (cls, node, &pmd->classifiers) {
        dpcls_destroy(cls);
        ovsrcu_postpone(free, cls);
    }
    cmap_destroy(&pmd->classifiers);
    cmap_destroy(&pmd->flow_table);
    ovs_mutex_destroy(&pmd->flow_mutex);
    latch_destroy(&pmd->exit_latch);
    seq_destroy(pmd->reload_seq);
    xpthread_cond_destroy(&pmd->cond);
    ovs_mutex_destroy(&pmd->cond_mutex);
    ovs_mutex_destroy(&pmd->port_mutex);
    free(pmd);
}
4822 /* Stops the pmd thread, removes it from the 'dp->poll_threads',
4823 * and unrefs the struct. */
4825 dp_netdev_del_pmd(struct dp_netdev
*dp
, struct dp_netdev_pmd_thread
*pmd
)
4827 /* NON_PMD_CORE_ID doesn't have a thread, so we don't have to synchronize,
4828 * but extra cleanup is necessary */
4829 if (pmd
->core_id
== NON_PMD_CORE_ID
) {
4830 ovs_mutex_lock(&dp
->non_pmd_mutex
);
4831 emc_cache_uninit(&pmd
->flow_cache
);
4832 pmd_free_cached_ports(pmd
);
4833 pmd_free_static_tx_qid(pmd
);
4834 ovs_mutex_unlock(&dp
->non_pmd_mutex
);
4836 latch_set(&pmd
->exit_latch
);
4837 dp_netdev_reload_pmd__(pmd
);
4838 xpthread_join(pmd
->thread
, NULL
);
4841 dp_netdev_pmd_clear_ports(pmd
);
4843 /* Purges the 'pmd''s flows after stopping the thread, but before
4844 * destroying the flows, so that the flow stats can be collected. */
4845 if (dp
->dp_purge_cb
) {
4846 dp
->dp_purge_cb(dp
->dp_purge_aux
, pmd
->core_id
);
4848 cmap_remove(&pmd
->dp
->poll_threads
, &pmd
->node
, hash_int(pmd
->core_id
, 0));
4849 dp_netdev_pmd_unref(pmd
);
4852 /* Destroys all pmd threads. If 'non_pmd' is true it also destroys the non pmd
4855 dp_netdev_destroy_all_pmds(struct dp_netdev
*dp
, bool non_pmd
)
4857 struct dp_netdev_pmd_thread
*pmd
;
4858 struct dp_netdev_pmd_thread
**pmd_list
;
4859 size_t k
= 0, n_pmds
;
4861 n_pmds
= cmap_count(&dp
->poll_threads
);
4862 pmd_list
= xcalloc(n_pmds
, sizeof *pmd_list
);
4864 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
4865 if (!non_pmd
&& pmd
->core_id
== NON_PMD_CORE_ID
) {
4868 /* We cannot call dp_netdev_del_pmd(), since it alters
4869 * 'dp->poll_threads' (while we're iterating it) and it
4871 ovs_assert(k
< n_pmds
);
4872 pmd_list
[k
++] = pmd
;
4875 for (size_t i
= 0; i
< k
; i
++) {
4876 dp_netdev_del_pmd(dp
, pmd_list
[i
]);
/* Deletes all rx queues from pmd->poll_list and all the ports from
 * pmd->tx_ports. */
static void
dp_netdev_pmd_clear_ports(struct dp_netdev_pmd_thread *pmd)
{
    struct rxq_poll *poll;
    struct tx_port *port;

    ovs_mutex_lock(&pmd->port_mutex);
    HMAP_FOR_EACH_POP (poll, node, &pmd->poll_list) {
        free(poll);
    }
    HMAP_FOR_EACH_POP (port, node, &pmd->tx_ports) {
        free(port);
    }
    ovs_mutex_unlock(&pmd->port_mutex);
}
/* Adds rx queue to poll_list of PMD thread, if it's not there already. */
static void
dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd,
                         struct dp_netdev_rxq *rxq)
    OVS_REQUIRES(pmd->port_mutex)
{
    int qid = netdev_rxq_get_queue_id(rxq->rx);
    uint32_t hash = hash_2words(odp_to_u32(rxq->port->port_no), qid);
    struct rxq_poll *poll;

    HMAP_FOR_EACH_WITH_HASH (poll, node, hash, &pmd->poll_list) {
        if (poll->rxq == rxq) {
            /* 'rxq' is already polled by this thread. Do nothing. */
            return;
        }
    }

    poll = xmalloc(sizeof *poll);
    poll->rxq = rxq;
    hmap_insert(&pmd->poll_list, &poll->node, hash);

    pmd->need_reload = true;
}
/* Delete 'poll' from poll_list of PMD thread. */
static void
dp_netdev_del_rxq_from_pmd(struct dp_netdev_pmd_thread *pmd,
                           struct rxq_poll *poll)
    OVS_REQUIRES(pmd->port_mutex)
{
    hmap_remove(&pmd->poll_list, &poll->node);
    free(poll);

    pmd->need_reload = true;
}
4935 /* Add 'port' to the tx port cache of 'pmd', which must be reloaded for the
4936 * changes to take effect. */
4938 dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread
*pmd
,
4939 struct dp_netdev_port
*port
)
4940 OVS_REQUIRES(pmd
->port_mutex
)
4944 tx
= tx_port_lookup(&pmd
->tx_ports
, port
->port_no
);
4946 /* 'port' is already on this thread tx cache. Do nothing. */
4950 tx
= xzalloc(sizeof *tx
);
4954 tx
->flush_time
= 0LL;
4955 dp_packet_batch_init(&tx
->output_pkts
);
4957 hmap_insert(&pmd
->tx_ports
, &tx
->node
, hash_port_no(tx
->port
->port_no
));
4958 pmd
->need_reload
= true;
/* Del 'tx' from the tx port cache of 'pmd', which must be reloaded for the
 * changes to take effect. */
static void
dp_netdev_del_port_tx_from_pmd(struct dp_netdev_pmd_thread *pmd,
                               struct tx_port *tx)
    OVS_REQUIRES(pmd->port_mutex)
{
    hmap_remove(&pmd->tx_ports, &tx->node);
    free(tx);

    pmd->need_reload = true;
}

static char *
dpif_netdev_get_datapath_version(void)
{
    return xstrdup("<built-in>");
}
4980 dp_netdev_flow_used(struct dp_netdev_flow
*netdev_flow
, int cnt
, int size
,
4981 uint16_t tcp_flags
, long long now
)
4985 atomic_store_relaxed(&netdev_flow
->stats
.used
, now
);
4986 non_atomic_ullong_add(&netdev_flow
->stats
.packet_count
, cnt
);
4987 non_atomic_ullong_add(&netdev_flow
->stats
.byte_count
, size
);
4988 atomic_read_relaxed(&netdev_flow
->stats
.tcp_flags
, &flags
);
4990 atomic_store_relaxed(&netdev_flow
->stats
.tcp_flags
, flags
);
4994 dp_netdev_upcall(struct dp_netdev_pmd_thread
*pmd
, struct dp_packet
*packet_
,
4995 struct flow
*flow
, struct flow_wildcards
*wc
, ovs_u128
*ufid
,
4996 enum dpif_upcall_type type
, const struct nlattr
*userdata
,
4997 struct ofpbuf
*actions
, struct ofpbuf
*put_actions
)
4999 struct dp_netdev
*dp
= pmd
->dp
;
5001 if (OVS_UNLIKELY(!dp
->upcall_cb
)) {
5005 if (OVS_UNLIKELY(!VLOG_DROP_DBG(&upcall_rl
))) {
5006 struct ds ds
= DS_EMPTY_INITIALIZER
;
5009 struct odp_flow_key_parms odp_parms
= {
5011 .mask
= wc
? &wc
->masks
: NULL
,
5012 .support
= dp_netdev_support
,
5015 ofpbuf_init(&key
, 0);
5016 odp_flow_key_from_flow(&odp_parms
, &key
);
5017 packet_str
= ofp_dp_packet_to_string(packet_
);
5019 odp_flow_key_format(key
.data
, key
.size
, &ds
);
5021 VLOG_DBG("%s: %s upcall:\n%s\n%s", dp
->name
,
5022 dpif_upcall_type_to_string(type
), ds_cstr(&ds
), packet_str
);
5024 ofpbuf_uninit(&key
);
5030 return dp
->upcall_cb(packet_
, flow
, ufid
, pmd
->core_id
, type
, userdata
,
5031 actions
, wc
, put_actions
, dp
->upcall_aux
);
5034 static inline uint32_t
5035 dpif_netdev_packet_get_rss_hash_orig_pkt(struct dp_packet
*packet
,
5036 const struct miniflow
*mf
)
5040 if (OVS_LIKELY(dp_packet_rss_valid(packet
))) {
5041 hash
= dp_packet_get_rss_hash(packet
);
5043 hash
= miniflow_hash_5tuple(mf
, 0);
5044 dp_packet_set_rss_hash(packet
, hash
);
5050 static inline uint32_t
5051 dpif_netdev_packet_get_rss_hash(struct dp_packet
*packet
,
5052 const struct miniflow
*mf
)
5054 uint32_t hash
, recirc_depth
;
5056 if (OVS_LIKELY(dp_packet_rss_valid(packet
))) {
5057 hash
= dp_packet_get_rss_hash(packet
);
5059 hash
= miniflow_hash_5tuple(mf
, 0);
5060 dp_packet_set_rss_hash(packet
, hash
);
5063 /* The RSS hash must account for the recirculation depth to avoid
5064 * collisions in the exact match cache */
5065 recirc_depth
= *recirc_depth_get_unsafe();
5066 if (OVS_UNLIKELY(recirc_depth
)) {
5067 hash
= hash_finish(hash
, recirc_depth
);
5068 dp_packet_set_rss_hash(packet
, hash
);
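
/* Comment added for illustration: without folding in the depth, a packet
 * recirculated by e.g. a tunnel pop or conntrack action would carry the same
 * RSS hash on its second pass through the datapath, and its exact match
 * cache entry would collide with the one created on the first pass.  Mixing
 * the recirculation depth in with hash_finish() keeps the two lookups
 * distinct. */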
5073 struct packet_batch_per_flow
{
5074 unsigned int byte_count
;
5076 struct dp_netdev_flow
*flow
;
5078 struct dp_packet_batch array
;
5082 packet_batch_per_flow_update(struct packet_batch_per_flow
*batch
,
5083 struct dp_packet
*packet
,
5084 const struct miniflow
*mf
)
5086 batch
->byte_count
+= dp_packet_size(packet
);
5087 batch
->tcp_flags
|= miniflow_get_tcp_flags(mf
);
5088 batch
->array
.packets
[batch
->array
.count
++] = packet
;
5092 packet_batch_per_flow_init(struct packet_batch_per_flow
*batch
,
5093 struct dp_netdev_flow
*flow
)
5095 flow
->batch
= batch
;
5098 dp_packet_batch_init(&batch
->array
);
5099 batch
->byte_count
= 0;
5100 batch
->tcp_flags
= 0;
5104 packet_batch_per_flow_execute(struct packet_batch_per_flow
*batch
,
5105 struct dp_netdev_pmd_thread
*pmd
)
5107 struct dp_netdev_actions
*actions
;
5108 struct dp_netdev_flow
*flow
= batch
->flow
;
5110 dp_netdev_flow_used(flow
, batch
->array
.count
, batch
->byte_count
,
5111 batch
->tcp_flags
, pmd
->ctx
.now
/ 1000);
5113 actions
= dp_netdev_flow_get_actions(flow
);
5115 dp_netdev_execute_actions(pmd
, &batch
->array
, true, &flow
->flow
,
5116 actions
->actions
, actions
->size
);
5120 dp_netdev_queue_batches(struct dp_packet
*pkt
,
5121 struct dp_netdev_flow
*flow
, const struct miniflow
*mf
,
5122 struct packet_batch_per_flow
*batches
,
5125 struct packet_batch_per_flow
*batch
= flow
->batch
;
5127 if (OVS_UNLIKELY(!batch
)) {
5128 batch
= &batches
[(*n_batches
)++];
5129 packet_batch_per_flow_init(batch
, flow
);
5132 packet_batch_per_flow_update(batch
, pkt
, mf
);
5135 /* Try to process all ('cnt') the 'packets' using only the exact match cache
5136 * 'pmd->flow_cache'. If a flow is not found for a packet 'packets[i]', the
5137 * miniflow is copied into 'keys' and the packet pointer is moved at the
5138 * beginning of the 'packets' array.
5140 * The function returns the number of packets that needs to be processed in the
5141 * 'packets' array (they have been moved to the beginning of the vector).
5143 * For performance reasons a caller may choose not to initialize the metadata
5144 * in 'packets_'. If 'md_is_valid' is false, the metadata in 'packets'
5145 * is not valid and must be initialized by this function using 'port_no'.
5146 * If 'md_is_valid' is true, the metadata is already valid and 'port_no'
5149 static inline size_t
5150 emc_processing(struct dp_netdev_pmd_thread
*pmd
,
5151 struct dp_packet_batch
*packets_
,
5152 struct netdev_flow_key
*keys
,
5153 struct packet_batch_per_flow batches
[], size_t *n_batches
,
5154 bool md_is_valid
, odp_port_t port_no
)
5156 struct emc_cache
*flow_cache
= &pmd
->flow_cache
;
5157 struct netdev_flow_key
*key
= &keys
[0];
5158 size_t n_missed
= 0, n_dropped
= 0;
5159 struct dp_packet
*packet
;
5160 const size_t cnt
= dp_packet_batch_size(packets_
);
5164 atomic_read_relaxed(&pmd
->dp
->emc_insert_min
, &cur_min
);
5165 pmd_perf_update_counter(&pmd
->perf_stats
,
5166 md_is_valid
? PMD_STAT_RECIRC
: PMD_STAT_RECV
,
5169 DP_PACKET_BATCH_REFILL_FOR_EACH (i
, cnt
, packet
, packets_
) {
5170 struct dp_netdev_flow
*flow
;
5172 if (OVS_UNLIKELY(dp_packet_size(packet
) < ETH_HEADER_LEN
)) {
5173 dp_packet_delete(packet
);
5179 struct dp_packet
**packets
= packets_
->packets
;
5180 /* Prefetch next packet data and metadata. */
5181 OVS_PREFETCH(dp_packet_data(packets
[i
+1]));
5182 pkt_metadata_prefetch_init(&packets
[i
+1]->md
);
5186 pkt_metadata_init(&packet
->md
, port_no
);
5188 miniflow_extract(packet
, &key
->mf
);
5189 key
->len
= 0; /* Not computed yet. */
5190 /* If EMC is disabled skip hash computation and emc_lookup */
5193 key
->hash
= dpif_netdev_packet_get_rss_hash_orig_pkt(packet
,
5196 key
->hash
= dpif_netdev_packet_get_rss_hash(packet
, &key
->mf
);
5198 flow
= emc_lookup(flow_cache
, key
);
5202 if (OVS_LIKELY(flow
)) {
5203 dp_netdev_queue_batches(packet
, flow
, &key
->mf
, batches
,
5206 /* Exact match cache missed. Group missed packets together at
5207 * the beginning of the 'packets' array. */
5208 dp_packet_batch_refill(packets_
, packet
, i
);
5209 /* 'key[n_missed]' contains the key of the current packet and it
5210 * must be returned to the caller. The next key should be extracted
5211 * to 'keys[n_missed + 1]'. */
5212 key
= &keys
[++n_missed
];
5216 pmd_perf_update_counter(&pmd
->perf_stats
, PMD_STAT_EXACT_HIT
,
5217 cnt
- n_dropped
- n_missed
);
5219 return dp_packet_batch_size(packets_
);
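
/* Note added for clarity (not in the original source): when emc_processing()
 * returns, 'packets_' holds only the EMC misses, compacted to the front of
 * the batch, and 'keys[0 .. n_missed - 1]' hold their extracted miniflows;
 * that is exactly the shape of input that fast_path_processing() expects. */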
5223 handle_packet_upcall(struct dp_netdev_pmd_thread
*pmd
,
5224 struct dp_packet
*packet
,
5225 const struct netdev_flow_key
*key
,
5226 struct ofpbuf
*actions
, struct ofpbuf
*put_actions
)
5228 struct ofpbuf
*add_actions
;
5229 struct dp_packet_batch b
;
5233 uint64_t cycles
= cycles_counter_update(&pmd
->perf_stats
);
5235 match
.tun_md
.valid
= false;
5236 miniflow_expand(&key
->mf
, &match
.flow
);
5238 ofpbuf_clear(actions
);
5239 ofpbuf_clear(put_actions
);
5241 dpif_flow_hash(pmd
->dp
->dpif
, &match
.flow
, sizeof match
.flow
, &ufid
);
5242 error
= dp_netdev_upcall(pmd
, packet
, &match
.flow
, &match
.wc
,
5243 &ufid
, DPIF_UC_MISS
, NULL
, actions
,
5245 if (OVS_UNLIKELY(error
&& error
!= ENOSPC
)) {
5246 dp_packet_delete(packet
);
5250 /* The Netlink encoding of datapath flow keys cannot express
5251 * wildcarding the presence of a VLAN tag. Instead, a missing VLAN
5252 * tag is interpreted as exact match on the fact that there is no
5253 * VLAN. Unless we refactor a lot of code that translates between
5254 * Netlink and struct flow representations, we have to do the same
5256 if (!match
.wc
.masks
.vlans
[0].tci
) {
5257 match
.wc
.masks
.vlans
[0].tci
= htons(0xffff);
5260 /* We can't allow the packet batching in the next loop to execute
5261 * the actions. Otherwise, if there are any slow path actions,
5262 * we'll send the packet up twice. */
5263 dp_packet_batch_init_packet(&b
, packet
);
5264 dp_netdev_execute_actions(pmd
, &b
, true, &match
.flow
,
5265 actions
->data
, actions
->size
);
5267 add_actions
= put_actions
->size
? put_actions
: actions
;
5268 if (OVS_LIKELY(error
!= ENOSPC
)) {
5269 struct dp_netdev_flow
*netdev_flow
;
5271 /* XXX: There's a race window where a flow covering this packet
5272 * could have already been installed since we last did the flow
5273 * lookup before upcall. This could be solved by moving the
5274 * mutex lock outside the loop, but that's an awful long time
5275 * to be locking everyone out of making flow installs. If we
5276 * move to a per-core classifier, it would be reasonable. */
5277 ovs_mutex_lock(&pmd
->flow_mutex
);
5278 netdev_flow
= dp_netdev_pmd_lookup_flow(pmd
, key
, NULL
);
5279 if (OVS_LIKELY(!netdev_flow
)) {
5280 netdev_flow
= dp_netdev_flow_add(pmd
, &match
, &ufid
,
5284 ovs_mutex_unlock(&pmd
->flow_mutex
);
5285 emc_probabilistic_insert(pmd
, key
, netdev_flow
);
5287 if (pmd_perf_metrics_enabled(pmd
)) {
5288 /* Update upcall stats. */
5289 cycles
= cycles_counter_update(&pmd
->perf_stats
) - cycles
;
5290 struct pmd_perf_stats
*s
= &pmd
->perf_stats
;
5291 s
->current
.upcalls
++;
5292 s
->current
.upcall_cycles
+= cycles
;
5293 histogram_add_sample(&s
->cycles_per_upcall
, cycles
);
5299 fast_path_processing(struct dp_netdev_pmd_thread
*pmd
,
5300 struct dp_packet_batch
*packets_
,
5301 struct netdev_flow_key
*keys
,
5302 struct packet_batch_per_flow batches
[],
5306 const size_t cnt
= dp_packet_batch_size(packets_
);
5307 #if !defined(__CHECKER__) && !defined(_WIN32)
5308 const size_t PKT_ARRAY_SIZE
= cnt
;
5310 /* Sparse or MSVC doesn't like variable length array. */
5311 enum { PKT_ARRAY_SIZE
= NETDEV_MAX_BURST
};
5313 struct dp_packet
*packet
;
5315 struct dpcls_rule
*rules
[PKT_ARRAY_SIZE
];
5316 struct dp_netdev
*dp
= pmd
->dp
;
5317 int upcall_ok_cnt
= 0, upcall_fail_cnt
= 0;
5318 int lookup_cnt
= 0, add_lookup_cnt
;
5321 for (size_t i
= 0; i
< cnt
; i
++) {
5322 /* Key length is needed in all the cases, hash computed on demand. */
5323 keys
[i
].len
= netdev_flow_key_size(miniflow_n_values(&keys
[i
].mf
));
5325 /* Get the classifier for the in_port */
5326 cls
= dp_netdev_pmd_lookup_dpcls(pmd
, in_port
);
5327 if (OVS_LIKELY(cls
)) {
5328 any_miss
= !dpcls_lookup(cls
, keys
, rules
, cnt
, &lookup_cnt
);
5331 memset(rules
, 0, sizeof(rules
));
5333 if (OVS_UNLIKELY(any_miss
) && !fat_rwlock_tryrdlock(&dp
->upcall_rwlock
)) {
5334 uint64_t actions_stub
[512 / 8], slow_stub
[512 / 8];
5335 struct ofpbuf actions
, put_actions
;
5337 ofpbuf_use_stub(&actions
, actions_stub
, sizeof actions_stub
);
5338 ofpbuf_use_stub(&put_actions
, slow_stub
, sizeof slow_stub
);
5340 DP_PACKET_BATCH_FOR_EACH (i
, packet
, packets_
) {
5341 struct dp_netdev_flow
*netdev_flow
;
5343 if (OVS_LIKELY(rules
[i
])) {
5347 /* It's possible that an earlier slow path execution installed
5348 * a rule covering this flow. In this case, it's a lot cheaper
5349 * to catch it here than execute a miss. */
5350 netdev_flow
= dp_netdev_pmd_lookup_flow(pmd
, &keys
[i
],
5353 lookup_cnt
+= add_lookup_cnt
;
5354 rules
[i
] = &netdev_flow
->cr
;
5358 int error
= handle_packet_upcall(pmd
, packet
, &keys
[i
],
5359 &actions
, &put_actions
);
5361 if (OVS_UNLIKELY(error
)) {
5368 ofpbuf_uninit(&actions
);
5369 ofpbuf_uninit(&put_actions
);
5370 fat_rwlock_unlock(&dp
->upcall_rwlock
);
5371 } else if (OVS_UNLIKELY(any_miss
)) {
5372 DP_PACKET_BATCH_FOR_EACH (i
, packet
, packets_
) {
5373 if (OVS_UNLIKELY(!rules
[i
])) {
5374 dp_packet_delete(packet
);
5380 DP_PACKET_BATCH_FOR_EACH (i
, packet
, packets_
) {
5381 struct dp_netdev_flow
*flow
;
5383 if (OVS_UNLIKELY(!rules
[i
])) {
5387 flow
= dp_netdev_flow_cast(rules
[i
]);
5389 emc_probabilistic_insert(pmd
, &keys
[i
], flow
);
5390 dp_netdev_queue_batches(packet
, flow
, &keys
[i
].mf
, batches
, n_batches
);
5393 pmd_perf_update_counter(&pmd
->perf_stats
, PMD_STAT_MASKED_HIT
,
5394 cnt
- upcall_ok_cnt
- upcall_fail_cnt
);
5395 pmd_perf_update_counter(&pmd
->perf_stats
, PMD_STAT_MASKED_LOOKUP
,
5397 pmd_perf_update_counter(&pmd
->perf_stats
, PMD_STAT_MISS
,
5399 pmd_perf_update_counter(&pmd
->perf_stats
, PMD_STAT_LOST
,
5403 /* Packets enter the datapath from a port (or from recirculation) here.
5405 * When 'md_is_valid' is true the metadata in 'packets' are already valid.
5406 * When false the metadata in 'packets' need to be initialized. */
5408 dp_netdev_input__(struct dp_netdev_pmd_thread
*pmd
,
5409 struct dp_packet_batch
*packets
,
5410 bool md_is_valid
, odp_port_t port_no
)
5412 #if !defined(__CHECKER__) && !defined(_WIN32)
5413 const size_t PKT_ARRAY_SIZE
= dp_packet_batch_size(packets
);
5415 /* Sparse or MSVC doesn't like variable length array. */
5416 enum { PKT_ARRAY_SIZE
= NETDEV_MAX_BURST
};
5418 OVS_ALIGNED_VAR(CACHE_LINE_SIZE
)
5419 struct netdev_flow_key keys
[PKT_ARRAY_SIZE
];
5420 struct packet_batch_per_flow batches
[PKT_ARRAY_SIZE
];
5425 emc_processing(pmd
, packets
, keys
, batches
, &n_batches
,
5426 md_is_valid
, port_no
);
5427 if (!dp_packet_batch_is_empty(packets
)) {
5428 /* Get ingress port from first packet's metadata. */
5429 in_port
= packets
->packets
[0]->md
.in_port
.odp_port
;
5430 fast_path_processing(pmd
, packets
, keys
,
5431 batches
, &n_batches
, in_port
);
5434 /* All the flow batches need to be reset before any call to
5435 * packet_batch_per_flow_execute() as it could potentially trigger
5436 * recirculation. When a packet matching flow ‘j’ happens to be
5437 * recirculated, the nested call to dp_netdev_input__() could potentially
5438 * classify the packet as matching another flow - say 'k'. It could happen
5439 * that in the previous call to dp_netdev_input__() that same flow 'k' had
5440 * already its own batches[k] still waiting to be served. So if its
5441 * ‘batch’ member is not reset, the recirculated packet would be wrongly
5442 * appended to batches[k] of the 1st call to dp_netdev_input__(). */
5444 for (i
= 0; i
< n_batches
; i
++) {
5445 batches
[i
].flow
->batch
= NULL
;
5448 for (i
= 0; i
< n_batches
; i
++) {
5449 packet_batch_per_flow_execute(&batches
[i
], pmd
);
5454 dp_netdev_input(struct dp_netdev_pmd_thread
*pmd
,
5455 struct dp_packet_batch
*packets
,
5458 dp_netdev_input__(pmd
, packets
, false, port_no
);
5462 dp_netdev_recirculate(struct dp_netdev_pmd_thread
*pmd
,
5463 struct dp_packet_batch
*packets
)
5465 dp_netdev_input__(pmd
, packets
, true, 0);
5468 struct dp_netdev_execute_aux
{
5469 struct dp_netdev_pmd_thread
*pmd
;
5470 const struct flow
*flow
;
5474 dpif_netdev_register_dp_purge_cb(struct dpif
*dpif
, dp_purge_callback
*cb
,
5477 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
5478 dp
->dp_purge_aux
= aux
;
5479 dp
->dp_purge_cb
= cb
;
5483 dpif_netdev_register_upcall_cb(struct dpif
*dpif
, upcall_callback
*cb
,
5486 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
5487 dp
->upcall_aux
= aux
;
5492 dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread
*pmd
,
5496 struct dp_netdev_port
*port
;
5499 HMAP_FOR_EACH (tx
, node
, &pmd
->send_port_cache
) {
5500 if (!tx
->port
->dynamic_txqs
) {
5503 interval
= pmd
->ctx
.now
- tx
->last_used
;
5504 if (tx
->qid
>= 0 && (purge
|| interval
>= XPS_TIMEOUT
)) {
5506 ovs_mutex_lock(&port
->txq_used_mutex
);
5507 port
->txq_used
[tx
->qid
]--;
5508 ovs_mutex_unlock(&port
->txq_used_mutex
);
5515 dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread
*pmd
,
5518 struct dp_netdev_port
*port
;
5520 int i
, min_cnt
, min_qid
;
5522 interval
= pmd
->ctx
.now
- tx
->last_used
;
5523 tx
->last_used
= pmd
->ctx
.now
;
5525 if (OVS_LIKELY(tx
->qid
>= 0 && interval
< XPS_TIMEOUT
)) {
5531 ovs_mutex_lock(&port
->txq_used_mutex
);
5533 port
->txq_used
[tx
->qid
]--;
5539 for (i
= 0; i
< netdev_n_txq(port
->netdev
); i
++) {
5540 if (port
->txq_used
[i
] < min_cnt
|| min_cnt
== -1) {
5541 min_cnt
= port
->txq_used
[i
];
5546 port
->txq_used
[min_qid
]++;
5549 ovs_mutex_unlock(&port
->txq_used_mutex
);
5551 dpif_netdev_xps_revalidate_pmd(pmd
, false);
5553 VLOG_DBG("Core %d: New TX queue ID %d for port \'%s\'.",
5554 pmd
->core_id
, tx
->qid
, netdev_get_name(tx
->port
->netdev
));
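    /* Comment added for illustration: the queue id picked here is only a
     * lease; dpif_netdev_xps_revalidate_pmd() gives it back after XPS_TIMEOUT
     * of inactivity, so queues of a port with fewer tx queues than polling
     * threads get rebalanced over time.  Since such a queue may still be
     * shared, sends on 'dynamic_txqs' ports are done with 'concurrent_txq'
     * set so the netdev implementation can serialize them. */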
static struct tx_port *
pmd_tnl_port_cache_lookup(const struct dp_netdev_pmd_thread *pmd,
                          odp_port_t port_no)
{
    return tx_port_lookup(&pmd->tnl_port_cache, port_no);
}

static struct tx_port *
pmd_send_port_cache_lookup(const struct dp_netdev_pmd_thread *pmd,
                           odp_port_t port_no)
{
    return tx_port_lookup(&pmd->send_port_cache, port_no);
}
5573 push_tnl_action(const struct dp_netdev_pmd_thread
*pmd
,
5574 const struct nlattr
*attr
,
5575 struct dp_packet_batch
*batch
)
5577 struct tx_port
*tun_port
;
5578 const struct ovs_action_push_tnl
*data
;
5581 data
= nl_attr_get(attr
);
5583 tun_port
= pmd_tnl_port_cache_lookup(pmd
, data
->tnl_port
);
5588 err
= netdev_push_header(tun_port
->port
->netdev
, batch
, data
);
5593 dp_packet_delete_batch(batch
, true);
5598 dp_execute_userspace_action(struct dp_netdev_pmd_thread
*pmd
,
5599 struct dp_packet
*packet
, bool should_steal
,
5600 struct flow
*flow
, ovs_u128
*ufid
,
5601 struct ofpbuf
*actions
,
5602 const struct nlattr
*userdata
)
5604 struct dp_packet_batch b
;
5607 ofpbuf_clear(actions
);
5609 error
= dp_netdev_upcall(pmd
, packet
, flow
, NULL
, ufid
,
5610 DPIF_UC_ACTION
, userdata
, actions
,
5612 if (!error
|| error
== ENOSPC
) {
5613 dp_packet_batch_init_packet(&b
, packet
);
5614 dp_netdev_execute_actions(pmd
, &b
, should_steal
, flow
,
5615 actions
->data
, actions
->size
);
5616 } else if (should_steal
) {
5617 dp_packet_delete(packet
);
5622 dp_execute_cb(void *aux_
, struct dp_packet_batch
*packets_
,
5623 const struct nlattr
*a
, bool should_steal
)
5624 OVS_NO_THREAD_SAFETY_ANALYSIS
5626 struct dp_netdev_execute_aux
*aux
= aux_
;
5627 uint32_t *depth
= recirc_depth_get();
5628 struct dp_netdev_pmd_thread
*pmd
= aux
->pmd
;
5629 struct dp_netdev
*dp
= pmd
->dp
;
5630 int type
= nl_attr_type(a
);
5633 switch ((enum ovs_action_attr
)type
) {
5634 case OVS_ACTION_ATTR_OUTPUT
:
5635 p
= pmd_send_port_cache_lookup(pmd
, nl_attr_get_odp_port(a
));
5636 if (OVS_LIKELY(p
)) {
5637 struct dp_packet
*packet
;
5638 struct dp_packet_batch out
;
5640 if (!should_steal
) {
5641 dp_packet_batch_clone(&out
, packets_
);
5642 dp_packet_batch_reset_cutlen(packets_
);
5645 dp_packet_batch_apply_cutlen(packets_
);
5648 if (OVS_UNLIKELY(!dp_packet_batch_is_empty(&p
->output_pkts
)
5649 && packets_
->packets
[0]->source
5650 != p
->output_pkts
.packets
[0]->source
)) {
5651 /* XXX: netdev-dpdk assumes that all packets in a single
5652 * output batch has the same source. Flush here to
5653 * avoid memory access issues. */
5654 dp_netdev_pmd_flush_output_on_port(pmd
, p
);
5657 if (dp_packet_batch_size(&p
->output_pkts
)
5658 + dp_packet_batch_size(packets_
) > NETDEV_MAX_BURST
) {
5659 /* Flush here to avoid overflow. */
5660 dp_netdev_pmd_flush_output_on_port(pmd
, p
);
5663 if (dp_packet_batch_is_empty(&p
->output_pkts
)) {
5664 pmd
->n_output_batches
++;
5667 DP_PACKET_BATCH_FOR_EACH (i
, packet
, packets_
) {
5668 p
->output_pkts_rxqs
[dp_packet_batch_size(&p
->output_pkts
)] =
5670 dp_packet_batch_add(&p
->output_pkts
, packet
);
5676 case OVS_ACTION_ATTR_TUNNEL_PUSH
:
5678 /* We're requested to push tunnel header, but also we need to take
5679 * the ownership of these packets. Thus, we can avoid performing
5680 * the action, because the caller will not use the result anyway.
5681 * Just break to free the batch. */
5684 dp_packet_batch_apply_cutlen(packets_
);
5685 push_tnl_action(pmd
, a
, packets_
);
5688 case OVS_ACTION_ATTR_TUNNEL_POP
:
5689 if (*depth
< MAX_RECIRC_DEPTH
) {
5690 struct dp_packet_batch
*orig_packets_
= packets_
;
5691 odp_port_t portno
= nl_attr_get_odp_port(a
);
5693 p
= pmd_tnl_port_cache_lookup(pmd
, portno
);
5695 struct dp_packet_batch tnl_pkt
;
5697 if (!should_steal
) {
5698 dp_packet_batch_clone(&tnl_pkt
, packets_
);
5699 packets_
= &tnl_pkt
;
5700 dp_packet_batch_reset_cutlen(orig_packets_
);
5703 dp_packet_batch_apply_cutlen(packets_
);
5705 netdev_pop_header(p
->port
->netdev
, packets_
);
5706 if (dp_packet_batch_is_empty(packets_
)) {
5710 struct dp_packet
*packet
;
5711 DP_PACKET_BATCH_FOR_EACH (i
, packet
, packets_
) {
5712 packet
->md
.in_port
.odp_port
= portno
;
5716 dp_netdev_recirculate(pmd
, packets_
);
5723 case OVS_ACTION_ATTR_USERSPACE
:
5724 if (!fat_rwlock_tryrdlock(&dp
->upcall_rwlock
)) {
5725 struct dp_packet_batch
*orig_packets_
= packets_
;
5726 const struct nlattr
*userdata
;
5727 struct dp_packet_batch usr_pkt
;
5728 struct ofpbuf actions
;
5733 userdata
= nl_attr_find_nested(a
, OVS_USERSPACE_ATTR_USERDATA
);
5734 ofpbuf_init(&actions
, 0);
5736 if (packets_
->trunc
) {
5737 if (!should_steal
) {
5738 dp_packet_batch_clone(&usr_pkt
, packets_
);
5739 packets_
= &usr_pkt
;
5741 dp_packet_batch_reset_cutlen(orig_packets_
);
5744 dp_packet_batch_apply_cutlen(packets_
);
5747 struct dp_packet
*packet
;
5748 DP_PACKET_BATCH_FOR_EACH (i
, packet
, packets_
) {
5749 flow_extract(packet
, &flow
);
5750 dpif_flow_hash(dp
->dpif
, &flow
, sizeof flow
, &ufid
);
5751 dp_execute_userspace_action(pmd
, packet
, should_steal
, &flow
,
5752 &ufid
, &actions
, userdata
);
5756 dp_packet_delete_batch(packets_
, true);
5759 ofpbuf_uninit(&actions
);
5760 fat_rwlock_unlock(&dp
->upcall_rwlock
);
5766 case OVS_ACTION_ATTR_RECIRC
:
5767 if (*depth
< MAX_RECIRC_DEPTH
) {
5768 struct dp_packet_batch recirc_pkts
;
5770 if (!should_steal
) {
5771 dp_packet_batch_clone(&recirc_pkts
, packets_
);
5772 packets_
= &recirc_pkts
;
5775 struct dp_packet
*packet
;
5776 DP_PACKET_BATCH_FOR_EACH (i
, packet
, packets_
) {
5777 packet
->md
.recirc_id
= nl_attr_get_u32(a
);
5781 dp_netdev_recirculate(pmd
, packets_
);
5787 VLOG_WARN("Packet dropped. Max recirculation depth exceeded.");
5790 case OVS_ACTION_ATTR_CT
: {
5791 const struct nlattr
*b
;
5793 bool commit
= false;
5796 const char *helper
= NULL
;
5797 const uint32_t *setmark
= NULL
;
5798 const struct ovs_key_ct_labels
*setlabel
= NULL
;
5799 struct nat_action_info_t nat_action_info
;
5800 struct nat_action_info_t
*nat_action_info_ref
= NULL
;
5801 bool nat_config
= false;
5803 NL_ATTR_FOR_EACH_UNSAFE (b
, left
, nl_attr_get(a
),
5804 nl_attr_get_size(a
)) {
5805 enum ovs_ct_attr sub_type
= nl_attr_type(b
);
5808 case OVS_CT_ATTR_FORCE_COMMIT
:
5811 case OVS_CT_ATTR_COMMIT
:
5814 case OVS_CT_ATTR_ZONE
:
5815 zone
= nl_attr_get_u16(b
);
5817 case OVS_CT_ATTR_HELPER
:
5818 helper
= nl_attr_get_string(b
);
5820 case OVS_CT_ATTR_MARK
:
5821 setmark
= nl_attr_get(b
);
5823 case OVS_CT_ATTR_LABELS
:
5824 setlabel
= nl_attr_get(b
);
5826 case OVS_CT_ATTR_EVENTMASK
:
5827 /* Silently ignored, as userspace datapath does not generate
5828 * netlink events. */
5830 case OVS_CT_ATTR_NAT
: {
5831 const struct nlattr
*b_nest
;
5832 unsigned int left_nest
;
5833 bool ip_min_specified
= false;
5834 bool proto_num_min_specified
= false;
5835 bool ip_max_specified
= false;
5836 bool proto_num_max_specified
= false;
5837 memset(&nat_action_info
, 0, sizeof nat_action_info
);
5838 nat_action_info_ref
= &nat_action_info
;
5840 NL_NESTED_FOR_EACH_UNSAFE (b_nest
, left_nest
, b
) {
5841 enum ovs_nat_attr sub_type_nest
= nl_attr_type(b_nest
);
5843 switch (sub_type_nest
) {
5844 case OVS_NAT_ATTR_SRC
:
5845 case OVS_NAT_ATTR_DST
:
5847 nat_action_info
.nat_action
|=
5848 ((sub_type_nest
== OVS_NAT_ATTR_SRC
)
5849 ? NAT_ACTION_SRC
: NAT_ACTION_DST
);
5851 case OVS_NAT_ATTR_IP_MIN
:
5852 memcpy(&nat_action_info
.min_addr
,
5853 nl_attr_get(b_nest
),
5854 nl_attr_get_size(b_nest
));
5855 ip_min_specified
= true;
5857 case OVS_NAT_ATTR_IP_MAX
:
5858 memcpy(&nat_action_info
.max_addr
,
5859 nl_attr_get(b_nest
),
5860 nl_attr_get_size(b_nest
));
5861 ip_max_specified
= true;
5863 case OVS_NAT_ATTR_PROTO_MIN
:
5864 nat_action_info
.min_port
=
5865 nl_attr_get_u16(b_nest
);
5866 proto_num_min_specified
= true;
5868 case OVS_NAT_ATTR_PROTO_MAX
:
5869 nat_action_info
.max_port
=
5870 nl_attr_get_u16(b_nest
);
5871 proto_num_max_specified
= true;
5873 case OVS_NAT_ATTR_PERSISTENT
:
5874 case OVS_NAT_ATTR_PROTO_HASH
:
5875 case OVS_NAT_ATTR_PROTO_RANDOM
:
5877 case OVS_NAT_ATTR_UNSPEC
:
5878 case __OVS_NAT_ATTR_MAX
:
5883 if (ip_min_specified
&& !ip_max_specified
) {
5884 nat_action_info
.max_addr
= nat_action_info
.min_addr
;
5886 if (proto_num_min_specified
&& !proto_num_max_specified
) {
5887 nat_action_info
.max_port
= nat_action_info
.min_port
;
5889 if (proto_num_min_specified
|| proto_num_max_specified
) {
5890 if (nat_action_info
.nat_action
& NAT_ACTION_SRC
) {
5891 nat_action_info
.nat_action
|= NAT_ACTION_SRC_PORT
;
5892 } else if (nat_action_info
.nat_action
& NAT_ACTION_DST
) {
5893 nat_action_info
.nat_action
|= NAT_ACTION_DST_PORT
;
5898 case OVS_CT_ATTR_UNSPEC
:
5899 case __OVS_CT_ATTR_MAX
:
5904 /* We won't be able to function properly in this case, hence
5905 * complain loudly. */
5906 if (nat_config
&& !commit
) {
5907 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 5);
5908 VLOG_WARN_RL(&rl
, "NAT specified without commit.");
5911 conntrack_execute(&dp
->conntrack
, packets_
, aux
->flow
->dl_type
, force
,
5912 commit
, zone
, setmark
, setlabel
, aux
->flow
->tp_src
,
5913 aux
->flow
->tp_dst
, helper
, nat_action_info_ref
,
5914 pmd
->ctx
.now
/ 1000);
5918 case OVS_ACTION_ATTR_METER
:
5919 dp_netdev_run_meter(pmd
->dp
, packets_
, nl_attr_get_u32(a
),
5923 case OVS_ACTION_ATTR_PUSH_VLAN
:
5924 case OVS_ACTION_ATTR_POP_VLAN
:
5925 case OVS_ACTION_ATTR_PUSH_MPLS
:
5926 case OVS_ACTION_ATTR_POP_MPLS
:
5927 case OVS_ACTION_ATTR_SET
:
5928 case OVS_ACTION_ATTR_SET_MASKED
:
5929 case OVS_ACTION_ATTR_SAMPLE
:
5930 case OVS_ACTION_ATTR_HASH
:
5931 case OVS_ACTION_ATTR_UNSPEC
:
5932 case OVS_ACTION_ATTR_TRUNC
:
5933 case OVS_ACTION_ATTR_PUSH_ETH
:
5934 case OVS_ACTION_ATTR_POP_ETH
:
5935 case OVS_ACTION_ATTR_CLONE
:
5936 case OVS_ACTION_ATTR_PUSH_NSH
:
5937 case OVS_ACTION_ATTR_POP_NSH
:
5938 case OVS_ACTION_ATTR_CT_CLEAR
:
5939 case __OVS_ACTION_ATTR_MAX
:
5943 dp_packet_delete_batch(packets_
, should_steal
);
5947 dp_netdev_execute_actions(struct dp_netdev_pmd_thread
*pmd
,
5948 struct dp_packet_batch
*packets
,
5949 bool should_steal
, const struct flow
*flow
,
5950 const struct nlattr
*actions
, size_t actions_len
)
5952 struct dp_netdev_execute_aux aux
= { pmd
, flow
};
5954 odp_execute_actions(&aux
, packets
, should_steal
, actions
,
5955 actions_len
, dp_execute_cb
);
5958 struct dp_netdev_ct_dump
{
5959 struct ct_dpif_dump_state up
;
5960 struct conntrack_dump dump
;
5961 struct conntrack
*ct
;
5962 struct dp_netdev
*dp
;
5966 dpif_netdev_ct_dump_start(struct dpif
*dpif
, struct ct_dpif_dump_state
**dump_
,
5967 const uint16_t *pzone
, int *ptot_bkts
)
5969 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
5970 struct dp_netdev_ct_dump
*dump
;
5972 dump
= xzalloc(sizeof *dump
);
5974 dump
->ct
= &dp
->conntrack
;
5976 conntrack_dump_start(&dp
->conntrack
, &dump
->dump
, pzone
, ptot_bkts
);
5984 dpif_netdev_ct_dump_next(struct dpif
*dpif OVS_UNUSED
,
5985 struct ct_dpif_dump_state
*dump_
,
5986 struct ct_dpif_entry
*entry
)
5988 struct dp_netdev_ct_dump
*dump
;
5990 INIT_CONTAINER(dump
, dump_
, up
);
5992 return conntrack_dump_next(&dump
->dump
, entry
);
5996 dpif_netdev_ct_dump_done(struct dpif
*dpif OVS_UNUSED
,
5997 struct ct_dpif_dump_state
*dump_
)
5999 struct dp_netdev_ct_dump
*dump
;
6002 INIT_CONTAINER(dump
, dump_
, up
);
6004 err
= conntrack_dump_done(&dump
->dump
);
6012 dpif_netdev_ct_flush(struct dpif
*dpif
, const uint16_t *zone
,
6013 const struct ct_dpif_tuple
*tuple
)
6015 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
6018 return conntrack_flush_tuple(&dp
->conntrack
, tuple
, zone
? *zone
: 0);
6020 return conntrack_flush(&dp
->conntrack
, zone
);
6024 dpif_netdev_ct_set_maxconns(struct dpif
*dpif
, uint32_t maxconns
)
6026 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
6028 return conntrack_set_maxconns(&dp
->conntrack
, maxconns
);
6032 dpif_netdev_ct_get_maxconns(struct dpif
*dpif
, uint32_t *maxconns
)
6034 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
6036 return conntrack_get_maxconns(&dp
->conntrack
, maxconns
);
6040 dpif_netdev_ct_get_nconns(struct dpif
*dpif
, uint32_t *nconns
)
6042 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
6044 return conntrack_get_nconns(&dp
->conntrack
, nconns
);
6047 const struct dpif_class dpif_netdev_class
= {
6050 dpif_netdev_enumerate
,
6051 dpif_netdev_port_open_type
,
6054 dpif_netdev_destroy
,
6057 dpif_netdev_get_stats
,
6058 dpif_netdev_port_add
,
6059 dpif_netdev_port_del
,
6060 dpif_netdev_port_set_config
,
6061 dpif_netdev_port_query_by_number
,
6062 dpif_netdev_port_query_by_name
,
6063 NULL
, /* port_get_pid */
6064 dpif_netdev_port_dump_start
,
6065 dpif_netdev_port_dump_next
,
6066 dpif_netdev_port_dump_done
,
6067 dpif_netdev_port_poll
,
6068 dpif_netdev_port_poll_wait
,
6069 dpif_netdev_flow_flush
,
6070 dpif_netdev_flow_dump_create
,
6071 dpif_netdev_flow_dump_destroy
,
6072 dpif_netdev_flow_dump_thread_create
,
6073 dpif_netdev_flow_dump_thread_destroy
,
6074 dpif_netdev_flow_dump_next
,
6075 dpif_netdev_operate
,
6076 NULL
, /* recv_set */
6077 NULL
, /* handlers_set */
6078 dpif_netdev_set_config
,
6079 dpif_netdev_queue_to_priority
,
6081 NULL
, /* recv_wait */
6082 NULL
, /* recv_purge */
6083 dpif_netdev_register_dp_purge_cb
,
6084 dpif_netdev_register_upcall_cb
,
6085 dpif_netdev_enable_upcall
,
6086 dpif_netdev_disable_upcall
,
6087 dpif_netdev_get_datapath_version
,
6088 dpif_netdev_ct_dump_start
,
6089 dpif_netdev_ct_dump_next
,
6090 dpif_netdev_ct_dump_done
,
6091 dpif_netdev_ct_flush
,
6092 dpif_netdev_ct_set_maxconns
,
6093 dpif_netdev_ct_get_maxconns
,
6094 dpif_netdev_ct_get_nconns
,
6095 dpif_netdev_meter_get_features
,
6096 dpif_netdev_meter_set
,
6097 dpif_netdev_meter_get
,
6098 dpif_netdev_meter_del
,
6102 dpif_dummy_change_port_number(struct unixctl_conn
*conn
, int argc OVS_UNUSED
,
6103 const char *argv
[], void *aux OVS_UNUSED
)
6105 struct dp_netdev_port
*port
;
6106 struct dp_netdev
*dp
;
6109 ovs_mutex_lock(&dp_netdev_mutex
);
6110 dp
= shash_find_data(&dp_netdevs
, argv
[1]);
6111 if (!dp
|| !dpif_netdev_class_is_dummy(dp
->class)) {
6112 ovs_mutex_unlock(&dp_netdev_mutex
);
6113 unixctl_command_reply_error(conn
, "unknown datapath or not a dummy");
6116 ovs_refcount_ref(&dp
->ref_cnt
);
6117 ovs_mutex_unlock(&dp_netdev_mutex
);
6119 ovs_mutex_lock(&dp
->port_mutex
);
6120 if (get_port_by_name(dp
, argv
[2], &port
)) {
6121 unixctl_command_reply_error(conn
, "unknown port");
6125 port_no
= u32_to_odp(atoi(argv
[3]));
6126 if (!port_no
|| port_no
== ODPP_NONE
) {
6127 unixctl_command_reply_error(conn
, "bad port number");
6130 if (dp_netdev_lookup_port(dp
, port_no
)) {
6131 unixctl_command_reply_error(conn
, "port number already in use");
6136 hmap_remove(&dp
->ports
, &port
->node
);
6137 reconfigure_datapath(dp
);
6139 /* Reinsert with new port number. */
6140 port
->port_no
= port_no
;
6141 hmap_insert(&dp
->ports
, &port
->node
, hash_port_no(port_no
));
6142 reconfigure_datapath(dp
);
6144 seq_change(dp
->port_seq
);
6145 unixctl_command_reply(conn
, NULL
);
6148 ovs_mutex_unlock(&dp
->port_mutex
);
6149 dp_netdev_unref(dp
);
6153 dpif_dummy_register__(const char *type
)
6155 struct dpif_class
*class;
6157 class = xmalloc(sizeof *class);
6158 *class = dpif_netdev_class
;
6159 class->type
= xstrdup(type
);
6160 dp_register_provider(class);
6164 dpif_dummy_override(const char *type
)
6169 * Ignore EAFNOSUPPORT to allow --enable-dummy=system with
6170 * a userland-only build. It's useful for testsuite.
6172 error
= dp_unregister_provider(type
);
6173 if (error
== 0 || error
== EAFNOSUPPORT
) {
6174 dpif_dummy_register__(type
);
6179 dpif_dummy_register(enum dummy_level level
)
6181 if (level
== DUMMY_OVERRIDE_ALL
) {
6186 dp_enumerate_types(&types
);
6187 SSET_FOR_EACH (type
, &types
) {
6188 dpif_dummy_override(type
);
6190 sset_destroy(&types
);
6191 } else if (level
== DUMMY_OVERRIDE_SYSTEM
) {
6192 dpif_dummy_override("system");
6195 dpif_dummy_register__("dummy");
6197 unixctl_command_register("dpif-dummy/change-port-number",
6198 "dp port new-number",
6199 3, 3, dpif_dummy_change_port_number
, NULL
);
6202 /* Datapath Classifier. */
6204 /* A set of rules that all have the same fields wildcarded. */
6205 struct dpcls_subtable
{
6206 /* The fields are only used by writers. */
6207 struct cmap_node cmap_node OVS_GUARDED
; /* Within dpcls 'subtables_map'. */
6209 /* These fields are accessed by readers. */
6210 struct cmap rules
; /* Contains "struct dpcls_rule"s. */
6211 uint32_t hit_cnt
; /* Number of match hits in subtable in current
6212 optimization interval. */
6213 struct netdev_flow_key mask
; /* Wildcards for fields (const). */
6214 /* 'mask' must be the last field, additional space is allocated here. */
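
/* Comment added for illustration: every distinct wildcard mask gets its own
 * subtable.  E.g. rules that match only on in_port and dl_dst share one
 * subtable while rules that additionally match on nw_src go to another, so a
 * lookup may have to probe several subtables before it finds a match.  The
 * 'subtables' pvector in struct dpcls is therefore kept sorted by hit count
 * (see dpcls_sort_subtable_vector()) so the busiest subtables are tried
 * first. */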
6217 /* Initializes 'cls' as a classifier that initially contains no classification
6220 dpcls_init(struct dpcls
*cls
)
6222 cmap_init(&cls
->subtables_map
);
6223 pvector_init(&cls
->subtables
);
6227 dpcls_destroy_subtable(struct dpcls
*cls
, struct dpcls_subtable
*subtable
)
6229 VLOG_DBG("Destroying subtable %p for in_port %d", subtable
, cls
->in_port
);
6230 pvector_remove(&cls
->subtables
, subtable
);
6231 cmap_remove(&cls
->subtables_map
, &subtable
->cmap_node
,
6232 subtable
->mask
.hash
);
6233 cmap_destroy(&subtable
->rules
);
6234 ovsrcu_postpone(free
, subtable
);
6237 /* Destroys 'cls'. Rules within 'cls', if any, are not freed; this is the
6238 * caller's responsibility.
6239 * May only be called after all the readers have been terminated. */
6241 dpcls_destroy(struct dpcls
*cls
)
6244 struct dpcls_subtable
*subtable
;
6246 CMAP_FOR_EACH (subtable
, cmap_node
, &cls
->subtables_map
) {
6247 ovs_assert(cmap_count(&subtable
->rules
) == 0);
6248 dpcls_destroy_subtable(cls
, subtable
);
6250 cmap_destroy(&cls
->subtables_map
);
6251 pvector_destroy(&cls
->subtables
);
6255 static struct dpcls_subtable
*
6256 dpcls_create_subtable(struct dpcls
*cls
, const struct netdev_flow_key
*mask
)
6258 struct dpcls_subtable
*subtable
;
6260 /* Need to add one. */
6261 subtable
= xmalloc(sizeof *subtable
6262 - sizeof subtable
->mask
.mf
+ mask
->len
);
6263 cmap_init(&subtable
->rules
);
6264 subtable
->hit_cnt
= 0;
6265 netdev_flow_key_clone(&subtable
->mask
, mask
);
6266 cmap_insert(&cls
->subtables_map
, &subtable
->cmap_node
, mask
->hash
);
6267 /* Add the new subtable at the end of the pvector (with no hits yet) */
6268 pvector_insert(&cls
->subtables
, subtable
, 0);
6269 VLOG_DBG("Creating %"PRIuSIZE
". subtable %p for in_port %d",
6270 cmap_count(&cls
->subtables_map
), subtable
, cls
->in_port
);
6271 pvector_publish(&cls
->subtables
);
6276 static inline struct dpcls_subtable
*
6277 dpcls_find_subtable(struct dpcls
*cls
, const struct netdev_flow_key
*mask
)
6279 struct dpcls_subtable
*subtable
;
6281 CMAP_FOR_EACH_WITH_HASH (subtable
, cmap_node
, mask
->hash
,
6282 &cls
->subtables_map
) {
6283 if (netdev_flow_key_equal(&subtable
->mask
, mask
)) {
6287 return dpcls_create_subtable(cls
, mask
);
6291 /* Periodically sort the dpcls subtable vectors according to hit counts */
6293 dpcls_sort_subtable_vector(struct dpcls
*cls
)
6295 struct pvector
*pvec
= &cls
->subtables
;
6296 struct dpcls_subtable
*subtable
;
6298 PVECTOR_FOR_EACH (subtable
, pvec
) {
6299 pvector_change_priority(pvec
, subtable
, subtable
->hit_cnt
);
6300 subtable
->hit_cnt
= 0;
6302 pvector_publish(pvec
);
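
/* Note added for clarity: pvector_change_priority() above only stages the new
 * priorities; it is pvector_publish() that sorts the vector and makes it
 * visible to the lookup code, so concurrent readers never observe a
 * half-sorted subtable list. */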

static inline void
dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
                           struct polled_queue *poll_list, int poll_cnt)
{
    struct dpcls *cls;

    if (pmd->ctx.now > pmd->rxq_next_cycle_store) {
        uint64_t curr_tsc;
        /* Get the cycles that were used to process each queue and store. */
        for (unsigned i = 0; i < poll_cnt; i++) {
            uint64_t rxq_cyc_curr = dp_netdev_rxq_get_cycles(poll_list[i].rxq,
                                                        RXQ_CYCLES_PROC_CURR);
            dp_netdev_rxq_set_intrvl_cycles(poll_list[i].rxq, rxq_cyc_curr);
            dp_netdev_rxq_set_cycles(poll_list[i].rxq, RXQ_CYCLES_PROC_CURR,
                                     0);
        }
        curr_tsc = cycles_counter_update(&pmd->perf_stats);
        if (pmd->intrvl_tsc_prev) {
            /* There is a prev timestamp, store a new intrvl cycle count. */
            atomic_store_relaxed(&pmd->intrvl_cycles,
                                 curr_tsc - pmd->intrvl_tsc_prev);
        }
        pmd->intrvl_tsc_prev = curr_tsc;
        /* Start new measuring interval */
        pmd->rxq_next_cycle_store = pmd->ctx.now + PMD_RXQ_INTERVAL_LEN;
    }

    if (pmd->ctx.now > pmd->next_optimization) {
        /* Try to obtain the flow lock to block out revalidator threads.
         * If not possible, just try next time. */
        if (!ovs_mutex_trylock(&pmd->flow_mutex)) {
            /* Optimize each classifier */
            CMAP_FOR_EACH (cls, node, &pmd->classifiers) {
                dpcls_sort_subtable_vector(cls);
            }
            ovs_mutex_unlock(&pmd->flow_mutex);
            /* Start new measuring interval */
            pmd->next_optimization = pmd->ctx.now
                                     + DPCLS_OPTIMIZATION_INTERVAL;
        }
    }
}
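
/* Non-blocking sketch.  This runs on the PMD fast path, so it must never wait
 * on 'flow_mutex' (a revalidator may hold it for a while).
 * ovs_mutex_trylock() returns 0 when the lock was acquired and an error code
 * otherwise; on failure the sort is simply deferred to a later poll
 * iteration:
 *
 *     if (!ovs_mutex_trylock(&mutex)) {    // 0 == acquired
 *         ... short critical section ...
 *         ovs_mutex_unlock(&mutex);
 *     }                                    // else: skip, retry next time
 */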

/* Insert 'rule' into 'cls'. */
static void
dpcls_insert(struct dpcls *cls, struct dpcls_rule *rule,
             const struct netdev_flow_key *mask)
{
    struct dpcls_subtable *subtable = dpcls_find_subtable(cls, mask);

    /* Refer to subtable's mask, also for later removal. */
    rule->mask = &subtable->mask;
    cmap_insert(&subtable->rules, &rule->cmap_node, rule->flow.hash);
}
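
/* Usage sketch (simplified from the flow-installation path, cf.
 * dp_netdev_flow_add(); details elided):
 *
 *     netdev_flow_key_init_masked(&flow->cr.flow, &match->flow, &mask);
 *     dpcls_insert(cls, &flow->cr, &mask);  // find-or-create the subtable
 *                                           // for 'mask', publish the rule
 *
 * All rules in one subtable share the subtable's mask, so the rule keeps only
 * a pointer to it. */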

/* Removes 'rule' from 'cls', also destructing the 'rule'. */
static void
dpcls_remove(struct dpcls *cls, struct dpcls_rule *rule)
{
    struct dpcls_subtable *subtable;

    ovs_assert(rule->mask);

    /* Get subtable from reference in rule->mask. */
    INIT_CONTAINER(subtable, rule->mask, mask);
    if (cmap_remove(&subtable->rules, &rule->cmap_node, rule->flow.hash)
        == 0) {
        /* Delete empty subtable. */
        dpcls_destroy_subtable(cls, subtable);
        pvector_publish(&cls->subtables);
    }
}
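
/* Container-recovery sketch.  'rule->mask' points at the 'mask' member inside
 * the owning subtable, and INIT_CONTAINER() subtracts that member's offset to
 * recover the enclosing structure, roughly:
 *
 *     subtable = (struct dpcls_subtable *)
 *         ((char *) rule->mask - offsetof(struct dpcls_subtable, mask));
 *
 * This avoids storing a separate back-pointer in every rule. */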

/* Returns true if 'target' satisfies 'key' in 'mask', that is, if for each
 * 1-bit in 'mask' the values in 'key' and 'target' are the same. */
static bool
dpcls_rule_matches_key(const struct dpcls_rule *rule,
                       const struct netdev_flow_key *target)
{
    const uint64_t *keyp = miniflow_get_values(&rule->flow.mf);
    const uint64_t *maskp = miniflow_get_values(&rule->mask->mf);
    uint64_t value;

    NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, target, rule->flow.mf.map) {
        if (OVS_UNLIKELY((value & *maskp++) != *keyp++)) {
            return false;
        }
    }
    return true;
}
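
/* Worked example of the masked compare above (illustrative values only).  For
 * one 64-bit word whose mask selects just the low 16 bits:
 *
 *     mask  = 0x000000000000ffff
 *     key   = 0x0000000000001234   // rule value, stored pre-masked
 *     value = 0xabcd000000001234   // packet
 *
 *     (value & mask) == 0x0000000000001234 == key  ->  this word matches
 *
 * The loop only visits the 64-bit words present in the rule's flowmap, so a
 * rule that matches few fields compares few words. */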

/* For each miniflow in 'keys' performs a classifier lookup writing the result
 * into the corresponding slot in 'rules'.  If a particular entry in 'keys' is
 * NULL it is skipped.
 *
 * This function is optimized for use in the userspace datapath and therefore
 * does not implement a lot of features available in the standard
 * classifier_lookup() function.  Specifically, it does not implement
 * priorities, instead returning any rule which matches the flow.
 *
 * Returns true if all miniflows found a corresponding rule. */
static bool
dpcls_lookup(struct dpcls *cls, const struct netdev_flow_key keys[],
             struct dpcls_rule **rules, const size_t cnt,
             int *num_lookups_p)
{
    /* The received 'cnt' miniflows are the search-keys that will be processed
     * to find a matching entry into the available subtables.
     * The number of bits in map_type is equal to NETDEV_MAX_BURST. */
    typedef uint32_t map_type;
#define MAP_BITS (sizeof(map_type) * CHAR_BIT)
    BUILD_ASSERT_DECL(MAP_BITS >= NETDEV_MAX_BURST);

    struct dpcls_subtable *subtable;

    map_type keys_map = TYPE_MAXIMUM(map_type); /* Set all bits. */
    map_type found_map;
    uint32_t hashes[MAP_BITS];
    const struct cmap_node *nodes[MAP_BITS];

    if (cnt != MAP_BITS) {
        keys_map >>= MAP_BITS - cnt; /* Clear extra bits. */
    }
    memset(rules, 0, cnt * sizeof *rules);

    int lookups_match = 0, subtable_pos = 1;

    /* The Datapath classifier - aka dpcls - is composed of subtables.
     * Subtables are dynamically created as needed when new rules are inserted.
     * Each subtable collects rules with matches on a specific subset of packet
     * fields as defined by the subtable's mask.  We proceed to process every
     * search-key against each subtable, but when a match is found for a
     * search-key, the search for that key can stop because the rules are
     * non-overlapping. */
    PVECTOR_FOR_EACH (subtable, &cls->subtables) {
        int i;

        /* Compute hashes for the remaining keys.  Each search-key is
         * masked with the subtable's mask to avoid hashing the wildcarded
         * bits. */
        ULLONG_FOR_EACH_1(i, keys_map) {
            hashes[i] = netdev_flow_key_hash_in_mask(&keys[i],
                                                     &subtable->mask);
        }
        /* Lookup. */
        found_map = cmap_find_batch(&subtable->rules, keys_map, hashes, nodes);
        /* Check results.  When the i-th bit of found_map is set, it means
         * that a set of nodes with a matching hash value was found for the
         * i-th search-key.  Due to possible hash collisions we need to check
         * which of the found rules, if any, really matches our masked
         * search-key. */
        ULLONG_FOR_EACH_1(i, found_map) {
            struct dpcls_rule *rule;

            CMAP_NODE_FOR_EACH (rule, cmap_node, nodes[i]) {
                if (OVS_LIKELY(dpcls_rule_matches_key(rule, &keys[i]))) {
                    rules[i] = rule;
                    /* Even at 20 Mpps the 32-bit hit_cnt cannot wrap
                     * within one second optimization interval. */
                    subtable->hit_cnt++;
                    lookups_match += subtable_pos;
                    goto next;
                }
            }
            /* None of the found rules was a match.  Reset the i-th bit to
             * keep searching this key in the next subtable. */
            ULLONG_SET0(found_map, i);  /* Did not match. */
        next:
            ;                     /* Keep Sparse happy. */
        }
        keys_map &= ~found_map;             /* Clear the found rules. */
        if (!keys_map) {
            if (num_lookups_p) {
                *num_lookups_p = lookups_match;
            }
            return true;              /* All found. */
        }
        subtable_pos++;
    }
    if (num_lookups_p) {
        *num_lookups_p = lookups_match;
    }
    return false;                     /* Some misses. */
}
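
/* Bitmap bookkeeping sketch for dpcls_lookup() (illustrative 8-packet batch,
 * only the low 8 bits drawn):
 *
 *     keys_map  = 11111111    // all 8 packets still need a rule
 *     found_map = 01100010    // this subtable's hash lookup hit packets
 *                             // 1, 5 and 6 (hash collisions clear bits
 *                             // again via ULLONG_SET0)
 *     keys_map &= ~found_map  // -> 10011101: packets left for the next,
 *                             //    lower-priority subtable
 *
 * The outer loop stops as soon as keys_map reaches zero, so a burst whose
 * packets all hit the first subtable costs a single pass. */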