/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2016, 2017 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "dpif-netdev.h"

#include <sys/types.h>
#include <netinet/in.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

#include "conntrack.h"
#include "dp-packet.h"
#include "dpif-netdev-perf.h"
#include "dpif-provider.h"
#include "fat-rwlock.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "odp-execute.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/match.h"
#include "openvswitch/ofp-parse.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/ofpbuf.h"
#include "openvswitch/shash.h"
#include "openvswitch/vlog.h"
#include "openvswitch/poll-loop.h"
#include "tnl-neigh-cache.h"
#include "tnl-ports.h"
VLOG_DEFINE_THIS_MODULE(dpif_netdev);

#define FLOW_DUMP_MAX_BATCH 50

/* Use per thread recirc_depth to prevent recirculation loop. */
#define MAX_RECIRC_DEPTH 6
DEFINE_STATIC_PER_THREAD_DATA(uint32_t, recirc_depth, 0)

/* Use instant packet send by default. */
#define DEFAULT_TX_FLUSH_INTERVAL 0
/* Configuration parameters. */
enum { MAX_FLOWS = 65536 };     /* Maximum number of flows in flow table. */
enum { MAX_METERS = 65536 };    /* Maximum number of meters. */
enum { MAX_BANDS = 8 };         /* Maximum number of bands / meter. */
enum { N_METER_LOCKS = 64 };    /* Number of meter locks. */
/* Protects against changes to 'dp_netdevs'. */
static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dp_netdev's. */
static struct shash dp_netdevs OVS_GUARDED_BY(dp_netdev_mutex)
    = SHASH_INITIALIZER(&dp_netdevs);

static struct vlog_rate_limit upcall_rl = VLOG_RATE_LIMIT_INIT(600, 600);

#define DP_NETDEV_CS_SUPPORTED_MASK (CS_NEW | CS_ESTABLISHED | CS_RELATED \
                                     | CS_INVALID | CS_REPLY_DIR | CS_TRACKED \
                                     | CS_SRC_NAT | CS_DST_NAT)
#define DP_NETDEV_CS_UNSUPPORTED_MASK (~(uint32_t)DP_NETDEV_CS_SUPPORTED_MASK)
static struct odp_support dp_netdev_support = {
    .max_vlan_headers = SIZE_MAX,
    .max_mpls_depth = SIZE_MAX,
    .ct_state_nat = true,
    .ct_orig_tuple = true,
    .ct_orig_tuple6 = true,
};
/* Stores a miniflow with inline values */
struct netdev_flow_key {
    uint32_t hash;      /* Hash function differs for different users. */
    uint32_t len;       /* Length of the following miniflow (incl. map). */
    uint64_t buf[FLOW_MAX_PACKET_U64S];
};
/* EMC cache and SMC cache compose the datapath flow cache (DFC)
 *
 * Exact match cache (EMC) for frequently used flows
 *
 * The cache uses a 32-bit hash of the packet (which can be the RSS hash) to
 * search its entries for a miniflow that matches exactly the miniflow of the
 * packet.  It stores the 'dpcls_rule' (rule) that matches the miniflow.
 *
 * A cache entry holds a reference to its 'dp_netdev_flow'.
 *
 * A miniflow with a given hash can be in one of EM_FLOW_HASH_SEGS different
 * entries.  The 32-bit hash is split into EM_FLOW_HASH_SEGS values (each of
 * them is EM_FLOW_HASH_SHIFT bits wide and the remainder is thrown away).
 * Each value is the index of a cache entry where the miniflow could be.
 *
 * Signature match cache (SMC)
 *
 * This cache stores a 16-bit signature for each flow without storing keys,
 * and stores the corresponding 16-bit flow_table index of the
 * 'dp_netdev_flow'.  Each flow thus occupies 32 bits, which is much more
 * memory efficient than the EMC.  SMC uses a set-associative design in which
 * each bucket contains SMC_ENTRY_PER_BUCKET entries.  Since a 16-bit
 * flow_table index is used, flows that cannot be indexed by a 16-bit value
 * (beyond the first 2^16 dp_netdev_flows) always miss in the SMC.
 *
 * Each pmd_thread has its own private exact match cache.
 * If dp_netdev_input is not called from a pmd thread, a mutex is used.
 */
#define EM_FLOW_HASH_SHIFT 13
#define EM_FLOW_HASH_ENTRIES (1u << EM_FLOW_HASH_SHIFT)
#define EM_FLOW_HASH_MASK (EM_FLOW_HASH_ENTRIES - 1)
#define EM_FLOW_HASH_SEGS 2

/* SMC uses a set-associative design.  A bucket contains a set of entries that
 * a flow item can occupy.  For now, it uses one hash function rather than two
 * as for the EMC design. */
#define SMC_ENTRY_PER_BUCKET 4
#define SMC_ENTRIES (1u << 20)
#define SMC_BUCKET_CNT (SMC_ENTRIES / SMC_ENTRY_PER_BUCKET)
#define SMC_MASK (SMC_BUCKET_CNT - 1)

/* Default EMC insert probability is 1 / DEFAULT_EM_FLOW_INSERT_INV_PROB */
#define DEFAULT_EM_FLOW_INSERT_INV_PROB 100
#define DEFAULT_EM_FLOW_INSERT_MIN (UINT32_MAX /                     \
                                    DEFAULT_EM_FLOW_INSERT_INV_PROB)
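
/* Illustration (not part of the original file): a minimal sketch of how the
 * threshold above can gate EMC insertions so that only roughly 1 in
 * DEFAULT_EM_FLOW_INSERT_INV_PROB packets installs a new entry.  The helper
 * name and the use of random_uint32() (from lib/random.h) are assumptions for
 * this sketch. */
static inline bool
emc_should_insert_sketch(uint32_t insert_min)
{
    /* With insert_min == DEFAULT_EM_FLOW_INSERT_MIN (UINT32_MAX / 100), a
     * uniformly random 32-bit value passes roughly 1% of the time, which
     * bounds EMC churn; an insert_min of 0 disables EMC insertion. */
    return insert_min && random_uint32() <= insert_min;
}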
struct emc_entry {
    struct dp_netdev_flow *flow;
    struct netdev_flow_key key;   /* key.hash used for emc hash value. */
};

struct emc_cache {
    struct emc_entry entries[EM_FLOW_HASH_ENTRIES];
    int sweep_idx;                /* For emc_cache_slow_sweep(). */
};

struct smc_bucket {
    uint16_t sig[SMC_ENTRY_PER_BUCKET];
    uint16_t flow_idx[SMC_ENTRY_PER_BUCKET];
};

/* Signature match cache, distinct from the EMC cache. */
struct smc_cache {
    struct smc_bucket buckets[SMC_BUCKET_CNT];
};

struct dfc_cache {
    struct emc_cache emc_cache;
    struct smc_cache smc_cache;
};
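
/* Illustration (not part of the original file): a minimal sketch of how a
 * packet hash can be split into an SMC bucket index and a 16-bit signature
 * using the constants defined above.  The helper name and the exact bit split
 * are assumptions for this sketch, not the datapath's actual insertion code. */
static inline struct smc_bucket *
smc_bucket_for_hash_sketch(struct smc_cache *smc, uint32_t hash, uint16_t *sig)
{
    *sig = hash >> 16;                      /* High bits form the signature. */
    return &smc->buckets[hash & SMC_MASK];  /* Low bits select the bucket. */
}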
/* Iterate in the exact match cache through every entry that might contain a
 * miniflow with hash 'HASH'. */
#define EMC_FOR_EACH_POS_WITH_HASH(EMC, CURRENT_ENTRY, HASH)                 \
    for (uint32_t i__ = 0, srch_hash__ = (HASH);                             \
         (CURRENT_ENTRY) = &(EMC)->entries[srch_hash__ & EM_FLOW_HASH_MASK], \
         i__ < EM_FLOW_HASH_SEGS;                                            \
         i__++, srch_hash__ >>= EM_FLOW_HASH_SHIFT)
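
/* Illustration (not part of the original file): a minimal sketch of how the
 * macro above can be used to probe the EM_FLOW_HASH_SEGS candidate positions
 * for a key.  The helper name is an assumption; emc_entry_alive() is declared
 * later in this file, and the exact miniflow comparison a real lookup would
 * also perform is elided here. */
static inline struct dp_netdev_flow *
emc_lookup_sketch(struct emc_cache *cache, const struct netdev_flow_key *key)
{
    struct emc_entry *current_entry;

    EMC_FOR_EACH_POS_WITH_HASH (cache, current_entry, key->hash) {
        /* A hit requires the same hash, a live entry, and an exact miniflow
         * match (elided in this sketch). */
        if (current_entry->key.hash == key->hash
            && emc_entry_alive(current_entry)) {
            return current_entry->flow;
        }
    }

    return NULL;
}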
/* Simple non-wildcarding single-priority classifier. */

/* Time in microseconds between successive optimizations of the dpcls
 * subtable vector. */
#define DPCLS_OPTIMIZATION_INTERVAL 1000000LL

/* Time in microseconds of the interval in which rxq processing cycles used
 * in rxq to pmd assignments is measured and stored. */
#define PMD_RXQ_INTERVAL_LEN 10000000LL

/* Number of intervals for which cycles are stored
 * and used during rxq to pmd assignment. */
#define PMD_RXQ_INTERVAL_MAX 6
struct dpcls {
    struct cmap_node node;      /* Within dp_netdev_pmd_thread.classifiers */
    struct cmap subtables_map;
    struct pvector subtables;
};

/* A rule to be inserted into the classifier. */
struct dpcls_rule {
    struct cmap_node cmap_node;   /* Within struct dpcls_subtable 'rules'. */
    struct netdev_flow_key *mask; /* Subtable's mask. */
    struct netdev_flow_key flow;  /* Matching key. */
    /* 'flow' must be the last field, additional space is allocated here. */
};

/* Data structure to keep packet order till fastpath processing. */
struct dp_packet_flow_map {
    struct dp_packet *packet;
    struct dp_netdev_flow *flow;
};
static void dpcls_init(struct dpcls *);
static void dpcls_destroy(struct dpcls *);
static void dpcls_sort_subtable_vector(struct dpcls *);
static void dpcls_insert(struct dpcls *, struct dpcls_rule *,
                         const struct netdev_flow_key *mask);
static void dpcls_remove(struct dpcls *, struct dpcls_rule *);
static bool dpcls_lookup(struct dpcls *cls,
                         const struct netdev_flow_key *keys[],
                         struct dpcls_rule **rules, size_t cnt,
static bool dpcls_rule_matches_key(const struct dpcls_rule *rule,
                                   const struct netdev_flow_key *target);
/* Set of supported meter flags */
#define DP_SUPPORTED_METER_FLAGS_MASK \
    (OFPMF13_STATS | OFPMF13_PKTPS | OFPMF13_KBPS | OFPMF13_BURST)

/* Set of supported meter band types */
#define DP_SUPPORTED_METER_BAND_TYPES           \
    ( 1 << OFPMBT13_DROP )

struct dp_meter_band {
    struct ofputil_meter_band up; /* type, prec_level, pad, rate, burst_size */
    uint32_t bucket; /* In 1/1000 packets (for PKTPS), or in bits (for KBPS) */
    uint64_t packet_count;
};

struct dp_meter {
    uint32_t max_delta_t;
    uint64_t packet_count;
    struct dp_meter_band bands[];
};
/* Datapath based on the network device interface from netdev.h.
 *
 * Some members, marked 'const', are immutable.  Accessing other members
 * requires synchronization, as noted in more detail below.
 *
 * Acquisition order is, from outermost to innermost:
 *
 *    dp_netdev_mutex (global)
 */
struct dp_netdev {
    const struct dpif_class *const class;
    const char *const name;
    struct ovs_refcount ref_cnt;
    atomic_flag destroyed;

    /* Any lookup into 'ports' or any access to the dp_netdev_ports found
     * through 'ports' requires taking 'port_mutex'. */
    struct ovs_mutex port_mutex;
    struct seq *port_seq;       /* Incremented whenever a port changes. */

    /* The time that a packet can wait in an output batch before sending. */
    atomic_uint32_t tx_flush_interval;

    struct ovs_mutex meter_locks[N_METER_LOCKS];
    struct dp_meter *meters[MAX_METERS]; /* Meter bands. */

    /* Probability of EMC insertions is a factor of 'emc_insert_min'. */
    OVS_ALIGNED_VAR(CACHE_LINE_SIZE) atomic_uint32_t emc_insert_min;
    /* Enable collection of PMD performance metrics. */
    atomic_bool pmd_perf_metrics;
    /* Enable the SMC cache from ovsdb config */
    atomic_bool smc_enable_db;

    /* Protects access to ofproto-dpif-upcall interface during revalidator
     * thread synchronization. */
    struct fat_rwlock upcall_rwlock;
    upcall_callback *upcall_cb;  /* Callback function for executing upcalls. */

    /* Callback function for notifying the purging of dp flows (during
     * resetting pmd deletion). */
    dp_purge_callback *dp_purge_cb;

    /* Stores all 'struct dp_netdev_pmd_thread's. */
    struct cmap poll_threads;
    /* Id pool for per thread static_tx_qid. */
    struct id_pool *tx_qid_pool;
    struct ovs_mutex tx_qid_pool_mutex;

    /* Protects the access of the 'struct dp_netdev_pmd_thread'
     * instance for non-pmd thread. */
    struct ovs_mutex non_pmd_mutex;

    /* Each pmd thread will store its pointer to
     * 'struct dp_netdev_pmd_thread' in 'per_pmd_key'. */
    ovsthread_key_t per_pmd_key;

    struct seq *reconfigure_seq;
    uint64_t last_reconfigure_seq;

    /* CPU mask for pinning pmd threads. */
    uint64_t last_tnl_conf_seq;

    struct conntrack conntrack;
};
static void meter_lock(const struct dp_netdev *dp, uint32_t meter_id)
    OVS_ACQUIRES(dp->meter_locks[meter_id % N_METER_LOCKS])
{
    ovs_mutex_lock(&dp->meter_locks[meter_id % N_METER_LOCKS]);
}

static void meter_unlock(const struct dp_netdev *dp, uint32_t meter_id)
    OVS_RELEASES(dp->meter_locks[meter_id % N_METER_LOCKS])
{
    ovs_mutex_unlock(&dp->meter_locks[meter_id % N_METER_LOCKS]);
}
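
/* Illustration (not part of the original file): a minimal sketch of how the
 * striped meter locks above are meant to be used -- any access to
 * dp->meters[meter_id] is bracketed by meter_lock()/meter_unlock(), which map
 * the meter id onto one of N_METER_LOCKS mutexes.  The helper name and the
 * specific statistic read here are assumptions for this sketch. */
static uint64_t
meter_packet_count_sketch(struct dp_netdev *dp, uint32_t meter_id)
{
    uint64_t packets = 0;

    meter_lock(dp, meter_id);
    if (meter_id < MAX_METERS && dp->meters[meter_id]) {
        packets = dp->meters[meter_id]->packet_count;
    }
    meter_unlock(dp, meter_id);

    return packets;
}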
static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp,
                                                    odp_port_t port_no)
    OVS_REQUIRES(dp->port_mutex);
enum rxq_cycles_counter_type {
    RXQ_CYCLES_PROC_CURR,       /* Cycles spent successfully polling and
                                   processing packets during the current
                                   interval. */
    RXQ_CYCLES_PROC_HIST,       /* Total cycles of all intervals that are used
                                   during rxq to pmd assignment. */
    RXQ_N_CYCLES,
};

    DP_NETDEV_FLOW_OFFLOAD_OP_ADD,
    DP_NETDEV_FLOW_OFFLOAD_OP_MOD,
    DP_NETDEV_FLOW_OFFLOAD_OP_DEL,
struct dp_flow_offload_item {
    struct dp_netdev_pmd_thread *pmd;
    struct dp_netdev_flow *flow;
    struct nlattr *actions;
    struct ovs_list node;
};

struct dp_flow_offload {
    struct ovs_mutex mutex;
    struct ovs_list list;
};

static struct dp_flow_offload dp_flow_offload = {
    .mutex = OVS_MUTEX_INITIALIZER,
    .list  = OVS_LIST_INITIALIZER(&dp_flow_offload.list),
};

static struct ovsthread_once offload_thread_once
    = OVSTHREAD_ONCE_INITIALIZER;
#define XPS_TIMEOUT 500000LL    /* In microseconds. */

/* Contained by struct dp_netdev_port's 'rxqs' member. */
struct dp_netdev_rxq {
    struct dp_netdev_port *port;
    struct netdev_rxq *rx;
    unsigned core_id;                  /* Core to which this queue should be
                                          pinned. OVS_CORE_UNSPEC if the
                                          queue doesn't need to be pinned to a
                                          particular core. */
    unsigned intrvl_idx;               /* Write index for 'cycles_intrvl'. */
    struct dp_netdev_pmd_thread *pmd;  /* pmd thread that polls this queue. */
    bool is_vhost;                     /* Is rxq of a vhost port. */

    /* Counters of cycles spent successfully polling and processing pkts. */
    atomic_ullong cycles[RXQ_N_CYCLES];
    /* We store PMD_RXQ_INTERVAL_MAX intervals of data for an rxq and then
       sum them to yield the cycles used for an rxq. */
    atomic_ullong cycles_intrvl[PMD_RXQ_INTERVAL_MAX];
};
/* A port in a netdev-based datapath. */
struct dp_netdev_port {
    bool dynamic_txqs;          /* If true XPS will be used. */
    bool need_reconfigure;      /* True if we should reconfigure netdev. */
    struct netdev *netdev;
    struct hmap_node node;      /* Node in dp_netdev's 'ports'. */
    struct netdev_saved_flags *sf;
    struct dp_netdev_rxq *rxqs;
    unsigned n_rxq;             /* Number of elements in 'rxqs' */
    unsigned *txq_used;         /* Number of threads that use each tx queue. */
    struct ovs_mutex txq_used_mutex;
    char *type;                 /* Port type as requested by user. */
    char *rxq_affinity_list;    /* Requested affinity of rx queues. */
};
/* Contained by struct dp_netdev_flow's 'stats' member. */
struct dp_netdev_flow_stats {
    atomic_llong used;             /* Last used time, in monotonic msecs. */
    atomic_ullong packet_count;    /* Number of packets matched. */
    atomic_ullong byte_count;      /* Number of bytes matched. */
    atomic_uint16_t tcp_flags;     /* Bitwise-OR of seen tcp_flags values. */
};
/* A flow in 'dp_netdev_pmd_thread's 'flow_table'.
 *
 * Except near the beginning or ending of its lifespan, rule 'rule' belongs to
 * its pmd thread's classifier.  The text below calls this classifier 'cls'.
 *
 * The thread safety rules described here for "struct dp_netdev_flow" are
 * motivated by two goals:
 *
 *    - Prevent threads that read members of "struct dp_netdev_flow" from
 *      reading bad data due to changes by some thread concurrently modifying
 *      those members.
 *
 *    - Prevent two threads making changes to members of a given "struct
 *      dp_netdev_flow" from interfering with each other.
 *
 * A flow 'flow' may be accessed without a risk of being freed during an RCU
 * grace period.  Code that needs to hold onto a flow for a while
 * should try incrementing 'flow->ref_cnt' with dp_netdev_flow_ref().
 *
 * 'flow->ref_cnt' protects 'flow' from being freed.  It doesn't protect the
 * flow from being deleted from 'cls' and it doesn't protect members of 'flow'
 * from modification.
 *
 * Some members, marked 'const', are immutable.  Accessing other members
 * requires synchronization, as noted in more detail below.
 */
struct dp_netdev_flow {
    const struct flow flow;      /* Unmasked flow that created this entry. */
    /* Hash table index by unmasked flow. */
    const struct cmap_node node; /* In owning dp_netdev_pmd_thread's */
    const struct cmap_node mark_node; /* In owning flow_mark's mark_to_flow */
    const ovs_u128 ufid;         /* Unique flow identifier. */
    const ovs_u128 mega_ufid;    /* Unique mega flow identifier. */
    const unsigned pmd_id;       /* The 'core_id' of pmd thread owning this */

    /* Number of references.
     * The classifier owns one reference.
     * Any thread trying to keep a rule from being freed should hold its own
     * reference. */
    struct ovs_refcount ref_cnt;

    uint32_t mark;               /* Unique flow mark assigned to a flow */

    struct dp_netdev_flow_stats stats;

    OVSRCU_TYPE(struct dp_netdev_actions *) actions;

    /* While processing a group of input packets, the datapath uses the next
     * member to store a pointer to the output batch for the flow.  It is
     * reset after the batch has been sent out (See dp_netdev_queue_batches(),
     * packet_batch_per_flow_init() and packet_batch_per_flow_execute()). */
    struct packet_batch_per_flow *batch;

    /* Packet classification. */
    struct dpcls_rule cr;        /* In owning dp_netdev's 'cls'. */
    /* 'cr' must be the last member. */
};
static void dp_netdev_flow_unref(struct dp_netdev_flow *);
static bool dp_netdev_flow_ref(struct dp_netdev_flow *);
static int dpif_netdev_flow_from_nlattrs(const struct nlattr *, uint32_t,
                                         struct flow *, bool);

/* A set of datapath actions within a "struct dp_netdev_flow".
 *
 * A struct dp_netdev_actions 'actions' is protected with RCU. */
struct dp_netdev_actions {
    /* These members are immutable: they do not change during the struct's
     * lifetime. */
    unsigned int size;          /* Size of 'actions', in bytes. */
    struct nlattr actions[];    /* Sequence of OVS_ACTION_ATTR_* attributes. */
};

struct dp_netdev_actions *dp_netdev_actions_create(const struct nlattr *,
struct dp_netdev_actions *dp_netdev_flow_get_actions(
    const struct dp_netdev_flow *);
static void dp_netdev_actions_free(struct dp_netdev_actions *);
struct polled_queue {
    struct dp_netdev_rxq *rxq;
};

/* Contained by struct dp_netdev_pmd_thread's 'poll_list' member. */
struct rxq_poll {
    struct dp_netdev_rxq *rxq;
    struct hmap_node node;
};

/* Contained by struct dp_netdev_pmd_thread's 'send_port_cache',
 * 'tnl_port_cache' or 'tx_ports'. */
struct tx_port {
    struct dp_netdev_port *port;
    struct hmap_node node;
    long long flush_time;
    struct dp_packet_batch output_pkts;
    struct dp_netdev_rxq *output_pkts_rxqs[NETDEV_MAX_BURST];
};
/* A set of properties for the current processing loop that is not directly
 * associated with the pmd thread itself, but with the packets being
 * processed or the short-term system configuration (for example, time).
 * Contained by struct dp_netdev_pmd_thread's 'ctx' member. */
struct dp_netdev_pmd_thread_ctx {
    /* Latest measured time.  See 'pmd_thread_ctx_time_update()'. */
    long long now;
    /* RX queue from which last packet was received. */
    struct dp_netdev_rxq *last_rxq;
};
/* PMD: Poll mode drivers.  A PMD accesses devices via polling to eliminate
 * the performance overhead of interrupt processing.  Therefore netdev cannot
 * implement rx-wait for these devices; dpif-netdev needs to poll them to
 * check for received packets.  A pmd thread polls the devices assigned to it.
 *
 * DPDK uses a PMD for accessing the NIC.
 *
 * Note: the instance with cpu core id NON_PMD_CORE_ID is reserved for
 * I/O of all non-pmd threads.  There will be no actual thread created
 * for that instance.
 *
 * Each struct has its own flow cache and classifier per managed ingress port.
 * For packets received on an ingress port, a lookup is done in the
 * corresponding PMD thread's flow cache; on a miss, a lookup is performed in
 * that port's classifier.  Packets are executed with the found actions in
 * either case.
 */
struct dp_netdev_pmd_thread {
    struct dp_netdev *dp;
    struct ovs_refcount ref_cnt;    /* Every reference must be refcount'ed. */
    struct cmap_node node;          /* In 'dp->poll_threads'. */

    pthread_cond_t cond;            /* For synchronizing pmd thread reload. */
    struct ovs_mutex cond_mutex;    /* Mutex for condition variable. */

    /* Per thread exact-match cache.  Note, the instance for cpu core
     * NON_PMD_CORE_ID can be accessed by multiple threads, and thus
     * needs to be protected by 'non_pmd_mutex'.  Every other instance
     * will only be accessed by its own pmd thread. */
    OVS_ALIGNED_VAR(CACHE_LINE_SIZE) struct dfc_cache flow_cache;

    /* Flow-Table and classifiers
     *
     * Writers of 'flow_table' must take the 'flow_mutex'.  Corresponding
     * changes to 'classifiers' must be made while still holding the
     * 'flow_mutex'. */
    struct ovs_mutex flow_mutex;
    struct cmap flow_table OVS_GUARDED; /* Flow table. */

    /* One classifier per in_port polled by the pmd */
    struct cmap classifiers;
    /* Periodically sort subtable vectors according to hit frequencies */
    long long int next_optimization;
    /* End of the next time interval for which processing cycles
       are stored for each polled rxq. */
    long long int rxq_next_cycle_store;

    /* Last interval timestamp. */
    uint64_t intrvl_tsc_prev;
    /* Last interval cycles. */
    atomic_ullong intrvl_cycles;

    /* Current context of the PMD thread. */
    struct dp_netdev_pmd_thread_ctx ctx;

    struct latch exit_latch;        /* For terminating the pmd thread. */
    struct seq *reload_seq;
    uint64_t last_reload_seq;
    atomic_bool reload;             /* Do we need to reload ports? */

    unsigned core_id;               /* CPU core id of this pmd thread. */
    int numa_id;                    /* numa node id of this pmd thread. */

    /* Queue id used by this pmd thread to send packets on all netdevs if
     * XPS is disabled for this netdev.  All static_tx_qid's are unique and
     * less than 'cmap_count(dp->poll_threads)'. */
    uint32_t static_tx_qid;

    /* Number of filled output batches. */
    int n_output_batches;

    struct ovs_mutex port_mutex;    /* Mutex for 'poll_list' and 'tx_ports'. */
    /* List of rx queues to poll. */
    struct hmap poll_list OVS_GUARDED;
    /* Map of 'tx_port's used for transmission.  Written by the main thread,
     * read by the pmd thread. */
    struct hmap tx_ports OVS_GUARDED;

    /* These are thread-local copies of 'tx_ports'.  One contains only tunnel
     * ports (that support push_tunnel/pop_tunnel), the other contains ports
     * with at least one txq (that support send).  A port can be in both.
     *
     * There are two separate maps to make sure that we don't try to execute
     * OUTPUT on a device which has 0 txqs or PUSH/POP on a non-tunnel device.
     *
     * The instances for cpu core NON_PMD_CORE_ID can be accessed by multiple
     * threads, and thus need to be protected by 'non_pmd_mutex'.  Every
     * other instance will only be accessed by its own pmd thread. */
    struct hmap tnl_port_cache;
    struct hmap send_port_cache;

    /* Keep track of detailed PMD performance statistics. */
    struct pmd_perf_stats perf_stats;

    /* Set to true if the pmd thread needs to be reloaded. */
};
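
/* Illustration (not part of the original file): a highly simplified sketch of
 * the lookup order described above -- the pmd thread's exact match cache
 * first, then the classifier of the packet's in_port.  The helper name is an
 * assumption, emc_lookup_sketch() refers to the earlier sketch, the trailing
 * argument to dpcls_lookup() is assumed to be an optional out-parameter, and
 * the real fast path processes packets in batches and also consults the SMC. */
static struct dp_netdev_flow *
pmd_lookup_flow_sketch(struct dp_netdev_pmd_thread *pmd,
                       const struct netdev_flow_key *key, odp_port_t in_port)
{
    struct dp_netdev_flow *flow
        = emc_lookup_sketch(&pmd->flow_cache.emc_cache, key);

    if (!flow) {
        struct dpcls *cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port);

        if (cls) {
            const struct netdev_flow_key *keys[1] = { key };
            struct dpcls_rule *rule = NULL;

            if (dpcls_lookup(cls, keys, &rule, 1, NULL)) {
                flow = CONTAINER_OF(rule, struct dp_netdev_flow, cr);
            }
        }
    }

    return flow;
}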
/* Interface to netdev-based datapath. */
struct dpif_netdev {
    struct dpif dpif;
    struct dp_netdev *dp;
    uint64_t last_port_seq;
};
static int get_port_by_number(struct dp_netdev *dp, odp_port_t port_no,
                              struct dp_netdev_port **portp)
    OVS_REQUIRES(dp->port_mutex);
static int get_port_by_name(struct dp_netdev *dp, const char *devname,
                            struct dp_netdev_port **portp)
    OVS_REQUIRES(dp->port_mutex);
static void dp_netdev_free(struct dp_netdev *)
    OVS_REQUIRES(dp_netdev_mutex);
static int do_add_port(struct dp_netdev *dp, const char *devname,
                       const char *type, odp_port_t port_no)
    OVS_REQUIRES(dp->port_mutex);
static void do_del_port(struct dp_netdev *dp, struct dp_netdev_port *)
    OVS_REQUIRES(dp->port_mutex);
static int dpif_netdev_open(const struct dpif_class *, const char *name,
                            bool create, struct dpif **);
static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
                                      struct dp_packet_batch *,
                                      const struct flow *flow,
                                      const struct nlattr *actions,
static void dp_netdev_input(struct dp_netdev_pmd_thread *,
                            struct dp_packet_batch *, odp_port_t port_no);
static void dp_netdev_recirculate(struct dp_netdev_pmd_thread *,
                                  struct dp_packet_batch *);

static void dp_netdev_disable_upcall(struct dp_netdev *);
static void dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd,
                                    struct dp_netdev *dp, unsigned core_id,
static void dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_set_nonpmd(struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex);

static void *pmd_thread_main(void *);
static struct dp_netdev_pmd_thread *dp_netdev_get_pmd(struct dp_netdev *dp,
static struct dp_netdev_pmd_thread *
dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos);
static void dp_netdev_del_pmd(struct dp_netdev *dp,
                              struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_destroy_all_pmds(struct dp_netdev *dp, bool non_pmd);
static void dp_netdev_pmd_clear_ports(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread *pmd,
                                         struct dp_netdev_port *port)
    OVS_REQUIRES(pmd->port_mutex);
static void dp_netdev_del_port_tx_from_pmd(struct dp_netdev_pmd_thread *pmd,
    OVS_REQUIRES(pmd->port_mutex);
static void dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd,
                                     struct dp_netdev_rxq *rxq)
    OVS_REQUIRES(pmd->port_mutex);
static void dp_netdev_del_rxq_from_pmd(struct dp_netdev_pmd_thread *pmd,
                                       struct rxq_poll *poll)
    OVS_REQUIRES(pmd->port_mutex);
dp_netdev_pmd_flush_output_packets(struct dp_netdev_pmd_thread *pmd,
static void reconfigure_datapath(struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex);
static bool dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd);
static void pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd)
    OVS_REQUIRES(pmd->port_mutex);
dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
                           struct polled_queue *poll_list, int poll_cnt);
dp_netdev_rxq_set_cycles(struct dp_netdev_rxq *rx,
                         enum rxq_cycles_counter_type type,
                         unsigned long long cycles);
dp_netdev_rxq_get_cycles(struct dp_netdev_rxq *rx,
                         enum rxq_cycles_counter_type type);
dp_netdev_rxq_set_intrvl_cycles(struct dp_netdev_rxq *rx,
                                unsigned long long cycles);
dp_netdev_rxq_get_intrvl_cycles(struct dp_netdev_rxq *rx, unsigned idx);
dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
static int dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread *pmd,
static inline bool emc_entry_alive(struct emc_entry *ce);
static void emc_clear_entry(struct emc_entry *ce);
static void smc_clear_entry(struct smc_bucket *b, int idx);

static void dp_netdev_request_reconfigure(struct dp_netdev *dp);
pmd_perf_metrics_enabled(const struct dp_netdev_pmd_thread *pmd);
static void queue_netdev_flow_del(struct dp_netdev_pmd_thread *pmd,
                                  struct dp_netdev_flow *flow);
static void
emc_cache_init(struct emc_cache *flow_cache)
{
    int i;

    flow_cache->sweep_idx = 0;
    for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
        flow_cache->entries[i].flow = NULL;
        flow_cache->entries[i].key.hash = 0;
        flow_cache->entries[i].key.len = sizeof(struct miniflow);
        flowmap_init(&flow_cache->entries[i].key.mf.map);
    }
}

static void
smc_cache_init(struct smc_cache *smc_cache)
{
    int i, j;

    for (i = 0; i < SMC_BUCKET_CNT; i++) {
        for (j = 0; j < SMC_ENTRY_PER_BUCKET; j++) {
            smc_cache->buckets[i].flow_idx[j] = UINT16_MAX;
        }
    }
}

static void
dfc_cache_init(struct dfc_cache *flow_cache)
{
    emc_cache_init(&flow_cache->emc_cache);
    smc_cache_init(&flow_cache->smc_cache);
}

static void
emc_cache_uninit(struct emc_cache *flow_cache)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
        emc_clear_entry(&flow_cache->entries[i]);
    }
}

static void
smc_cache_uninit(struct smc_cache *smc)
{
    int i, j;

    for (i = 0; i < SMC_BUCKET_CNT; i++) {
        for (j = 0; j < SMC_ENTRY_PER_BUCKET; j++) {
            smc_clear_entry(&(smc->buckets[i]), j);
        }
    }
}

static void
dfc_cache_uninit(struct dfc_cache *flow_cache)
{
    smc_cache_uninit(&flow_cache->smc_cache);
    emc_cache_uninit(&flow_cache->emc_cache);
}

/* Check and clear dead flow references slowly (one entry at each
 * invocation). */
static void
emc_cache_slow_sweep(struct emc_cache *flow_cache)
{
    struct emc_entry *entry = &flow_cache->entries[flow_cache->sweep_idx];

    if (!emc_entry_alive(entry)) {
        emc_clear_entry(entry);
    }
    flow_cache->sweep_idx = (flow_cache->sweep_idx + 1) & EM_FLOW_HASH_MASK;
}
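
/* Illustration (not part of the original file): a minimal sketch of how a
 * polling loop can amortize EMC cleanup by sweeping a single entry only every
 * EMC_SWEEP_INTERVAL_SKETCH iterations.  The constant, the helper name and
 * the bare call sequence are assumptions for this sketch; the real pmd main
 * loop performs this alongside its other periodic work. */
#define EMC_SWEEP_INTERVAL_SKETCH 1024

static void
pmd_emc_housekeeping_sketch(struct dp_netdev_pmd_thread *pmd,
                            unsigned long long iteration)
{
    if (!(iteration % EMC_SWEEP_INTERVAL_SKETCH)) {
        /* One entry per call keeps cleanup cost negligible compared with
         * packet processing. */
        emc_cache_slow_sweep(&pmd->flow_cache.emc_cache);
    }
}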
/* Updates the time in the PMD thread's context and should be called in three
 * cases:
 *
 *     1. PMD structure initialization:
 *         - dp_netdev_configure_pmd()
 *
 *     2. Before processing of the new packet batch:
 *         - dpif_netdev_execute()
 *         - dp_netdev_process_rxq_port()
 *
 *     3. At least once per polling iteration in main polling threads if no
 *        packets were received in the current iteration:
 *         - dpif_netdev_run()
 *         - pmd_thread_main()
 *
 * 'pmd->ctx.now' should be used without update in all other cases if possible.
 */
static inline void
pmd_thread_ctx_time_update(struct dp_netdev_pmd_thread *pmd)
{
    pmd->ctx.now = time_usec();
}
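
/* Illustration (not part of the original file): a minimal sketch of refreshing
 * the cached time once before handing a batch to the datapath, as the comment
 * above prescribes for the rx-processing path.  The helper name and the bare
 * call sequence are assumptions for this sketch. */
static void
pmd_handle_rx_batch_sketch(struct dp_netdev_pmd_thread *pmd,
                           struct dp_packet_batch *batch, odp_port_t port_no)
{
    /* Refresh 'pmd->ctx.now' once per batch; calling time_usec() per packet
     * would be needlessly expensive. */
    pmd_thread_ctx_time_update(pmd);
    dp_netdev_input(pmd, batch, port_no);
}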
/* Returns true if 'dpif' is a netdev or dummy dpif, false otherwise. */
bool
dpif_is_netdev(const struct dpif *dpif)
{
    return dpif->dpif_class->open == dpif_netdev_open;
}

static struct dpif_netdev *
dpif_netdev_cast(const struct dpif *dpif)
{
    ovs_assert(dpif_is_netdev(dpif));
    return CONTAINER_OF(dpif, struct dpif_netdev, dpif);
}

static struct dp_netdev *
get_dp_netdev(const struct dpif *dpif)
{
    return dpif_netdev_cast(dpif)->dp;
}
929 PMD_INFO_SHOW_STATS
, /* Show how cpu cycles are spent. */
930 PMD_INFO_CLEAR_STATS
, /* Set the cycles count to 0. */
931 PMD_INFO_SHOW_RXQ
, /* Show poll lists of pmd threads. */
932 PMD_INFO_PERF_SHOW
, /* Show pmd performance details. */
936 format_pmd_thread(struct ds
*reply
, struct dp_netdev_pmd_thread
*pmd
)
938 ds_put_cstr(reply
, (pmd
->core_id
== NON_PMD_CORE_ID
)
939 ? "main thread" : "pmd thread");
940 if (pmd
->numa_id
!= OVS_NUMA_UNSPEC
) {
941 ds_put_format(reply
, " numa_id %d", pmd
->numa_id
);
943 if (pmd
->core_id
!= OVS_CORE_UNSPEC
&& pmd
->core_id
!= NON_PMD_CORE_ID
) {
944 ds_put_format(reply
, " core_id %u", pmd
->core_id
);
946 ds_put_cstr(reply
, ":\n");
950 pmd_info_show_stats(struct ds
*reply
,
951 struct dp_netdev_pmd_thread
*pmd
)
953 uint64_t stats
[PMD_N_STATS
];
954 uint64_t total_cycles
, total_packets
;
955 double passes_per_pkt
= 0;
956 double lookups_per_hit
= 0;
957 double packets_per_batch
= 0;
959 pmd_perf_read_counters(&pmd
->perf_stats
, stats
);
960 total_cycles
= stats
[PMD_CYCLES_ITER_IDLE
]
961 + stats
[PMD_CYCLES_ITER_BUSY
];
962 total_packets
= stats
[PMD_STAT_RECV
];
964 format_pmd_thread(reply
, pmd
);
966 if (total_packets
> 0) {
967 passes_per_pkt
= (total_packets
+ stats
[PMD_STAT_RECIRC
])
968 / (double) total_packets
;
970 if (stats
[PMD_STAT_MASKED_HIT
] > 0) {
971 lookups_per_hit
= stats
[PMD_STAT_MASKED_LOOKUP
]
972 / (double) stats
[PMD_STAT_MASKED_HIT
];
974 if (stats
[PMD_STAT_SENT_BATCHES
] > 0) {
975 packets_per_batch
= stats
[PMD_STAT_SENT_PKTS
]
976 / (double) stats
[PMD_STAT_SENT_BATCHES
];
980 " packets received: %"PRIu64
"\n"
981 " packet recirculations: %"PRIu64
"\n"
982 " avg. datapath passes per packet: %.02f\n"
983 " emc hits: %"PRIu64
"\n"
984 " smc hits: %"PRIu64
"\n"
985 " megaflow hits: %"PRIu64
"\n"
986 " avg. subtable lookups per megaflow hit: %.02f\n"
987 " miss with success upcall: %"PRIu64
"\n"
988 " miss with failed upcall: %"PRIu64
"\n"
989 " avg. packets per output batch: %.02f\n",
990 total_packets
, stats
[PMD_STAT_RECIRC
],
991 passes_per_pkt
, stats
[PMD_STAT_EXACT_HIT
],
992 stats
[PMD_STAT_SMC_HIT
],
993 stats
[PMD_STAT_MASKED_HIT
], lookups_per_hit
,
994 stats
[PMD_STAT_MISS
], stats
[PMD_STAT_LOST
],
997 if (total_cycles
== 0) {
1001 ds_put_format(reply
,
1002 " idle cycles: %"PRIu64
" (%.02f%%)\n"
1003 " processing cycles: %"PRIu64
" (%.02f%%)\n",
1004 stats
[PMD_CYCLES_ITER_IDLE
],
1005 stats
[PMD_CYCLES_ITER_IDLE
] / (double) total_cycles
* 100,
1006 stats
[PMD_CYCLES_ITER_BUSY
],
1007 stats
[PMD_CYCLES_ITER_BUSY
] / (double) total_cycles
* 100);
1009 if (total_packets
== 0) {
1013 ds_put_format(reply
,
1014 " avg cycles per packet: %.02f (%"PRIu64
"/%"PRIu64
")\n",
1015 total_cycles
/ (double) total_packets
,
1016 total_cycles
, total_packets
);
1018 ds_put_format(reply
,
1019 " avg processing cycles per packet: "
1020 "%.02f (%"PRIu64
"/%"PRIu64
")\n",
1021 stats
[PMD_CYCLES_ITER_BUSY
] / (double) total_packets
,
1022 stats
[PMD_CYCLES_ITER_BUSY
], total_packets
);
1026 pmd_info_show_perf(struct ds
*reply
,
1027 struct dp_netdev_pmd_thread
*pmd
,
1028 struct pmd_perf_params
*par
)
1030 if (pmd
->core_id
!= NON_PMD_CORE_ID
) {
1032 xastrftime_msec("%H:%M:%S.###", time_wall_msec(), true);
1033 long long now
= time_msec();
1034 double duration
= (now
- pmd
->perf_stats
.start_ms
) / 1000.0;
1036 ds_put_cstr(reply
, "\n");
1037 ds_put_format(reply
, "Time: %s\n", time_str
);
1038 ds_put_format(reply
, "Measurement duration: %.3f s\n", duration
);
1039 ds_put_cstr(reply
, "\n");
1040 format_pmd_thread(reply
, pmd
);
1041 ds_put_cstr(reply
, "\n");
1042 pmd_perf_format_overall_stats(reply
, &pmd
->perf_stats
, duration
);
1043 if (pmd_perf_metrics_enabled(pmd
)) {
1044 /* Prevent parallel clearing of perf metrics. */
1045 ovs_mutex_lock(&pmd
->perf_stats
.clear_mutex
);
1046 if (par
->histograms
) {
1047 ds_put_cstr(reply
, "\n");
1048 pmd_perf_format_histograms(reply
, &pmd
->perf_stats
);
1050 if (par
->iter_hist_len
> 0) {
1051 ds_put_cstr(reply
, "\n");
1052 pmd_perf_format_iteration_history(reply
, &pmd
->perf_stats
,
1053 par
->iter_hist_len
);
1055 if (par
->ms_hist_len
> 0) {
1056 ds_put_cstr(reply
, "\n");
1057 pmd_perf_format_ms_history(reply
, &pmd
->perf_stats
,
1060 ovs_mutex_unlock(&pmd
->perf_stats
.clear_mutex
);
1067 compare_poll_list(const void *a_
, const void *b_
)
1069 const struct rxq_poll
*a
= a_
;
1070 const struct rxq_poll
*b
= b_
;
1072 const char *namea
= netdev_rxq_get_name(a
->rxq
->rx
);
1073 const char *nameb
= netdev_rxq_get_name(b
->rxq
->rx
);
1075 int cmp
= strcmp(namea
, nameb
);
1077 return netdev_rxq_get_queue_id(a
->rxq
->rx
)
1078 - netdev_rxq_get_queue_id(b
->rxq
->rx
);
1085 sorted_poll_list(struct dp_netdev_pmd_thread
*pmd
, struct rxq_poll
**list
,
1088 struct rxq_poll
*ret
, *poll
;
1091 *n
= hmap_count(&pmd
->poll_list
);
1095 ret
= xcalloc(*n
, sizeof *ret
);
1097 HMAP_FOR_EACH (poll
, node
, &pmd
->poll_list
) {
1101 ovs_assert(i
== *n
);
1102 qsort(ret
, *n
, sizeof *ret
, compare_poll_list
);
1109 pmd_info_show_rxq(struct ds
*reply
, struct dp_netdev_pmd_thread
*pmd
)
1111 if (pmd
->core_id
!= NON_PMD_CORE_ID
) {
1112 struct rxq_poll
*list
;
1114 uint64_t total_cycles
= 0;
1116 ds_put_format(reply
,
1117 "pmd thread numa_id %d core_id %u:\n isolated : %s\n",
1118 pmd
->numa_id
, pmd
->core_id
, (pmd
->isolated
)
1119 ? "true" : "false");
1121 ovs_mutex_lock(&pmd
->port_mutex
);
1122 sorted_poll_list(pmd
, &list
, &n_rxq
);
1124 /* Get the total pmd cycles for an interval. */
1125 atomic_read_relaxed(&pmd
->intrvl_cycles
, &total_cycles
);
1126 /* Estimate the cycles to cover all intervals. */
1127 total_cycles
*= PMD_RXQ_INTERVAL_MAX
;
1129 for (int i
= 0; i
< n_rxq
; i
++) {
1130 struct dp_netdev_rxq
*rxq
= list
[i
].rxq
;
1131 const char *name
= netdev_rxq_get_name(rxq
->rx
);
1132 uint64_t proc_cycles
= 0;
1134 for (int j
= 0; j
< PMD_RXQ_INTERVAL_MAX
; j
++) {
1135 proc_cycles
+= dp_netdev_rxq_get_intrvl_cycles(rxq
, j
);
1137 ds_put_format(reply
, " port: %-16s queue-id: %2d", name
,
1138 netdev_rxq_get_queue_id(list
[i
].rxq
->rx
));
1139 ds_put_format(reply
, " pmd usage: ");
1141 ds_put_format(reply
, "%2"PRIu64
"",
1142 proc_cycles
* 100 / total_cycles
);
1143 ds_put_cstr(reply
, " %");
1145 ds_put_format(reply
, "%s", "NOT AVAIL");
1147 ds_put_cstr(reply
, "\n");
1149 ovs_mutex_unlock(&pmd
->port_mutex
);
1155 compare_poll_thread_list(const void *a_
, const void *b_
)
1157 const struct dp_netdev_pmd_thread
*a
, *b
;
1159 a
= *(struct dp_netdev_pmd_thread
**)a_
;
1160 b
= *(struct dp_netdev_pmd_thread
**)b_
;
1162 if (a
->core_id
< b
->core_id
) {
1165 if (a
->core_id
> b
->core_id
) {
1171 /* Create a sorted list of pmd's from the dp->poll_threads cmap. We can use
1172 * this list, as long as we do not go to quiescent state. */
1174 sorted_poll_thread_list(struct dp_netdev
*dp
,
1175 struct dp_netdev_pmd_thread
***list
,
1178 struct dp_netdev_pmd_thread
*pmd
;
1179 struct dp_netdev_pmd_thread
**pmd_list
;
1180 size_t k
= 0, n_pmds
;
1182 n_pmds
= cmap_count(&dp
->poll_threads
);
1183 pmd_list
= xcalloc(n_pmds
, sizeof *pmd_list
);
1185 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
1189 pmd_list
[k
++] = pmd
;
1192 qsort(pmd_list
, k
, sizeof *pmd_list
, compare_poll_thread_list
);
1199 dpif_netdev_pmd_rebalance(struct unixctl_conn
*conn
, int argc
,
1200 const char *argv
[], void *aux OVS_UNUSED
)
1202 struct ds reply
= DS_EMPTY_INITIALIZER
;
1203 struct dp_netdev
*dp
= NULL
;
1205 ovs_mutex_lock(&dp_netdev_mutex
);
1208 dp
= shash_find_data(&dp_netdevs
, argv
[1]);
1209 } else if (shash_count(&dp_netdevs
) == 1) {
1210 /* There's only one datapath */
1211 dp
= shash_first(&dp_netdevs
)->data
;
1215 ovs_mutex_unlock(&dp_netdev_mutex
);
1216 unixctl_command_reply_error(conn
,
1217 "please specify an existing datapath");
1221 dp_netdev_request_reconfigure(dp
);
1222 ovs_mutex_unlock(&dp_netdev_mutex
);
1223 ds_put_cstr(&reply
, "pmd rxq rebalance requested.\n");
1224 unixctl_command_reply(conn
, ds_cstr(&reply
));
1229 dpif_netdev_pmd_info(struct unixctl_conn
*conn
, int argc
, const char *argv
[],
1232 struct ds reply
= DS_EMPTY_INITIALIZER
;
1233 struct dp_netdev_pmd_thread
**pmd_list
;
1234 struct dp_netdev
*dp
= NULL
;
1235 enum pmd_info_type type
= *(enum pmd_info_type
*) aux
;
1236 unsigned int core_id
;
1237 bool filter_on_pmd
= false;
1240 ovs_mutex_lock(&dp_netdev_mutex
);
1243 if (!strcmp(argv
[1], "-pmd") && argc
> 2) {
1244 if (str_to_uint(argv
[2], 10, &core_id
)) {
1245 filter_on_pmd
= true;
1250 dp
= shash_find_data(&dp_netdevs
, argv
[1]);
1257 if (shash_count(&dp_netdevs
) == 1) {
1258 /* There's only one datapath */
1259 dp
= shash_first(&dp_netdevs
)->data
;
1261 ovs_mutex_unlock(&dp_netdev_mutex
);
1262 unixctl_command_reply_error(conn
,
1263 "please specify an existing datapath");
1268 sorted_poll_thread_list(dp
, &pmd_list
, &n
);
1269 for (size_t i
= 0; i
< n
; i
++) {
1270 struct dp_netdev_pmd_thread
*pmd
= pmd_list
[i
];
1274 if (filter_on_pmd
&& pmd
->core_id
!= core_id
) {
1277 if (type
== PMD_INFO_SHOW_RXQ
) {
1278 pmd_info_show_rxq(&reply
, pmd
);
1279 } else if (type
== PMD_INFO_CLEAR_STATS
) {
1280 pmd_perf_stats_clear(&pmd
->perf_stats
);
1281 } else if (type
== PMD_INFO_SHOW_STATS
) {
1282 pmd_info_show_stats(&reply
, pmd
);
1283 } else if (type
== PMD_INFO_PERF_SHOW
) {
1284 pmd_info_show_perf(&reply
, pmd
, (struct pmd_perf_params
*)aux
);
1289 ovs_mutex_unlock(&dp_netdev_mutex
);
1291 unixctl_command_reply(conn
, ds_cstr(&reply
));
1296 pmd_perf_show_cmd(struct unixctl_conn
*conn
, int argc
,
1298 void *aux OVS_UNUSED
)
1300 struct pmd_perf_params par
;
1301 long int it_hist
= 0, ms_hist
= 0;
1302 par
.histograms
= true;
1305 if (!strcmp(argv
[1], "-nh")) {
1306 par
.histograms
= false;
1309 } else if (!strcmp(argv
[1], "-it") && argc
> 2) {
1310 it_hist
= strtol(argv
[2], NULL
, 10);
1313 } else if (it_hist
> HISTORY_LEN
) {
1314 it_hist
= HISTORY_LEN
;
1318 } else if (!strcmp(argv
[1], "-ms") && argc
> 2) {
1319 ms_hist
= strtol(argv
[2], NULL
, 10);
1322 } else if (ms_hist
> HISTORY_LEN
) {
1323 ms_hist
= HISTORY_LEN
;
1331 par
.iter_hist_len
= it_hist
;
1332 par
.ms_hist_len
= ms_hist
;
1333 par
.command_type
= PMD_INFO_PERF_SHOW
;
1334 dpif_netdev_pmd_info(conn
, argc
, argv
, &par
);
1338 dpif_netdev_init(void)
1340 static enum pmd_info_type show_aux
= PMD_INFO_SHOW_STATS
,
1341 clear_aux
= PMD_INFO_CLEAR_STATS
,
1342 poll_aux
= PMD_INFO_SHOW_RXQ
;
1344 unixctl_command_register("dpif-netdev/pmd-stats-show", "[-pmd core] [dp]",
1345 0, 3, dpif_netdev_pmd_info
,
1347 unixctl_command_register("dpif-netdev/pmd-stats-clear", "[-pmd core] [dp]",
1348 0, 3, dpif_netdev_pmd_info
,
1349 (void *)&clear_aux
);
1350 unixctl_command_register("dpif-netdev/pmd-rxq-show", "[-pmd core] [dp]",
1351 0, 3, dpif_netdev_pmd_info
,
1353 unixctl_command_register("dpif-netdev/pmd-perf-show",
1354 "[-nh] [-it iter-history-len]"
1355 " [-ms ms-history-len]"
1356 " [-pmd core] [dp]",
1357 0, 8, pmd_perf_show_cmd
,
1359 unixctl_command_register("dpif-netdev/pmd-rxq-rebalance", "[dp]",
1360 0, 1, dpif_netdev_pmd_rebalance
,
1362 unixctl_command_register("dpif-netdev/pmd-perf-log-set",
1363 "on|off [-b before] [-a after] [-e|-ne] "
1364 "[-us usec] [-q qlen]",
1365 0, 10, pmd_perf_log_set_cmd
,
1371 dpif_netdev_enumerate(struct sset
*all_dps
,
1372 const struct dpif_class
*dpif_class
)
1374 struct shash_node
*node
;
1376 ovs_mutex_lock(&dp_netdev_mutex
);
1377 SHASH_FOR_EACH(node
, &dp_netdevs
) {
1378 struct dp_netdev
*dp
= node
->data
;
1379 if (dpif_class
!= dp
->class) {
1380 /* 'dp_netdevs' contains both "netdev" and "dummy" dpifs.
1381 * If the class doesn't match, skip this dpif. */
1384 sset_add(all_dps
, node
->name
);
1386 ovs_mutex_unlock(&dp_netdev_mutex
);
1392 dpif_netdev_class_is_dummy(const struct dpif_class
*class)
1394 return class != &dpif_netdev_class
;
1398 dpif_netdev_port_open_type(const struct dpif_class
*class, const char *type
)
1400 return strcmp(type
, "internal") ? type
1401 : dpif_netdev_class_is_dummy(class) ? "dummy-internal"
1405 static struct dpif
*
1406 create_dpif_netdev(struct dp_netdev
*dp
)
1408 uint16_t netflow_id
= hash_string(dp
->name
, 0);
1409 struct dpif_netdev
*dpif
;
1411 ovs_refcount_ref(&dp
->ref_cnt
);
1413 dpif
= xmalloc(sizeof *dpif
);
1414 dpif_init(&dpif
->dpif
, dp
->class, dp
->name
, netflow_id
>> 8, netflow_id
);
1416 dpif
->last_port_seq
= seq_read(dp
->port_seq
);
1421 /* Choose an unused, non-zero port number and return it on success.
1422 * Return ODPP_NONE on failure. */
1424 choose_port(struct dp_netdev
*dp
, const char *name
)
1425 OVS_REQUIRES(dp
->port_mutex
)
1429 if (dp
->class != &dpif_netdev_class
) {
1433 /* If the port name begins with "br", start the number search at
1434 * 100 to make writing tests easier. */
1435 if (!strncmp(name
, "br", 2)) {
1439 /* If the port name contains a number, try to assign that port number.
1440 * This can make writing unit tests easier because port numbers are
1442 for (p
= name
; *p
!= '\0'; p
++) {
1443 if (isdigit((unsigned char) *p
)) {
1444 port_no
= start_no
+ strtol(p
, NULL
, 10);
1445 if (port_no
> 0 && port_no
!= odp_to_u32(ODPP_NONE
)
1446 && !dp_netdev_lookup_port(dp
, u32_to_odp(port_no
))) {
1447 return u32_to_odp(port_no
);
1454 for (port_no
= 1; port_no
<= UINT16_MAX
; port_no
++) {
1455 if (!dp_netdev_lookup_port(dp
, u32_to_odp(port_no
))) {
1456 return u32_to_odp(port_no
);
1464 create_dp_netdev(const char *name
, const struct dpif_class
*class,
1465 struct dp_netdev
**dpp
)
1466 OVS_REQUIRES(dp_netdev_mutex
)
1468 struct dp_netdev
*dp
;
1471 dp
= xzalloc(sizeof *dp
);
1472 shash_add(&dp_netdevs
, name
, dp
);
1474 *CONST_CAST(const struct dpif_class
**, &dp
->class) = class;
1475 *CONST_CAST(const char **, &dp
->name
) = xstrdup(name
);
1476 ovs_refcount_init(&dp
->ref_cnt
);
1477 atomic_flag_clear(&dp
->destroyed
);
1479 ovs_mutex_init(&dp
->port_mutex
);
1480 hmap_init(&dp
->ports
);
1481 dp
->port_seq
= seq_create();
1482 fat_rwlock_init(&dp
->upcall_rwlock
);
1484 dp
->reconfigure_seq
= seq_create();
1485 dp
->last_reconfigure_seq
= seq_read(dp
->reconfigure_seq
);
1487 for (int i
= 0; i
< N_METER_LOCKS
; ++i
) {
1488 ovs_mutex_init_adaptive(&dp
->meter_locks
[i
]);
1491 /* Disable upcalls by default. */
1492 dp_netdev_disable_upcall(dp
);
1493 dp
->upcall_aux
= NULL
;
1494 dp
->upcall_cb
= NULL
;
1496 conntrack_init(&dp
->conntrack
);
1498 atomic_init(&dp
->emc_insert_min
, DEFAULT_EM_FLOW_INSERT_MIN
);
1499 atomic_init(&dp
->tx_flush_interval
, DEFAULT_TX_FLUSH_INTERVAL
);
1501 cmap_init(&dp
->poll_threads
);
1503 ovs_mutex_init(&dp
->tx_qid_pool_mutex
);
1504 /* We need 1 Tx queue for each possible core + 1 for non-PMD threads. */
1505 dp
->tx_qid_pool
= id_pool_create(0, ovs_numa_get_n_cores() + 1);
1507 ovs_mutex_init_recursive(&dp
->non_pmd_mutex
);
1508 ovsthread_key_create(&dp
->per_pmd_key
, NULL
);
1510 ovs_mutex_lock(&dp
->port_mutex
);
1511 /* non-PMD will be created before all other threads and will
1512 * allocate static_tx_qid = 0. */
1513 dp_netdev_set_nonpmd(dp
);
1515 error
= do_add_port(dp
, name
, dpif_netdev_port_open_type(dp
->class,
1518 ovs_mutex_unlock(&dp
->port_mutex
);
1524 dp
->last_tnl_conf_seq
= seq_read(tnl_conf_seq
);
1530 dp_netdev_request_reconfigure(struct dp_netdev
*dp
)
1532 seq_change(dp
->reconfigure_seq
);
1536 dp_netdev_is_reconf_required(struct dp_netdev
*dp
)
1538 return seq_read(dp
->reconfigure_seq
) != dp
->last_reconfigure_seq
;
1542 dpif_netdev_open(const struct dpif_class
*class, const char *name
,
1543 bool create
, struct dpif
**dpifp
)
1545 struct dp_netdev
*dp
;
1548 ovs_mutex_lock(&dp_netdev_mutex
);
1549 dp
= shash_find_data(&dp_netdevs
, name
);
1551 error
= create
? create_dp_netdev(name
, class, &dp
) : ENODEV
;
1553 error
= (dp
->class != class ? EINVAL
1558 *dpifp
= create_dpif_netdev(dp
);
1561 ovs_mutex_unlock(&dp_netdev_mutex
);
1567 dp_netdev_destroy_upcall_lock(struct dp_netdev
*dp
)
1568 OVS_NO_THREAD_SAFETY_ANALYSIS
1570 /* Check that upcalls are disabled, i.e. that the rwlock is taken */
1571 ovs_assert(fat_rwlock_tryrdlock(&dp
->upcall_rwlock
));
1573 /* Before freeing a lock we should release it */
1574 fat_rwlock_unlock(&dp
->upcall_rwlock
);
1575 fat_rwlock_destroy(&dp
->upcall_rwlock
);
1579 dp_delete_meter(struct dp_netdev
*dp
, uint32_t meter_id
)
1580 OVS_REQUIRES(dp
->meter_locks
[meter_id
% N_METER_LOCKS
])
1582 if (dp
->meters
[meter_id
]) {
1583 free(dp
->meters
[meter_id
]);
1584 dp
->meters
[meter_id
] = NULL
;
1588 /* Requires dp_netdev_mutex so that we can't get a new reference to 'dp'
1589 * through the 'dp_netdevs' shash while freeing 'dp'. */
1591 dp_netdev_free(struct dp_netdev
*dp
)
1592 OVS_REQUIRES(dp_netdev_mutex
)
1594 struct dp_netdev_port
*port
, *next
;
1596 shash_find_and_delete(&dp_netdevs
, dp
->name
);
1598 ovs_mutex_lock(&dp
->port_mutex
);
1599 HMAP_FOR_EACH_SAFE (port
, next
, node
, &dp
->ports
) {
1600 do_del_port(dp
, port
);
1602 ovs_mutex_unlock(&dp
->port_mutex
);
1604 dp_netdev_destroy_all_pmds(dp
, true);
1605 cmap_destroy(&dp
->poll_threads
);
1607 ovs_mutex_destroy(&dp
->tx_qid_pool_mutex
);
1608 id_pool_destroy(dp
->tx_qid_pool
);
1610 ovs_mutex_destroy(&dp
->non_pmd_mutex
);
1611 ovsthread_key_delete(dp
->per_pmd_key
);
1613 conntrack_destroy(&dp
->conntrack
);
1616 seq_destroy(dp
->reconfigure_seq
);
1618 seq_destroy(dp
->port_seq
);
1619 hmap_destroy(&dp
->ports
);
1620 ovs_mutex_destroy(&dp
->port_mutex
);
1622 /* Upcalls must be disabled at this point */
1623 dp_netdev_destroy_upcall_lock(dp
);
1627 for (i
= 0; i
< MAX_METERS
; ++i
) {
1629 dp_delete_meter(dp
, i
);
1630 meter_unlock(dp
, i
);
1632 for (i
= 0; i
< N_METER_LOCKS
; ++i
) {
1633 ovs_mutex_destroy(&dp
->meter_locks
[i
]);
1636 free(dp
->pmd_cmask
);
1637 free(CONST_CAST(char *, dp
->name
));
1642 dp_netdev_unref(struct dp_netdev
*dp
)
1645 /* Take dp_netdev_mutex so that, if dp->ref_cnt falls to zero, we can't
1646 * get a new reference to 'dp' through the 'dp_netdevs' shash. */
1647 ovs_mutex_lock(&dp_netdev_mutex
);
1648 if (ovs_refcount_unref_relaxed(&dp
->ref_cnt
) == 1) {
1651 ovs_mutex_unlock(&dp_netdev_mutex
);
1656 dpif_netdev_close(struct dpif
*dpif
)
1658 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1660 dp_netdev_unref(dp
);
1665 dpif_netdev_destroy(struct dpif
*dpif
)
1667 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1669 if (!atomic_flag_test_and_set(&dp
->destroyed
)) {
1670 if (ovs_refcount_unref_relaxed(&dp
->ref_cnt
) == 1) {
1671 /* Can't happen: 'dpif' still owns a reference to 'dp'. */
/* Add 'n' to the atomic variable 'var' non-atomically and using relaxed
 * load/store semantics.  While the increment is not atomic, the load and
 * store operations are, making it impossible to read inconsistent values.
 *
 * This is used to update thread local stats counters. */
static void
non_atomic_ullong_add(atomic_ullong *var, unsigned long long n)
{
    unsigned long long tmp;

    atomic_read_relaxed(var, &tmp);
    tmp += n;
    atomic_store_relaxed(var, tmp);
}
1695 dpif_netdev_get_stats(const struct dpif
*dpif
, struct dpif_dp_stats
*stats
)
1697 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1698 struct dp_netdev_pmd_thread
*pmd
;
1699 uint64_t pmd_stats
[PMD_N_STATS
];
1701 stats
->n_flows
= stats
->n_hit
= stats
->n_missed
= stats
->n_lost
= 0;
1702 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
1703 stats
->n_flows
+= cmap_count(&pmd
->flow_table
);
1704 pmd_perf_read_counters(&pmd
->perf_stats
, pmd_stats
);
1705 stats
->n_hit
+= pmd_stats
[PMD_STAT_EXACT_HIT
];
1706 stats
->n_hit
+= pmd_stats
[PMD_STAT_SMC_HIT
];
1707 stats
->n_hit
+= pmd_stats
[PMD_STAT_MASKED_HIT
];
1708 stats
->n_missed
+= pmd_stats
[PMD_STAT_MISS
];
1709 stats
->n_lost
+= pmd_stats
[PMD_STAT_LOST
];
1711 stats
->n_masks
= UINT32_MAX
;
1712 stats
->n_mask_hit
= UINT64_MAX
;
1718 dp_netdev_reload_pmd__(struct dp_netdev_pmd_thread
*pmd
)
1720 if (pmd
->core_id
== NON_PMD_CORE_ID
) {
1721 ovs_mutex_lock(&pmd
->dp
->non_pmd_mutex
);
1722 ovs_mutex_lock(&pmd
->port_mutex
);
1723 pmd_load_cached_ports(pmd
);
1724 ovs_mutex_unlock(&pmd
->port_mutex
);
1725 ovs_mutex_unlock(&pmd
->dp
->non_pmd_mutex
);
1729 ovs_mutex_lock(&pmd
->cond_mutex
);
1730 seq_change(pmd
->reload_seq
);
1731 atomic_store_relaxed(&pmd
->reload
, true);
1732 ovs_mutex_cond_wait(&pmd
->cond
, &pmd
->cond_mutex
);
1733 ovs_mutex_unlock(&pmd
->cond_mutex
);
1737 hash_port_no(odp_port_t port_no
)
1739 return hash_int(odp_to_u32(port_no
), 0);
1743 port_create(const char *devname
, const char *type
,
1744 odp_port_t port_no
, struct dp_netdev_port
**portp
)
1746 struct netdev_saved_flags
*sf
;
1747 struct dp_netdev_port
*port
;
1748 enum netdev_flags flags
;
1749 struct netdev
*netdev
;
1754 /* Open and validate network device. */
1755 error
= netdev_open(devname
, type
, &netdev
);
1759 /* XXX reject non-Ethernet devices */
1761 netdev_get_flags(netdev
, &flags
);
1762 if (flags
& NETDEV_LOOPBACK
) {
1763 VLOG_ERR("%s: cannot add a loopback device", devname
);
1768 error
= netdev_turn_flags_on(netdev
, NETDEV_PROMISC
, &sf
);
1770 VLOG_ERR("%s: cannot set promisc flag", devname
);
1774 port
= xzalloc(sizeof *port
);
1775 port
->port_no
= port_no
;
1776 port
->netdev
= netdev
;
1777 port
->type
= xstrdup(type
);
1779 port
->need_reconfigure
= true;
1780 ovs_mutex_init(&port
->txq_used_mutex
);
1787 netdev_close(netdev
);
1792 do_add_port(struct dp_netdev
*dp
, const char *devname
, const char *type
,
1794 OVS_REQUIRES(dp
->port_mutex
)
1796 struct dp_netdev_port
*port
;
1799 /* Reject devices already in 'dp'. */
1800 if (!get_port_by_name(dp
, devname
, &port
)) {
1804 error
= port_create(devname
, type
, port_no
, &port
);
1809 hmap_insert(&dp
->ports
, &port
->node
, hash_port_no(port_no
));
1810 seq_change(dp
->port_seq
);
1812 reconfigure_datapath(dp
);
1818 dpif_netdev_port_add(struct dpif
*dpif
, struct netdev
*netdev
,
1819 odp_port_t
*port_nop
)
1821 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1822 char namebuf
[NETDEV_VPORT_NAME_BUFSIZE
];
1823 const char *dpif_port
;
1827 ovs_mutex_lock(&dp
->port_mutex
);
1828 dpif_port
= netdev_vport_get_dpif_port(netdev
, namebuf
, sizeof namebuf
);
1829 if (*port_nop
!= ODPP_NONE
) {
1830 port_no
= *port_nop
;
1831 error
= dp_netdev_lookup_port(dp
, *port_nop
) ? EBUSY
: 0;
1833 port_no
= choose_port(dp
, dpif_port
);
1834 error
= port_no
== ODPP_NONE
? EFBIG
: 0;
1837 *port_nop
= port_no
;
1838 error
= do_add_port(dp
, dpif_port
, netdev_get_type(netdev
), port_no
);
1840 ovs_mutex_unlock(&dp
->port_mutex
);
1846 dpif_netdev_port_del(struct dpif
*dpif
, odp_port_t port_no
)
1848 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1851 ovs_mutex_lock(&dp
->port_mutex
);
1852 if (port_no
== ODPP_LOCAL
) {
1855 struct dp_netdev_port
*port
;
1857 error
= get_port_by_number(dp
, port_no
, &port
);
1859 do_del_port(dp
, port
);
1862 ovs_mutex_unlock(&dp
->port_mutex
);
1868 is_valid_port_number(odp_port_t port_no
)
1870 return port_no
!= ODPP_NONE
;
1873 static struct dp_netdev_port
*
1874 dp_netdev_lookup_port(const struct dp_netdev
*dp
, odp_port_t port_no
)
1875 OVS_REQUIRES(dp
->port_mutex
)
1877 struct dp_netdev_port
*port
;
1879 HMAP_FOR_EACH_WITH_HASH (port
, node
, hash_port_no(port_no
), &dp
->ports
) {
1880 if (port
->port_no
== port_no
) {
1888 get_port_by_number(struct dp_netdev
*dp
,
1889 odp_port_t port_no
, struct dp_netdev_port
**portp
)
1890 OVS_REQUIRES(dp
->port_mutex
)
1892 if (!is_valid_port_number(port_no
)) {
1896 *portp
= dp_netdev_lookup_port(dp
, port_no
);
1897 return *portp
? 0 : ENODEV
;
1902 port_destroy(struct dp_netdev_port
*port
)
1908 netdev_close(port
->netdev
);
1909 netdev_restore_flags(port
->sf
);
1911 for (unsigned i
= 0; i
< port
->n_rxq
; i
++) {
1912 netdev_rxq_close(port
->rxqs
[i
].rx
);
1914 ovs_mutex_destroy(&port
->txq_used_mutex
);
1915 free(port
->rxq_affinity_list
);
1916 free(port
->txq_used
);
1923 get_port_by_name(struct dp_netdev
*dp
,
1924 const char *devname
, struct dp_netdev_port
**portp
)
1925 OVS_REQUIRES(dp
->port_mutex
)
1927 struct dp_netdev_port
*port
;
1929 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
1930 if (!strcmp(netdev_get_name(port
->netdev
), devname
)) {
/* Callers of dpif_netdev_port_query_by_name() expect ENODEV for a
 * non-existent device. */
1941 /* Returns 'true' if there is a port with pmd netdev. */
1943 has_pmd_port(struct dp_netdev
*dp
)
1944 OVS_REQUIRES(dp
->port_mutex
)
1946 struct dp_netdev_port
*port
;
1948 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
1949 if (netdev_is_pmd(port
->netdev
)) {
1958 do_del_port(struct dp_netdev
*dp
, struct dp_netdev_port
*port
)
1959 OVS_REQUIRES(dp
->port_mutex
)
1961 hmap_remove(&dp
->ports
, &port
->node
);
1962 seq_change(dp
->port_seq
);
1964 reconfigure_datapath(dp
);
1970 answer_port_query(const struct dp_netdev_port
*port
,
1971 struct dpif_port
*dpif_port
)
1973 dpif_port
->name
= xstrdup(netdev_get_name(port
->netdev
));
1974 dpif_port
->type
= xstrdup(port
->type
);
1975 dpif_port
->port_no
= port
->port_no
;
1979 dpif_netdev_port_query_by_number(const struct dpif
*dpif
, odp_port_t port_no
,
1980 struct dpif_port
*dpif_port
)
1982 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1983 struct dp_netdev_port
*port
;
1986 ovs_mutex_lock(&dp
->port_mutex
);
1987 error
= get_port_by_number(dp
, port_no
, &port
);
1988 if (!error
&& dpif_port
) {
1989 answer_port_query(port
, dpif_port
);
1991 ovs_mutex_unlock(&dp
->port_mutex
);
1997 dpif_netdev_port_query_by_name(const struct dpif
*dpif
, const char *devname
,
1998 struct dpif_port
*dpif_port
)
2000 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
2001 struct dp_netdev_port
*port
;
2004 ovs_mutex_lock(&dp
->port_mutex
);
2005 error
= get_port_by_name(dp
, devname
, &port
);
2006 if (!error
&& dpif_port
) {
2007 answer_port_query(port
, dpif_port
);
2009 ovs_mutex_unlock(&dp
->port_mutex
);
2015 dp_netdev_flow_free(struct dp_netdev_flow
*flow
)
2017 dp_netdev_actions_free(dp_netdev_flow_get_actions(flow
));
2021 static void dp_netdev_flow_unref(struct dp_netdev_flow
*flow
)
2023 if (ovs_refcount_unref_relaxed(&flow
->ref_cnt
) == 1) {
2024 ovsrcu_postpone(dp_netdev_flow_free
, flow
);
2029 dp_netdev_flow_hash(const ovs_u128
*ufid
)
2031 return ufid
->u32
[0];
2034 static inline struct dpcls
*
2035 dp_netdev_pmd_lookup_dpcls(struct dp_netdev_pmd_thread
*pmd
,
2039 uint32_t hash
= hash_port_no(in_port
);
2040 CMAP_FOR_EACH_WITH_HASH (cls
, node
, hash
, &pmd
->classifiers
) {
2041 if (cls
->in_port
== in_port
) {
2042 /* Port classifier exists already */
2049 static inline struct dpcls
*
2050 dp_netdev_pmd_find_dpcls(struct dp_netdev_pmd_thread
*pmd
,
2052 OVS_REQUIRES(pmd
->flow_mutex
)
2054 struct dpcls
*cls
= dp_netdev_pmd_lookup_dpcls(pmd
, in_port
);
2055 uint32_t hash
= hash_port_no(in_port
);
2058 /* Create new classifier for in_port */
2059 cls
= xmalloc(sizeof(*cls
));
2061 cls
->in_port
= in_port
;
2062 cmap_insert(&pmd
->classifiers
, &cls
->node
, hash
);
2063 VLOG_DBG("Creating dpcls %p for in_port %d", cls
, in_port
);
2068 #define MAX_FLOW_MARK (UINT32_MAX - 1)
2069 #define INVALID_FLOW_MARK (UINT32_MAX)
2071 struct megaflow_to_mark_data
{
2072 const struct cmap_node node
;
2078 struct cmap megaflow_to_mark
;
2079 struct cmap mark_to_flow
;
2080 struct id_pool
*pool
;
2083 static struct flow_mark flow_mark
= {
2084 .megaflow_to_mark
= CMAP_INITIALIZER
,
2085 .mark_to_flow
= CMAP_INITIALIZER
,
2089 flow_mark_alloc(void)
2093 if (!flow_mark
.pool
) {
/* Not initialized yet, do it here. */
2095 flow_mark
.pool
= id_pool_create(0, MAX_FLOW_MARK
);
2098 if (id_pool_alloc_id(flow_mark
.pool
, &mark
)) {
2102 return INVALID_FLOW_MARK
;
2106 flow_mark_free(uint32_t mark
)
2108 id_pool_free_id(flow_mark
.pool
, mark
);
2111 /* associate megaflow with a mark, which is a 1:1 mapping */
2113 megaflow_to_mark_associate(const ovs_u128
*mega_ufid
, uint32_t mark
)
2115 size_t hash
= dp_netdev_flow_hash(mega_ufid
);
2116 struct megaflow_to_mark_data
*data
= xzalloc(sizeof(*data
));
2118 data
->mega_ufid
= *mega_ufid
;
2121 cmap_insert(&flow_mark
.megaflow_to_mark
,
2122 CONST_CAST(struct cmap_node
*, &data
->node
), hash
);
2125 /* disassociate meagaflow with a mark */
2127 megaflow_to_mark_disassociate(const ovs_u128
*mega_ufid
)
2129 size_t hash
= dp_netdev_flow_hash(mega_ufid
);
2130 struct megaflow_to_mark_data
*data
;
2132 CMAP_FOR_EACH_WITH_HASH (data
, node
, hash
, &flow_mark
.megaflow_to_mark
) {
2133 if (ovs_u128_equals(*mega_ufid
, data
->mega_ufid
)) {
2134 cmap_remove(&flow_mark
.megaflow_to_mark
,
2135 CONST_CAST(struct cmap_node
*, &data
->node
), hash
);
2141 VLOG_WARN("Masked ufid "UUID_FMT
" is not associated with a mark?\n",
2142 UUID_ARGS((struct uuid
*)mega_ufid
));
2145 static inline uint32_t
2146 megaflow_to_mark_find(const ovs_u128
*mega_ufid
)
2148 size_t hash
= dp_netdev_flow_hash(mega_ufid
);
2149 struct megaflow_to_mark_data
*data
;
2151 CMAP_FOR_EACH_WITH_HASH (data
, node
, hash
, &flow_mark
.megaflow_to_mark
) {
2152 if (ovs_u128_equals(*mega_ufid
, data
->mega_ufid
)) {
2157 VLOG_WARN("Mark id for ufid "UUID_FMT
" was not found\n",
2158 UUID_ARGS((struct uuid
*)mega_ufid
));
2159 return INVALID_FLOW_MARK
;
2162 /* associate mark with a flow, which is 1:N mapping */
2164 mark_to_flow_associate(const uint32_t mark
, struct dp_netdev_flow
*flow
)
2166 dp_netdev_flow_ref(flow
);
2168 cmap_insert(&flow_mark
.mark_to_flow
,
2169 CONST_CAST(struct cmap_node
*, &flow
->mark_node
),
2173 VLOG_DBG("Associated dp_netdev flow %p with mark %u\n", flow
, mark
);
2177 flow_mark_has_no_ref(uint32_t mark
)
2179 struct dp_netdev_flow
*flow
;
2181 CMAP_FOR_EACH_WITH_HASH (flow
, mark_node
, hash_int(mark
, 0),
2182 &flow_mark
.mark_to_flow
) {
2183 if (flow
->mark
== mark
) {
2192 mark_to_flow_disassociate(struct dp_netdev_pmd_thread
*pmd
,
2193 struct dp_netdev_flow
*flow
)
2196 uint32_t mark
= flow
->mark
;
2197 struct cmap_node
*mark_node
= CONST_CAST(struct cmap_node
*,
2200 cmap_remove(&flow_mark
.mark_to_flow
, mark_node
, hash_int(mark
, 0));
2201 flow
->mark
= INVALID_FLOW_MARK
;
2204 * no flow is referencing the mark any more? If so, let's
2205 * remove the flow from hardware and free the mark.
2207 if (flow_mark_has_no_ref(mark
)) {
2208 struct dp_netdev_port
*port
;
2209 odp_port_t in_port
= flow
->flow
.in_port
.odp_port
;
2211 ovs_mutex_lock(&pmd
->dp
->port_mutex
);
2212 port
= dp_netdev_lookup_port(pmd
->dp
, in_port
);
2214 ret
= netdev_flow_del(port
->netdev
, &flow
->mega_ufid
, NULL
);
2216 ovs_mutex_unlock(&pmd
->dp
->port_mutex
);
2218 flow_mark_free(mark
);
2219 VLOG_DBG("Freed flow mark %u\n", mark
);
2221 megaflow_to_mark_disassociate(&flow
->mega_ufid
);
2223 dp_netdev_flow_unref(flow
);
2229 flow_mark_flush(struct dp_netdev_pmd_thread
*pmd
)
2231 struct dp_netdev_flow
*flow
;
2233 CMAP_FOR_EACH (flow
, mark_node
, &flow_mark
.mark_to_flow
) {
2234 if (flow
->pmd_id
== pmd
->core_id
) {
2235 queue_netdev_flow_del(pmd
, flow
);
2240 static struct dp_netdev_flow
*
2241 mark_to_flow_find(const struct dp_netdev_pmd_thread
*pmd
,
2242 const uint32_t mark
)
2244 struct dp_netdev_flow
*flow
;
2246 CMAP_FOR_EACH_WITH_HASH (flow
, mark_node
, hash_int(mark
, 0),
2247 &flow_mark
.mark_to_flow
) {
2248 if (flow
->mark
== mark
&& flow
->pmd_id
== pmd
->core_id
&&
2249 flow
->dead
== false) {
2257 static struct dp_flow_offload_item
*
2258 dp_netdev_alloc_flow_offload(struct dp_netdev_pmd_thread
*pmd
,
2259 struct dp_netdev_flow
*flow
,
2262 struct dp_flow_offload_item
*offload
;
2264 offload
= xzalloc(sizeof(*offload
));
2266 offload
->flow
= flow
;
2269 dp_netdev_flow_ref(flow
);
2270 dp_netdev_pmd_try_ref(pmd
);
2276 dp_netdev_free_flow_offload(struct dp_flow_offload_item
*offload
)
2278 dp_netdev_pmd_unref(offload
->pmd
);
2279 dp_netdev_flow_unref(offload
->flow
);
2281 free(offload
->actions
);
2286 dp_netdev_append_flow_offload(struct dp_flow_offload_item
*offload
)
2288 ovs_mutex_lock(&dp_flow_offload
.mutex
);
2289 ovs_list_push_back(&dp_flow_offload
.list
, &offload
->node
);
2290 xpthread_cond_signal(&dp_flow_offload
.cond
);
2291 ovs_mutex_unlock(&dp_flow_offload
.mutex
);
2295 dp_netdev_flow_offload_del(struct dp_flow_offload_item
*offload
)
2297 return mark_to_flow_disassociate(offload
->pmd
, offload
->flow
);
2301 * There are two flow offload operations here: addition and modification.
2303 * For flow addition, this function does:
2304 * - allocate a new flow mark id
2305 * - perform hardware flow offload
2306 * - associate the flow mark with flow and mega flow
2308 * For flow modification, both flow mark and the associations are still
2309 * valid, thus only item 2 needed.
2312 dp_netdev_flow_offload_put(struct dp_flow_offload_item
*offload
)
2314 struct dp_netdev_port
*port
;
2315 struct dp_netdev_pmd_thread
*pmd
= offload
->pmd
;
2316 struct dp_netdev_flow
*flow
= offload
->flow
;
2317 odp_port_t in_port
= flow
->flow
.in_port
.odp_port
;
2318 bool modification
= offload
->op
== DP_NETDEV_FLOW_OFFLOAD_OP_MOD
;
2319 struct offload_info info
;
2329 ovs_assert(mark
!= INVALID_FLOW_MARK
);
2332 * If a mega flow has already been offloaded (from other PMD
2333 * instances), do not offload it again.
2335 mark
= megaflow_to_mark_find(&flow
->mega_ufid
);
2336 if (mark
!= INVALID_FLOW_MARK
) {
2337 VLOG_DBG("Flow has already been offloaded with mark %u\n", mark
);
2338 if (flow
->mark
!= INVALID_FLOW_MARK
) {
2339 ovs_assert(flow
->mark
== mark
);
2341 mark_to_flow_associate(mark
, flow
);
2346 mark
= flow_mark_alloc();
2347 if (mark
== INVALID_FLOW_MARK
) {
2348 VLOG_ERR("Failed to allocate flow mark!\n");
2351 info
.flow_mark
= mark
;
2353 ovs_mutex_lock(&pmd
->dp
->port_mutex
);
2354 port
= dp_netdev_lookup_port(pmd
->dp
, in_port
);
2356 ovs_mutex_unlock(&pmd
->dp
->port_mutex
);
2359 ret
= netdev_flow_put(port
->netdev
, &offload
->match
,
2360 CONST_CAST(struct nlattr
*, offload
->actions
),
2361 offload
->actions_len
, &flow
->mega_ufid
, &info
,
2363 ovs_mutex_unlock(&pmd
->dp
->port_mutex
);
2366 if (!modification
) {
2367 flow_mark_free(mark
);
2369 mark_to_flow_disassociate(pmd
, flow
);
2374 if (!modification
) {
2375 megaflow_to_mark_associate(&flow
->mega_ufid
, mark
);
2376 mark_to_flow_associate(mark
, flow
);
2383 dp_netdev_flow_offload_main(void *data OVS_UNUSED
)
2385 struct dp_flow_offload_item
*offload
;
2386 struct ovs_list
*list
;
2391 ovs_mutex_lock(&dp_flow_offload
.mutex
);
2392 if (ovs_list_is_empty(&dp_flow_offload
.list
)) {
2393 ovsrcu_quiesce_start();
2394 ovs_mutex_cond_wait(&dp_flow_offload
.cond
,
2395 &dp_flow_offload
.mutex
);
2397 list
= ovs_list_pop_front(&dp_flow_offload
.list
);
2398 offload
= CONTAINER_OF(list
, struct dp_flow_offload_item
, node
);
2399 ovs_mutex_unlock(&dp_flow_offload
.mutex
);
2401 switch (offload
->op
) {
2402 case DP_NETDEV_FLOW_OFFLOAD_OP_ADD
:
2404 ret
= dp_netdev_flow_offload_put(offload
);
2406 case DP_NETDEV_FLOW_OFFLOAD_OP_MOD
:
2408 ret
= dp_netdev_flow_offload_put(offload
);
2410 case DP_NETDEV_FLOW_OFFLOAD_OP_DEL
:
2412 ret
= dp_netdev_flow_offload_del(offload
);
2418 VLOG_DBG("%s to %s netdev flow\n",
2419 ret
== 0 ? "succeed" : "failed", op
);
2420 dp_netdev_free_flow_offload(offload
);
2427 queue_netdev_flow_del(struct dp_netdev_pmd_thread
*pmd
,
2428 struct dp_netdev_flow
*flow
)
2430 struct dp_flow_offload_item
*offload
;
2432 if (ovsthread_once_start(&offload_thread_once
)) {
2433 xpthread_cond_init(&dp_flow_offload
.cond
, NULL
);
2434 ovs_thread_create("dp_netdev_flow_offload",
2435 dp_netdev_flow_offload_main
, NULL
);
2436 ovsthread_once_done(&offload_thread_once
);
2439 offload
= dp_netdev_alloc_flow_offload(pmd
, flow
,
2440 DP_NETDEV_FLOW_OFFLOAD_OP_DEL
);
2441 dp_netdev_append_flow_offload(offload
);
2445 queue_netdev_flow_put(struct dp_netdev_pmd_thread
*pmd
,
2446 struct dp_netdev_flow
*flow
, struct match
*match
,
2447 const struct nlattr
*actions
, size_t actions_len
)
2449 struct dp_flow_offload_item
*offload
;
2452 if (!netdev_is_flow_api_enabled()) {
2456 if (ovsthread_once_start(&offload_thread_once
)) {
2457 xpthread_cond_init(&dp_flow_offload
.cond
, NULL
);
2458 ovs_thread_create("dp_netdev_flow_offload",
2459 dp_netdev_flow_offload_main
, NULL
);
2460 ovsthread_once_done(&offload_thread_once
);
2463 if (flow
->mark
!= INVALID_FLOW_MARK
) {
2464 op
= DP_NETDEV_FLOW_OFFLOAD_OP_MOD
;
2466 op
= DP_NETDEV_FLOW_OFFLOAD_OP_ADD
;
2468 offload
= dp_netdev_alloc_flow_offload(pmd
, flow
, op
);
2469 offload
->match
= *match
;
2470 offload
->actions
= xmalloc(actions_len
);
2471 memcpy(offload
->actions
, actions
, actions_len
);
2472 offload
->actions_len
= actions_len
;
2474 dp_netdev_append_flow_offload(offload
);
2478 dp_netdev_pmd_remove_flow(struct dp_netdev_pmd_thread
*pmd
,
2479 struct dp_netdev_flow
*flow
)
2480 OVS_REQUIRES(pmd
->flow_mutex
)
2482 struct cmap_node
*node
= CONST_CAST(struct cmap_node
*, &flow
->node
);
2484 odp_port_t in_port
= flow
->flow
.in_port
.odp_port
;
2486 cls
= dp_netdev_pmd_lookup_dpcls(pmd
, in_port
);
2487 ovs_assert(cls
!= NULL
);
2488 dpcls_remove(cls
, &flow
->cr
);
2489 cmap_remove(&pmd
->flow_table
, node
, dp_netdev_flow_hash(&flow
->ufid
));
2490 if (flow
->mark
!= INVALID_FLOW_MARK
) {
2491 queue_netdev_flow_del(pmd
, flow
);
2495 dp_netdev_flow_unref(flow
);
2499 dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread
*pmd
)
2501 struct dp_netdev_flow
*netdev_flow
;
2503 ovs_mutex_lock(&pmd
->flow_mutex
);
2504 CMAP_FOR_EACH (netdev_flow
, node
, &pmd
->flow_table
) {
2505 dp_netdev_pmd_remove_flow(pmd
, netdev_flow
);
2507 ovs_mutex_unlock(&pmd
->flow_mutex
);
2511 dpif_netdev_flow_flush(struct dpif
*dpif
)
2513 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
2514 struct dp_netdev_pmd_thread
*pmd
;
2516 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
2517 dp_netdev_pmd_flow_flush(pmd
);
2523 struct dp_netdev_port_state
{
2524 struct hmap_position position
;
2529 dpif_netdev_port_dump_start(const struct dpif
*dpif OVS_UNUSED
, void **statep
)
2531 *statep
= xzalloc(sizeof(struct dp_netdev_port_state
));
2536 dpif_netdev_port_dump_next(const struct dpif
*dpif
, void *state_
,
2537 struct dpif_port
*dpif_port
)
2539 struct dp_netdev_port_state
*state
= state_
;
2540 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
2541 struct hmap_node
*node
;
2544 ovs_mutex_lock(&dp
->port_mutex
);
2545 node
= hmap_at_position(&dp
->ports
, &state
->position
);
2547 struct dp_netdev_port
*port
;
2549 port
= CONTAINER_OF(node
, struct dp_netdev_port
, node
);
2552 state
->name
= xstrdup(netdev_get_name(port
->netdev
));
2553 dpif_port
->name
= state
->name
;
2554 dpif_port
->type
= port
->type
;
2555 dpif_port
->port_no
= port
->port_no
;
2561 ovs_mutex_unlock(&dp
->port_mutex
);
2567 dpif_netdev_port_dump_done(const struct dpif
*dpif OVS_UNUSED
, void *state_
)
2569 struct dp_netdev_port_state
*state
= state_
;
2576 dpif_netdev_port_poll(const struct dpif
*dpif_
, char **devnamep OVS_UNUSED
)
2578 struct dpif_netdev
*dpif
= dpif_netdev_cast(dpif_
);
2579 uint64_t new_port_seq
;
2582 new_port_seq
= seq_read(dpif
->dp
->port_seq
);
2583 if (dpif
->last_port_seq
!= new_port_seq
) {
2584 dpif
->last_port_seq
= new_port_seq
;
2594 dpif_netdev_port_poll_wait(const struct dpif
*dpif_
)
2596 struct dpif_netdev
*dpif
= dpif_netdev_cast(dpif_
);
2598 seq_wait(dpif
->dp
->port_seq
, dpif
->last_port_seq
);
2601 static struct dp_netdev_flow
*
2602 dp_netdev_flow_cast(const struct dpcls_rule
*cr
)
2604 return cr
? CONTAINER_OF(cr
, struct dp_netdev_flow
, cr
) : NULL
;
2607 static bool dp_netdev_flow_ref(struct dp_netdev_flow
*flow
)
2609 return ovs_refcount_try_ref_rcu(&flow
->ref_cnt
);
2612 /* netdev_flow_key utilities.
2614 * netdev_flow_key is basically a miniflow. We use these functions
2615 * (netdev_flow_key_clone, netdev_flow_key_equal, ...) instead of the miniflow
2616 * functions (miniflow_clone_inline, miniflow_equal, ...), because:
2618 * - Since we are dealing exclusively with miniflows created by
2619 * miniflow_extract(), if the map is different the miniflow is different.
2620 * Therefore we can be faster by comparing the map and the miniflow in a
2622 * - These functions can be inlined by the compiler. */
2624 /* Given the number of bits set in miniflow's maps, returns the size of the
2625 * 'netdev_flow_key.mf' */
2626 static inline size_t
2627 netdev_flow_key_size(size_t flow_u64s
)
2629 return sizeof(struct miniflow
) + MINIFLOW_VALUES_SIZE(flow_u64s
);
2633 netdev_flow_key_equal(const struct netdev_flow_key
*a
,
2634 const struct netdev_flow_key
*b
)
2636 /* 'b->len' may be not set yet. */
2637 return a
->hash
== b
->hash
&& !memcmp(&a
->mf
, &b
->mf
, a
->len
);
2640 /* Used to compare 'netdev_flow_key' in the exact match cache to a miniflow.
2641 * The maps are compared bitwise, so both 'key->mf' and 'mf' must have been
2642 * generated by miniflow_extract. */
2644 netdev_flow_key_equal_mf(const struct netdev_flow_key
*key
,
2645 const struct miniflow
*mf
)
2647 return !memcmp(&key
->mf
, mf
, key
->len
);
2651 netdev_flow_key_clone(struct netdev_flow_key
*dst
,
2652 const struct netdev_flow_key
*src
)
2655 offsetof(struct netdev_flow_key
, mf
) + src
->len
);
2658 /* Initialize a netdev_flow_key 'mask' from 'match'. */
2660 netdev_flow_mask_init(struct netdev_flow_key
*mask
,
2661 const struct match
*match
)
2663 uint64_t *dst
= miniflow_values(&mask
->mf
);
2664 struct flowmap fmap
;
2668 /* Only check masks that make sense for the flow. */
2669 flow_wc_map(&match
->flow
, &fmap
);
2670 flowmap_init(&mask
->mf
.map
);
2672 FLOWMAP_FOR_EACH_INDEX(idx
, fmap
) {
2673 uint64_t mask_u64
= flow_u64_value(&match
->wc
.masks
, idx
);
2676 flowmap_set(&mask
->mf
.map
, idx
, 1);
2678 hash
= hash_add64(hash
, mask_u64
);
2684 FLOWMAP_FOR_EACH_MAP (map
, mask
->mf
.map
) {
2685 hash
= hash_add64(hash
, map
);
2688 size_t n
= dst
- miniflow_get_values(&mask
->mf
);
2690 mask
->hash
= hash_finish(hash
, n
* 8);
2691 mask
->len
= netdev_flow_key_size(n
);
2694 /* Initializes 'dst' as a copy of 'flow' masked with 'mask'. */
2696 netdev_flow_key_init_masked(struct netdev_flow_key
*dst
,
2697 const struct flow
*flow
,
2698 const struct netdev_flow_key
*mask
)
2700 uint64_t *dst_u64
= miniflow_values(&dst
->mf
);
2701 const uint64_t *mask_u64
= miniflow_get_values(&mask
->mf
);
2705 dst
->len
= mask
->len
;
2706 dst
->mf
= mask
->mf
; /* Copy maps. */
2708 FLOW_FOR_EACH_IN_MAPS(value
, flow
, mask
->mf
.map
) {
2709 *dst_u64
= value
& *mask_u64
++;
2710 hash
= hash_add64(hash
, *dst_u64
++);
2712 dst
->hash
= hash_finish(hash
,
2713 (dst_u64
- miniflow_get_values(&dst
->mf
)) * 8);
2716 /* Iterate through netdev_flow_key TNL u64 values specified by 'FLOWMAP'. */
2717 #define NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(VALUE, KEY, FLOWMAP) \
2718 MINIFLOW_FOR_EACH_IN_FLOWMAP(VALUE, &(KEY)->mf, FLOWMAP)
2720 /* Returns a hash value for the bits of 'key' where there are 1-bits in
2722 static inline uint32_t
2723 netdev_flow_key_hash_in_mask(const struct netdev_flow_key
*key
,
2724 const struct netdev_flow_key
*mask
)
2726 const uint64_t *p
= miniflow_get_values(&mask
->mf
);
2730 NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value
, key
, mask
->mf
.map
) {
2731 hash
= hash_add64(hash
, value
& *p
++);
2734 return hash_finish(hash
, (p
- miniflow_get_values(&mask
->mf
)) * 8);
2738 emc_entry_alive(struct emc_entry
*ce
)
2740 return ce
->flow
&& !ce
->flow
->dead
;
2744 emc_clear_entry(struct emc_entry
*ce
)
2747 dp_netdev_flow_unref(ce
->flow
);
2753 emc_change_entry(struct emc_entry
*ce
, struct dp_netdev_flow
*flow
,
2754 const struct netdev_flow_key
*key
)
2756 if (ce
->flow
!= flow
) {
2758 dp_netdev_flow_unref(ce
->flow
);
2761 if (dp_netdev_flow_ref(flow
)) {
2768 netdev_flow_key_clone(&ce
->key
, key
);
2773 emc_insert(struct emc_cache
*cache
, const struct netdev_flow_key
*key
,
2774 struct dp_netdev_flow
*flow
)
2776 struct emc_entry
*to_be_replaced
= NULL
;
2777 struct emc_entry
*current_entry
;
2779 EMC_FOR_EACH_POS_WITH_HASH(cache
, current_entry
, key
->hash
) {
2780 if (netdev_flow_key_equal(¤t_entry
->key
, key
)) {
2781 /* We found the entry with the 'mf' miniflow */
2782 emc_change_entry(current_entry
, flow
, NULL
);
2786 /* Replacement policy: put the flow in an empty (not alive) entry, or
2787 * in the first entry where it can be */
2789 || (emc_entry_alive(to_be_replaced
)
2790 && !emc_entry_alive(current_entry
))
2791 || current_entry
->key
.hash
< to_be_replaced
->key
.hash
) {
2792 to_be_replaced
= current_entry
;
2795 /* We didn't find the miniflow in the cache.
2796 * The 'to_be_replaced' entry is where the new flow will be stored */
2798 emc_change_entry(to_be_replaced
, flow
, key
);
2802 emc_probabilistic_insert(struct dp_netdev_pmd_thread
*pmd
,
2803 const struct netdev_flow_key
*key
,
2804 struct dp_netdev_flow
*flow
)
2806 /* Insert an entry into the EMC based on probability value 'min'. By
2807 * default the value is UINT32_MAX / 100 which yields an insertion
2808 * probability of 1/100 ie. 1% */
2812 atomic_read_relaxed(&pmd
->dp
->emc_insert_min
, &min
);
2814 if (min
&& random_uint32() <= min
) {
2815 emc_insert(&(pmd
->flow_cache
).emc_cache
, key
, flow
);
2819 static inline struct dp_netdev_flow
*
2820 emc_lookup(struct emc_cache
*cache
, const struct netdev_flow_key
*key
)
2822 struct emc_entry
*current_entry
;
2824 EMC_FOR_EACH_POS_WITH_HASH(cache
, current_entry
, key
->hash
) {
2825 if (current_entry
->key
.hash
== key
->hash
2826 && emc_entry_alive(current_entry
)
2827 && netdev_flow_key_equal_mf(¤t_entry
->key
, &key
->mf
)) {
2829 /* We found the entry with the 'key->mf' miniflow */
2830 return current_entry
->flow
;
2837 static inline const struct cmap_node
*
2838 smc_entry_get(struct dp_netdev_pmd_thread
*pmd
, const uint32_t hash
)
2840 struct smc_cache
*cache
= &(pmd
->flow_cache
).smc_cache
;
2841 struct smc_bucket
*bucket
= &cache
->buckets
[hash
& SMC_MASK
];
2842 uint16_t sig
= hash
>> 16;
2843 uint16_t index
= UINT16_MAX
;
2845 for (int i
= 0; i
< SMC_ENTRY_PER_BUCKET
; i
++) {
2846 if (bucket
->sig
[i
] == sig
) {
2847 index
= bucket
->flow_idx
[i
];
2851 if (index
!= UINT16_MAX
) {
2852 return cmap_find_by_index(&pmd
->flow_table
, index
);
2858 smc_clear_entry(struct smc_bucket
*b
, int idx
)
2860 b
->flow_idx
[idx
] = UINT16_MAX
;
2863 /* Insert the flow_table index into SMC. Insertion may fail when 1) SMC is
2864 * turned off, 2) the flow_table index is larger than uint16_t can handle.
2865 * If there is already an SMC entry having same signature, the index will be
2866 * updated. If there is no existing entry, but an empty entry is available,
2867 * the empty entry will be taken. If no empty entry or existing same signature,
2868 * a random entry from the hashed bucket will be picked. */
2870 smc_insert(struct dp_netdev_pmd_thread
*pmd
,
2871 const struct netdev_flow_key
*key
,
2874 struct smc_cache
*smc_cache
= &(pmd
->flow_cache
).smc_cache
;
2875 struct smc_bucket
*bucket
= &smc_cache
->buckets
[key
->hash
& SMC_MASK
];
2877 uint32_t cmap_index
;
2881 atomic_read_relaxed(&pmd
->dp
->smc_enable_db
, &smc_enable_db
);
2882 if (!smc_enable_db
) {
2886 cmap_index
= cmap_find_index(&pmd
->flow_table
, hash
);
2887 index
= (cmap_index
>= UINT16_MAX
) ? UINT16_MAX
: (uint16_t)cmap_index
;
2889 /* If the index is larger than SMC can handle (uint16_t), we don't
2891 if (index
== UINT16_MAX
) {
2895 /* If an entry with same signature already exists, update the index */
2896 uint16_t sig
= key
->hash
>> 16;
2897 for (i
= 0; i
< SMC_ENTRY_PER_BUCKET
; i
++) {
2898 if (bucket
->sig
[i
] == sig
) {
2899 bucket
->flow_idx
[i
] = index
;
2903 /* If there is an empty entry, occupy it. */
2904 for (i
= 0; i
< SMC_ENTRY_PER_BUCKET
; i
++) {
2905 if (bucket
->flow_idx
[i
] == UINT16_MAX
) {
2906 bucket
->sig
[i
] = sig
;
2907 bucket
->flow_idx
[i
] = index
;
2911 /* Otherwise, pick a random entry. */
2912 i
= random_uint32() % SMC_ENTRY_PER_BUCKET
;
2913 bucket
->sig
[i
] = sig
;
2914 bucket
->flow_idx
[i
] = index
;
2917 static struct dp_netdev_flow
*
2918 dp_netdev_pmd_lookup_flow(struct dp_netdev_pmd_thread
*pmd
,
2919 const struct netdev_flow_key
*key
,
2923 struct dpcls_rule
*rule
;
2924 odp_port_t in_port
= u32_to_odp(MINIFLOW_GET_U32(&key
->mf
,
2926 struct dp_netdev_flow
*netdev_flow
= NULL
;
2928 cls
= dp_netdev_pmd_lookup_dpcls(pmd
, in_port
);
2929 if (OVS_LIKELY(cls
)) {
2930 dpcls_lookup(cls
, &key
, &rule
, 1, lookup_num_p
);
2931 netdev_flow
= dp_netdev_flow_cast(rule
);
2936 static struct dp_netdev_flow
*
2937 dp_netdev_pmd_find_flow(const struct dp_netdev_pmd_thread
*pmd
,
2938 const ovs_u128
*ufidp
, const struct nlattr
*key
,
2941 struct dp_netdev_flow
*netdev_flow
;
2945 /* If a UFID is not provided, determine one based on the key. */
2946 if (!ufidp
&& key
&& key_len
2947 && !dpif_netdev_flow_from_nlattrs(key
, key_len
, &flow
, false)) {
2948 dpif_flow_hash(pmd
->dp
->dpif
, &flow
, sizeof flow
, &ufid
);
2953 CMAP_FOR_EACH_WITH_HASH (netdev_flow
, node
, dp_netdev_flow_hash(ufidp
),
2955 if (ovs_u128_equals(netdev_flow
->ufid
, *ufidp
)) {
2965 get_dpif_flow_stats(const struct dp_netdev_flow
*netdev_flow_
,
2966 struct dpif_flow_stats
*stats
)
2968 struct dp_netdev_flow
*netdev_flow
;
2969 unsigned long long n
;
2973 netdev_flow
= CONST_CAST(struct dp_netdev_flow
*, netdev_flow_
);
2975 atomic_read_relaxed(&netdev_flow
->stats
.packet_count
, &n
);
2976 stats
->n_packets
= n
;
2977 atomic_read_relaxed(&netdev_flow
->stats
.byte_count
, &n
);
2979 atomic_read_relaxed(&netdev_flow
->stats
.used
, &used
);
2981 atomic_read_relaxed(&netdev_flow
->stats
.tcp_flags
, &flags
);
2982 stats
->tcp_flags
= flags
;
2985 /* Converts to the dpif_flow format, using 'key_buf' and 'mask_buf' for
2986 * storing the netlink-formatted key/mask. 'key_buf' may be the same as
2987 * 'mask_buf'. Actions will be returned without copying, by relying on RCU to
2990 dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow
*netdev_flow
,
2991 struct ofpbuf
*key_buf
, struct ofpbuf
*mask_buf
,
2992 struct dpif_flow
*flow
, bool terse
)
2995 memset(flow
, 0, sizeof *flow
);
2997 struct flow_wildcards wc
;
2998 struct dp_netdev_actions
*actions
;
3000 struct odp_flow_key_parms odp_parms
= {
3001 .flow
= &netdev_flow
->flow
,
3003 .support
= dp_netdev_support
,
3006 miniflow_expand(&netdev_flow
->cr
.mask
->mf
, &wc
.masks
);
3007 /* in_port is exact matched, but we have left it out from the mask for
3008 * optimnization reasons. Add in_port back to the mask. */
3009 wc
.masks
.in_port
.odp_port
= ODPP_NONE
;
3012 offset
= key_buf
->size
;
3013 flow
->key
= ofpbuf_tail(key_buf
);
3014 odp_flow_key_from_flow(&odp_parms
, key_buf
);
3015 flow
->key_len
= key_buf
->size
- offset
;
3018 offset
= mask_buf
->size
;
3019 flow
->mask
= ofpbuf_tail(mask_buf
);
3020 odp_parms
.key_buf
= key_buf
;
3021 odp_flow_key_from_mask(&odp_parms
, mask_buf
);
3022 flow
->mask_len
= mask_buf
->size
- offset
;
3025 actions
= dp_netdev_flow_get_actions(netdev_flow
);
3026 flow
->actions
= actions
->actions
;
3027 flow
->actions_len
= actions
->size
;
3030 flow
->ufid
= netdev_flow
->ufid
;
3031 flow
->ufid_present
= true;
3032 flow
->pmd_id
= netdev_flow
->pmd_id
;
3033 get_dpif_flow_stats(netdev_flow
, &flow
->stats
);
3037 dpif_netdev_mask_from_nlattrs(const struct nlattr
*key
, uint32_t key_len
,
3038 const struct nlattr
*mask_key
,
3039 uint32_t mask_key_len
, const struct flow
*flow
,
3040 struct flow_wildcards
*wc
, bool probe
)
3042 enum odp_key_fitness fitness
;
3044 fitness
= odp_flow_key_to_mask(mask_key
, mask_key_len
, wc
, flow
);
3047 /* This should not happen: it indicates that
3048 * odp_flow_key_from_mask() and odp_flow_key_to_mask()
3049 * disagree on the acceptable form of a mask. Log the problem
3050 * as an error, with enough details to enable debugging. */
3051 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
3053 if (!VLOG_DROP_ERR(&rl
)) {
3057 odp_flow_format(key
, key_len
, mask_key
, mask_key_len
, NULL
, &s
,
3059 VLOG_ERR("internal error parsing flow mask %s (%s)",
3060 ds_cstr(&s
), odp_key_fitness_to_string(fitness
));
3072 dpif_netdev_flow_from_nlattrs(const struct nlattr
*key
, uint32_t key_len
,
3073 struct flow
*flow
, bool probe
)
3075 if (odp_flow_key_to_flow(key
, key_len
, flow
)) {
3077 /* This should not happen: it indicates that
3078 * odp_flow_key_from_flow() and odp_flow_key_to_flow() disagree on
3079 * the acceptable form of a flow. Log the problem as an error,
3080 * with enough details to enable debugging. */
3081 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
3083 if (!VLOG_DROP_ERR(&rl
)) {
3087 odp_flow_format(key
, key_len
, NULL
, 0, NULL
, &s
, true);
3088 VLOG_ERR("internal error parsing flow key %s", ds_cstr(&s
));
3096 if (flow
->ct_state
& DP_NETDEV_CS_UNSUPPORTED_MASK
) {
3104 dpif_netdev_flow_get(const struct dpif
*dpif
, const struct dpif_flow_get
*get
)
3106 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
3107 struct dp_netdev_flow
*netdev_flow
;
3108 struct dp_netdev_pmd_thread
*pmd
;
3109 struct hmapx to_find
= HMAPX_INITIALIZER(&to_find
);
3110 struct hmapx_node
*node
;
3113 if (get
->pmd_id
== PMD_ID_NULL
) {
3114 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
3115 if (dp_netdev_pmd_try_ref(pmd
) && !hmapx_add(&to_find
, pmd
)) {
3116 dp_netdev_pmd_unref(pmd
);
3120 pmd
= dp_netdev_get_pmd(dp
, get
->pmd_id
);
3124 hmapx_add(&to_find
, pmd
);
3127 if (!hmapx_count(&to_find
)) {
3131 HMAPX_FOR_EACH (node
, &to_find
) {
3132 pmd
= (struct dp_netdev_pmd_thread
*) node
->data
;
3133 netdev_flow
= dp_netdev_pmd_find_flow(pmd
, get
->ufid
, get
->key
,
3136 dp_netdev_flow_to_dpif_flow(netdev_flow
, get
->buffer
, get
->buffer
,
3145 HMAPX_FOR_EACH (node
, &to_find
) {
3146 pmd
= (struct dp_netdev_pmd_thread
*) node
->data
;
3147 dp_netdev_pmd_unref(pmd
);
3150 hmapx_destroy(&to_find
);
3155 dp_netdev_get_mega_ufid(const struct match
*match
, ovs_u128
*mega_ufid
)
3157 struct flow masked_flow
;
3160 for (i
= 0; i
< sizeof(struct flow
); i
++) {
3161 ((uint8_t *)&masked_flow
)[i
] = ((uint8_t *)&match
->flow
)[i
] &
3162 ((uint8_t *)&match
->wc
)[i
];
3164 dpif_flow_hash(NULL
, &masked_flow
, sizeof(struct flow
), mega_ufid
);
3167 static struct dp_netdev_flow
*
3168 dp_netdev_flow_add(struct dp_netdev_pmd_thread
*pmd
,
3169 struct match
*match
, const ovs_u128
*ufid
,
3170 const struct nlattr
*actions
, size_t actions_len
)
3171 OVS_REQUIRES(pmd
->flow_mutex
)
3173 struct dp_netdev_flow
*flow
;
3174 struct netdev_flow_key mask
;
3177 /* Make sure in_port is exact matched before we read it. */
3178 ovs_assert(match
->wc
.masks
.in_port
.odp_port
== ODPP_NONE
);
3179 odp_port_t in_port
= match
->flow
.in_port
.odp_port
;
3181 /* As we select the dpcls based on the port number, each netdev flow
3182 * belonging to the same dpcls will have the same odp_port value.
3183 * For performance reasons we wildcard odp_port here in the mask. In the
3184 * typical case dp_hash is also wildcarded, and the resulting 8-byte
3185 * chunk {dp_hash, in_port} will be ignored by netdev_flow_mask_init() and
3186 * will not be part of the subtable mask.
3187 * This will speed up the hash computation during dpcls_lookup() because
3188 * there is one less call to hash_add64() in this case. */
3189 match
->wc
.masks
.in_port
.odp_port
= 0;
3190 netdev_flow_mask_init(&mask
, match
);
3191 match
->wc
.masks
.in_port
.odp_port
= ODPP_NONE
;
3193 /* Make sure wc does not have metadata. */
3194 ovs_assert(!FLOWMAP_HAS_FIELD(&mask
.mf
.map
, metadata
)
3195 && !FLOWMAP_HAS_FIELD(&mask
.mf
.map
, regs
));
3197 /* Do not allocate extra space. */
3198 flow
= xmalloc(sizeof *flow
- sizeof flow
->cr
.flow
.mf
+ mask
.len
);
3199 memset(&flow
->stats
, 0, sizeof flow
->stats
);
3202 flow
->mark
= INVALID_FLOW_MARK
;
3203 *CONST_CAST(unsigned *, &flow
->pmd_id
) = pmd
->core_id
;
3204 *CONST_CAST(struct flow
*, &flow
->flow
) = match
->flow
;
3205 *CONST_CAST(ovs_u128
*, &flow
->ufid
) = *ufid
;
3206 ovs_refcount_init(&flow
->ref_cnt
);
3207 ovsrcu_set(&flow
->actions
, dp_netdev_actions_create(actions
, actions_len
));
3209 dp_netdev_get_mega_ufid(match
, CONST_CAST(ovs_u128
*, &flow
->mega_ufid
));
3210 netdev_flow_key_init_masked(&flow
->cr
.flow
, &match
->flow
, &mask
);
3212 /* Select dpcls for in_port. Relies on in_port to be exact match. */
3213 cls
= dp_netdev_pmd_find_dpcls(pmd
, in_port
);
3214 dpcls_insert(cls
, &flow
->cr
, &mask
);
3216 cmap_insert(&pmd
->flow_table
, CONST_CAST(struct cmap_node
*, &flow
->node
),
3217 dp_netdev_flow_hash(&flow
->ufid
));
3219 queue_netdev_flow_put(pmd
, flow
, match
, actions
, actions_len
);
3221 if (OVS_UNLIKELY(!VLOG_DROP_DBG((&upcall_rl
)))) {
3222 struct ds ds
= DS_EMPTY_INITIALIZER
;
3223 struct ofpbuf key_buf
, mask_buf
;
3224 struct odp_flow_key_parms odp_parms
= {
3225 .flow
= &match
->flow
,
3226 .mask
= &match
->wc
.masks
,
3227 .support
= dp_netdev_support
,
3230 ofpbuf_init(&key_buf
, 0);
3231 ofpbuf_init(&mask_buf
, 0);
3233 odp_flow_key_from_flow(&odp_parms
, &key_buf
);
3234 odp_parms
.key_buf
= &key_buf
;
3235 odp_flow_key_from_mask(&odp_parms
, &mask_buf
);
3237 ds_put_cstr(&ds
, "flow_add: ");
3238 odp_format_ufid(ufid
, &ds
);
3239 ds_put_cstr(&ds
, " ");
3240 odp_flow_format(key_buf
.data
, key_buf
.size
,
3241 mask_buf
.data
, mask_buf
.size
,
3243 ds_put_cstr(&ds
, ", actions:");
3244 format_odp_actions(&ds
, actions
, actions_len
, NULL
);
3246 VLOG_DBG("%s", ds_cstr(&ds
));
3248 ofpbuf_uninit(&key_buf
);
3249 ofpbuf_uninit(&mask_buf
);
3251 /* Add a printout of the actual match installed. */
3254 ds_put_cstr(&ds
, "flow match: ");
3255 miniflow_expand(&flow
->cr
.flow
.mf
, &m
.flow
);
3256 miniflow_expand(&flow
->cr
.mask
->mf
, &m
.wc
.masks
);
3257 memset(&m
.tun_md
, 0, sizeof m
.tun_md
);
3258 match_format(&m
, NULL
, &ds
, OFP_DEFAULT_PRIORITY
);
3260 VLOG_DBG("%s", ds_cstr(&ds
));
3269 flow_put_on_pmd(struct dp_netdev_pmd_thread
*pmd
,
3270 struct netdev_flow_key
*key
,
3271 struct match
*match
,
3273 const struct dpif_flow_put
*put
,
3274 struct dpif_flow_stats
*stats
)
3276 struct dp_netdev_flow
*netdev_flow
;
3280 memset(stats
, 0, sizeof *stats
);
3283 ovs_mutex_lock(&pmd
->flow_mutex
);
3284 netdev_flow
= dp_netdev_pmd_lookup_flow(pmd
, key
, NULL
);
3286 if (put
->flags
& DPIF_FP_CREATE
) {
3287 if (cmap_count(&pmd
->flow_table
) < MAX_FLOWS
) {
3288 dp_netdev_flow_add(pmd
, match
, ufid
, put
->actions
,
3298 if (put
->flags
& DPIF_FP_MODIFY
) {
3299 struct dp_netdev_actions
*new_actions
;
3300 struct dp_netdev_actions
*old_actions
;
3302 new_actions
= dp_netdev_actions_create(put
->actions
,
3305 old_actions
= dp_netdev_flow_get_actions(netdev_flow
);
3306 ovsrcu_set(&netdev_flow
->actions
, new_actions
);
3308 queue_netdev_flow_put(pmd
, netdev_flow
, match
,
3309 put
->actions
, put
->actions_len
);
3312 get_dpif_flow_stats(netdev_flow
, stats
);
3314 if (put
->flags
& DPIF_FP_ZERO_STATS
) {
3315 /* XXX: The userspace datapath uses thread local statistics
3316 * (for flows), which should be updated only by the owning
3317 * thread. Since we cannot write on stats memory here,
3318 * we choose not to support this flag. Please note:
3319 * - This feature is currently used only by dpctl commands with
3321 * - Should the need arise, this operation can be implemented
3322 * by keeping a base value (to be update here) for each
3323 * counter, and subtracting it before outputting the stats */
3327 ovsrcu_postpone(dp_netdev_actions_free
, old_actions
);
3328 } else if (put
->flags
& DPIF_FP_CREATE
) {
3331 /* Overlapping flow. */
3335 ovs_mutex_unlock(&pmd
->flow_mutex
);
3340 dpif_netdev_flow_put(struct dpif
*dpif
, const struct dpif_flow_put
*put
)
3342 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
3343 struct netdev_flow_key key
, mask
;
3344 struct dp_netdev_pmd_thread
*pmd
;
3348 bool probe
= put
->flags
& DPIF_FP_PROBE
;
3351 memset(put
->stats
, 0, sizeof *put
->stats
);
3353 error
= dpif_netdev_flow_from_nlattrs(put
->key
, put
->key_len
, &match
.flow
,
3358 error
= dpif_netdev_mask_from_nlattrs(put
->key
, put
->key_len
,
3359 put
->mask
, put
->mask_len
,
3360 &match
.flow
, &match
.wc
, probe
);
3368 dpif_flow_hash(dpif
, &match
.flow
, sizeof match
.flow
, &ufid
);
3371 /* Must produce a netdev_flow_key for lookup.
3372 * Use the same method as employed to create the key when adding
3373 * the flow to the dplcs to make sure they match. */
3374 netdev_flow_mask_init(&mask
, &match
);
3375 netdev_flow_key_init_masked(&key
, &match
.flow
, &mask
);
3377 if (put
->pmd_id
== PMD_ID_NULL
) {
3378 if (cmap_count(&dp
->poll_threads
) == 0) {
3381 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
3382 struct dpif_flow_stats pmd_stats
;
3385 pmd_error
= flow_put_on_pmd(pmd
, &key
, &match
, &ufid
, put
,
3389 } else if (put
->stats
) {
3390 put
->stats
->n_packets
+= pmd_stats
.n_packets
;
3391 put
->stats
->n_bytes
+= pmd_stats
.n_bytes
;
3392 put
->stats
->used
= MAX(put
->stats
->used
, pmd_stats
.used
);
3393 put
->stats
->tcp_flags
|= pmd_stats
.tcp_flags
;
3397 pmd
= dp_netdev_get_pmd(dp
, put
->pmd_id
);
3401 error
= flow_put_on_pmd(pmd
, &key
, &match
, &ufid
, put
, put
->stats
);
3402 dp_netdev_pmd_unref(pmd
);
3409 flow_del_on_pmd(struct dp_netdev_pmd_thread
*pmd
,
3410 struct dpif_flow_stats
*stats
,
3411 const struct dpif_flow_del
*del
)
3413 struct dp_netdev_flow
*netdev_flow
;
3416 ovs_mutex_lock(&pmd
->flow_mutex
);
3417 netdev_flow
= dp_netdev_pmd_find_flow(pmd
, del
->ufid
, del
->key
,
3421 get_dpif_flow_stats(netdev_flow
, stats
);
3423 dp_netdev_pmd_remove_flow(pmd
, netdev_flow
);
3427 ovs_mutex_unlock(&pmd
->flow_mutex
);
3433 dpif_netdev_flow_del(struct dpif
*dpif
, const struct dpif_flow_del
*del
)
3435 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
3436 struct dp_netdev_pmd_thread
*pmd
;
3440 memset(del
->stats
, 0, sizeof *del
->stats
);
3443 if (del
->pmd_id
== PMD_ID_NULL
) {
3444 if (cmap_count(&dp
->poll_threads
) == 0) {
3447 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
3448 struct dpif_flow_stats pmd_stats
;
3451 pmd_error
= flow_del_on_pmd(pmd
, &pmd_stats
, del
);
3454 } else if (del
->stats
) {
3455 del
->stats
->n_packets
+= pmd_stats
.n_packets
;
3456 del
->stats
->n_bytes
+= pmd_stats
.n_bytes
;
3457 del
->stats
->used
= MAX(del
->stats
->used
, pmd_stats
.used
);
3458 del
->stats
->tcp_flags
|= pmd_stats
.tcp_flags
;
3462 pmd
= dp_netdev_get_pmd(dp
, del
->pmd_id
);
3466 error
= flow_del_on_pmd(pmd
, del
->stats
, del
);
3467 dp_netdev_pmd_unref(pmd
);
3474 struct dpif_netdev_flow_dump
{
3475 struct dpif_flow_dump up
;
3476 struct cmap_position poll_thread_pos
;
3477 struct cmap_position flow_pos
;
3478 struct dp_netdev_pmd_thread
*cur_pmd
;
3480 struct ovs_mutex mutex
;
3483 static struct dpif_netdev_flow_dump
*
3484 dpif_netdev_flow_dump_cast(struct dpif_flow_dump
*dump
)
3486 return CONTAINER_OF(dump
, struct dpif_netdev_flow_dump
, up
);
3489 static struct dpif_flow_dump
*
3490 dpif_netdev_flow_dump_create(const struct dpif
*dpif_
, bool terse
,
3491 char *type OVS_UNUSED
)
3493 struct dpif_netdev_flow_dump
*dump
;
3495 dump
= xzalloc(sizeof *dump
);
3496 dpif_flow_dump_init(&dump
->up
, dpif_
);
3497 dump
->up
.terse
= terse
;
3498 ovs_mutex_init(&dump
->mutex
);
3504 dpif_netdev_flow_dump_destroy(struct dpif_flow_dump
*dump_
)
3506 struct dpif_netdev_flow_dump
*dump
= dpif_netdev_flow_dump_cast(dump_
);
3508 ovs_mutex_destroy(&dump
->mutex
);
3513 struct dpif_netdev_flow_dump_thread
{
3514 struct dpif_flow_dump_thread up
;
3515 struct dpif_netdev_flow_dump
*dump
;
3516 struct odputil_keybuf keybuf
[FLOW_DUMP_MAX_BATCH
];
3517 struct odputil_keybuf maskbuf
[FLOW_DUMP_MAX_BATCH
];
3520 static struct dpif_netdev_flow_dump_thread
*
3521 dpif_netdev_flow_dump_thread_cast(struct dpif_flow_dump_thread
*thread
)
3523 return CONTAINER_OF(thread
, struct dpif_netdev_flow_dump_thread
, up
);
3526 static struct dpif_flow_dump_thread
*
3527 dpif_netdev_flow_dump_thread_create(struct dpif_flow_dump
*dump_
)
3529 struct dpif_netdev_flow_dump
*dump
= dpif_netdev_flow_dump_cast(dump_
);
3530 struct dpif_netdev_flow_dump_thread
*thread
;
3532 thread
= xmalloc(sizeof *thread
);
3533 dpif_flow_dump_thread_init(&thread
->up
, &dump
->up
);
3534 thread
->dump
= dump
;
3539 dpif_netdev_flow_dump_thread_destroy(struct dpif_flow_dump_thread
*thread_
)
3541 struct dpif_netdev_flow_dump_thread
*thread
3542 = dpif_netdev_flow_dump_thread_cast(thread_
);
3548 dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread
*thread_
,
3549 struct dpif_flow
*flows
, int max_flows
)
3551 struct dpif_netdev_flow_dump_thread
*thread
3552 = dpif_netdev_flow_dump_thread_cast(thread_
);
3553 struct dpif_netdev_flow_dump
*dump
= thread
->dump
;
3554 struct dp_netdev_flow
*netdev_flows
[FLOW_DUMP_MAX_BATCH
];
3558 ovs_mutex_lock(&dump
->mutex
);
3559 if (!dump
->status
) {
3560 struct dpif_netdev
*dpif
= dpif_netdev_cast(thread
->up
.dpif
);
3561 struct dp_netdev
*dp
= get_dp_netdev(&dpif
->dpif
);
3562 struct dp_netdev_pmd_thread
*pmd
= dump
->cur_pmd
;
3563 int flow_limit
= MIN(max_flows
, FLOW_DUMP_MAX_BATCH
);
3565 /* First call to dump_next(), extracts the first pmd thread.
3566 * If there is no pmd thread, returns immediately. */
3568 pmd
= dp_netdev_pmd_get_next(dp
, &dump
->poll_thread_pos
);
3570 ovs_mutex_unlock(&dump
->mutex
);
3577 for (n_flows
= 0; n_flows
< flow_limit
; n_flows
++) {
3578 struct cmap_node
*node
;
3580 node
= cmap_next_position(&pmd
->flow_table
, &dump
->flow_pos
);
3584 netdev_flows
[n_flows
] = CONTAINER_OF(node
,
3585 struct dp_netdev_flow
,
3588 /* When finishing dumping the current pmd thread, moves to
3590 if (n_flows
< flow_limit
) {
3591 memset(&dump
->flow_pos
, 0, sizeof dump
->flow_pos
);
3592 dp_netdev_pmd_unref(pmd
);
3593 pmd
= dp_netdev_pmd_get_next(dp
, &dump
->poll_thread_pos
);
3599 /* Keeps the reference to next caller. */
3600 dump
->cur_pmd
= pmd
;
3602 /* If the current dump is empty, do not exit the loop, since the
3603 * remaining pmds could have flows to be dumped. Just dumps again
3604 * on the new 'pmd'. */
3607 ovs_mutex_unlock(&dump
->mutex
);
3609 for (i
= 0; i
< n_flows
; i
++) {
3610 struct odputil_keybuf
*maskbuf
= &thread
->maskbuf
[i
];
3611 struct odputil_keybuf
*keybuf
= &thread
->keybuf
[i
];
3612 struct dp_netdev_flow
*netdev_flow
= netdev_flows
[i
];
3613 struct dpif_flow
*f
= &flows
[i
];
3614 struct ofpbuf key
, mask
;
3616 ofpbuf_use_stack(&key
, keybuf
, sizeof *keybuf
);
3617 ofpbuf_use_stack(&mask
, maskbuf
, sizeof *maskbuf
);
3618 dp_netdev_flow_to_dpif_flow(netdev_flow
, &key
, &mask
, f
,
3626 dpif_netdev_execute(struct dpif
*dpif
, struct dpif_execute
*execute
)
3627 OVS_NO_THREAD_SAFETY_ANALYSIS
3629 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
3630 struct dp_netdev_pmd_thread
*pmd
;
3631 struct dp_packet_batch pp
;
3633 if (dp_packet_size(execute
->packet
) < ETH_HEADER_LEN
||
3634 dp_packet_size(execute
->packet
) > UINT16_MAX
) {
3638 /* Tries finding the 'pmd'. If NULL is returned, that means
3639 * the current thread is a non-pmd thread and should use
3640 * dp_netdev_get_pmd(dp, NON_PMD_CORE_ID). */
3641 pmd
= ovsthread_getspecific(dp
->per_pmd_key
);
3643 pmd
= dp_netdev_get_pmd(dp
, NON_PMD_CORE_ID
);
3649 if (execute
->probe
) {
3650 /* If this is part of a probe, Drop the packet, since executing
3651 * the action may actually cause spurious packets be sent into
3653 if (pmd
->core_id
== NON_PMD_CORE_ID
) {
3654 dp_netdev_pmd_unref(pmd
);
3659 /* If the current thread is non-pmd thread, acquires
3660 * the 'non_pmd_mutex'. */
3661 if (pmd
->core_id
== NON_PMD_CORE_ID
) {
3662 ovs_mutex_lock(&dp
->non_pmd_mutex
);
3665 /* Update current time in PMD context. */
3666 pmd_thread_ctx_time_update(pmd
);
3668 /* The action processing expects the RSS hash to be valid, because
3669 * it's always initialized at the beginning of datapath processing.
3670 * In this case, though, 'execute->packet' may not have gone through
3671 * the datapath at all, it may have been generated by the upper layer
3672 * (OpenFlow packet-out, BFD frame, ...). */
3673 if (!dp_packet_rss_valid(execute
->packet
)) {
3674 dp_packet_set_rss_hash(execute
->packet
,
3675 flow_hash_5tuple(execute
->flow
, 0));
3678 dp_packet_batch_init_packet(&pp
, execute
->packet
);
3679 dp_netdev_execute_actions(pmd
, &pp
, false, execute
->flow
,
3680 execute
->actions
, execute
->actions_len
);
3681 dp_netdev_pmd_flush_output_packets(pmd
, true);
3683 if (pmd
->core_id
== NON_PMD_CORE_ID
) {
3684 ovs_mutex_unlock(&dp
->non_pmd_mutex
);
3685 dp_netdev_pmd_unref(pmd
);
3692 dpif_netdev_operate(struct dpif
*dpif
, struct dpif_op
**ops
, size_t n_ops
)
3696 for (i
= 0; i
< n_ops
; i
++) {
3697 struct dpif_op
*op
= ops
[i
];
3700 case DPIF_OP_FLOW_PUT
:
3701 op
->error
= dpif_netdev_flow_put(dpif
, &op
->flow_put
);
3704 case DPIF_OP_FLOW_DEL
:
3705 op
->error
= dpif_netdev_flow_del(dpif
, &op
->flow_del
);
3708 case DPIF_OP_EXECUTE
:
3709 op
->error
= dpif_netdev_execute(dpif
, &op
->execute
);
3712 case DPIF_OP_FLOW_GET
:
3713 op
->error
= dpif_netdev_flow_get(dpif
, &op
->flow_get
);
3719 /* Applies datapath configuration from the database. Some of the changes are
3720 * actually applied in dpif_netdev_run(). */
3722 dpif_netdev_set_config(struct dpif
*dpif
, const struct smap
*other_config
)
3724 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
3725 const char *cmask
= smap_get(other_config
, "pmd-cpu-mask");
3726 unsigned long long insert_prob
=
3727 smap_get_ullong(other_config
, "emc-insert-inv-prob",
3728 DEFAULT_EM_FLOW_INSERT_INV_PROB
);
3729 uint32_t insert_min
, cur_min
;
3730 uint32_t tx_flush_interval
, cur_tx_flush_interval
;
3732 tx_flush_interval
= smap_get_int(other_config
, "tx-flush-interval",
3733 DEFAULT_TX_FLUSH_INTERVAL
);
3734 atomic_read_relaxed(&dp
->tx_flush_interval
, &cur_tx_flush_interval
);
3735 if (tx_flush_interval
!= cur_tx_flush_interval
) {
3736 atomic_store_relaxed(&dp
->tx_flush_interval
, tx_flush_interval
);
3737 VLOG_INFO("Flushing interval for tx queues set to %"PRIu32
" us",
3741 if (!nullable_string_is_equal(dp
->pmd_cmask
, cmask
)) {
3742 free(dp
->pmd_cmask
);
3743 dp
->pmd_cmask
= nullable_xstrdup(cmask
);
3744 dp_netdev_request_reconfigure(dp
);
3747 atomic_read_relaxed(&dp
->emc_insert_min
, &cur_min
);
3748 if (insert_prob
<= UINT32_MAX
) {
3749 insert_min
= insert_prob
== 0 ? 0 : UINT32_MAX
/ insert_prob
;
3751 insert_min
= DEFAULT_EM_FLOW_INSERT_MIN
;
3752 insert_prob
= DEFAULT_EM_FLOW_INSERT_INV_PROB
;
3755 if (insert_min
!= cur_min
) {
3756 atomic_store_relaxed(&dp
->emc_insert_min
, insert_min
);
3757 if (insert_min
== 0) {
3758 VLOG_INFO("EMC has been disabled");
3760 VLOG_INFO("EMC insertion probability changed to 1/%llu (~%.2f%%)",
3761 insert_prob
, (100 / (float)insert_prob
));
3765 bool perf_enabled
= smap_get_bool(other_config
, "pmd-perf-metrics", false);
3766 bool cur_perf_enabled
;
3767 atomic_read_relaxed(&dp
->pmd_perf_metrics
, &cur_perf_enabled
);
3768 if (perf_enabled
!= cur_perf_enabled
) {
3769 atomic_store_relaxed(&dp
->pmd_perf_metrics
, perf_enabled
);
3771 VLOG_INFO("PMD performance metrics collection enabled");
3773 VLOG_INFO("PMD performance metrics collection disabled");
3777 bool smc_enable
= smap_get_bool(other_config
, "smc-enable", false);
3779 atomic_read_relaxed(&dp
->smc_enable_db
, &cur_smc
);
3780 if (smc_enable
!= cur_smc
) {
3781 atomic_store_relaxed(&dp
->smc_enable_db
, smc_enable
);
3783 VLOG_INFO("SMC cache is enabled");
3785 VLOG_INFO("SMC cache is disabled");
3791 /* Parses affinity list and returns result in 'core_ids'. */
3793 parse_affinity_list(const char *affinity_list
, unsigned *core_ids
, int n_rxq
)
3796 char *list
, *copy
, *key
, *value
;
3799 for (i
= 0; i
< n_rxq
; i
++) {
3800 core_ids
[i
] = OVS_CORE_UNSPEC
;
3803 if (!affinity_list
) {
3807 list
= copy
= xstrdup(affinity_list
);
3809 while (ofputil_parse_key_value(&list
, &key
, &value
)) {
3810 int rxq_id
, core_id
;
3812 if (!str_to_int(key
, 0, &rxq_id
) || rxq_id
< 0
3813 || !str_to_int(value
, 0, &core_id
) || core_id
< 0) {
3818 if (rxq_id
< n_rxq
) {
3819 core_ids
[rxq_id
] = core_id
;
3827 /* Parses 'affinity_list' and applies configuration if it is valid. */
3829 dpif_netdev_port_set_rxq_affinity(struct dp_netdev_port
*port
,
3830 const char *affinity_list
)
3832 unsigned *core_ids
, i
;
3835 core_ids
= xmalloc(port
->n_rxq
* sizeof *core_ids
);
3836 if (parse_affinity_list(affinity_list
, core_ids
, port
->n_rxq
)) {
3841 for (i
= 0; i
< port
->n_rxq
; i
++) {
3842 port
->rxqs
[i
].core_id
= core_ids
[i
];
3850 /* Changes the affinity of port's rx queues. The changes are actually applied
3851 * in dpif_netdev_run(). */
3853 dpif_netdev_port_set_config(struct dpif
*dpif
, odp_port_t port_no
,
3854 const struct smap
*cfg
)
3856 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
3857 struct dp_netdev_port
*port
;
3859 const char *affinity_list
= smap_get(cfg
, "pmd-rxq-affinity");
3861 ovs_mutex_lock(&dp
->port_mutex
);
3862 error
= get_port_by_number(dp
, port_no
, &port
);
3863 if (error
|| !netdev_is_pmd(port
->netdev
)
3864 || nullable_string_is_equal(affinity_list
, port
->rxq_affinity_list
)) {
3868 error
= dpif_netdev_port_set_rxq_affinity(port
, affinity_list
);
3872 free(port
->rxq_affinity_list
);
3873 port
->rxq_affinity_list
= nullable_xstrdup(affinity_list
);
3875 dp_netdev_request_reconfigure(dp
);
3877 ovs_mutex_unlock(&dp
->port_mutex
);
3882 dpif_netdev_queue_to_priority(const struct dpif
*dpif OVS_UNUSED
,
3883 uint32_t queue_id
, uint32_t *priority
)
3885 *priority
= queue_id
;
3890 /* Creates and returns a new 'struct dp_netdev_actions', whose actions are
3891 * a copy of the 'size' bytes of 'actions' input parameters. */
3892 struct dp_netdev_actions
*
3893 dp_netdev_actions_create(const struct nlattr
*actions
, size_t size
)
3895 struct dp_netdev_actions
*netdev_actions
;
3897 netdev_actions
= xmalloc(sizeof *netdev_actions
+ size
);
3898 memcpy(netdev_actions
->actions
, actions
, size
);
3899 netdev_actions
->size
= size
;
3901 return netdev_actions
;
3904 struct dp_netdev_actions
*
3905 dp_netdev_flow_get_actions(const struct dp_netdev_flow
*flow
)
3907 return ovsrcu_get(struct dp_netdev_actions
*, &flow
->actions
);
3911 dp_netdev_actions_free(struct dp_netdev_actions
*actions
)
3917 dp_netdev_rxq_set_cycles(struct dp_netdev_rxq
*rx
,
3918 enum rxq_cycles_counter_type type
,
3919 unsigned long long cycles
)
3921 atomic_store_relaxed(&rx
->cycles
[type
], cycles
);
3925 dp_netdev_rxq_add_cycles(struct dp_netdev_rxq
*rx
,
3926 enum rxq_cycles_counter_type type
,
3927 unsigned long long cycles
)
3929 non_atomic_ullong_add(&rx
->cycles
[type
], cycles
);
3933 dp_netdev_rxq_get_cycles(struct dp_netdev_rxq
*rx
,
3934 enum rxq_cycles_counter_type type
)
3936 unsigned long long processing_cycles
;
3937 atomic_read_relaxed(&rx
->cycles
[type
], &processing_cycles
);
3938 return processing_cycles
;
3942 dp_netdev_rxq_set_intrvl_cycles(struct dp_netdev_rxq
*rx
,
3943 unsigned long long cycles
)
3945 unsigned int idx
= rx
->intrvl_idx
++ % PMD_RXQ_INTERVAL_MAX
;
3946 atomic_store_relaxed(&rx
->cycles_intrvl
[idx
], cycles
);
3950 dp_netdev_rxq_get_intrvl_cycles(struct dp_netdev_rxq
*rx
, unsigned idx
)
3952 unsigned long long processing_cycles
;
3953 atomic_read_relaxed(&rx
->cycles_intrvl
[idx
], &processing_cycles
);
3954 return processing_cycles
;
3957 #if ATOMIC_ALWAYS_LOCK_FREE_8B
3959 pmd_perf_metrics_enabled(const struct dp_netdev_pmd_thread
*pmd
)
3961 bool pmd_perf_enabled
;
3962 atomic_read_relaxed(&pmd
->dp
->pmd_perf_metrics
, &pmd_perf_enabled
);
3963 return pmd_perf_enabled
;
3966 /* If stores and reads of 64-bit integers are not atomic, the full PMD
3967 * performance metrics are not available as locked access to 64 bit
3968 * integers would be prohibitively expensive. */
3970 pmd_perf_metrics_enabled(const struct dp_netdev_pmd_thread
*pmd OVS_UNUSED
)
3977 dp_netdev_pmd_flush_output_on_port(struct dp_netdev_pmd_thread
*pmd
,
3984 struct cycle_timer timer
;
3986 uint32_t tx_flush_interval
;
3988 cycle_timer_start(&pmd
->perf_stats
, &timer
);
3990 dynamic_txqs
= p
->port
->dynamic_txqs
;
3992 tx_qid
= dpif_netdev_xps_get_tx_qid(pmd
, p
);
3994 tx_qid
= pmd
->static_tx_qid
;
3997 output_cnt
= dp_packet_batch_size(&p
->output_pkts
);
3998 ovs_assert(output_cnt
> 0);
4000 netdev_send(p
->port
->netdev
, tx_qid
, &p
->output_pkts
, dynamic_txqs
);
4001 dp_packet_batch_init(&p
->output_pkts
);
4003 /* Update time of the next flush. */
4004 atomic_read_relaxed(&pmd
->dp
->tx_flush_interval
, &tx_flush_interval
);
4005 p
->flush_time
= pmd
->ctx
.now
+ tx_flush_interval
;
4007 ovs_assert(pmd
->n_output_batches
> 0);
4008 pmd
->n_output_batches
--;
4010 pmd_perf_update_counter(&pmd
->perf_stats
, PMD_STAT_SENT_PKTS
, output_cnt
);
4011 pmd_perf_update_counter(&pmd
->perf_stats
, PMD_STAT_SENT_BATCHES
, 1);
4013 /* Distribute send cycles evenly among transmitted packets and assign to
4014 * their respective rx queues. */
4015 cycles
= cycle_timer_stop(&pmd
->perf_stats
, &timer
) / output_cnt
;
4016 for (i
= 0; i
< output_cnt
; i
++) {
4017 if (p
->output_pkts_rxqs
[i
]) {
4018 dp_netdev_rxq_add_cycles(p
->output_pkts_rxqs
[i
],
4019 RXQ_CYCLES_PROC_CURR
, cycles
);
4027 dp_netdev_pmd_flush_output_packets(struct dp_netdev_pmd_thread
*pmd
,
4033 if (!pmd
->n_output_batches
) {
4037 HMAP_FOR_EACH (p
, node
, &pmd
->send_port_cache
) {
4038 if (!dp_packet_batch_is_empty(&p
->output_pkts
)
4039 && (force
|| pmd
->ctx
.now
>= p
->flush_time
)) {
4040 output_cnt
+= dp_netdev_pmd_flush_output_on_port(pmd
, p
);
4047 dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread
*pmd
,
4048 struct dp_netdev_rxq
*rxq
,
4051 struct pmd_perf_stats
*s
= &pmd
->perf_stats
;
4052 struct dp_packet_batch batch
;
4053 struct cycle_timer timer
;
4056 int rem_qlen
= 0, *qlen_p
= NULL
;
4059 /* Measure duration for polling and processing rx burst. */
4060 cycle_timer_start(&pmd
->perf_stats
, &timer
);
4062 pmd
->ctx
.last_rxq
= rxq
;
4063 dp_packet_batch_init(&batch
);
4065 /* Fetch the rx queue length only for vhostuser ports. */
4066 if (pmd_perf_metrics_enabled(pmd
) && rxq
->is_vhost
) {
4070 error
= netdev_rxq_recv(rxq
->rx
, &batch
, qlen_p
);
4072 /* At least one packet received. */
4073 *recirc_depth_get() = 0;
4074 pmd_thread_ctx_time_update(pmd
);
4075 batch_cnt
= batch
.count
;
4076 if (pmd_perf_metrics_enabled(pmd
)) {
4077 /* Update batch histogram. */
4078 s
->current
.batches
++;
4079 histogram_add_sample(&s
->pkts_per_batch
, batch_cnt
);
4080 /* Update the maximum vhost rx queue fill level. */
4081 if (rxq
->is_vhost
&& rem_qlen
>= 0) {
4082 uint32_t qfill
= batch_cnt
+ rem_qlen
;
4083 if (qfill
> s
->current
.max_vhost_qfill
) {
4084 s
->current
.max_vhost_qfill
= qfill
;
4088 /* Process packet batch. */
4089 dp_netdev_input(pmd
, &batch
, port_no
);
4091 /* Assign processing cycles to rx queue. */
4092 cycles
= cycle_timer_stop(&pmd
->perf_stats
, &timer
);
4093 dp_netdev_rxq_add_cycles(rxq
, RXQ_CYCLES_PROC_CURR
, cycles
);
4095 dp_netdev_pmd_flush_output_packets(pmd
, false);
4097 /* Discard cycles. */
4098 cycle_timer_stop(&pmd
->perf_stats
, &timer
);
4099 if (error
!= EAGAIN
&& error
!= EOPNOTSUPP
) {
4100 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
4102 VLOG_ERR_RL(&rl
, "error receiving data from %s: %s",
4103 netdev_rxq_get_name(rxq
->rx
), ovs_strerror(error
));
4107 pmd
->ctx
.last_rxq
= NULL
;
4112 static struct tx_port
*
4113 tx_port_lookup(const struct hmap
*hmap
, odp_port_t port_no
)
4117 HMAP_FOR_EACH_IN_BUCKET (tx
, node
, hash_port_no(port_no
), hmap
) {
4118 if (tx
->port
->port_no
== port_no
) {
4127 port_reconfigure(struct dp_netdev_port
*port
)
4129 struct netdev
*netdev
= port
->netdev
;
4132 /* Closes the existing 'rxq's. */
4133 for (i
= 0; i
< port
->n_rxq
; i
++) {
4134 netdev_rxq_close(port
->rxqs
[i
].rx
);
4135 port
->rxqs
[i
].rx
= NULL
;
4137 unsigned last_nrxq
= port
->n_rxq
;
4140 /* Allows 'netdev' to apply the pending configuration changes. */
4141 if (netdev_is_reconf_required(netdev
) || port
->need_reconfigure
) {
4142 err
= netdev_reconfigure(netdev
);
4143 if (err
&& (err
!= EOPNOTSUPP
)) {
4144 VLOG_ERR("Failed to set interface %s new configuration",
4145 netdev_get_name(netdev
));
4149 /* If the netdev_reconfigure() above succeeds, reopens the 'rxq's. */
4150 port
->rxqs
= xrealloc(port
->rxqs
,
4151 sizeof *port
->rxqs
* netdev_n_rxq(netdev
));
4152 /* Realloc 'used' counters for tx queues. */
4153 free(port
->txq_used
);
4154 port
->txq_used
= xcalloc(netdev_n_txq(netdev
), sizeof *port
->txq_used
);
4156 for (i
= 0; i
< netdev_n_rxq(netdev
); i
++) {
4157 bool new_queue
= i
>= last_nrxq
;
4159 memset(&port
->rxqs
[i
], 0, sizeof port
->rxqs
[i
]);
4162 port
->rxqs
[i
].port
= port
;
4163 port
->rxqs
[i
].is_vhost
= !strncmp(port
->type
, "dpdkvhost", 9);
4165 err
= netdev_rxq_open(netdev
, &port
->rxqs
[i
].rx
, i
);
4172 /* Parse affinity list to apply configuration for new queues. */
4173 dpif_netdev_port_set_rxq_affinity(port
, port
->rxq_affinity_list
);
4175 /* If reconfiguration was successful mark it as such, so we can use it */
4176 port
->need_reconfigure
= false;
4181 struct rr_numa_list
{
4182 struct hmap numas
; /* Contains 'struct rr_numa' */
4186 struct hmap_node node
;
4190 /* Non isolated pmds on numa node 'numa_id' */
4191 struct dp_netdev_pmd_thread
**pmds
;
4198 static struct rr_numa
*
4199 rr_numa_list_lookup(struct rr_numa_list
*rr
, int numa_id
)
4201 struct rr_numa
*numa
;
4203 HMAP_FOR_EACH_WITH_HASH (numa
, node
, hash_int(numa_id
, 0), &rr
->numas
) {
4204 if (numa
->numa_id
== numa_id
) {
4212 /* Returns the next node in numa list following 'numa' in round-robin fashion.
4213 * Returns first node if 'numa' is a null pointer or the last node in 'rr'.
4214 * Returns NULL if 'rr' numa list is empty. */
4215 static struct rr_numa
*
4216 rr_numa_list_next(struct rr_numa_list
*rr
, const struct rr_numa
*numa
)
4218 struct hmap_node
*node
= NULL
;
4221 node
= hmap_next(&rr
->numas
, &numa
->node
);
4224 node
= hmap_first(&rr
->numas
);
4227 return (node
) ? CONTAINER_OF(node
, struct rr_numa
, node
) : NULL
;
4231 rr_numa_list_populate(struct dp_netdev
*dp
, struct rr_numa_list
*rr
)
4233 struct dp_netdev_pmd_thread
*pmd
;
4234 struct rr_numa
*numa
;
4236 hmap_init(&rr
->numas
);
4238 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
4239 if (pmd
->core_id
== NON_PMD_CORE_ID
|| pmd
->isolated
) {
4243 numa
= rr_numa_list_lookup(rr
, pmd
->numa_id
);
4245 numa
= xzalloc(sizeof *numa
);
4246 numa
->numa_id
= pmd
->numa_id
;
4247 hmap_insert(&rr
->numas
, &numa
->node
, hash_int(pmd
->numa_id
, 0));
4250 numa
->pmds
= xrealloc(numa
->pmds
, numa
->n_pmds
* sizeof *numa
->pmds
);
4251 numa
->pmds
[numa
->n_pmds
- 1] = pmd
;
4252 /* At least one pmd so initialise curr_idx and idx_inc. */
4253 numa
->cur_index
= 0;
4254 numa
->idx_inc
= true;
4258 /* Returns the next pmd from the numa node in
4259 * incrementing or decrementing order. */
4260 static struct dp_netdev_pmd_thread
*
4261 rr_numa_get_pmd(struct rr_numa
*numa
)
4263 int numa_idx
= numa
->cur_index
;
4265 if (numa
->idx_inc
== true) {
4266 /* Incrementing through list of pmds. */
4267 if (numa
->cur_index
== numa
->n_pmds
-1) {
4268 /* Reached the last pmd. */
4269 numa
->idx_inc
= false;
4274 /* Decrementing through list of pmds. */
4275 if (numa
->cur_index
== 0) {
4276 /* Reached the first pmd. */
4277 numa
->idx_inc
= true;
4282 return numa
->pmds
[numa_idx
];
4286 rr_numa_list_destroy(struct rr_numa_list
*rr
)
4288 struct rr_numa
*numa
;
4290 HMAP_FOR_EACH_POP (numa
, node
, &rr
->numas
) {
4294 hmap_destroy(&rr
->numas
);
4297 /* Sort Rx Queues by the processing cycles they are consuming. */
4299 compare_rxq_cycles(const void *a
, const void *b
)
4301 struct dp_netdev_rxq
*qa
;
4302 struct dp_netdev_rxq
*qb
;
4303 uint64_t cycles_qa
, cycles_qb
;
4305 qa
= *(struct dp_netdev_rxq
**) a
;
4306 qb
= *(struct dp_netdev_rxq
**) b
;
4308 cycles_qa
= dp_netdev_rxq_get_cycles(qa
, RXQ_CYCLES_PROC_HIST
);
4309 cycles_qb
= dp_netdev_rxq_get_cycles(qb
, RXQ_CYCLES_PROC_HIST
);
4311 if (cycles_qa
!= cycles_qb
) {
4312 return (cycles_qa
< cycles_qb
) ? 1 : -1;
4314 /* Cycles are the same so tiebreak on port/queue id.
4315 * Tiebreaking (as opposed to return 0) ensures consistent
4316 * sort results across multiple OS's. */
4317 uint32_t port_qa
= odp_to_u32(qa
->port
->port_no
);
4318 uint32_t port_qb
= odp_to_u32(qb
->port
->port_no
);
4319 if (port_qa
!= port_qb
) {
4320 return port_qa
> port_qb
? 1 : -1;
4322 return netdev_rxq_get_queue_id(qa
->rx
)
4323 - netdev_rxq_get_queue_id(qb
->rx
);
4328 /* Assign pmds to queues. If 'pinned' is true, assign pmds to pinned
4329 * queues and marks the pmds as isolated. Otherwise, assign non isolated
4330 * pmds to unpinned queues.
4332 * If 'pinned' is false queues will be sorted by processing cycles they are
4333 * consuming and then assigned to pmds in round robin order.
4335 * The function doesn't touch the pmd threads, it just stores the assignment
4336 * in the 'pmd' member of each rxq. */
4338 rxq_scheduling(struct dp_netdev
*dp
, bool pinned
) OVS_REQUIRES(dp
->port_mutex
)
4340 struct dp_netdev_port
*port
;
4341 struct rr_numa_list rr
;
4342 struct rr_numa
*non_local_numa
= NULL
;
4343 struct dp_netdev_rxq
** rxqs
= NULL
;
4345 struct rr_numa
*numa
= NULL
;
4348 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
4349 if (!netdev_is_pmd(port
->netdev
)) {
4353 for (int qid
= 0; qid
< port
->n_rxq
; qid
++) {
4354 struct dp_netdev_rxq
*q
= &port
->rxqs
[qid
];
4356 if (pinned
&& q
->core_id
!= OVS_CORE_UNSPEC
) {
4357 struct dp_netdev_pmd_thread
*pmd
;
4359 pmd
= dp_netdev_get_pmd(dp
, q
->core_id
);
4361 VLOG_WARN("There is no PMD thread on core %d. Queue "
4362 "%d on port \'%s\' will not be polled.",
4363 q
->core_id
, qid
, netdev_get_name(port
->netdev
));
4366 pmd
->isolated
= true;
4367 dp_netdev_pmd_unref(pmd
);
4369 } else if (!pinned
&& q
->core_id
== OVS_CORE_UNSPEC
) {
4370 uint64_t cycle_hist
= 0;
4373 rxqs
= xmalloc(sizeof *rxqs
);
4375 rxqs
= xrealloc(rxqs
, sizeof *rxqs
* (n_rxqs
+ 1));
4377 /* Sum the queue intervals and store the cycle history. */
4378 for (unsigned i
= 0; i
< PMD_RXQ_INTERVAL_MAX
; i
++) {
4379 cycle_hist
+= dp_netdev_rxq_get_intrvl_cycles(q
, i
);
4381 dp_netdev_rxq_set_cycles(q
, RXQ_CYCLES_PROC_HIST
, cycle_hist
);
4383 /* Store the queue. */
4390 /* Sort the queues in order of the processing cycles
4391 * they consumed during their last pmd interval. */
4392 qsort(rxqs
, n_rxqs
, sizeof *rxqs
, compare_rxq_cycles
);
4395 rr_numa_list_populate(dp
, &rr
);
4396 /* Assign the sorted queues to pmds in round robin. */
4397 for (int i
= 0; i
< n_rxqs
; i
++) {
4398 numa_id
= netdev_get_numa_id(rxqs
[i
]->port
->netdev
);
4399 numa
= rr_numa_list_lookup(&rr
, numa_id
);
4401 /* There are no pmds on the queue's local NUMA node.
4402 Round robin on the NUMA nodes that do have pmds. */
4403 non_local_numa
= rr_numa_list_next(&rr
, non_local_numa
);
4404 if (!non_local_numa
) {
4405 VLOG_ERR("There is no available (non-isolated) pmd "
4406 "thread for port \'%s\' queue %d. This queue "
4407 "will not be polled. Is pmd-cpu-mask set to "
4408 "zero? Or are all PMDs isolated to other "
4409 "queues?", netdev_rxq_get_name(rxqs
[i
]->rx
),
4410 netdev_rxq_get_queue_id(rxqs
[i
]->rx
));
4413 rxqs
[i
]->pmd
= rr_numa_get_pmd(non_local_numa
);
4414 VLOG_WARN("There's no available (non-isolated) pmd thread "
4415 "on numa node %d. Queue %d on port \'%s\' will "
4416 "be assigned to the pmd on core %d "
4417 "(numa node %d). Expect reduced performance.",
4418 numa_id
, netdev_rxq_get_queue_id(rxqs
[i
]->rx
),
4419 netdev_rxq_get_name(rxqs
[i
]->rx
),
4420 rxqs
[i
]->pmd
->core_id
, rxqs
[i
]->pmd
->numa_id
);
4422 rxqs
[i
]->pmd
= rr_numa_get_pmd(numa
);
4423 VLOG_INFO("Core %d on numa node %d assigned port \'%s\' "
4424 "rx queue %d (measured processing cycles %"PRIu64
").",
4425 rxqs
[i
]->pmd
->core_id
, numa_id
,
4426 netdev_rxq_get_name(rxqs
[i
]->rx
),
4427 netdev_rxq_get_queue_id(rxqs
[i
]->rx
),
4428 dp_netdev_rxq_get_cycles(rxqs
[i
], RXQ_CYCLES_PROC_HIST
));
4432 rr_numa_list_destroy(&rr
);
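/* Illustrative walkthrough (an assumption-based sketch, not original code):
 * with two non-isolated pmds A and B on numa 0 and queues sorted by load as
 * q3 > q1 > q2 > q0, the round robin above would assign
 *
 *     q3 -> pmd A (numa 0)
 *     q1 -> pmd B (numa 0)
 *     q2 -> pmd B (numa 0)   // rr_numa_get_pmd() reverses direction at the
 *     q0 -> pmd A (numa 0)   // end of the pmd list instead of wrapping
 *
 * Queues on a NUMA node without any usable pmd fall back to
 * rr_numa_list_next() and are assigned to a remote node, with the VLOG_WARN
 * above noting the expected performance penalty. */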
static void
reload_affected_pmds(struct dp_netdev *dp)
{
    struct dp_netdev_pmd_thread *pmd;

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        if (pmd->need_reload) {
            flow_mark_flush(pmd);
            dp_netdev_reload_pmd__(pmd);
            pmd->need_reload = false;
        }
    }
}
4451 reconfigure_pmd_threads(struct dp_netdev
*dp
)
4452 OVS_REQUIRES(dp
->port_mutex
)
4454 struct dp_netdev_pmd_thread
*pmd
;
4455 struct ovs_numa_dump
*pmd_cores
;
4456 struct ovs_numa_info_core
*core
;
4457 struct hmapx to_delete
= HMAPX_INITIALIZER(&to_delete
);
4458 struct hmapx_node
*node
;
4459 bool changed
= false;
4460 bool need_to_adjust_static_tx_qids
= false;
4462 /* The pmd threads should be started only if there's a pmd port in the
4463 * datapath. If the user didn't provide any "pmd-cpu-mask", we start
4464 * NR_PMD_THREADS per numa node. */
4465 if (!has_pmd_port(dp
)) {
4466 pmd_cores
= ovs_numa_dump_n_cores_per_numa(0);
4467 } else if (dp
->pmd_cmask
&& dp
->pmd_cmask
[0]) {
4468 pmd_cores
= ovs_numa_dump_cores_with_cmask(dp
->pmd_cmask
);
4470 pmd_cores
= ovs_numa_dump_n_cores_per_numa(NR_PMD_THREADS
);
4473 /* We need to adjust 'static_tx_qid's only if we're reducing number of
4474 * PMD threads. Otherwise, new threads will allocate all the freed ids. */
4475 if (ovs_numa_dump_count(pmd_cores
) < cmap_count(&dp
->poll_threads
) - 1) {
4476 /* Adjustment is required to keep 'static_tx_qid's sequential and
4477 * avoid possible issues, for example, imbalanced tx queue usage
4478 * and unnecessary locking caused by remapping on netdev level. */
4479 need_to_adjust_static_tx_qids
= true;
4482 /* Check for unwanted pmd threads */
4483 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
4484 if (pmd
->core_id
== NON_PMD_CORE_ID
) {
4487 if (!ovs_numa_dump_contains_core(pmd_cores
, pmd
->numa_id
,
4489 hmapx_add(&to_delete
, pmd
);
4490 } else if (need_to_adjust_static_tx_qids
) {
4491 pmd
->need_reload
= true;
4495 HMAPX_FOR_EACH (node
, &to_delete
) {
4496 pmd
= (struct dp_netdev_pmd_thread
*) node
->data
;
4497 VLOG_INFO("PMD thread on numa_id: %d, core id: %2d destroyed.",
4498 pmd
->numa_id
, pmd
->core_id
);
4499 dp_netdev_del_pmd(dp
, pmd
);
4501 changed
= !hmapx_is_empty(&to_delete
);
4502 hmapx_destroy(&to_delete
);
4504 if (need_to_adjust_static_tx_qids
) {
4505 /* 'static_tx_qid's are not sequential now.
4506 * Reload remaining threads to fix this. */
4507 reload_affected_pmds(dp
);
4510 /* Check for required new pmd threads */
4511 FOR_EACH_CORE_ON_DUMP(core
, pmd_cores
) {
4512 pmd
= dp_netdev_get_pmd(dp
, core
->core_id
);
4514 pmd
= xzalloc(sizeof *pmd
);
4515 dp_netdev_configure_pmd(pmd
, dp
, core
->core_id
, core
->numa_id
);
4516 pmd
->thread
= ovs_thread_create("pmd", pmd_thread_main
, pmd
);
4517 VLOG_INFO("PMD thread on numa_id: %d, core id: %2d created.",
4518 pmd
->numa_id
, pmd
->core_id
);
4521 dp_netdev_pmd_unref(pmd
);
4526 struct ovs_numa_info_numa
*numa
;
4528 /* Log the number of pmd threads per numa node. */
4529 FOR_EACH_NUMA_ON_DUMP (numa
, pmd_cores
) {
4530 VLOG_INFO("There are %"PRIuSIZE
" pmd threads on numa node %d",
4531 numa
->n_cores
, numa
->numa_id
);
4535 ovs_numa_dump_destroy(pmd_cores
);
4539 pmd_remove_stale_ports(struct dp_netdev
*dp
,
4540 struct dp_netdev_pmd_thread
*pmd
)
4541 OVS_EXCLUDED(pmd
->port_mutex
)
4542 OVS_REQUIRES(dp
->port_mutex
)
4544 struct rxq_poll
*poll
, *poll_next
;
4545 struct tx_port
*tx
, *tx_next
;
4547 ovs_mutex_lock(&pmd
->port_mutex
);
4548 HMAP_FOR_EACH_SAFE (poll
, poll_next
, node
, &pmd
->poll_list
) {
4549 struct dp_netdev_port
*port
= poll
->rxq
->port
;
4551 if (port
->need_reconfigure
4552 || !hmap_contains(&dp
->ports
, &port
->node
)) {
4553 dp_netdev_del_rxq_from_pmd(pmd
, poll
);
4556 HMAP_FOR_EACH_SAFE (tx
, tx_next
, node
, &pmd
->tx_ports
) {
4557 struct dp_netdev_port
*port
= tx
->port
;
4559 if (port
->need_reconfigure
4560 || !hmap_contains(&dp
->ports
, &port
->node
)) {
4561 dp_netdev_del_port_tx_from_pmd(pmd
, tx
);
4564 ovs_mutex_unlock(&pmd
->port_mutex
);
4567 /* Must be called each time a port is added/removed or the cmask changes.
4568 * This creates and destroys pmd threads, reconfigures ports, opens their
4569 * rxqs and assigns all rxqs/txqs to pmd threads. */
4571 reconfigure_datapath(struct dp_netdev
*dp
)
4572 OVS_REQUIRES(dp
->port_mutex
)
4574 struct dp_netdev_pmd_thread
*pmd
;
4575 struct dp_netdev_port
*port
;
4578 dp
->last_reconfigure_seq
= seq_read(dp
->reconfigure_seq
);
4580 /* Step 1: Adjust the pmd threads based on the datapath ports, the cores
4581 * on the system and the user configuration. */
4582 reconfigure_pmd_threads(dp
);
4584 wanted_txqs
= cmap_count(&dp
->poll_threads
);
4586 /* The number of pmd threads might have changed, or a port can be new:
4587 * adjust the txqs. */
4588 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
4589 netdev_set_tx_multiq(port
->netdev
, wanted_txqs
);
4592 /* Step 2: Remove from the pmd threads ports that have been removed or
4593 * need reconfiguration. */
4595 /* Check for all the ports that need reconfiguration. We cache this in
4596 * 'port->need_reconfigure', because netdev_is_reconf_required() can
4597 * change at any time. */
4598 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
4599 if (netdev_is_reconf_required(port
->netdev
)) {
4600 port
->need_reconfigure
= true;
4604 /* Remove from the pmd threads all the ports that have been deleted or
4605 * need reconfiguration. */
4606 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
4607 pmd_remove_stale_ports(dp
, pmd
);
4610 /* Reload affected pmd threads. We must wait for the pmd threads before
4611 * reconfiguring the ports, because a port cannot be reconfigured while
4612 * it's being used. */
4613 reload_affected_pmds(dp
);
4615 /* Step 3: Reconfigure ports. */
4617 /* We only reconfigure the ports that we determined above, because they're
4618 * not being used by any pmd thread at the moment. If a port fails to
4619 * reconfigure we remove it from the datapath. */
4620 struct dp_netdev_port
*next_port
;
4621 HMAP_FOR_EACH_SAFE (port
, next_port
, node
, &dp
->ports
) {
4624 if (!port
->need_reconfigure
) {
4628 err
= port_reconfigure(port
);
4630 hmap_remove(&dp
->ports
, &port
->node
);
4631 seq_change(dp
->port_seq
);
4634 port
->dynamic_txqs
= netdev_n_txq(port
->netdev
) < wanted_txqs
;
4638 /* Step 4: Compute new rxq scheduling. We don't touch the pmd threads
4639 * for now, we just update the 'pmd' pointer in each rxq to point to the
4640 * wanted thread according to the scheduling policy. */
4642 /* Reset all the pmd threads to non isolated. */
4643 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
4644 pmd
->isolated
= false;
4647 /* Reset all the queues to unassigned */
4648 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
4649 for (int i
= 0; i
< port
->n_rxq
; i
++) {
4650 port
->rxqs
[i
].pmd
= NULL
;
4654 /* Add pinned queues and mark pmd threads isolated. */
4655 rxq_scheduling(dp
, true);
4657 /* Add non-pinned queues. */
4658 rxq_scheduling(dp
, false);
4660 /* Step 5: Remove queues not compliant with new scheduling. */
4661 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
4662 struct rxq_poll
*poll
, *poll_next
;
4664 ovs_mutex_lock(&pmd
->port_mutex
);
4665 HMAP_FOR_EACH_SAFE (poll
, poll_next
, node
, &pmd
->poll_list
) {
4666 if (poll
->rxq
->pmd
!= pmd
) {
4667 dp_netdev_del_rxq_from_pmd(pmd
, poll
);
4670 ovs_mutex_unlock(&pmd
->port_mutex
);
4673 /* Reload affected pmd threads. We must wait for the pmd threads to remove
4674 * the old queues before readding them, otherwise a queue can be polled by
4675 * two threads at the same time. */
4676 reload_affected_pmds(dp
);
4678 /* Step 6: Add queues from scheduling, if they're not there already. */
4679 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
4680 if (!netdev_is_pmd(port
->netdev
)) {
4684 for (int qid
= 0; qid
< port
->n_rxq
; qid
++) {
4685 struct dp_netdev_rxq
*q
= &port
->rxqs
[qid
];
4688 ovs_mutex_lock(&q
->pmd
->port_mutex
);
4689 dp_netdev_add_rxq_to_pmd(q
->pmd
, q
);
4690 ovs_mutex_unlock(&q
->pmd
->port_mutex
);
4695 /* Add every port to the tx cache of every pmd thread, if it's not
4696 * there already and if this pmd has at least one rxq to poll. */
4697 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
4698 ovs_mutex_lock(&pmd
->port_mutex
);
4699 if (hmap_count(&pmd
->poll_list
) || pmd
->core_id
== NON_PMD_CORE_ID
) {
4700 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
4701 dp_netdev_add_port_tx_to_pmd(pmd
, port
);
4704 ovs_mutex_unlock(&pmd
->port_mutex
);
4707 /* Reload affected pmd threads. */
4708 reload_affected_pmds(dp
);
/* Returns true if one of the netdevs in 'dp' requires a reconfiguration */
static bool
ports_require_restart(const struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex)
{
    struct dp_netdev_port *port;

    HMAP_FOR_EACH (port, node, &dp->ports) {
        if (netdev_is_reconf_required(port->netdev)) {
            return true;
        }
    }

    return false;
}
4727 /* Return true if needs to revalidate datapath flows. */
4729 dpif_netdev_run(struct dpif
*dpif
)
4731 struct dp_netdev_port
*port
;
4732 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
4733 struct dp_netdev_pmd_thread
*non_pmd
;
4734 uint64_t new_tnl_seq
;
4735 bool need_to_flush
= true;
4737 ovs_mutex_lock(&dp
->port_mutex
);
4738 non_pmd
= dp_netdev_get_pmd(dp
, NON_PMD_CORE_ID
);
4740 ovs_mutex_lock(&dp
->non_pmd_mutex
);
4741 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
4742 if (!netdev_is_pmd(port
->netdev
)) {
4745 for (i
= 0; i
< port
->n_rxq
; i
++) {
4746 if (dp_netdev_process_rxq_port(non_pmd
,
4749 need_to_flush
= false;
4754 if (need_to_flush
) {
4755 /* We didn't receive anything in the process loop.
4756 * Check if we need to send something.
4757 * There was no time updates on current iteration. */
4758 pmd_thread_ctx_time_update(non_pmd
);
4759 dp_netdev_pmd_flush_output_packets(non_pmd
, false);
4762 dpif_netdev_xps_revalidate_pmd(non_pmd
, false);
4763 ovs_mutex_unlock(&dp
->non_pmd_mutex
);
4765 dp_netdev_pmd_unref(non_pmd
);
4768 if (dp_netdev_is_reconf_required(dp
) || ports_require_restart(dp
)) {
4769 reconfigure_datapath(dp
);
4771 ovs_mutex_unlock(&dp
->port_mutex
);
4773 tnl_neigh_cache_run();
4775 new_tnl_seq
= seq_read(tnl_conf_seq
);
4777 if (dp
->last_tnl_conf_seq
!= new_tnl_seq
) {
4778 dp
->last_tnl_conf_seq
= new_tnl_seq
;
4785 dpif_netdev_wait(struct dpif
*dpif
)
4787 struct dp_netdev_port
*port
;
4788 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
4790 ovs_mutex_lock(&dp_netdev_mutex
);
4791 ovs_mutex_lock(&dp
->port_mutex
);
4792 HMAP_FOR_EACH (port
, node
, &dp
->ports
) {
4793 netdev_wait_reconf_required(port
->netdev
);
4794 if (!netdev_is_pmd(port
->netdev
)) {
4797 for (i
= 0; i
< port
->n_rxq
; i
++) {
4798 netdev_rxq_wait(port
->rxqs
[i
].rx
);
4802 ovs_mutex_unlock(&dp
->port_mutex
);
4803 ovs_mutex_unlock(&dp_netdev_mutex
);
4804 seq_wait(tnl_conf_seq
, dp
->last_tnl_conf_seq
);
static void
pmd_free_cached_ports(struct dp_netdev_pmd_thread *pmd)
{
    struct tx_port *tx_port_cached;

    /* Flush all the queued packets. */
    dp_netdev_pmd_flush_output_packets(pmd, true);
    /* Free all used tx queue ids. */
    dpif_netdev_xps_revalidate_pmd(pmd, true);

    HMAP_FOR_EACH_POP (tx_port_cached, node, &pmd->tnl_port_cache) {
        free(tx_port_cached);
    }
    HMAP_FOR_EACH_POP (tx_port_cached, node, &pmd->send_port_cache) {
        free(tx_port_cached);
    }
}
4825 /* Copies ports from 'pmd->tx_ports' (shared with the main thread) to
4826 * thread-local copies. Copy to 'pmd->tnl_port_cache' if it is a tunnel
4827 * device, otherwise to 'pmd->send_port_cache' if the port has at least
4830 pmd_load_cached_ports(struct dp_netdev_pmd_thread
*pmd
)
4831 OVS_REQUIRES(pmd
->port_mutex
)
4833 struct tx_port
*tx_port
, *tx_port_cached
;
4835 pmd_free_cached_ports(pmd
);
4836 hmap_shrink(&pmd
->send_port_cache
);
4837 hmap_shrink(&pmd
->tnl_port_cache
);
4839 HMAP_FOR_EACH (tx_port
, node
, &pmd
->tx_ports
) {
4840 if (netdev_has_tunnel_push_pop(tx_port
->port
->netdev
)) {
4841 tx_port_cached
= xmemdup(tx_port
, sizeof *tx_port_cached
);
4842 hmap_insert(&pmd
->tnl_port_cache
, &tx_port_cached
->node
,
4843 hash_port_no(tx_port_cached
->port
->port_no
));
4846 if (netdev_n_txq(tx_port
->port
->netdev
)) {
4847 tx_port_cached
= xmemdup(tx_port
, sizeof *tx_port_cached
);
4848 hmap_insert(&pmd
->send_port_cache
, &tx_port_cached
->node
,
4849 hash_port_no(tx_port_cached
->port
->port_no
));
static void
pmd_alloc_static_tx_qid(struct dp_netdev_pmd_thread *pmd)
{
    ovs_mutex_lock(&pmd->dp->tx_qid_pool_mutex);
    if (!id_pool_alloc_id(pmd->dp->tx_qid_pool, &pmd->static_tx_qid)) {
        VLOG_ABORT("static_tx_qid allocation failed for PMD on core %2d"
                   ", numa_id %d.", pmd->core_id, pmd->numa_id);
    }
    ovs_mutex_unlock(&pmd->dp->tx_qid_pool_mutex);

    VLOG_DBG("static_tx_qid = %d allocated for PMD thread on core %2d"
             ", numa_id %d.", pmd->static_tx_qid, pmd->core_id, pmd->numa_id);
}

static void
pmd_free_static_tx_qid(struct dp_netdev_pmd_thread *pmd)
{
    ovs_mutex_lock(&pmd->dp->tx_qid_pool_mutex);
    id_pool_free_id(pmd->dp->tx_qid_pool, pmd->static_tx_qid);
    ovs_mutex_unlock(&pmd->dp->tx_qid_pool_mutex);
}
4877 pmd_load_queues_and_ports(struct dp_netdev_pmd_thread
*pmd
,
4878 struct polled_queue
**ppoll_list
)
4880 struct polled_queue
*poll_list
= *ppoll_list
;
4881 struct rxq_poll
*poll
;
4884 ovs_mutex_lock(&pmd
->port_mutex
);
4885 poll_list
= xrealloc(poll_list
, hmap_count(&pmd
->poll_list
)
4886 * sizeof *poll_list
);
4889 HMAP_FOR_EACH (poll
, node
, &pmd
->poll_list
) {
4890 poll_list
[i
].rxq
= poll
->rxq
;
4891 poll_list
[i
].port_no
= poll
->rxq
->port
->port_no
;
4895 pmd_load_cached_ports(pmd
);
4897 ovs_mutex_unlock(&pmd
->port_mutex
);
4899 *ppoll_list
= poll_list
;
4904 pmd_thread_main(void *f_
)
4906 struct dp_netdev_pmd_thread
*pmd
= f_
;
4907 struct pmd_perf_stats
*s
= &pmd
->perf_stats
;
4908 unsigned int lc
= 0;
4909 struct polled_queue
*poll_list
;
4913 int process_packets
= 0;
4917 /* Stores the pmd thread's 'pmd' to 'per_pmd_key'. */
4918 ovsthread_setspecific(pmd
->dp
->per_pmd_key
, pmd
);
4919 ovs_numa_thread_setaffinity_core(pmd
->core_id
);
4920 dpdk_set_lcore_id(pmd
->core_id
);
4921 poll_cnt
= pmd_load_queues_and_ports(pmd
, &poll_list
);
4922 dfc_cache_init(&pmd
->flow_cache
);
4924 pmd_alloc_static_tx_qid(pmd
);
4926 /* List port/core affinity */
4927 for (i
= 0; i
< poll_cnt
; i
++) {
4928 VLOG_DBG("Core %d processing port \'%s\' with queue-id %d\n",
4929 pmd
->core_id
, netdev_rxq_get_name(poll_list
[i
].rxq
->rx
),
4930 netdev_rxq_get_queue_id(poll_list
[i
].rxq
->rx
));
4931 /* Reset the rxq current cycles counter. */
4932 dp_netdev_rxq_set_cycles(poll_list
[i
].rxq
, RXQ_CYCLES_PROC_CURR
, 0);
4936 while (seq_read(pmd
->reload_seq
) == pmd
->last_reload_seq
) {
4937 seq_wait(pmd
->reload_seq
, pmd
->last_reload_seq
);
4943 pmd
->intrvl_tsc_prev
= 0;
4944 atomic_store_relaxed(&pmd
->intrvl_cycles
, 0);
4945 cycles_counter_update(s
);
4946 /* Protect pmd stats from external clearing while polling. */
4947 ovs_mutex_lock(&pmd
->perf_stats
.stats_mutex
);
4949 uint64_t rx_packets
= 0, tx_packets
= 0;
4951 pmd_perf_start_iteration(s
);
4953 for (i
= 0; i
< poll_cnt
; i
++) {
4955 dp_netdev_process_rxq_port(pmd
, poll_list
[i
].rxq
,
4956 poll_list
[i
].port_no
);
4957 rx_packets
+= process_packets
;
4961 /* We didn't receive anything in the process loop.
4962 * Check if we need to send something.
4963 * There was no time updates on current iteration. */
4964 pmd_thread_ctx_time_update(pmd
);
4965 tx_packets
= dp_netdev_pmd_flush_output_packets(pmd
, false);
4973 coverage_try_clear();
4974 dp_netdev_pmd_try_optimize(pmd
, poll_list
, poll_cnt
);
4975 if (!ovsrcu_try_quiesce()) {
4976 emc_cache_slow_sweep(&((pmd
->flow_cache
).emc_cache
));
4979 atomic_read_relaxed(&pmd
->reload
, &reload
);
4984 pmd_perf_end_iteration(s
, rx_packets
, tx_packets
,
4985 pmd_perf_metrics_enabled(pmd
));
4987 ovs_mutex_unlock(&pmd
->perf_stats
.stats_mutex
);
4989 poll_cnt
= pmd_load_queues_and_ports(pmd
, &poll_list
);
4990 exiting
= latch_is_set(&pmd
->exit_latch
);
4991 /* Signal here to make sure the pmd finishes
4992 * reloading the updated configuration. */
4993 dp_netdev_pmd_reload_done(pmd
);
4995 pmd_free_static_tx_qid(pmd
);
5001 dfc_cache_uninit(&pmd
->flow_cache
);
5003 pmd_free_cached_ports(pmd
);
static void
dp_netdev_disable_upcall(struct dp_netdev *dp)
    OVS_ACQUIRES(dp->upcall_rwlock)
{
    fat_rwlock_wrlock(&dp->upcall_rwlock);
}

static void
dpif_netdev_meter_get_features(const struct dpif * dpif OVS_UNUSED,
                               struct ofputil_meter_features *features)
{
    features->max_meters = MAX_METERS;
    features->band_types = DP_SUPPORTED_METER_BAND_TYPES;
    features->capabilities = DP_SUPPORTED_METER_FLAGS_MASK;
    features->max_bands = MAX_BANDS;
    features->max_color = 0;
}
5027 /* Applies the meter identified by 'meter_id' to 'packets_'. Packets
5028 * that exceed a band are dropped in-place. */
5030 dp_netdev_run_meter(struct dp_netdev
*dp
, struct dp_packet_batch
*packets_
,
5031 uint32_t meter_id
, long long int now
)
5033 struct dp_meter
*meter
;
5034 struct dp_meter_band
*band
;
5035 struct dp_packet
*packet
;
5036 long long int long_delta_t
; /* msec */
5037 uint32_t delta_t
; /* msec */
5038 const size_t cnt
= dp_packet_batch_size(packets_
);
5039 uint32_t bytes
, volume
;
5040 int exceeded_band
[NETDEV_MAX_BURST
];
5041 uint32_t exceeded_rate
[NETDEV_MAX_BURST
];
5042 int exceeded_pkt
= cnt
; /* First packet that exceeded a band rate. */
5044 if (meter_id
>= MAX_METERS
) {
5048 meter_lock(dp
, meter_id
);
5049 meter
= dp
->meters
[meter_id
];
5054 /* Initialize as negative values. */
5055 memset(exceeded_band
, 0xff, cnt
* sizeof *exceeded_band
);
5056 /* Initialize as zeroes. */
5057 memset(exceeded_rate
, 0, cnt
* sizeof *exceeded_rate
);
5059 /* All packets will hit the meter at the same time. */
5060 long_delta_t
= (now
- meter
->used
) / 1000; /* msec */
5062 /* Make sure delta_t will not be too large, so that bucket will not
5063 * wrap around below. */
5064 delta_t
= (long_delta_t
> (long long int)meter
->max_delta_t
)
5065 ? meter
->max_delta_t
: (uint32_t)long_delta_t
;
5067 /* Update meter stats. */
5069 meter
->packet_count
+= cnt
;
5071 DP_PACKET_BATCH_FOR_EACH (i
, packet
, packets_
) {
5072 bytes
+= dp_packet_size(packet
);
5074 meter
->byte_count
+= bytes
;
5076 /* Meters can operate in terms of packets per second or kilobits per
5078 if (meter
->flags
& OFPMF13_PKTPS
) {
5079 /* Rate in packets/second, bucket 1/1000 packets. */
5080 /* msec * packets/sec = 1/1000 packets. */
5081 volume
= cnt
* 1000; /* Take 'cnt' packets from the bucket. */
5083 /* Rate in kbps, bucket in bits. */
5084 /* msec * kbps = bits */
5088 /* Update all bands and find the one hit with the highest rate for each
5089 * packet (if any). */
5090 for (int m
= 0; m
< meter
->n_bands
; ++m
) {
5091 band
= &meter
->bands
[m
];
5093 /* Update band's bucket. */
5094 band
->bucket
+= delta_t
* band
->up
.rate
;
5095 if (band
->bucket
> band
->up
.burst_size
) {
5096 band
->bucket
= band
->up
.burst_size
;
5099 /* Drain the bucket for all the packets, if possible. */
5100 if (band
->bucket
>= volume
) {
5101 band
->bucket
-= volume
;
5103 int band_exceeded_pkt
;
5105 /* Band limit hit, must process packet-by-packet. */
5106 if (meter
->flags
& OFPMF13_PKTPS
) {
5107 band_exceeded_pkt
= band
->bucket
/ 1000;
5108 band
->bucket
%= 1000; /* Remainder stays in bucket. */
5110 /* Update the exceeding band for each exceeding packet.
5111 * (Only one band will be fired by a packet, and that
5112 * can be different for each packet.) */
5113 for (int i
= band_exceeded_pkt
; i
< cnt
; i
++) {
5114 if (band
->up
.rate
> exceeded_rate
[i
]) {
5115 exceeded_rate
[i
] = band
->up
.rate
;
5116 exceeded_band
[i
] = m
;
5120 /* Packet sizes differ, must process one-by-one. */
5121 band_exceeded_pkt
= cnt
;
5122 DP_PACKET_BATCH_FOR_EACH (i
, packet
, packets_
) {
5123 uint32_t bits
= dp_packet_size(packet
) * 8;
5125 if (band
->bucket
>= bits
) {
5126 band
->bucket
-= bits
;
5128 if (i
< band_exceeded_pkt
) {
5129 band_exceeded_pkt
= i
;
5131 /* Update the exceeding band for the exceeding packet.
5132 * (Only one band will be fired by a packet, and that
5133 * can be different for each packet.) */
5134 if (band
->up
.rate
> exceeded_rate
[i
]) {
5135 exceeded_rate
[i
] = band
->up
.rate
;
5136 exceeded_band
[i
] = m
;
5141 /* Remember the first exceeding packet. */
5142 if (exceeded_pkt
> band_exceeded_pkt
) {
5143 exceeded_pkt
= band_exceeded_pkt
;
5148 /* Fire the highest rate band exceeded by each packet, and drop
5149 * packets if needed. */
5151 DP_PACKET_BATCH_REFILL_FOR_EACH (j
, cnt
, packet
, packets_
) {
5152 if (exceeded_band
[j
] >= 0) {
5153 /* Meter drop packet. */
5154 band
= &meter
->bands
[exceeded_band
[j
]];
5155 band
->packet_count
+= 1;
5156 band
->byte_count
+= dp_packet_size(packet
);
5158 dp_packet_delete(packet
);
5160 /* Meter accepts packet. */
5161 dp_packet_batch_refill(packets_
, packet
, j
);
5165 meter_unlock(dp
, meter_id
);
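/* Worked example (illustrative only, with made-up numbers): for a kbps band
 * configured with rate = 1000 and 500 ms since the meter was last hit, the
 * bucket above grows by
 *
 *     delta_t * band->up.rate = 500 msec * 1000 kbps = 500000 bits
 *
 * capped at band->up.burst_size (already converted to bits in
 * dpif_netdev_meter_set() below).  A 1500 byte packet then drains
 * 1500 * 8 = 12000 bits, and a packet for which the remaining bucket is too
 * small is marked as exceeding this band. */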
5168 /* Meter set/get/del processing is still single-threaded. */
5170 dpif_netdev_meter_set(struct dpif
*dpif
, ofproto_meter_id meter_id
,
5171 struct ofputil_meter_config
*config
)
5173 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
5174 uint32_t mid
= meter_id
.uint32
;
5175 struct dp_meter
*meter
;
5178 if (mid
>= MAX_METERS
) {
5179 return EFBIG
; /* Meter_id out of range. */
5182 if (config
->flags
& ~DP_SUPPORTED_METER_FLAGS_MASK
) {
5183 return EBADF
; /* Unsupported flags set */
5186 if (config
->n_bands
> MAX_BANDS
) {
5190 for (i
= 0; i
< config
->n_bands
; ++i
) {
5191 switch (config
->bands
[i
].type
) {
5195 return ENODEV
; /* Unsupported band type */
5199 /* Allocate meter */
5200 meter
= xzalloc(sizeof *meter
5201 + config
->n_bands
* sizeof(struct dp_meter_band
));
5203 meter
->flags
= config
->flags
;
5204 meter
->n_bands
= config
->n_bands
;
5205 meter
->max_delta_t
= 0;
5206 meter
->used
= time_usec();
5209 for (i
= 0; i
< config
->n_bands
; ++i
) {
5210 uint32_t band_max_delta_t
;
5212 /* Set burst size to a workable value if none specified. */
5213 if (config
->bands
[i
].burst_size
== 0) {
5214 config
->bands
[i
].burst_size
= config
->bands
[i
].rate
;
5217 meter
->bands
[i
].up
= config
->bands
[i
];
5218 /* Convert burst size to the bucket units: */
5219 /* pkts => 1/1000 packets, kilobits => bits. */
5220 meter
->bands
[i
].up
.burst_size
*= 1000;
5221 /* Initialize bucket to empty. */
5222 meter
->bands
[i
].bucket
= 0;
5224 /* Figure out max delta_t that is enough to fill any bucket. */
5226 = meter
->bands
[i
].up
.burst_size
/ meter
->bands
[i
].up
.rate
;
5227 if (band_max_delta_t
> meter
->max_delta_t
) {
5228 meter
->max_delta_t
= band_max_delta_t
;
5232 meter_lock(dp
, mid
);
5233 dp_delete_meter(dp
, mid
); /* Free existing meter, if any */
5234 dp
->meters
[mid
] = meter
;
5235 meter_unlock(dp
, mid
);
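/* Worked example (illustrative, with made-up numbers): a kbps band configured
 * with rate = 2000 and burst_size = 4000 kilobits is stored above as
 * burst_size = 4000 * 1000 = 4000000 bits, and contributes
 *
 *     band_max_delta_t = 4000000 / 2000 = 2000   (msec to fill the bucket)
 *
 * so meter->max_delta_t ends up as the largest such value over all bands,
 * which is what dp_netdev_run_meter() uses to clamp 'delta_t'. */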
5243 dpif_netdev_meter_get(const struct dpif
*dpif
,
5244 ofproto_meter_id meter_id_
,
5245 struct ofputil_meter_stats
*stats
, uint16_t n_bands
)
5247 const struct dp_netdev
*dp
= get_dp_netdev(dpif
);
5248 const struct dp_meter
*meter
;
5249 uint32_t meter_id
= meter_id_
.uint32
;
5251 if (meter_id
>= MAX_METERS
) {
5254 meter
= dp
->meters
[meter_id
];
5261 meter_lock(dp
, meter_id
);
5262 stats
->packet_in_count
= meter
->packet_count
;
5263 stats
->byte_in_count
= meter
->byte_count
;
5265 for (i
= 0; i
< n_bands
&& i
< meter
->n_bands
; ++i
) {
5266 stats
->bands
[i
].packet_count
= meter
->bands
[i
].packet_count
;
5267 stats
->bands
[i
].byte_count
= meter
->bands
[i
].byte_count
;
5269 meter_unlock(dp
, meter_id
);
5277 dpif_netdev_meter_del(struct dpif
*dpif
,
5278 ofproto_meter_id meter_id_
,
5279 struct ofputil_meter_stats
*stats
, uint16_t n_bands
)
5281 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
5284 error
= dpif_netdev_meter_get(dpif
, meter_id_
, stats
, n_bands
);
5286 uint32_t meter_id
= meter_id_
.uint32
;
5288 meter_lock(dp
, meter_id
);
5289 dp_delete_meter(dp
, meter_id
);
5290 meter_unlock(dp
, meter_id
);
static void
dpif_netdev_disable_upcall(struct dpif *dpif)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp_netdev_disable_upcall(dp);
}

static void
dp_netdev_enable_upcall(struct dp_netdev *dp)
    OVS_RELEASES(dp->upcall_rwlock)
{
    fat_rwlock_unlock(&dp->upcall_rwlock);
}

static void
dpif_netdev_enable_upcall(struct dpif *dpif)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp_netdev_enable_upcall(dp);
}

static void
dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd)
{
    ovs_mutex_lock(&pmd->cond_mutex);
    atomic_store_relaxed(&pmd->reload, false);
    pmd->last_reload_seq = seq_read(pmd->reload_seq);
    xpthread_cond_signal(&pmd->cond);
    ovs_mutex_unlock(&pmd->cond_mutex);
}
/* Finds and refs the dp_netdev_pmd_thread on core 'core_id'.  Returns
 * the pointer if succeeds, otherwise, NULL (it can return NULL even if
 * 'core_id' is NON_PMD_CORE_ID).
 *
 * Caller must unrefs the returned reference.  */
static struct dp_netdev_pmd_thread *
dp_netdev_get_pmd(struct dp_netdev *dp, unsigned core_id)
{
    struct dp_netdev_pmd_thread *pmd;
    const struct cmap_node *pnode;

    pnode = cmap_find(&dp->poll_threads, hash_int(core_id, 0));
    if (!pnode) {
        return NULL;
    }
    pmd = CONTAINER_OF(pnode, struct dp_netdev_pmd_thread, node);

    return dp_netdev_pmd_try_ref(pmd) ? pmd : NULL;
}
/* Sets the 'struct dp_netdev_pmd_thread' for non-pmd threads. */
static void
dp_netdev_set_nonpmd(struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex)
{
    struct dp_netdev_pmd_thread *non_pmd;

    non_pmd = xzalloc(sizeof *non_pmd);
    dp_netdev_configure_pmd(non_pmd, dp, NON_PMD_CORE_ID, OVS_NUMA_UNSPEC);
}

/* Caller must have valid pointer to 'pmd'. */
static bool
dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd)
{
    return ovs_refcount_try_ref_rcu(&pmd->ref_cnt);
}

static void
dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd)
{
    if (pmd && ovs_refcount_unref(&pmd->ref_cnt) == 1) {
        ovsrcu_postpone(dp_netdev_destroy_pmd, pmd);
    }
}
/* Given cmap position 'pos', tries to ref the next node.  If try_ref()
 * fails, keeps checking for next node until reaching the end of cmap.
 *
 * Caller must unrefs the returned reference. */
static struct dp_netdev_pmd_thread *
dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos)
{
    struct dp_netdev_pmd_thread *next;

    do {
        struct cmap_node *node;

        node = cmap_next_position(&dp->poll_threads, pos);
        next = node ? CONTAINER_OF(node, struct dp_netdev_pmd_thread, node)
                    : NULL;
    } while (next && !dp_netdev_pmd_try_ref(next));

    return next;
}
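/* Illustrative sketch (not part of the original code) of how a caller can
 * walk all pmd threads with the helper above; a zero-initialized
 * 'cmap_position' is assumed to start the iteration from the beginning:
 *
 *     struct cmap_position pos;
 *     struct dp_netdev_pmd_thread *pmd;
 *
 *     memset(&pos, 0, sizeof pos);
 *     while ((pmd = dp_netdev_pmd_get_next(dp, &pos))) {
 *         ...use 'pmd'...
 *         dp_netdev_pmd_unref(pmd);   // every returned ref must be dropped
 *     }
 */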
5395 /* Configures the 'pmd' based on the input argument. */
5397 dp_netdev_configure_pmd(struct dp_netdev_pmd_thread
*pmd
, struct dp_netdev
*dp
,
5398 unsigned core_id
, int numa_id
)
5401 pmd
->core_id
= core_id
;
5402 pmd
->numa_id
= numa_id
;
5403 pmd
->need_reload
= false;
5404 pmd
->n_output_batches
= 0;
5406 ovs_refcount_init(&pmd
->ref_cnt
);
5407 latch_init(&pmd
->exit_latch
);
5408 pmd
->reload_seq
= seq_create();
5409 pmd
->last_reload_seq
= seq_read(pmd
->reload_seq
);
5410 atomic_init(&pmd
->reload
, false);
5411 xpthread_cond_init(&pmd
->cond
, NULL
);
5412 ovs_mutex_init(&pmd
->cond_mutex
);
5413 ovs_mutex_init(&pmd
->flow_mutex
);
5414 ovs_mutex_init(&pmd
->port_mutex
);
5415 cmap_init(&pmd
->flow_table
);
5416 cmap_init(&pmd
->classifiers
);
5417 pmd
->ctx
.last_rxq
= NULL
;
5418 pmd_thread_ctx_time_update(pmd
);
5419 pmd
->next_optimization
= pmd
->ctx
.now
+ DPCLS_OPTIMIZATION_INTERVAL
;
5420 pmd
->rxq_next_cycle_store
= pmd
->ctx
.now
+ PMD_RXQ_INTERVAL_LEN
;
5421 hmap_init(&pmd
->poll_list
);
5422 hmap_init(&pmd
->tx_ports
);
5423 hmap_init(&pmd
->tnl_port_cache
);
5424 hmap_init(&pmd
->send_port_cache
);
5425 /* init the 'flow_cache' since there is no
5426 * actual thread created for NON_PMD_CORE_ID. */
5427 if (core_id
== NON_PMD_CORE_ID
) {
5428 dfc_cache_init(&pmd
->flow_cache
);
5429 pmd_alloc_static_tx_qid(pmd
);
5431 pmd_perf_stats_init(&pmd
->perf_stats
);
5432 cmap_insert(&dp
->poll_threads
, CONST_CAST(struct cmap_node
*, &pmd
->node
),
5433 hash_int(core_id
, 0));
5437 dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread
*pmd
)
5441 dp_netdev_pmd_flow_flush(pmd
);
5442 hmap_destroy(&pmd
->send_port_cache
);
5443 hmap_destroy(&pmd
->tnl_port_cache
);
5444 hmap_destroy(&pmd
->tx_ports
);
5445 hmap_destroy(&pmd
->poll_list
);
5446 /* All flows (including their dpcls_rules) have been deleted already */
5447 CMAP_FOR_EACH (cls
, node
, &pmd
->classifiers
) {
5449 ovsrcu_postpone(free
, cls
);
5451 cmap_destroy(&pmd
->classifiers
);
5452 cmap_destroy(&pmd
->flow_table
);
5453 ovs_mutex_destroy(&pmd
->flow_mutex
);
5454 latch_destroy(&pmd
->exit_latch
);
5455 seq_destroy(pmd
->reload_seq
);
5456 xpthread_cond_destroy(&pmd
->cond
);
5457 ovs_mutex_destroy(&pmd
->cond_mutex
);
5458 ovs_mutex_destroy(&pmd
->port_mutex
);
5462 /* Stops the pmd thread, removes it from the 'dp->poll_threads',
5463 * and unrefs the struct. */
5465 dp_netdev_del_pmd(struct dp_netdev
*dp
, struct dp_netdev_pmd_thread
*pmd
)
5467 /* NON_PMD_CORE_ID doesn't have a thread, so we don't have to synchronize,
5468 * but extra cleanup is necessary */
5469 if (pmd
->core_id
== NON_PMD_CORE_ID
) {
5470 ovs_mutex_lock(&dp
->non_pmd_mutex
);
5471 dfc_cache_uninit(&pmd
->flow_cache
);
5472 pmd_free_cached_ports(pmd
);
5473 pmd_free_static_tx_qid(pmd
);
5474 ovs_mutex_unlock(&dp
->non_pmd_mutex
);
5476 latch_set(&pmd
->exit_latch
);
5477 dp_netdev_reload_pmd__(pmd
);
5478 xpthread_join(pmd
->thread
, NULL
);
5481 dp_netdev_pmd_clear_ports(pmd
);
5483 /* Purges the 'pmd''s flows after stopping the thread, but before
5484 * destroying the flows, so that the flow stats can be collected. */
5485 if (dp
->dp_purge_cb
) {
5486 dp
->dp_purge_cb(dp
->dp_purge_aux
, pmd
->core_id
);
5488 cmap_remove(&pmd
->dp
->poll_threads
, &pmd
->node
, hash_int(pmd
->core_id
, 0));
5489 dp_netdev_pmd_unref(pmd
);
5492 /* Destroys all pmd threads. If 'non_pmd' is true it also destroys the non pmd
5495 dp_netdev_destroy_all_pmds(struct dp_netdev
*dp
, bool non_pmd
)
5497 struct dp_netdev_pmd_thread
*pmd
;
5498 struct dp_netdev_pmd_thread
**pmd_list
;
5499 size_t k
= 0, n_pmds
;
5501 n_pmds
= cmap_count(&dp
->poll_threads
);
5502 pmd_list
= xcalloc(n_pmds
, sizeof *pmd_list
);
5504 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
5505 if (!non_pmd
&& pmd
->core_id
== NON_PMD_CORE_ID
) {
5508 /* We cannot call dp_netdev_del_pmd(), since it alters
5509 * 'dp->poll_threads' (while we're iterating it) and it
5511 ovs_assert(k
< n_pmds
);
5512 pmd_list
[k
++] = pmd
;
5515 for (size_t i
= 0; i
< k
; i
++) {
5516 dp_netdev_del_pmd(dp
, pmd_list
[i
]);
/* Deletes all rx queues from pmd->poll_list and all the ports from
 * pmd->tx_ports. */
static void
dp_netdev_pmd_clear_ports(struct dp_netdev_pmd_thread *pmd)
{
    struct rxq_poll *poll;
    struct tx_port *port;

    ovs_mutex_lock(&pmd->port_mutex);
    HMAP_FOR_EACH_POP (poll, node, &pmd->poll_list) {
        free(poll);
    }
    HMAP_FOR_EACH_POP (port, node, &pmd->tx_ports) {
        free(port);
    }
    ovs_mutex_unlock(&pmd->port_mutex);
}

/* Adds rx queue to poll_list of PMD thread, if it's not there already. */
static void
dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd,
                         struct dp_netdev_rxq *rxq)
    OVS_REQUIRES(pmd->port_mutex)
{
    int qid = netdev_rxq_get_queue_id(rxq->rx);
    uint32_t hash = hash_2words(odp_to_u32(rxq->port->port_no), qid);
    struct rxq_poll *poll;

    HMAP_FOR_EACH_WITH_HASH (poll, node, hash, &pmd->poll_list) {
        if (poll->rxq == rxq) {
            /* 'rxq' is already polled by this thread. Do nothing. */
            return;
        }
    }

    poll = xmalloc(sizeof *poll);
    poll->rxq = rxq;
    hmap_insert(&pmd->poll_list, &poll->node, hash);

    pmd->need_reload = true;
}

/* Delete 'poll' from poll_list of PMD thread. */
static void
dp_netdev_del_rxq_from_pmd(struct dp_netdev_pmd_thread *pmd,
                           struct rxq_poll *poll)
    OVS_REQUIRES(pmd->port_mutex)
{
    hmap_remove(&pmd->poll_list, &poll->node);
    free(poll);

    pmd->need_reload = true;
}

/* Add 'port' to the tx port cache of 'pmd', which must be reloaded for the
 * changes to take effect. */
static void
dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread *pmd,
                             struct dp_netdev_port *port)
    OVS_REQUIRES(pmd->port_mutex)
{
    struct tx_port *tx;

    tx = tx_port_lookup(&pmd->tx_ports, port->port_no);
    if (tx) {
        /* 'port' is already on this thread tx cache. Do nothing. */
        return;
    }

    tx = xzalloc(sizeof *tx);

    tx->port = port;
    tx->qid = -1;
    tx->flush_time = 0LL;
    dp_packet_batch_init(&tx->output_pkts);

    hmap_insert(&pmd->tx_ports, &tx->node, hash_port_no(tx->port->port_no));
    pmd->need_reload = true;
}

/* Del 'tx' from the tx port cache of 'pmd', which must be reloaded for the
 * changes to take effect. */
static void
dp_netdev_del_port_tx_from_pmd(struct dp_netdev_pmd_thread *pmd,
                               struct tx_port *tx)
    OVS_REQUIRES(pmd->port_mutex)
{
    hmap_remove(&pmd->tx_ports, &tx->node);
    free(tx);
    pmd->need_reload = true;
}
static char *
dpif_netdev_get_datapath_version(void)
{
    return xstrdup("<built-in>");
}
static void
dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow, int cnt, int size,
                    uint16_t tcp_flags, long long now)
{
    uint16_t flags;

    atomic_store_relaxed(&netdev_flow->stats.used, now);
    non_atomic_ullong_add(&netdev_flow->stats.packet_count, cnt);
    non_atomic_ullong_add(&netdev_flow->stats.byte_count, size);
    atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags);
    flags |= tcp_flags;
    atomic_store_relaxed(&netdev_flow->stats.tcp_flags, flags);
}
5634 dp_netdev_upcall(struct dp_netdev_pmd_thread
*pmd
, struct dp_packet
*packet_
,
5635 struct flow
*flow
, struct flow_wildcards
*wc
, ovs_u128
*ufid
,
5636 enum dpif_upcall_type type
, const struct nlattr
*userdata
,
5637 struct ofpbuf
*actions
, struct ofpbuf
*put_actions
)
5639 struct dp_netdev
*dp
= pmd
->dp
;
5641 if (OVS_UNLIKELY(!dp
->upcall_cb
)) {
5645 if (OVS_UNLIKELY(!VLOG_DROP_DBG(&upcall_rl
))) {
5646 struct ds ds
= DS_EMPTY_INITIALIZER
;
5649 struct odp_flow_key_parms odp_parms
= {
5651 .mask
= wc
? &wc
->masks
: NULL
,
5652 .support
= dp_netdev_support
,
5655 ofpbuf_init(&key
, 0);
5656 odp_flow_key_from_flow(&odp_parms
, &key
);
5657 packet_str
= ofp_dp_packet_to_string(packet_
);
5659 odp_flow_key_format(key
.data
, key
.size
, &ds
);
5661 VLOG_DBG("%s: %s upcall:\n%s\n%s", dp
->name
,
5662 dpif_upcall_type_to_string(type
), ds_cstr(&ds
), packet_str
);
5664 ofpbuf_uninit(&key
);
5670 return dp
->upcall_cb(packet_
, flow
, ufid
, pmd
->core_id
, type
, userdata
,
5671 actions
, wc
, put_actions
, dp
->upcall_aux
);
static inline uint32_t
dpif_netdev_packet_get_rss_hash_orig_pkt(struct dp_packet *packet,
                                         const struct miniflow *mf)
{
    uint32_t hash;

    if (OVS_LIKELY(dp_packet_rss_valid(packet))) {
        hash = dp_packet_get_rss_hash(packet);
    } else {
        hash = miniflow_hash_5tuple(mf, 0);
        dp_packet_set_rss_hash(packet, hash);
    }

    return hash;
}

static inline uint32_t
dpif_netdev_packet_get_rss_hash(struct dp_packet *packet,
                                const struct miniflow *mf)
{
    uint32_t hash, recirc_depth;

    if (OVS_LIKELY(dp_packet_rss_valid(packet))) {
        hash = dp_packet_get_rss_hash(packet);
    } else {
        hash = miniflow_hash_5tuple(mf, 0);
        dp_packet_set_rss_hash(packet, hash);
    }

    /* The RSS hash must account for the recirculation depth to avoid
     * collisions in the exact match cache */
    recirc_depth = *recirc_depth_get_unsafe();
    if (OVS_UNLIKELY(recirc_depth)) {
        hash = hash_finish(hash, recirc_depth);
        dp_packet_set_rss_hash(packet, hash);
    }

    return hash;
}
struct packet_batch_per_flow {
    unsigned int byte_count;
    uint16_t tcp_flags;
    struct dp_netdev_flow *flow;

    struct dp_packet_batch array;
};

static inline void
packet_batch_per_flow_update(struct packet_batch_per_flow *batch,
                             struct dp_packet *packet,
                             uint16_t tcp_flags)
{
    batch->byte_count += dp_packet_size(packet);
    batch->tcp_flags |= tcp_flags;
    batch->array.packets[batch->array.count++] = packet;
}

static inline void
packet_batch_per_flow_init(struct packet_batch_per_flow *batch,
                           struct dp_netdev_flow *flow)
{
    flow->batch = batch;

    batch->flow = flow;
    dp_packet_batch_init(&batch->array);
    batch->byte_count = 0;
    batch->tcp_flags = 0;
}
static inline void
packet_batch_per_flow_execute(struct packet_batch_per_flow *batch,
                              struct dp_netdev_pmd_thread *pmd)
{
    struct dp_netdev_actions *actions;
    struct dp_netdev_flow *flow = batch->flow;

    dp_netdev_flow_used(flow, batch->array.count, batch->byte_count,
                        batch->tcp_flags, pmd->ctx.now / 1000);

    actions = dp_netdev_flow_get_actions(flow);

    dp_netdev_execute_actions(pmd, &batch->array, true, &flow->flow,
                              actions->actions, actions->size);
}
static inline void
dp_netdev_queue_batches(struct dp_packet *pkt,
                        struct dp_netdev_flow *flow, uint16_t tcp_flags,
                        struct packet_batch_per_flow *batches,
                        size_t *n_batches)
{
    struct packet_batch_per_flow *batch = flow->batch;

    if (OVS_UNLIKELY(!batch)) {
        batch = &batches[(*n_batches)++];
        packet_batch_per_flow_init(batch, flow);
    }

    packet_batch_per_flow_update(batch, pkt, tcp_flags);
}

static inline void
packet_enqueue_to_flow_map(struct dp_packet *packet,
                           struct dp_netdev_flow *flow,
                           uint16_t tcp_flags,
                           struct dp_packet_flow_map *flow_map,
                           size_t index)
{
    struct dp_packet_flow_map *map = &flow_map[index];

    map->flow = flow;
    map->packet = packet;
    map->tcp_flags = tcp_flags;
}
5788 /* SMC lookup function for a batch of packets.
5789 * By doing batching SMC lookup, we can use prefetch
5790 * to hide memory access latency.
5793 smc_lookup_batch(struct dp_netdev_pmd_thread
*pmd
,
5794 struct netdev_flow_key
*keys
,
5795 struct netdev_flow_key
**missed_keys
,
5796 struct dp_packet_batch
*packets_
,
5798 struct dp_packet_flow_map
*flow_map
,
5802 struct dp_packet
*packet
;
5803 size_t n_smc_hit
= 0, n_missed
= 0;
5804 struct dfc_cache
*cache
= &pmd
->flow_cache
;
5805 struct smc_cache
*smc_cache
= &cache
->smc_cache
;
5806 const struct cmap_node
*flow_node
;
5810 /* Prefetch buckets for all packets */
5811 for (i
= 0; i
< cnt
; i
++) {
5812 OVS_PREFETCH(&smc_cache
->buckets
[keys
[i
].hash
& SMC_MASK
]);
5815 DP_PACKET_BATCH_REFILL_FOR_EACH (i
, cnt
, packet
, packets_
) {
5816 struct dp_netdev_flow
*flow
= NULL
;
5817 flow_node
= smc_entry_get(pmd
, keys
[i
].hash
);
5819 /* Get the original order of this packet in received batch. */
5820 recv_idx
= index_map
[i
];
5822 if (OVS_LIKELY(flow_node
!= NULL
)) {
5823 CMAP_NODE_FOR_EACH (flow
, node
, flow_node
) {
5824 /* Since we dont have per-port megaflow to check the port
5825 * number, we need to verify that the input ports match. */
5826 if (OVS_LIKELY(dpcls_rule_matches_key(&flow
->cr
, &keys
[i
]) &&
5827 flow
->flow
.in_port
.odp_port
== packet
->md
.in_port
.odp_port
)) {
5828 tcp_flags
= miniflow_get_tcp_flags(&keys
[i
].mf
);
5830 /* SMC hit and emc miss, we insert into EMC */
5832 netdev_flow_key_size(miniflow_n_values(&keys
[i
].mf
));
5833 emc_probabilistic_insert(pmd
, &keys
[i
], flow
);
5834 /* Add these packets into the flow map in the same order
5837 packet_enqueue_to_flow_map(packet
, flow
, tcp_flags
,
5838 flow_map
, recv_idx
);
5849 /* SMC missed. Group missed packets together at
5850 * the beginning of the 'packets' array. */
5851 dp_packet_batch_refill(packets_
, packet
, i
);
5853 /* Preserve the order of packet for flow batching. */
5854 index_map
[n_missed
] = recv_idx
;
5856 /* Put missed keys to the pointer arrays return to the caller */
5857 missed_keys
[n_missed
++] = &keys
[i
];
5860 pmd_perf_update_counter(&pmd
->perf_stats
, PMD_STAT_SMC_HIT
, n_smc_hit
);
5863 /* Try to process all ('cnt') the 'packets' using only the datapath flow cache
5864 * 'pmd->flow_cache'. If a flow is not found for a packet 'packets[i]', the
5865 * miniflow is copied into 'keys' and the packet pointer is moved at the
5866 * beginning of the 'packets' array. The pointers of missed keys are put in the
5867 * missed_keys pointer array for future processing.
5869 * The function returns the number of packets that needs to be processed in the
5870 * 'packets' array (they have been moved to the beginning of the vector).
5872 * For performance reasons a caller may choose not to initialize the metadata
5873 * in 'packets_'. If 'md_is_valid' is false, the metadata in 'packets'
5874 * is not valid and must be initialized by this function using 'port_no'.
5875 * If 'md_is_valid' is true, the metadata is already valid and 'port_no'
5878 static inline size_t
5879 dfc_processing(struct dp_netdev_pmd_thread
*pmd
,
5880 struct dp_packet_batch
*packets_
,
5881 struct netdev_flow_key
*keys
,
5882 struct netdev_flow_key
**missed_keys
,
5883 struct packet_batch_per_flow batches
[], size_t *n_batches
,
5884 struct dp_packet_flow_map
*flow_map
,
5885 size_t *n_flows
, uint8_t *index_map
,
5886 bool md_is_valid
, odp_port_t port_no
)
5888 struct netdev_flow_key
*key
= &keys
[0];
5889 size_t n_missed
= 0, n_emc_hit
= 0;
5890 struct dfc_cache
*cache
= &pmd
->flow_cache
;
5891 struct dp_packet
*packet
;
5892 const size_t cnt
= dp_packet_batch_size(packets_
);
5898 bool batch_enable
= true;
5900 atomic_read_relaxed(&pmd
->dp
->smc_enable_db
, &smc_enable_db
);
5901 atomic_read_relaxed(&pmd
->dp
->emc_insert_min
, &cur_min
);
5902 pmd_perf_update_counter(&pmd
->perf_stats
,
5903 md_is_valid
? PMD_STAT_RECIRC
: PMD_STAT_RECV
,
5906 DP_PACKET_BATCH_REFILL_FOR_EACH (i
, cnt
, packet
, packets_
) {
5907 struct dp_netdev_flow
*flow
;
5910 if (OVS_UNLIKELY(dp_packet_size(packet
) < ETH_HEADER_LEN
)) {
5911 dp_packet_delete(packet
);
5916 struct dp_packet
**packets
= packets_
->packets
;
5917 /* Prefetch next packet data and metadata. */
5918 OVS_PREFETCH(dp_packet_data(packets
[i
+1]));
5919 pkt_metadata_prefetch_init(&packets
[i
+1]->md
);
5923 pkt_metadata_init(&packet
->md
, port_no
);
5926 if ((*recirc_depth_get() == 0) &&
5927 dp_packet_has_flow_mark(packet
, &mark
)) {
5928 flow
= mark_to_flow_find(pmd
, mark
);
5929 if (OVS_LIKELY(flow
)) {
5930 tcp_flags
= parse_tcp_flags(packet
);
5931 if (OVS_LIKELY(batch_enable
)) {
5932 dp_netdev_queue_batches(packet
, flow
, tcp_flags
, batches
,
5935 /* Flow batching should be performed only after fast-path
5936 * processing is also completed for packets with emc miss
5937 * or else it will result in reordering of packets with
5938 * same datapath flows. */
5939 packet_enqueue_to_flow_map(packet
, flow
, tcp_flags
,
5940 flow_map
, map_cnt
++);
5946 miniflow_extract(packet
, &key
->mf
);
5947 key
->len
= 0; /* Not computed yet. */
5948 /* If EMC and SMC disabled skip hash computation */
5949 if (smc_enable_db
== true || cur_min
!= 0) {
5951 key
->hash
= dpif_netdev_packet_get_rss_hash_orig_pkt(packet
,
5954 key
->hash
= dpif_netdev_packet_get_rss_hash(packet
, &key
->mf
);
5958 flow
= emc_lookup(&cache
->emc_cache
, key
);
5962 if (OVS_LIKELY(flow
)) {
5963 tcp_flags
= miniflow_get_tcp_flags(&key
->mf
);
5965 if (OVS_LIKELY(batch_enable
)) {
5966 dp_netdev_queue_batches(packet
, flow
, tcp_flags
, batches
,
5969 /* Flow batching should be performed only after fast-path
5970 * processing is also completed for packets with emc miss
5971 * or else it will result in reordering of packets with
5972 * same datapath flows. */
5973 packet_enqueue_to_flow_map(packet
, flow
, tcp_flags
,
5974 flow_map
, map_cnt
++);
5977 /* Exact match cache missed. Group missed packets together at
5978 * the beginning of the 'packets' array. */
5979 dp_packet_batch_refill(packets_
, packet
, i
);
5981 /* Preserve the order of packet for flow batching. */
5982 index_map
[n_missed
] = map_cnt
;
5983 flow_map
[map_cnt
++].flow
= NULL
;
5985 /* 'key[n_missed]' contains the key of the current packet and it
5986 * will be passed to SMC lookup. The next key should be extracted
5987 * to 'keys[n_missed + 1]'.
5988 * We also maintain a pointer array to keys missed both SMC and EMC
5989 * which will be returned to the caller for future processing. */
5990 missed_keys
[n_missed
] = key
;
5991 key
= &keys
[++n_missed
];
5993 /* Skip batching for subsequent packets to avoid reordering. */
5994 batch_enable
= false;
5997 /* Count of packets which are not flow batched. */
6000 pmd_perf_update_counter(&pmd
->perf_stats
, PMD_STAT_EXACT_HIT
, n_emc_hit
);
6002 if (!smc_enable_db
) {
6003 return dp_packet_batch_size(packets_
);
6006 /* Packets miss EMC will do a batch lookup in SMC if enabled */
6007 smc_lookup_batch(pmd
, keys
, missed_keys
, packets_
,
6008 n_missed
, flow_map
, index_map
);
6010 return dp_packet_batch_size(packets_
);
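/* Illustrative summary (not part of the original code) of the lookup
 * hierarchy a packet goes through on a pmd thread:
 *
 *     flow mark  (hardware offload hint, mark_to_flow_find())
 *         -> EMC   (exact match on the full miniflow, emc_lookup())
 *         -> SMC   (signature match on the key hash, smc_lookup_batch())
 *         -> dpcls (masked megaflow lookup, fast_path_processing())
 *         -> upcall to ofproto (handle_packet_upcall())
 *
 * dfc_processing() above covers the first three levels and returns the
 * packets that still need the dpcls/upcall stages. */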
6014 handle_packet_upcall(struct dp_netdev_pmd_thread
*pmd
,
6015 struct dp_packet
*packet
,
6016 const struct netdev_flow_key
*key
,
6017 struct ofpbuf
*actions
, struct ofpbuf
*put_actions
)
6019 struct ofpbuf
*add_actions
;
6020 struct dp_packet_batch b
;
6024 uint64_t cycles
= cycles_counter_update(&pmd
->perf_stats
);
6026 match
.tun_md
.valid
= false;
6027 miniflow_expand(&key
->mf
, &match
.flow
);
6029 ofpbuf_clear(actions
);
6030 ofpbuf_clear(put_actions
);
6032 dpif_flow_hash(pmd
->dp
->dpif
, &match
.flow
, sizeof match
.flow
, &ufid
);
6033 error
= dp_netdev_upcall(pmd
, packet
, &match
.flow
, &match
.wc
,
6034 &ufid
, DPIF_UC_MISS
, NULL
, actions
,
6036 if (OVS_UNLIKELY(error
&& error
!= ENOSPC
)) {
6037 dp_packet_delete(packet
);
6041 /* The Netlink encoding of datapath flow keys cannot express
6042 * wildcarding the presence of a VLAN tag. Instead, a missing VLAN
6043 * tag is interpreted as exact match on the fact that there is no
6044 * VLAN. Unless we refactor a lot of code that translates between
6045 * Netlink and struct flow representations, we have to do the same
6047 if (!match
.wc
.masks
.vlans
[0].tci
) {
6048 match
.wc
.masks
.vlans
[0].tci
= htons(0xffff);
6051 /* We can't allow the packet batching in the next loop to execute
6052 * the actions. Otherwise, if there are any slow path actions,
6053 * we'll send the packet up twice. */
6054 dp_packet_batch_init_packet(&b
, packet
);
6055 dp_netdev_execute_actions(pmd
, &b
, true, &match
.flow
,
6056 actions
->data
, actions
->size
);
6058 add_actions
= put_actions
->size
? put_actions
: actions
;
6059 if (OVS_LIKELY(error
!= ENOSPC
)) {
6060 struct dp_netdev_flow
*netdev_flow
;
6062 /* XXX: There's a race window where a flow covering this packet
6063 * could have already been installed since we last did the flow
6064 * lookup before upcall. This could be solved by moving the
6065 * mutex lock outside the loop, but that's an awful long time
6066 * to be locking everyone out of making flow installs. If we
6067 * move to a per-core classifier, it would be reasonable. */
6068 ovs_mutex_lock(&pmd
->flow_mutex
);
6069 netdev_flow
= dp_netdev_pmd_lookup_flow(pmd
, key
, NULL
);
6070 if (OVS_LIKELY(!netdev_flow
)) {
6071 netdev_flow
= dp_netdev_flow_add(pmd
, &match
, &ufid
,
6075 ovs_mutex_unlock(&pmd
->flow_mutex
);
6076 uint32_t hash
= dp_netdev_flow_hash(&netdev_flow
->ufid
);
6077 smc_insert(pmd
, key
, hash
);
6078 emc_probabilistic_insert(pmd
, key
, netdev_flow
);
6080 if (pmd_perf_metrics_enabled(pmd
)) {
6081 /* Update upcall stats. */
6082 cycles
= cycles_counter_update(&pmd
->perf_stats
) - cycles
;
6083 struct pmd_perf_stats
*s
= &pmd
->perf_stats
;
6084 s
->current
.upcalls
++;
6085 s
->current
.upcall_cycles
+= cycles
;
6086 histogram_add_sample(&s
->cycles_per_upcall
, cycles
);
6092 fast_path_processing(struct dp_netdev_pmd_thread
*pmd
,
6093 struct dp_packet_batch
*packets_
,
6094 struct netdev_flow_key
**keys
,
6095 struct dp_packet_flow_map
*flow_map
,
6099 const size_t cnt
= dp_packet_batch_size(packets_
);
6100 #if !defined(__CHECKER__) && !defined(_WIN32)
6101 const size_t PKT_ARRAY_SIZE
= cnt
;
6103 /* Sparse or MSVC doesn't like variable length array. */
6104 enum { PKT_ARRAY_SIZE
= NETDEV_MAX_BURST
};
6106 struct dp_packet
*packet
;
6108 struct dpcls_rule
*rules
[PKT_ARRAY_SIZE
];
6109 struct dp_netdev
*dp
= pmd
->dp
;
6110 int upcall_ok_cnt
= 0, upcall_fail_cnt
= 0;
6111 int lookup_cnt
= 0, add_lookup_cnt
;
6114 for (size_t i
= 0; i
< cnt
; i
++) {
6115 /* Key length is needed in all the cases, hash computed on demand. */
6116 keys
[i
]->len
= netdev_flow_key_size(miniflow_n_values(&keys
[i
]->mf
));
6118 /* Get the classifier for the in_port */
6119 cls
= dp_netdev_pmd_lookup_dpcls(pmd
, in_port
);
6120 if (OVS_LIKELY(cls
)) {
6121 any_miss
= !dpcls_lookup(cls
, (const struct netdev_flow_key
**)keys
,
6122 rules
, cnt
, &lookup_cnt
);
6125 memset(rules
, 0, sizeof(rules
));
6127 if (OVS_UNLIKELY(any_miss
) && !fat_rwlock_tryrdlock(&dp
->upcall_rwlock
)) {
6128 uint64_t actions_stub
[512 / 8], slow_stub
[512 / 8];
6129 struct ofpbuf actions
, put_actions
;
6131 ofpbuf_use_stub(&actions
, actions_stub
, sizeof actions_stub
);
6132 ofpbuf_use_stub(&put_actions
, slow_stub
, sizeof slow_stub
);
6134 DP_PACKET_BATCH_FOR_EACH (i
, packet
, packets_
) {
6135 struct dp_netdev_flow
*netdev_flow
;
6137 if (OVS_LIKELY(rules
[i
])) {
6141 /* It's possible that an earlier slow path execution installed
6142 * a rule covering this flow. In this case, it's a lot cheaper
6143 * to catch it here than execute a miss. */
6144 netdev_flow
= dp_netdev_pmd_lookup_flow(pmd
, keys
[i
],
6147 lookup_cnt
+= add_lookup_cnt
;
6148 rules
[i
] = &netdev_flow
->cr
;
6152 int error
= handle_packet_upcall(pmd
, packet
, keys
[i
],
6153 &actions
, &put_actions
);
6155 if (OVS_UNLIKELY(error
)) {
6162 ofpbuf_uninit(&actions
);
6163 ofpbuf_uninit(&put_actions
);
6164 fat_rwlock_unlock(&dp
->upcall_rwlock
);
6165 } else if (OVS_UNLIKELY(any_miss
)) {
6166 DP_PACKET_BATCH_FOR_EACH (i
, packet
, packets_
) {
6167 if (OVS_UNLIKELY(!rules
[i
])) {
6168 dp_packet_delete(packet
);
6174 DP_PACKET_BATCH_FOR_EACH (i
, packet
, packets_
) {
6175 struct dp_netdev_flow
*flow
;
6176 /* Get the original order of this packet in received batch. */
6177 int recv_idx
= index_map
[i
];
6180 if (OVS_UNLIKELY(!rules
[i
])) {
6184 flow
= dp_netdev_flow_cast(rules
[i
]);
6185 uint32_t hash
= dp_netdev_flow_hash(&flow
->ufid
);
6186 smc_insert(pmd
, keys
[i
], hash
);
6188 emc_probabilistic_insert(pmd
, keys
[i
], flow
);
6189 /* Add these packets into the flow map in the same order
6192 tcp_flags
= miniflow_get_tcp_flags(&keys
[i
]->mf
);
6193 packet_enqueue_to_flow_map(packet
, flow
, tcp_flags
,
6194 flow_map
, recv_idx
);
6197 pmd_perf_update_counter(&pmd
->perf_stats
, PMD_STAT_MASKED_HIT
,
6198 cnt
- upcall_ok_cnt
- upcall_fail_cnt
);
6199 pmd_perf_update_counter(&pmd
->perf_stats
, PMD_STAT_MASKED_LOOKUP
,
6201 pmd_perf_update_counter(&pmd
->perf_stats
, PMD_STAT_MISS
,
6203 pmd_perf_update_counter(&pmd
->perf_stats
, PMD_STAT_LOST
,
6207 /* Packets enter the datapath from a port (or from recirculation) here.
6209 * When 'md_is_valid' is true the metadata in 'packets' are already valid.
6210 * When false the metadata in 'packets' need to be initialized. */
6212 dp_netdev_input__(struct dp_netdev_pmd_thread
*pmd
,
6213 struct dp_packet_batch
*packets
,
6214 bool md_is_valid
, odp_port_t port_no
)
6216 #if !defined(__CHECKER__) && !defined(_WIN32)
6217 const size_t PKT_ARRAY_SIZE
= dp_packet_batch_size(packets
);
6219 /* Sparse or MSVC doesn't like variable length array. */
6220 enum { PKT_ARRAY_SIZE
= NETDEV_MAX_BURST
};
6222 OVS_ALIGNED_VAR(CACHE_LINE_SIZE
)
6223 struct netdev_flow_key keys
[PKT_ARRAY_SIZE
];
6224 struct netdev_flow_key
*missed_keys
[PKT_ARRAY_SIZE
];
6225 struct packet_batch_per_flow batches
[PKT_ARRAY_SIZE
];
6227 struct dp_packet_flow_map flow_map
[PKT_ARRAY_SIZE
];
6228 uint8_t index_map
[PKT_ARRAY_SIZE
];
6234 dfc_processing(pmd
, packets
, keys
, missed_keys
, batches
, &n_batches
,
6235 flow_map
, &n_flows
, index_map
, md_is_valid
, port_no
);
6237 if (!dp_packet_batch_is_empty(packets
)) {
6238 /* Get ingress port from first packet's metadata. */
6239 in_port
= packets
->packets
[0]->md
.in_port
.odp_port
;
6240 fast_path_processing(pmd
, packets
, missed_keys
,
6241 flow_map
, index_map
, in_port
);
6244 /* Batch rest of packets which are in flow map. */
6245 for (i
= 0; i
< n_flows
; i
++) {
6246 struct dp_packet_flow_map
*map
= &flow_map
[i
];
6248 if (OVS_UNLIKELY(!map
->flow
)) {
6251 dp_netdev_queue_batches(map
->packet
, map
->flow
, map
->tcp_flags
,
6252 batches
, &n_batches
);
6255 /* All the flow batches need to be reset before any call to
6256 * packet_batch_per_flow_execute() as it could potentially trigger
6257 * recirculation. When a packet matching flow ‘j’ happens to be
6258 * recirculated, the nested call to dp_netdev_input__() could potentially
6259 * classify the packet as matching another flow - say 'k'. It could happen
6260 * that in the previous call to dp_netdev_input__() that same flow 'k' had
6261 * already its own batches[k] still waiting to be served. So if its
6262 * ‘batch’ member is not reset, the recirculated packet would be wrongly
6263 * appended to batches[k] of the 1st call to dp_netdev_input__(). */
6264 for (i
= 0; i
< n_batches
; i
++) {
6265 batches
[i
].flow
->batch
= NULL
;
6268 for (i
= 0; i
< n_batches
; i
++) {
6269 packet_batch_per_flow_execute(&batches
[i
], pmd
);
static void
dp_netdev_input(struct dp_netdev_pmd_thread *pmd,
                struct dp_packet_batch *packets,
                odp_port_t port_no)
{
    dp_netdev_input__(pmd, packets, false, port_no);
}

static void
dp_netdev_recirculate(struct dp_netdev_pmd_thread *pmd,
                      struct dp_packet_batch *packets)
{
    dp_netdev_input__(pmd, packets, true, 0);
}

struct dp_netdev_execute_aux {
    struct dp_netdev_pmd_thread *pmd;
    const struct flow *flow;
};

static void
dpif_netdev_register_dp_purge_cb(struct dpif *dpif, dp_purge_callback *cb,
                                 void *aux)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp->dp_purge_aux = aux;
    dp->dp_purge_cb = cb;
}

static void
dpif_netdev_register_upcall_cb(struct dpif *dpif, upcall_callback *cb,
                               void *aux)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp->upcall_aux = aux;
    dp->upcall_cb = cb;
}

static void
dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
                               bool purge)
{
    struct tx_port *tx;
    struct dp_netdev_port *port;
    long long interval;

    HMAP_FOR_EACH (tx, node, &pmd->send_port_cache) {
        if (!tx->port->dynamic_txqs) {
            continue;
        }
        interval = pmd->ctx.now - tx->last_used;
        if (tx->qid >= 0 && (purge || interval >= XPS_TIMEOUT)) {
            port = tx->port;
            ovs_mutex_lock(&port->txq_used_mutex);
            port->txq_used[tx->qid]--;
            ovs_mutex_unlock(&port->txq_used_mutex);
            tx->qid = -1;
        }
    }
}
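
/* Returns the tx queue id that this pmd thread should use for 'tx'.  A cached
 * qid that has been used within XPS_TIMEOUT is reused; otherwise the
 * least-used txq on the port is selected under 'txq_used_mutex'. */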
static int
dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread *pmd,
                           struct tx_port *tx)
{
    struct dp_netdev_port *port;
    long long interval;
    int i, min_cnt, min_qid;

    interval = pmd->ctx.now - tx->last_used;
    tx->last_used = pmd->ctx.now;

    if (OVS_LIKELY(tx->qid >= 0 && interval < XPS_TIMEOUT)) {
        return tx->qid;
    }

    port = tx->port;

    ovs_mutex_lock(&port->txq_used_mutex);
    if (tx->qid >= 0) {
        port->txq_used[tx->qid]--;
        tx->qid = -1;
    }

    min_cnt = -1;
    min_qid = 0;
    for (i = 0; i < netdev_n_txq(port->netdev); i++) {
        if (port->txq_used[i] < min_cnt || min_cnt == -1) {
            min_cnt = port->txq_used[i];
            min_qid = i;
        }
    }

    port->txq_used[min_qid]++;
    tx->qid = min_qid;

    ovs_mutex_unlock(&port->txq_used_mutex);

    dpif_netdev_xps_revalidate_pmd(pmd, false);

    VLOG_DBG("Core %d: New TX queue ID %d for port \'%s\'.",
             pmd->core_id, tx->qid, netdev_get_name(tx->port->netdev));
    return min_qid;
}
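
/* Per-pmd cached tx port lookups: 'tnl_port_cache' holds only tunnel ports,
 * while 'send_port_cache' holds every port this pmd may transmit on. */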
static struct tx_port *
pmd_tnl_port_cache_lookup(const struct dp_netdev_pmd_thread *pmd,
                          odp_port_t port_no)
{
    return tx_port_lookup(&pmd->tnl_port_cache, port_no);
}

static struct tx_port *
pmd_send_port_cache_lookup(const struct dp_netdev_pmd_thread *pmd,
                           odp_port_t port_no)
{
    return tx_port_lookup(&pmd->send_port_cache, port_no);
}
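
/* Pushes the tunnel header described by the OVS_ACTION_ATTR_TUNNEL_PUSH
 * attribute 'attr' onto every packet in 'batch', using the netdev of the
 * cached tunnel port.  On failure the batch is freed. */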
static int
push_tnl_action(const struct dp_netdev_pmd_thread *pmd,
                const struct nlattr *attr,
                struct dp_packet_batch *batch)
{
    struct tx_port *tun_port;
    const struct ovs_action_push_tnl *data;
    int err;

    data = nl_attr_get(attr);

    tun_port = pmd_tnl_port_cache_lookup(pmd, data->tnl_port);
    if (!tun_port) {
        err = -EINVAL;
        goto error;
    }
    err = netdev_push_header(tun_port->port->netdev, batch, data);
    if (!err) {
        return 0;
    }
error:
    dp_packet_delete_batch(batch, true);
    return err;
}
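
/* Performs an OVS_ACTION_ATTR_USERSPACE action on 'packet': issues a
 * DPIF_UC_ACTION upcall and executes whatever actions the upcall handler
 * returns in 'actions'. */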
static void
dp_execute_userspace_action(struct dp_netdev_pmd_thread *pmd,
                            struct dp_packet *packet, bool should_steal,
                            struct flow *flow, ovs_u128 *ufid,
                            struct ofpbuf *actions,
                            const struct nlattr *userdata)
{
    struct dp_packet_batch b;
    int error;

    ofpbuf_clear(actions);

    error = dp_netdev_upcall(pmd, packet, flow, NULL, ufid,
                             DPIF_UC_ACTION, userdata, actions,
                             NULL);
    if (!error || error == ENOSPC) {
        dp_packet_batch_init_packet(&b, packet);
        dp_netdev_execute_actions(pmd, &b, should_steal, flow,
                                  actions->data, actions->size);
    } else if (should_steal) {
        dp_packet_delete(packet);
    }
}
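
/* Callback passed to odp_execute_actions().  Handles the datapath-specific
 * actions (output, tunnel push/pop, userspace, recirc, ct, meter); generic
 * actions are executed by odp-execute itself, hence the OVS_NOT_REACHED()
 * cases below.  If 'should_steal' is true and an action does not consume the
 * batch, the packets are freed at the end. */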
static void
dp_execute_cb(void *aux_, struct dp_packet_batch *packets_,
              const struct nlattr *a, bool should_steal)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct dp_netdev_execute_aux *aux = aux_;
    uint32_t *depth = recirc_depth_get();
    struct dp_netdev_pmd_thread *pmd = aux->pmd;
    struct dp_netdev *dp = pmd->dp;
    int type = nl_attr_type(a);
    struct tx_port *p;

    switch ((enum ovs_action_attr)type) {
    case OVS_ACTION_ATTR_OUTPUT:
        p = pmd_send_port_cache_lookup(pmd, nl_attr_get_odp_port(a));
        if (OVS_LIKELY(p)) {
            struct dp_packet *packet;
            struct dp_packet_batch out;

            if (!should_steal) {
                dp_packet_batch_clone(&out, packets_);
                dp_packet_batch_reset_cutlen(packets_);
                packets_ = &out;
            }
            dp_packet_batch_apply_cutlen(packets_);

#ifdef DPDK_NETDEV
            if (OVS_UNLIKELY(!dp_packet_batch_is_empty(&p->output_pkts)
                             && packets_->packets[0]->source
                                != p->output_pkts.packets[0]->source)) {
                /* XXX: netdev-dpdk assumes that all packets in a single
                 *      output batch has the same source. Flush here to
                 *      avoid memory access issues. */
                dp_netdev_pmd_flush_output_on_port(pmd, p);
            }
#endif
            if (dp_packet_batch_size(&p->output_pkts)
                + dp_packet_batch_size(packets_) > NETDEV_MAX_BURST) {
                /* Flush here to avoid overflow. */
                dp_netdev_pmd_flush_output_on_port(pmd, p);
            }

            if (dp_packet_batch_is_empty(&p->output_pkts)) {
                pmd->n_output_batches++;
            }

            DP_PACKET_BATCH_FOR_EACH (i, packet, packets_) {
                p->output_pkts_rxqs[dp_packet_batch_size(&p->output_pkts)] =
                                                             pmd->ctx.last_rxq;
                dp_packet_batch_add(&p->output_pkts, packet);
            }
            return;
        }
        break;

    case OVS_ACTION_ATTR_TUNNEL_PUSH:
        if (should_steal) {
            /* We're requested to push tunnel header, but also we need to take
             * the ownership of these packets. Thus, we can avoid performing
             * the action, because the caller will not use the result anyway.
             * Just break to free the batch. */
            break;
        }
        dp_packet_batch_apply_cutlen(packets_);
        push_tnl_action(pmd, a, packets_);
        return;

    case OVS_ACTION_ATTR_TUNNEL_POP:
        if (*depth < MAX_RECIRC_DEPTH) {
            struct dp_packet_batch *orig_packets_ = packets_;
            odp_port_t portno = nl_attr_get_odp_port(a);

            p = pmd_tnl_port_cache_lookup(pmd, portno);
            if (p) {
                struct dp_packet_batch tnl_pkt;

                if (!should_steal) {
                    dp_packet_batch_clone(&tnl_pkt, packets_);
                    packets_ = &tnl_pkt;
                    dp_packet_batch_reset_cutlen(orig_packets_);
                }

                dp_packet_batch_apply_cutlen(packets_);

                netdev_pop_header(p->port->netdev, packets_);
                if (dp_packet_batch_is_empty(packets_)) {
                    return;
                }

                struct dp_packet *packet;
                DP_PACKET_BATCH_FOR_EACH (i, packet, packets_) {
                    packet->md.in_port.odp_port = portno;
                }

                (*depth)++;
                dp_netdev_recirculate(pmd, packets_);
                (*depth)--;
                return;
            }
        }
        break;

    case OVS_ACTION_ATTR_USERSPACE:
        if (!fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
            struct dp_packet_batch *orig_packets_ = packets_;
            const struct nlattr *userdata;
            struct dp_packet_batch usr_pkt;
            struct ofpbuf actions;
            struct flow flow;
            ovs_u128 ufid;
            bool clone = false;

            userdata = nl_attr_find_nested(a, OVS_USERSPACE_ATTR_USERDATA);
            ofpbuf_init(&actions, 0);

            if (packets_->trunc) {
                if (!should_steal) {
                    dp_packet_batch_clone(&usr_pkt, packets_);
                    packets_ = &usr_pkt;
                    clone = true;
                    dp_packet_batch_reset_cutlen(orig_packets_);
                }

                dp_packet_batch_apply_cutlen(packets_);
            }

            struct dp_packet *packet;
            DP_PACKET_BATCH_FOR_EACH (i, packet, packets_) {
                flow_extract(packet, &flow);
                dpif_flow_hash(dp->dpif, &flow, sizeof flow, &ufid);
                dp_execute_userspace_action(pmd, packet, should_steal, &flow,
                                            &ufid, &actions, userdata);
            }

            if (clone) {
                dp_packet_delete_batch(packets_, true);
            }

            ofpbuf_uninit(&actions);
            fat_rwlock_unlock(&dp->upcall_rwlock);

            return;
        }
        break;

    case OVS_ACTION_ATTR_RECIRC:
        if (*depth < MAX_RECIRC_DEPTH) {
            struct dp_packet_batch recirc_pkts;

            if (!should_steal) {
               dp_packet_batch_clone(&recirc_pkts, packets_);
               packets_ = &recirc_pkts;
            }

            struct dp_packet *packet;
            DP_PACKET_BATCH_FOR_EACH (i, packet, packets_) {
                packet->md.recirc_id = nl_attr_get_u32(a);
            }

            (*depth)++;
            dp_netdev_recirculate(pmd, packets_);
            (*depth)--;

            return;
        }

        VLOG_WARN("Packet dropped. Max recirculation depth exceeded.");
        break;

    case OVS_ACTION_ATTR_CT: {
        const struct nlattr *b;
        bool force = false;
        bool commit = false;
        unsigned int left;
        uint16_t zone = 0;
        const char *helper = NULL;
        const uint32_t *setmark = NULL;
        const struct ovs_key_ct_labels *setlabel = NULL;
        struct nat_action_info_t nat_action_info;
        struct nat_action_info_t *nat_action_info_ref = NULL;
        bool nat_config = false;

        NL_ATTR_FOR_EACH_UNSAFE (b, left, nl_attr_get(a),
                                 nl_attr_get_size(a)) {
            enum ovs_ct_attr sub_type = nl_attr_type(b);

            switch (sub_type) {
            case OVS_CT_ATTR_FORCE_COMMIT:
                force = true;
                /* fall through. */
            case OVS_CT_ATTR_COMMIT:
                commit = true;
                break;
            case OVS_CT_ATTR_ZONE:
                zone = nl_attr_get_u16(b);
                break;
            case OVS_CT_ATTR_HELPER:
                helper = nl_attr_get_string(b);
                break;
            case OVS_CT_ATTR_MARK:
                setmark = nl_attr_get(b);
                break;
            case OVS_CT_ATTR_LABELS:
                setlabel = nl_attr_get(b);
                break;
            case OVS_CT_ATTR_EVENTMASK:
                /* Silently ignored, as userspace datapath does not generate
                 * netlink events. */
                break;
            case OVS_CT_ATTR_NAT: {
                const struct nlattr *b_nest;
                unsigned int left_nest;
                bool ip_min_specified = false;
                bool proto_num_min_specified = false;
                bool ip_max_specified = false;
                bool proto_num_max_specified = false;
                memset(&nat_action_info, 0, sizeof nat_action_info);
                nat_action_info_ref = &nat_action_info;

                NL_NESTED_FOR_EACH_UNSAFE (b_nest, left_nest, b) {
                    enum ovs_nat_attr sub_type_nest = nl_attr_type(b_nest);

                    switch (sub_type_nest) {
                    case OVS_NAT_ATTR_SRC:
                    case OVS_NAT_ATTR_DST:
                        nat_config = true;
                        nat_action_info.nat_action |=
                            ((sub_type_nest == OVS_NAT_ATTR_SRC)
                                ? NAT_ACTION_SRC : NAT_ACTION_DST);
                        break;
                    case OVS_NAT_ATTR_IP_MIN:
                        memcpy(&nat_action_info.min_addr,
                               nl_attr_get(b_nest),
                               nl_attr_get_size(b_nest));
                        ip_min_specified = true;
                        break;
                    case OVS_NAT_ATTR_IP_MAX:
                        memcpy(&nat_action_info.max_addr,
                               nl_attr_get(b_nest),
                               nl_attr_get_size(b_nest));
                        ip_max_specified = true;
                        break;
                    case OVS_NAT_ATTR_PROTO_MIN:
                        nat_action_info.min_port = nl_attr_get_u16(b_nest);
                        proto_num_min_specified = true;
                        break;
                    case OVS_NAT_ATTR_PROTO_MAX:
                        nat_action_info.max_port = nl_attr_get_u16(b_nest);
                        proto_num_max_specified = true;
                        break;
                    case OVS_NAT_ATTR_PERSISTENT:
                    case OVS_NAT_ATTR_PROTO_HASH:
                    case OVS_NAT_ATTR_PROTO_RANDOM:
                        break;
                    case OVS_NAT_ATTR_UNSPEC:
                    case __OVS_NAT_ATTR_MAX:
                        OVS_NOT_REACHED();
                    }
                }

                if (ip_min_specified && !ip_max_specified) {
                    nat_action_info.max_addr = nat_action_info.min_addr;
                }
                if (proto_num_min_specified && !proto_num_max_specified) {
                    nat_action_info.max_port = nat_action_info.min_port;
                }
                if (proto_num_min_specified || proto_num_max_specified) {
                    if (nat_action_info.nat_action & NAT_ACTION_SRC) {
                        nat_action_info.nat_action |= NAT_ACTION_SRC_PORT;
                    } else if (nat_action_info.nat_action & NAT_ACTION_DST) {
                        nat_action_info.nat_action |= NAT_ACTION_DST_PORT;
                    }
                }
                break;
            }
            case OVS_CT_ATTR_UNSPEC:
            case __OVS_CT_ATTR_MAX:
                OVS_NOT_REACHED();
            }
        }

        /* We won't be able to function properly in this case, hence
         * complain loudly. */
        if (nat_config && !commit) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
            VLOG_WARN_RL(&rl, "NAT specified without commit.");
        }

        conntrack_execute(&dp->conntrack, packets_, aux->flow->dl_type, force,
                          commit, zone, setmark, setlabel, aux->flow->tp_src,
                          aux->flow->tp_dst, helper, nat_action_info_ref,
                          pmd->ctx.now / 1000);
        break;
    }

    case OVS_ACTION_ATTR_METER:
        dp_netdev_run_meter(pmd->dp, packets_, nl_attr_get_u32(a),
                            pmd->ctx.now);
        break;

    case OVS_ACTION_ATTR_PUSH_VLAN:
    case OVS_ACTION_ATTR_POP_VLAN:
    case OVS_ACTION_ATTR_PUSH_MPLS:
    case OVS_ACTION_ATTR_POP_MPLS:
    case OVS_ACTION_ATTR_SET:
    case OVS_ACTION_ATTR_SET_MASKED:
    case OVS_ACTION_ATTR_SAMPLE:
    case OVS_ACTION_ATTR_HASH:
    case OVS_ACTION_ATTR_UNSPEC:
    case OVS_ACTION_ATTR_TRUNC:
    case OVS_ACTION_ATTR_PUSH_ETH:
    case OVS_ACTION_ATTR_POP_ETH:
    case OVS_ACTION_ATTR_CLONE:
    case OVS_ACTION_ATTR_PUSH_NSH:
    case OVS_ACTION_ATTR_POP_NSH:
    case OVS_ACTION_ATTR_CT_CLEAR:
    case __OVS_ACTION_ATTR_MAX:
        OVS_NOT_REACHED();
    }

    dp_packet_delete_batch(packets_, should_steal);
}
static void
dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
                          struct dp_packet_batch *packets,
                          bool should_steal, const struct flow *flow,
                          const struct nlattr *actions, size_t actions_len)
{
    struct dp_netdev_execute_aux aux = { pmd, flow };

    odp_execute_actions(&aux, packets, should_steal, actions,
                        actions_len, dp_execute_cb);
}

struct dp_netdev_ct_dump {
    struct ct_dpif_dump_state up;
    struct conntrack_dump dump;
    struct conntrack *ct;
    struct dp_netdev *dp;
};
static int
dpif_netdev_ct_dump_start(struct dpif *dpif, struct ct_dpif_dump_state **dump_,
                          const uint16_t *pzone, int *ptot_bkts)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_ct_dump *dump;

    dump = xzalloc(sizeof *dump);
    dump->dp = dp;
    dump->ct = &dp->conntrack;

    conntrack_dump_start(&dp->conntrack, &dump->dump, pzone, ptot_bkts);

    *dump_ = &dump->up;

    return 0;
}

static int
dpif_netdev_ct_dump_next(struct dpif *dpif OVS_UNUSED,
                         struct ct_dpif_dump_state *dump_,
                         struct ct_dpif_entry *entry)
{
    struct dp_netdev_ct_dump *dump;

    INIT_CONTAINER(dump, dump_, up);

    return conntrack_dump_next(&dump->dump, entry);
}

static int
dpif_netdev_ct_dump_done(struct dpif *dpif OVS_UNUSED,
                         struct ct_dpif_dump_state *dump_)
{
    struct dp_netdev_ct_dump *dump;
    int err;

    INIT_CONTAINER(dump, dump_, up);

    err = conntrack_dump_done(&dump->dump);

    free(dump);

    return err;
}

static int
dpif_netdev_ct_flush(struct dpif *dpif, const uint16_t *zone,
                     const struct ct_dpif_tuple *tuple)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    if (tuple) {
        return conntrack_flush_tuple(&dp->conntrack, tuple, zone ? *zone : 0);
    }
    return conntrack_flush(&dp->conntrack, zone);
}

static int
dpif_netdev_ct_set_maxconns(struct dpif *dpif, uint32_t maxconns)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    return conntrack_set_maxconns(&dp->conntrack, maxconns);
}

static int
dpif_netdev_ct_get_maxconns(struct dpif *dpif, uint32_t *maxconns)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    return conntrack_get_maxconns(&dp->conntrack, maxconns);
}

static int
dpif_netdev_ct_get_nconns(struct dpif *dpif, uint32_t *nconns)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    return conntrack_get_nconns(&dp->conntrack, nconns);
}
const struct dpif_class dpif_netdev_class = {
    "netdev",
    dpif_netdev_init,
    dpif_netdev_enumerate,
    dpif_netdev_port_open_type,
    dpif_netdev_open,
    dpif_netdev_close,
    dpif_netdev_destroy,
    dpif_netdev_run,
    dpif_netdev_wait,
    dpif_netdev_get_stats,
    dpif_netdev_port_add,
    dpif_netdev_port_del,
    dpif_netdev_port_set_config,
    dpif_netdev_port_query_by_number,
    dpif_netdev_port_query_by_name,
    NULL,                       /* port_get_pid */
    dpif_netdev_port_dump_start,
    dpif_netdev_port_dump_next,
    dpif_netdev_port_dump_done,
    dpif_netdev_port_poll,
    dpif_netdev_port_poll_wait,
    dpif_netdev_flow_flush,
    dpif_netdev_flow_dump_create,
    dpif_netdev_flow_dump_destroy,
    dpif_netdev_flow_dump_thread_create,
    dpif_netdev_flow_dump_thread_destroy,
    dpif_netdev_flow_dump_next,
    dpif_netdev_operate,
    NULL,                       /* recv_set */
    NULL,                       /* handlers_set */
    dpif_netdev_set_config,
    dpif_netdev_queue_to_priority,
    NULL,                       /* recv */
    NULL,                       /* recv_wait */
    NULL,                       /* recv_purge */
    dpif_netdev_register_dp_purge_cb,
    dpif_netdev_register_upcall_cb,
    dpif_netdev_enable_upcall,
    dpif_netdev_disable_upcall,
    dpif_netdev_get_datapath_version,
    dpif_netdev_ct_dump_start,
    dpif_netdev_ct_dump_next,
    dpif_netdev_ct_dump_done,
    dpif_netdev_ct_flush,
    dpif_netdev_ct_set_maxconns,
    dpif_netdev_ct_get_maxconns,
    dpif_netdev_ct_get_nconns,
    NULL,                       /* ct_set_limits */
    NULL,                       /* ct_get_limits */
    NULL,                       /* ct_del_limits */
    dpif_netdev_meter_get_features,
    dpif_netdev_meter_set,
    dpif_netdev_meter_get,
    dpif_netdev_meter_del,
};
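
/* unixctl handler for "dpif-dummy/change-port-number": re-inserts an existing
 * port of a dummy datapath under a new port number and reconfigures the
 * datapath. */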
static void
dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED,
                              const char *argv[], void *aux OVS_UNUSED)
{
    struct dp_netdev_port *port;
    struct dp_netdev *dp;
    odp_port_t port_no;

    ovs_mutex_lock(&dp_netdev_mutex);
    dp = shash_find_data(&dp_netdevs, argv[1]);
    if (!dp || !dpif_netdev_class_is_dummy(dp->class)) {
        ovs_mutex_unlock(&dp_netdev_mutex);
        unixctl_command_reply_error(conn, "unknown datapath or not a dummy");
        return;
    }
    ovs_refcount_ref(&dp->ref_cnt);
    ovs_mutex_unlock(&dp_netdev_mutex);

    ovs_mutex_lock(&dp->port_mutex);
    if (get_port_by_name(dp, argv[2], &port)) {
        unixctl_command_reply_error(conn, "unknown port");
        goto exit;
    }

    port_no = u32_to_odp(atoi(argv[3]));
    if (!port_no || port_no == ODPP_NONE) {
        unixctl_command_reply_error(conn, "bad port number");
        goto exit;
    }
    if (dp_netdev_lookup_port(dp, port_no)) {
        unixctl_command_reply_error(conn, "port number already in use");
        goto exit;
    }

    /* Remove port. */
    hmap_remove(&dp->ports, &port->node);
    reconfigure_datapath(dp);

    /* Reinsert with new port number. */
    port->port_no = port_no;
    hmap_insert(&dp->ports, &port->node, hash_port_no(port_no));
    reconfigure_datapath(dp);

    seq_change(dp->port_seq);
    unixctl_command_reply(conn, NULL);

exit:
    ovs_mutex_unlock(&dp->port_mutex);
    dp_netdev_unref(dp);
}

static void
dpif_dummy_register__(const char *type)
{
    struct dpif_class *class;

    class = xmalloc(sizeof *class);
    *class = dpif_netdev_class;
    class->type = xstrdup(type);
    dp_register_provider(class);
}

static void
dpif_dummy_override(const char *type)
{
    int error;

    /*
     * Ignore EAFNOSUPPORT to allow --enable-dummy=system with
     * a userland-only build.  It's useful for testsuite.
     */
    error = dp_unregister_provider(type);
    if (error == 0 || error == EAFNOSUPPORT) {
        dpif_dummy_register__(type);
    }
}

void
dpif_dummy_register(enum dummy_level level)
{
    if (level == DUMMY_OVERRIDE_ALL) {
        struct sset types;
        const char *type;

        sset_init(&types);
        dp_enumerate_types(&types);
        SSET_FOR_EACH (type, &types) {
            dpif_dummy_override(type);
        }
        sset_destroy(&types);
    } else if (level == DUMMY_OVERRIDE_SYSTEM) {
        dpif_dummy_override("system");
    }

    dpif_dummy_register__("dummy");

    unixctl_command_register("dpif-dummy/change-port-number",
                             "dp port new-number",
                             3, 3, dpif_dummy_change_port_number, NULL);
}

/* Datapath Classifier. */

/* A set of rules that all have the same fields wildcarded. */
struct dpcls_subtable {
    /* The fields are only used by writers. */
    struct cmap_node cmap_node OVS_GUARDED; /* Within dpcls 'subtables_map'. */

    /* These fields are accessed by readers. */
    struct cmap rules;           /* Contains "struct dpcls_rule"s. */
    uint32_t hit_cnt;            /* Number of match hits in subtable in current
                                    optimization interval. */
    struct netdev_flow_key mask; /* Wildcards for fields (const). */
    /* 'mask' must be the last field, additional space is allocated here. */
};

/* Initializes 'cls' as a classifier that initially contains no classification
 * rules. */
static void
dpcls_init(struct dpcls *cls)
{
    cmap_init(&cls->subtables_map);
    pvector_init(&cls->subtables);
}

static void
dpcls_destroy_subtable(struct dpcls *cls, struct dpcls_subtable *subtable)
{
    VLOG_DBG("Destroying subtable %p for in_port %d", subtable, cls->in_port);
    pvector_remove(&cls->subtables, subtable);
    cmap_remove(&cls->subtables_map, &subtable->cmap_node,
                subtable->mask.hash);
    cmap_destroy(&subtable->rules);
    ovsrcu_postpone(free, subtable);
}

/* Destroys 'cls'.  Rules within 'cls', if any, are not freed; this is the
 * caller's responsibility.
 * May only be called after all the readers have been terminated. */
static void
dpcls_destroy(struct dpcls *cls)
{
    if (cls) {
        struct dpcls_subtable *subtable;

        CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
            ovs_assert(cmap_count(&subtable->rules) == 0);
            dpcls_destroy_subtable(cls, subtable);
        }
        cmap_destroy(&cls->subtables_map);
        pvector_destroy(&cls->subtables);
    }
}
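
/* Allocates a new subtable with enough room to store 'mask' inline and adds
 * it to 'cls''s cmap and priority vector. */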
static struct dpcls_subtable *
dpcls_create_subtable(struct dpcls *cls, const struct netdev_flow_key *mask)
{
    struct dpcls_subtable *subtable;

    /* Need to add one. */
    subtable = xmalloc(sizeof *subtable
                       - sizeof subtable->mask.mf + mask->len);
    cmap_init(&subtable->rules);
    subtable->hit_cnt = 0;
    netdev_flow_key_clone(&subtable->mask, mask);
    cmap_insert(&cls->subtables_map, &subtable->cmap_node, mask->hash);
    /* Add the new subtable at the end of the pvector (with no hits yet) */
    pvector_insert(&cls->subtables, subtable, 0);
    VLOG_DBG("Creating %"PRIuSIZE". subtable %p for in_port %d",
             cmap_count(&cls->subtables_map), subtable, cls->in_port);
    pvector_publish(&cls->subtables);

    return subtable;
}

static inline struct dpcls_subtable *
dpcls_find_subtable(struct dpcls *cls, const struct netdev_flow_key *mask)
{
    struct dpcls_subtable *subtable;

    CMAP_FOR_EACH_WITH_HASH (subtable, cmap_node, mask->hash,
                             &cls->subtables_map) {
        if (netdev_flow_key_equal(&subtable->mask, mask)) {
            return subtable;
        }
    }
    return dpcls_create_subtable(cls, mask);
}

/* Periodically sort the dpcls subtable vectors according to hit counts */
static void
dpcls_sort_subtable_vector(struct dpcls *cls)
{
    struct pvector *pvec = &cls->subtables;
    struct dpcls_subtable *subtable;

    PVECTOR_FOR_EACH (subtable, pvec) {
        pvector_change_priority(pvec, subtable, subtable->hit_cnt);
        subtable->hit_cnt = 0;
    }
    pvector_publish(pvec);
}
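
/* Periodic housekeeping for a pmd thread: stores per-rxq cycle measurements
 * at each PMD_RXQ_INTERVAL_LEN boundary and, every DPCLS_OPTIMIZATION_INTERVAL,
 * re-sorts each classifier's subtables by hit count. */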
static inline void
dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
                           struct polled_queue *poll_list, int poll_cnt)
{
    struct dpcls *cls;

    if (pmd->ctx.now > pmd->rxq_next_cycle_store) {
        uint64_t curr_tsc;
        /* Get the cycles that were used to process each queue and store. */
        for (unsigned i = 0; i < poll_cnt; i++) {
            uint64_t rxq_cyc_curr = dp_netdev_rxq_get_cycles(poll_list[i].rxq,
                                                        RXQ_CYCLES_PROC_CURR);
            dp_netdev_rxq_set_intrvl_cycles(poll_list[i].rxq, rxq_cyc_curr);
            dp_netdev_rxq_set_cycles(poll_list[i].rxq, RXQ_CYCLES_PROC_CURR,
                                     0);
        }
        curr_tsc = cycles_counter_update(&pmd->perf_stats);
        if (pmd->intrvl_tsc_prev) {
            /* There is a prev timestamp, store a new intrvl cycle count. */
            atomic_store_relaxed(&pmd->intrvl_cycles,
                                 curr_tsc - pmd->intrvl_tsc_prev);
        }
        pmd->intrvl_tsc_prev = curr_tsc;
        /* Start new measuring interval */
        pmd->rxq_next_cycle_store = pmd->ctx.now + PMD_RXQ_INTERVAL_LEN;
    }

    if (pmd->ctx.now > pmd->next_optimization) {
        /* Try to obtain the flow lock to block out revalidator threads.
         * If not possible, just try next time. */
        if (!ovs_mutex_trylock(&pmd->flow_mutex)) {
            /* Optimize each classifier */
            CMAP_FOR_EACH (cls, node, &pmd->classifiers) {
                dpcls_sort_subtable_vector(cls);
            }
            ovs_mutex_unlock(&pmd->flow_mutex);
            /* Start new measuring interval */
            pmd->next_optimization = pmd->ctx.now
                                     + DPCLS_OPTIMIZATION_INTERVAL;
        }
    }
}

/* Insert 'rule' into 'cls'. */
static void
dpcls_insert(struct dpcls *cls, struct dpcls_rule *rule,
             const struct netdev_flow_key *mask)
{
    struct dpcls_subtable *subtable = dpcls_find_subtable(cls, mask);

    /* Refer to subtable's mask, also for later removal. */
    rule->mask = &subtable->mask;
    cmap_insert(&subtable->rules, &rule->cmap_node, rule->flow.hash);
}

/* Removes 'rule' from 'cls', also destructing the 'rule'. */
static void
dpcls_remove(struct dpcls *cls, struct dpcls_rule *rule)
{
    struct dpcls_subtable *subtable;

    ovs_assert(rule->mask);

    /* Get subtable from reference in rule->mask. */
    INIT_CONTAINER(subtable, rule->mask, mask);
    if (cmap_remove(&subtable->rules, &rule->cmap_node, rule->flow.hash)
        == 0) {
        /* Delete empty subtable. */
        dpcls_destroy_subtable(cls, subtable);
        pvector_publish(&cls->subtables);
    }
}

/* Returns true if 'target' satisfies 'key' in 'mask', that is, if for each
 * 1-bit in 'mask' the corresponding bits in 'key' and 'target' are the
 * same. */
static bool
dpcls_rule_matches_key(const struct dpcls_rule *rule,
                       const struct netdev_flow_key *target)
{
    const uint64_t *keyp = miniflow_get_values(&rule->flow.mf);
    const uint64_t *maskp = miniflow_get_values(&rule->mask->mf);
    uint64_t value;

    NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, target, rule->flow.mf.map) {
        if (OVS_UNLIKELY((value & *maskp++) != *keyp++)) {
            return false;
        }
    }
    return true;
}

/* For each miniflow in 'keys' performs a classifier lookup writing the result
 * into the corresponding slot in 'rules'.  If a particular entry in 'keys' is
 * NULL it is skipped.
 *
 * This function is optimized for use in the userspace datapath and therefore
 * does not implement a lot of features available in the standard
 * classifier_lookup() function.  Specifically, it does not implement
 * priorities, instead returning any rule which matches the flow.
 *
 * Returns true if all miniflows found a corresponding rule. */
static bool
dpcls_lookup(struct dpcls *cls, const struct netdev_flow_key *keys[],
             struct dpcls_rule **rules, const size_t cnt,
             int *num_lookups_p)
{
    /* The received 'cnt' miniflows are the search-keys that will be processed
     * to find a matching entry into the available subtables.
     * The number of bits in map_type is equal to NETDEV_MAX_BURST. */
    typedef uint32_t map_type;
#define MAP_BITS (sizeof(map_type) * CHAR_BIT)
    BUILD_ASSERT_DECL(MAP_BITS >= NETDEV_MAX_BURST);

    struct dpcls_subtable *subtable;

    map_type keys_map = TYPE_MAXIMUM(map_type); /* Set all bits. */
    map_type found_map;
    uint32_t hashes[MAP_BITS];
    const struct cmap_node *nodes[MAP_BITS];

    if (cnt != MAP_BITS) {
        keys_map >>= MAP_BITS - cnt; /* Clear extra bits. */
    }
    memset(rules, 0, cnt * sizeof *rules);

    int lookups_match = 0, subtable_pos = 1;

    /* The Datapath classifier - aka dpcls - is composed of subtables.
     * Subtables are dynamically created as needed when new rules are inserted.
     * Each subtable collects rules with matches on a specific subset of packet
     * fields as defined by the subtable's mask.  We proceed to process every
     * search-key against each subtable, but when a match is found for a
     * search-key, the search for that key can stop because the rules are
     * non-overlapping. */
    PVECTOR_FOR_EACH (subtable, &cls->subtables) {
        int i;

        /* Compute hashes for the remaining keys.  Each search-key is
         * masked with the subtable's mask to avoid hashing the wildcarded
         * bits. */
        ULLONG_FOR_EACH_1(i, keys_map) {
            hashes[i] = netdev_flow_key_hash_in_mask(keys[i],
                                                     &subtable->mask);
        }
        /* Lookup. */
        found_map = cmap_find_batch(&subtable->rules, keys_map, hashes, nodes);
        /* Check results.  When the i-th bit of found_map is set, it means
         * that a set of nodes with a matching hash value was found for the
         * i-th search-key.  Due to possible hash collisions we need to check
         * which of the found rules, if any, really matches our masked
         * search-key. */
        ULLONG_FOR_EACH_1(i, found_map) {
            struct dpcls_rule *rule;

            CMAP_NODE_FOR_EACH (rule, cmap_node, nodes[i]) {
                if (OVS_LIKELY(dpcls_rule_matches_key(rule, keys[i]))) {
                    rules[i] = rule;
                    /* Even at 20 Mpps the 32-bit hit_cnt cannot wrap
                     * within one second optimization interval. */
                    subtable->hit_cnt++;
                    lookups_match += subtable_pos;
                    goto next;
                }
            }
            /* None of the found rules was a match.  Reset the i-th bit to
             * keep searching this key in the next subtable. */
            ULLONG_SET0(found_map, i);  /* Did not match. */
        next:
            ;                     /* Keep Sparse happy. */
        }
        keys_map &= ~found_map;             /* Clear the found rules. */
        if (!keys_map) {
            if (num_lookups_p) {
                *num_lookups_p = lookups_match;
            }
            return true;              /* All found. */
        }
        subtable_pos++;
    }
    if (num_lookups_p) {
        *num_lookups_p = lookups_match;
    }
    return false;                     /* Some misses. */
}