2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 #include "dpif-netdev.h"
24 #include <netinet/in.h>
25 #include <sys/socket.h>
30 #include <sys/ioctl.h>
36 #include "dp-packet.h"
38 #include "dpif-provider.h"
40 #include "dynamic-string.h"
41 #include "fat-rwlock.h"
47 #include "meta-flow.h"
49 #include "netdev-dpdk.h"
50 #include "netdev-vport.h"
52 #include "odp-execute.h"
54 #include "ofp-print.h"
59 #include "poll-loop.h"
66 #include "tnl-arp-cache.h"
69 #include "openvswitch/vlog.h"
71 VLOG_DEFINE_THIS_MODULE(dpif_netdev
);
73 #define FLOW_DUMP_MAX_BATCH 50
74 /* Use per thread recirc_depth to prevent recirculation loop. */
75 #define MAX_RECIRC_DEPTH 5
76 DEFINE_STATIC_PER_THREAD_DATA(uint32_t, recirc_depth
, 0)
78 /* Configuration parameters. */
79 enum { MAX_FLOWS
= 65536 }; /* Maximum number of flows in flow table. */
81 /* Protects against changes to 'dp_netdevs'. */
82 static struct ovs_mutex dp_netdev_mutex
= OVS_MUTEX_INITIALIZER
;
84 /* Contains all 'struct dp_netdev's. */
85 static struct shash dp_netdevs
OVS_GUARDED_BY(dp_netdev_mutex
)
86 = SHASH_INITIALIZER(&dp_netdevs
);
88 static struct vlog_rate_limit upcall_rl
= VLOG_RATE_LIMIT_INIT(600, 600);
90 /* Stores a miniflow with inline values */
92 struct netdev_flow_key
{
93 uint32_t hash
; /* Hash function differs for different users. */
94 uint32_t len
; /* Length of the following miniflow (incl. map). */
96 uint64_t buf
[FLOW_MAX_PACKET_U64S
- MINI_N_INLINE
];
99 /* Exact match cache for frequently used flows
101 * The cache uses a 32-bit hash of the packet (which can be the RSS hash) to
102 * search its entries for a miniflow that matches exactly the miniflow of the
103 * packet. It stores the 'dpcls_rule' (rule) that matches the miniflow.
105 * A cache entry holds a reference to its 'dp_netdev_flow'.
107 * A miniflow with a given hash can be in one of EM_FLOW_HASH_SEGS different
108 * entries. The 32-bit hash is split into EM_FLOW_HASH_SEGS values (each of
109 * them is EM_FLOW_HASH_SHIFT bits wide and the remainder is thrown away). Each
110 * value is the index of a cache entry where the miniflow could be.
116 * Each pmd_thread has its own private exact match cache.
117 * If dp_netdev_input is not called from a pmd thread, a mutex is used.
120 #define EM_FLOW_HASH_SHIFT 13
121 #define EM_FLOW_HASH_ENTRIES (1u << EM_FLOW_HASH_SHIFT)
122 #define EM_FLOW_HASH_MASK (EM_FLOW_HASH_ENTRIES - 1)
123 #define EM_FLOW_HASH_SEGS 2
126 struct dp_netdev_flow
*flow
;
127 struct netdev_flow_key key
; /* key.hash used for emc hash value. */
131 struct emc_entry entries
[EM_FLOW_HASH_ENTRIES
];
132 int sweep_idx
; /* For emc_cache_slow_sweep(). */
135 /* Iterate in the exact match cache through every entry that might contain a
136 * miniflow with hash 'HASH'. */
137 #define EMC_FOR_EACH_POS_WITH_HASH(EMC, CURRENT_ENTRY, HASH) \
138 for (uint32_t i__ = 0, srch_hash__ = (HASH); \
139 (CURRENT_ENTRY) = &(EMC)->entries[srch_hash__ & EM_FLOW_HASH_MASK], \
140 i__ < EM_FLOW_HASH_SEGS; \
141 i__++, srch_hash__ >>= EM_FLOW_HASH_SHIFT)
143 /* Simple non-wildcarding single-priority classifier. */
146 struct cmap subtables_map
;
147 struct pvector subtables
;
150 /* A rule to be inserted to the classifier. */
152 struct cmap_node cmap_node
; /* Within struct dpcls_subtable 'rules'. */
153 struct netdev_flow_key
*mask
; /* Subtable's mask. */
154 struct netdev_flow_key flow
; /* Matching key. */
155 /* 'flow' must be the last field, additional space is allocated here. */
158 static void dpcls_init(struct dpcls
*);
159 static void dpcls_destroy(struct dpcls
*);
160 static void dpcls_insert(struct dpcls
*, struct dpcls_rule
*,
161 const struct netdev_flow_key
*mask
);
162 static void dpcls_remove(struct dpcls
*, struct dpcls_rule
*);
163 static bool dpcls_lookup(const struct dpcls
*cls
,
164 const struct netdev_flow_key keys
[],
165 struct dpcls_rule
**rules
, size_t cnt
);
167 /* Datapath based on the network device interface from netdev.h.
173 * Some members, marked 'const', are immutable. Accessing other members
174 * requires synchronization, as noted in more detail below.
176 * Acquisition order is, from outermost to innermost:
178 * dp_netdev_mutex (global)
182 const struct dpif_class
*const class;
183 const char *const name
;
185 struct ovs_refcount ref_cnt
;
186 atomic_flag destroyed
;
190 * Protected by RCU. Take the mutex to add or remove ports. */
191 struct ovs_mutex port_mutex
;
193 struct seq
*port_seq
; /* Incremented whenever a port changes. */
195 /* Protects access to ofproto-dpif-upcall interface during revalidator
196 * thread synchronization. */
197 struct fat_rwlock upcall_rwlock
;
198 upcall_callback
*upcall_cb
; /* Callback function for executing upcalls. */
201 /* Stores all 'struct dp_netdev_pmd_thread's. */
202 struct cmap poll_threads
;
204 /* Protects the access of the 'struct dp_netdev_pmd_thread'
205 * instance for non-pmd thread. */
206 struct ovs_mutex non_pmd_mutex
;
208 /* Each pmd thread will store its pointer to
209 * 'struct dp_netdev_pmd_thread' in 'per_pmd_key'. */
210 ovsthread_key_t per_pmd_key
;
212 /* Number of rx queues for each dpdk interface and the cpu mask
213 * for pin of pmd threads. */
216 uint64_t last_tnl_conf_seq
;
219 static struct dp_netdev_port
*dp_netdev_lookup_port(const struct dp_netdev
*dp
,
223 DP_STAT_EXACT_HIT
, /* Packets that had an exact match (emc). */
224 DP_STAT_MASKED_HIT
, /* Packets that matched in the flow table. */
225 DP_STAT_MISS
, /* Packets that did not match. */
226 DP_STAT_LOST
, /* Packets not passed up to the client. */
230 enum pmd_cycles_counter_type
{
231 PMD_CYCLES_POLLING
, /* Cycles spent polling NICs. */
232 PMD_CYCLES_PROCESSING
, /* Cycles spent processing packets */
236 /* A port in a netdev-based datapath. */
237 struct dp_netdev_port
{
238 struct pkt_metadata md
;
239 struct netdev
*netdev
;
240 struct cmap_node node
; /* Node in dp_netdev's 'ports'. */
241 struct netdev_saved_flags
*sf
;
242 struct netdev_rxq
**rxq
;
243 struct ovs_refcount ref_cnt
;
244 char *type
; /* Port type as requested by user. */
247 /* Contained by struct dp_netdev_flow's 'stats' member. */
248 struct dp_netdev_flow_stats
{
249 atomic_llong used
; /* Last used time, in monotonic msecs. */
250 atomic_ullong packet_count
; /* Number of packets matched. */
251 atomic_ullong byte_count
; /* Number of bytes matched. */
252 atomic_uint16_t tcp_flags
; /* Bitwise-OR of seen tcp_flags values. */
255 /* A flow in 'dp_netdev_pmd_thread's 'flow_table'.
261 * Except near the beginning or ending of its lifespan, rule 'rule' belongs to
262 * its pmd thread's classifier. The text below calls this classifier 'cls'.
267 * The thread safety rules described here for "struct dp_netdev_flow" are
268 * motivated by two goals:
270 * - Prevent threads that read members of "struct dp_netdev_flow" from
271 * reading bad data due to changes by some thread concurrently modifying
274 * - Prevent two threads making changes to members of a given "struct
275 * dp_netdev_flow" from interfering with each other.
281 * A flow 'flow' may be accessed without a risk of being freed during an RCU
282 * grace period. Code that needs to hold onto a flow for a while
283 * should try incrementing 'flow->ref_cnt' with dp_netdev_flow_ref().
285 * 'flow->ref_cnt' protects 'flow' from being freed. It doesn't protect the
286 * flow from being deleted from 'cls' and it doesn't protect members of 'flow'
289 * Some members, marked 'const', are immutable. Accessing other members
290 * requires synchronization, as noted in more detail below.
292 struct dp_netdev_flow
{
293 const struct flow flow
; /* Unmasked flow that created this entry. */
294 /* Hash table index by unmasked flow. */
295 const struct cmap_node node
; /* In owning dp_netdev_pmd_thread's */
297 const ovs_u128 ufid
; /* Unique flow identifier. */
298 const unsigned pmd_id
; /* The 'core_id' of pmd thread owning this */
301 /* Number of references.
302 * The classifier owns one reference.
303 * Any thread trying to keep a rule from being freed should hold its own
305 struct ovs_refcount ref_cnt
;
310 struct dp_netdev_flow_stats stats
;
313 OVSRCU_TYPE(struct dp_netdev_actions
*) actions
;
315 /* While processing a group of input packets, the datapath uses the next
316 * member to store a pointer to the output batch for the flow. It is
317 * reset after the batch has been sent out (See dp_netdev_queue_batches(),
318 * packet_batch_init() and packet_batch_execute()). */
319 struct packet_batch
*batch
;
321 /* Packet classification. */
322 struct dpcls_rule cr
; /* In owning dp_netdev's 'cls'. */
323 /* 'cr' must be the last member. */
326 static void dp_netdev_flow_unref(struct dp_netdev_flow
*);
327 static bool dp_netdev_flow_ref(struct dp_netdev_flow
*);
328 static int dpif_netdev_flow_from_nlattrs(const struct nlattr
*, uint32_t,
331 /* A set of datapath actions within a "struct dp_netdev_flow".
337 * A struct dp_netdev_actions 'actions' is protected with RCU. */
338 struct dp_netdev_actions
{
339 /* These members are immutable: they do not change during the struct's
341 unsigned int size
; /* Size of 'actions', in bytes. */
342 struct nlattr actions
[]; /* Sequence of OVS_ACTION_ATTR_* attributes. */
345 struct dp_netdev_actions
*dp_netdev_actions_create(const struct nlattr
*,
347 struct dp_netdev_actions
*dp_netdev_flow_get_actions(
348 const struct dp_netdev_flow
*);
349 static void dp_netdev_actions_free(struct dp_netdev_actions
*);
351 /* Contained by struct dp_netdev_pmd_thread's 'stats' member. */
352 struct dp_netdev_pmd_stats
{
353 /* Indexed by DP_STAT_*. */
354 atomic_ullong n
[DP_N_STATS
];
357 /* Contained by struct dp_netdev_pmd_thread's 'cycle' member. */
358 struct dp_netdev_pmd_cycles
{
359 /* Indexed by PMD_CYCLES_*. */
360 atomic_ullong n
[PMD_N_CYCLES
];
363 /* PMD: Poll modes drivers. PMD accesses devices via polling to eliminate
364 * the performance overhead of interrupt processing. Therefore netdev can
365 * not implement rx-wait for these devices. dpif-netdev needs to poll
366 * these device to check for recv buffer. pmd-thread does polling for
367 * devices assigned to itself.
369 * DPDK used PMD for accessing NIC.
371 * Note, instance with cpu core id NON_PMD_CORE_ID will be reserved for
372 * I/O of all non-pmd threads. There will be no actual thread created
375 * Each struct has its own flow table and classifier. Packets received
376 * from managed ports are looked up in the corresponding pmd thread's
377 * flow table, and are executed with the found actions.
379 struct dp_netdev_pmd_thread
{
380 struct dp_netdev
*dp
;
381 struct ovs_refcount ref_cnt
; /* Every reference must be refcount'ed. */
382 struct cmap_node node
; /* In 'dp->poll_threads'. */
384 pthread_cond_t cond
; /* For synchronizing pmd thread reload. */
385 struct ovs_mutex cond_mutex
; /* Mutex for condition variable. */
387 /* Per thread exact-match cache. Note, the instance for cpu core
388 * NON_PMD_CORE_ID can be accessed by multiple threads, and thusly
389 * need to be protected (e.g. by 'dp_netdev_mutex'). All other
390 * instances will only be accessed by its own pmd thread. */
391 struct emc_cache flow_cache
;
393 /* Classifier and Flow-Table.
395 * Writers of 'flow_table' must take the 'flow_mutex'. Corresponding
396 * changes to 'cls' must be made while still holding the 'flow_mutex'.
398 struct ovs_mutex flow_mutex
;
400 struct cmap flow_table OVS_GUARDED
; /* Flow table. */
403 struct dp_netdev_pmd_stats stats
;
405 /* Cycles counters */
406 struct dp_netdev_pmd_cycles cycles
;
408 /* Used to count cicles. See 'cycles_counter_end()' */
409 unsigned long long last_cycles
;
411 struct latch exit_latch
; /* For terminating the pmd thread. */
412 atomic_uint change_seq
; /* For reloading pmd ports. */
414 int index
; /* Idx of this pmd thread among pmd*/
415 /* threads on same numa node. */
416 unsigned core_id
; /* CPU core id of this pmd thread. */
417 int numa_id
; /* numa node id of this pmd thread. */
418 int tx_qid
; /* Queue id used by this pmd thread to
419 * send packets on all netdevs */
421 /* Only a pmd thread can write on its own 'cycles' and 'stats'.
422 * The main thread keeps 'stats_zero' and 'cycles_zero' as base
423 * values and subtracts them from 'stats' and 'cycles' before
424 * reporting to the user */
425 unsigned long long stats_zero
[DP_N_STATS
];
426 uint64_t cycles_zero
[PMD_N_CYCLES
];
429 #define PMD_INITIAL_SEQ 1
431 /* Interface to netdev-based datapath. */
434 struct dp_netdev
*dp
;
435 uint64_t last_port_seq
;
438 static int get_port_by_number(struct dp_netdev
*dp
, odp_port_t port_no
,
439 struct dp_netdev_port
**portp
);
440 static int get_port_by_name(struct dp_netdev
*dp
, const char *devname
,
441 struct dp_netdev_port
**portp
);
442 static void dp_netdev_free(struct dp_netdev
*)
443 OVS_REQUIRES(dp_netdev_mutex
);
444 static int do_add_port(struct dp_netdev
*dp
, const char *devname
,
445 const char *type
, odp_port_t port_no
)
446 OVS_REQUIRES(dp
->port_mutex
);
447 static void do_del_port(struct dp_netdev
*dp
, struct dp_netdev_port
*)
448 OVS_REQUIRES(dp
->port_mutex
);
449 static int dpif_netdev_open(const struct dpif_class
*, const char *name
,
450 bool create
, struct dpif
**);
451 static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread
*pmd
,
452 struct dp_packet
**, int c
,
454 const struct nlattr
*actions
,
456 static void dp_netdev_input(struct dp_netdev_pmd_thread
*,
457 struct dp_packet
**, int cnt
);
459 static void dp_netdev_disable_upcall(struct dp_netdev
*);
460 void dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread
*pmd
);
461 static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread
*pmd
,
462 struct dp_netdev
*dp
, int index
,
463 unsigned core_id
, int numa_id
);
464 static void dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread
*pmd
);
465 static void dp_netdev_set_nonpmd(struct dp_netdev
*dp
);
466 static struct dp_netdev_pmd_thread
*dp_netdev_get_pmd(struct dp_netdev
*dp
,
468 static struct dp_netdev_pmd_thread
*
469 dp_netdev_pmd_get_next(struct dp_netdev
*dp
, struct cmap_position
*pos
);
470 static void dp_netdev_destroy_all_pmds(struct dp_netdev
*dp
);
471 static void dp_netdev_del_pmds_on_numa(struct dp_netdev
*dp
, int numa_id
);
472 static void dp_netdev_set_pmds_on_numa(struct dp_netdev
*dp
, int numa_id
);
473 static void dp_netdev_reset_pmd_threads(struct dp_netdev
*dp
);
474 static bool dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread
*pmd
);
475 static void dp_netdev_pmd_unref(struct dp_netdev_pmd_thread
*pmd
);
476 static void dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread
*pmd
);
478 static inline bool emc_entry_alive(struct emc_entry
*ce
);
479 static void emc_clear_entry(struct emc_entry
*ce
);
482 emc_cache_init(struct emc_cache
*flow_cache
)
486 BUILD_ASSERT(offsetof(struct miniflow
, inline_values
) == sizeof(uint64_t));
488 flow_cache
->sweep_idx
= 0;
489 for (i
= 0; i
< ARRAY_SIZE(flow_cache
->entries
); i
++) {
490 flow_cache
->entries
[i
].flow
= NULL
;
491 flow_cache
->entries
[i
].key
.hash
= 0;
492 flow_cache
->entries
[i
].key
.len
493 = offsetof(struct miniflow
, inline_values
);
494 miniflow_initialize(&flow_cache
->entries
[i
].key
.mf
,
495 flow_cache
->entries
[i
].key
.buf
);
500 emc_cache_uninit(struct emc_cache
*flow_cache
)
504 for (i
= 0; i
< ARRAY_SIZE(flow_cache
->entries
); i
++) {
505 emc_clear_entry(&flow_cache
->entries
[i
]);
509 /* Check and clear dead flow references slowly (one entry at each
512 emc_cache_slow_sweep(struct emc_cache
*flow_cache
)
514 struct emc_entry
*entry
= &flow_cache
->entries
[flow_cache
->sweep_idx
];
516 if (!emc_entry_alive(entry
)) {
517 emc_clear_entry(entry
);
519 flow_cache
->sweep_idx
= (flow_cache
->sweep_idx
+ 1) & EM_FLOW_HASH_MASK
;
522 /* Returns true if 'dpif' is a netdev or dummy dpif, false otherwise. */
524 dpif_is_netdev(const struct dpif
*dpif
)
526 return dpif
->dpif_class
->open
== dpif_netdev_open
;
529 static struct dpif_netdev
*
530 dpif_netdev_cast(const struct dpif
*dpif
)
532 ovs_assert(dpif_is_netdev(dpif
));
533 return CONTAINER_OF(dpif
, struct dpif_netdev
, dpif
);
536 static struct dp_netdev
*
537 get_dp_netdev(const struct dpif
*dpif
)
539 return dpif_netdev_cast(dpif
)->dp
;
543 PMD_INFO_SHOW_STATS
, /* show how cpu cycles are spent */
544 PMD_INFO_CLEAR_STATS
/* set the cycles count to 0 */
548 pmd_info_show_stats(struct ds
*reply
,
549 struct dp_netdev_pmd_thread
*pmd
,
550 unsigned long long stats
[DP_N_STATS
],
551 uint64_t cycles
[PMD_N_CYCLES
])
553 unsigned long long total_packets
= 0;
554 uint64_t total_cycles
= 0;
557 /* These loops subtracts reference values ('*_zero') from the counters.
558 * Since loads and stores are relaxed, it might be possible for a '*_zero'
559 * value to be more recent than the current value we're reading from the
560 * counter. This is not a big problem, since these numbers are not
561 * supposed to be too accurate, but we should at least make sure that
562 * the result is not negative. */
563 for (i
= 0; i
< DP_N_STATS
; i
++) {
564 if (stats
[i
] > pmd
->stats_zero
[i
]) {
565 stats
[i
] -= pmd
->stats_zero
[i
];
570 if (i
!= DP_STAT_LOST
) {
571 /* Lost packets are already included in DP_STAT_MISS */
572 total_packets
+= stats
[i
];
576 for (i
= 0; i
< PMD_N_CYCLES
; i
++) {
577 if (cycles
[i
] > pmd
->cycles_zero
[i
]) {
578 cycles
[i
] -= pmd
->cycles_zero
[i
];
583 total_cycles
+= cycles
[i
];
586 ds_put_cstr(reply
, (pmd
->core_id
== NON_PMD_CORE_ID
)
587 ? "main thread" : "pmd thread");
589 if (pmd
->numa_id
!= OVS_NUMA_UNSPEC
) {
590 ds_put_format(reply
, " numa_id %d", pmd
->numa_id
);
592 if (pmd
->core_id
!= OVS_CORE_UNSPEC
&& pmd
->core_id
!= NON_PMD_CORE_ID
) {
593 ds_put_format(reply
, " core_id %u", pmd
->core_id
);
595 ds_put_cstr(reply
, ":\n");
598 "\temc hits:%llu\n\tmegaflow hits:%llu\n"
599 "\tmiss:%llu\n\tlost:%llu\n",
600 stats
[DP_STAT_EXACT_HIT
], stats
[DP_STAT_MASKED_HIT
],
601 stats
[DP_STAT_MISS
], stats
[DP_STAT_LOST
]);
603 if (total_cycles
== 0) {
608 "\tpolling cycles:%"PRIu64
" (%.02f%%)\n"
609 "\tprocessing cycles:%"PRIu64
" (%.02f%%)\n",
610 cycles
[PMD_CYCLES_POLLING
],
611 cycles
[PMD_CYCLES_POLLING
] / (double)total_cycles
* 100,
612 cycles
[PMD_CYCLES_PROCESSING
],
613 cycles
[PMD_CYCLES_PROCESSING
] / (double)total_cycles
* 100);
615 if (total_packets
== 0) {
620 "\tavg cycles per packet: %.02f (%"PRIu64
"/%llu)\n",
621 total_cycles
/ (double)total_packets
,
622 total_cycles
, total_packets
);
625 "\tavg processing cycles per packet: "
626 "%.02f (%"PRIu64
"/%llu)\n",
627 cycles
[PMD_CYCLES_PROCESSING
] / (double)total_packets
,
628 cycles
[PMD_CYCLES_PROCESSING
], total_packets
);
632 pmd_info_clear_stats(struct ds
*reply OVS_UNUSED
,
633 struct dp_netdev_pmd_thread
*pmd
,
634 unsigned long long stats
[DP_N_STATS
],
635 uint64_t cycles
[PMD_N_CYCLES
])
639 /* We cannot write 'stats' and 'cycles' (because they're written by other
640 * threads) and we shouldn't change 'stats' (because they're used to count
641 * datapath stats, which must not be cleared here). Instead, we save the
642 * current values and subtract them from the values to be displayed in the
644 for (i
= 0; i
< DP_N_STATS
; i
++) {
645 pmd
->stats_zero
[i
] = stats
[i
];
647 for (i
= 0; i
< PMD_N_CYCLES
; i
++) {
648 pmd
->cycles_zero
[i
] = cycles
[i
];
653 dpif_netdev_pmd_info(struct unixctl_conn
*conn
, int argc
, const char *argv
[],
656 struct ds reply
= DS_EMPTY_INITIALIZER
;
657 struct dp_netdev_pmd_thread
*pmd
;
658 struct dp_netdev
*dp
= NULL
;
659 enum pmd_info_type type
= *(enum pmd_info_type
*) aux
;
661 ovs_mutex_lock(&dp_netdev_mutex
);
664 dp
= shash_find_data(&dp_netdevs
, argv
[1]);
665 } else if (shash_count(&dp_netdevs
) == 1) {
666 /* There's only one datapath */
667 dp
= shash_first(&dp_netdevs
)->data
;
671 ovs_mutex_unlock(&dp_netdev_mutex
);
672 unixctl_command_reply_error(conn
,
673 "please specify an existing datapath");
677 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
678 unsigned long long stats
[DP_N_STATS
];
679 uint64_t cycles
[PMD_N_CYCLES
];
682 /* Read current stats and cycle counters */
683 for (i
= 0; i
< ARRAY_SIZE(stats
); i
++) {
684 atomic_read_relaxed(&pmd
->stats
.n
[i
], &stats
[i
]);
686 for (i
= 0; i
< ARRAY_SIZE(cycles
); i
++) {
687 atomic_read_relaxed(&pmd
->cycles
.n
[i
], &cycles
[i
]);
690 if (type
== PMD_INFO_CLEAR_STATS
) {
691 pmd_info_clear_stats(&reply
, pmd
, stats
, cycles
);
692 } else if (type
== PMD_INFO_SHOW_STATS
) {
693 pmd_info_show_stats(&reply
, pmd
, stats
, cycles
);
697 ovs_mutex_unlock(&dp_netdev_mutex
);
699 unixctl_command_reply(conn
, ds_cstr(&reply
));
704 dpif_netdev_init(void)
706 static enum pmd_info_type show_aux
= PMD_INFO_SHOW_STATS
,
707 clear_aux
= PMD_INFO_CLEAR_STATS
;
709 unixctl_command_register("dpif-netdev/pmd-stats-show", "[dp]",
710 0, 1, dpif_netdev_pmd_info
,
712 unixctl_command_register("dpif-netdev/pmd-stats-clear", "[dp]",
713 0, 1, dpif_netdev_pmd_info
,
719 dpif_netdev_enumerate(struct sset
*all_dps
,
720 const struct dpif_class
*dpif_class
)
722 struct shash_node
*node
;
724 ovs_mutex_lock(&dp_netdev_mutex
);
725 SHASH_FOR_EACH(node
, &dp_netdevs
) {
726 struct dp_netdev
*dp
= node
->data
;
727 if (dpif_class
!= dp
->class) {
728 /* 'dp_netdevs' contains both "netdev" and "dummy" dpifs.
729 * If the class doesn't match, skip this dpif. */
732 sset_add(all_dps
, node
->name
);
734 ovs_mutex_unlock(&dp_netdev_mutex
);
740 dpif_netdev_class_is_dummy(const struct dpif_class
*class)
742 return class != &dpif_netdev_class
;
746 dpif_netdev_port_open_type(const struct dpif_class
*class, const char *type
)
748 return strcmp(type
, "internal") ? type
749 : dpif_netdev_class_is_dummy(class) ? "dummy"
754 create_dpif_netdev(struct dp_netdev
*dp
)
756 uint16_t netflow_id
= hash_string(dp
->name
, 0);
757 struct dpif_netdev
*dpif
;
759 ovs_refcount_ref(&dp
->ref_cnt
);
761 dpif
= xmalloc(sizeof *dpif
);
762 dpif_init(&dpif
->dpif
, dp
->class, dp
->name
, netflow_id
>> 8, netflow_id
);
764 dpif
->last_port_seq
= seq_read(dp
->port_seq
);
769 /* Choose an unused, non-zero port number and return it on success.
770 * Return ODPP_NONE on failure. */
772 choose_port(struct dp_netdev
*dp
, const char *name
)
773 OVS_REQUIRES(dp
->port_mutex
)
777 if (dp
->class != &dpif_netdev_class
) {
781 /* If the port name begins with "br", start the number search at
782 * 100 to make writing tests easier. */
783 if (!strncmp(name
, "br", 2)) {
787 /* If the port name contains a number, try to assign that port number.
788 * This can make writing unit tests easier because port numbers are
790 for (p
= name
; *p
!= '\0'; p
++) {
791 if (isdigit((unsigned char) *p
)) {
792 port_no
= start_no
+ strtol(p
, NULL
, 10);
793 if (port_no
> 0 && port_no
!= odp_to_u32(ODPP_NONE
)
794 && !dp_netdev_lookup_port(dp
, u32_to_odp(port_no
))) {
795 return u32_to_odp(port_no
);
802 for (port_no
= 1; port_no
<= UINT16_MAX
; port_no
++) {
803 if (!dp_netdev_lookup_port(dp
, u32_to_odp(port_no
))) {
804 return u32_to_odp(port_no
);
812 create_dp_netdev(const char *name
, const struct dpif_class
*class,
813 struct dp_netdev
**dpp
)
814 OVS_REQUIRES(dp_netdev_mutex
)
816 struct dp_netdev
*dp
;
819 dp
= xzalloc(sizeof *dp
);
820 shash_add(&dp_netdevs
, name
, dp
);
822 *CONST_CAST(const struct dpif_class
**, &dp
->class) = class;
823 *CONST_CAST(const char **, &dp
->name
) = xstrdup(name
);
824 ovs_refcount_init(&dp
->ref_cnt
);
825 atomic_flag_clear(&dp
->destroyed
);
827 ovs_mutex_init(&dp
->port_mutex
);
828 cmap_init(&dp
->ports
);
829 dp
->port_seq
= seq_create();
830 fat_rwlock_init(&dp
->upcall_rwlock
);
832 /* Disable upcalls by default. */
833 dp_netdev_disable_upcall(dp
);
834 dp
->upcall_aux
= NULL
;
835 dp
->upcall_cb
= NULL
;
837 cmap_init(&dp
->poll_threads
);
838 ovs_mutex_init_recursive(&dp
->non_pmd_mutex
);
839 ovsthread_key_create(&dp
->per_pmd_key
, NULL
);
841 dp_netdev_set_nonpmd(dp
);
842 dp
->n_dpdk_rxqs
= NR_QUEUE
;
844 ovs_mutex_lock(&dp
->port_mutex
);
845 error
= do_add_port(dp
, name
, "internal", ODPP_LOCAL
);
846 ovs_mutex_unlock(&dp
->port_mutex
);
852 dp
->last_tnl_conf_seq
= seq_read(tnl_conf_seq
);
858 dpif_netdev_open(const struct dpif_class
*class, const char *name
,
859 bool create
, struct dpif
**dpifp
)
861 struct dp_netdev
*dp
;
864 ovs_mutex_lock(&dp_netdev_mutex
);
865 dp
= shash_find_data(&dp_netdevs
, name
);
867 error
= create
? create_dp_netdev(name
, class, &dp
) : ENODEV
;
869 error
= (dp
->class != class ? EINVAL
874 *dpifp
= create_dpif_netdev(dp
);
877 ovs_mutex_unlock(&dp_netdev_mutex
);
883 dp_netdev_destroy_upcall_lock(struct dp_netdev
*dp
)
884 OVS_NO_THREAD_SAFETY_ANALYSIS
886 /* Check that upcalls are disabled, i.e. that the rwlock is taken */
887 ovs_assert(fat_rwlock_tryrdlock(&dp
->upcall_rwlock
));
889 /* Before freeing a lock we should release it */
890 fat_rwlock_unlock(&dp
->upcall_rwlock
);
891 fat_rwlock_destroy(&dp
->upcall_rwlock
);
894 /* Requires dp_netdev_mutex so that we can't get a new reference to 'dp'
895 * through the 'dp_netdevs' shash while freeing 'dp'. */
897 dp_netdev_free(struct dp_netdev
*dp
)
898 OVS_REQUIRES(dp_netdev_mutex
)
900 struct dp_netdev_port
*port
;
902 shash_find_and_delete(&dp_netdevs
, dp
->name
);
904 dp_netdev_destroy_all_pmds(dp
);
905 cmap_destroy(&dp
->poll_threads
);
906 ovs_mutex_destroy(&dp
->non_pmd_mutex
);
907 ovsthread_key_delete(dp
->per_pmd_key
);
909 ovs_mutex_lock(&dp
->port_mutex
);
910 CMAP_FOR_EACH (port
, node
, &dp
->ports
) {
911 do_del_port(dp
, port
);
913 ovs_mutex_unlock(&dp
->port_mutex
);
915 seq_destroy(dp
->port_seq
);
916 cmap_destroy(&dp
->ports
);
918 /* Upcalls must be disabled at this point */
919 dp_netdev_destroy_upcall_lock(dp
);
922 free(CONST_CAST(char *, dp
->name
));
927 dp_netdev_unref(struct dp_netdev
*dp
)
930 /* Take dp_netdev_mutex so that, if dp->ref_cnt falls to zero, we can't
931 * get a new reference to 'dp' through the 'dp_netdevs' shash. */
932 ovs_mutex_lock(&dp_netdev_mutex
);
933 if (ovs_refcount_unref_relaxed(&dp
->ref_cnt
) == 1) {
936 ovs_mutex_unlock(&dp_netdev_mutex
);
941 dpif_netdev_close(struct dpif
*dpif
)
943 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
950 dpif_netdev_destroy(struct dpif
*dpif
)
952 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
954 if (!atomic_flag_test_and_set(&dp
->destroyed
)) {
955 if (ovs_refcount_unref_relaxed(&dp
->ref_cnt
) == 1) {
956 /* Can't happen: 'dpif' still owns a reference to 'dp'. */
964 /* Add 'n' to the atomic variable 'var' non-atomically and using relaxed
965 * load/store semantics. While the increment is not atomic, the load and
966 * store operations are, making it impossible to read inconsistent values.
968 * This is used to update thread local stats counters. */
970 non_atomic_ullong_add(atomic_ullong
*var
, unsigned long long n
)
972 unsigned long long tmp
;
974 atomic_read_relaxed(var
, &tmp
);
976 atomic_store_relaxed(var
, tmp
);
980 dpif_netdev_get_stats(const struct dpif
*dpif
, struct dpif_dp_stats
*stats
)
982 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
983 struct dp_netdev_pmd_thread
*pmd
;
985 stats
->n_flows
= stats
->n_hit
= stats
->n_missed
= stats
->n_lost
= 0;
986 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
987 unsigned long long n
;
988 stats
->n_flows
+= cmap_count(&pmd
->flow_table
);
990 atomic_read_relaxed(&pmd
->stats
.n
[DP_STAT_MASKED_HIT
], &n
);
992 atomic_read_relaxed(&pmd
->stats
.n
[DP_STAT_EXACT_HIT
], &n
);
994 atomic_read_relaxed(&pmd
->stats
.n
[DP_STAT_MISS
], &n
);
995 stats
->n_missed
+= n
;
996 atomic_read_relaxed(&pmd
->stats
.n
[DP_STAT_LOST
], &n
);
999 stats
->n_masks
= UINT32_MAX
;
1000 stats
->n_mask_hit
= UINT64_MAX
;
1006 dp_netdev_reload_pmd__(struct dp_netdev_pmd_thread
*pmd
)
1010 if (pmd
->core_id
== NON_PMD_CORE_ID
) {
1014 ovs_mutex_lock(&pmd
->cond_mutex
);
1015 atomic_add_relaxed(&pmd
->change_seq
, 1, &old_seq
);
1016 ovs_mutex_cond_wait(&pmd
->cond
, &pmd
->cond_mutex
);
1017 ovs_mutex_unlock(&pmd
->cond_mutex
);
1020 /* Causes all pmd threads to reload its tx/rx devices.
1021 * Must be called after adding/removing ports. */
1023 dp_netdev_reload_pmds(struct dp_netdev
*dp
)
1025 struct dp_netdev_pmd_thread
*pmd
;
1027 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
1028 dp_netdev_reload_pmd__(pmd
);
1033 hash_port_no(odp_port_t port_no
)
1035 return hash_int(odp_to_u32(port_no
), 0);
1039 do_add_port(struct dp_netdev
*dp
, const char *devname
, const char *type
,
1041 OVS_REQUIRES(dp
->port_mutex
)
1043 struct netdev_saved_flags
*sf
;
1044 struct dp_netdev_port
*port
;
1045 struct netdev
*netdev
;
1046 enum netdev_flags flags
;
1047 const char *open_type
;
1051 /* Reject devices already in 'dp'. */
1052 if (!get_port_by_name(dp
, devname
, &port
)) {
1056 /* Open and validate network device. */
1057 open_type
= dpif_netdev_port_open_type(dp
->class, type
);
1058 error
= netdev_open(devname
, open_type
, &netdev
);
1062 /* XXX reject non-Ethernet devices */
1064 netdev_get_flags(netdev
, &flags
);
1065 if (flags
& NETDEV_LOOPBACK
) {
1066 VLOG_ERR("%s: cannot add a loopback device", devname
);
1067 netdev_close(netdev
);
1071 if (netdev_is_pmd(netdev
)) {
1072 int n_cores
= ovs_numa_get_n_cores();
1074 if (n_cores
== OVS_CORE_UNSPEC
) {
1075 VLOG_ERR("%s, cannot get cpu core info", devname
);
1078 /* There can only be ovs_numa_get_n_cores() pmd threads,
1079 * so creates a txq for each, and one extra for the non
1081 error
= netdev_set_multiq(netdev
, n_cores
+ 1, dp
->n_dpdk_rxqs
);
1082 if (error
&& (error
!= EOPNOTSUPP
)) {
1083 VLOG_ERR("%s, cannot set multiq", devname
);
1087 port
= xzalloc(sizeof *port
);
1088 port
->md
= PKT_METADATA_INITIALIZER(port_no
);
1089 port
->netdev
= netdev
;
1090 port
->rxq
= xmalloc(sizeof *port
->rxq
* netdev_n_rxq(netdev
));
1091 port
->type
= xstrdup(type
);
1092 for (i
= 0; i
< netdev_n_rxq(netdev
); i
++) {
1093 error
= netdev_rxq_open(netdev
, &port
->rxq
[i
], i
);
1095 && !(error
== EOPNOTSUPP
&& dpif_netdev_class_is_dummy(dp
->class))) {
1096 VLOG_ERR("%s: cannot receive packets on this network device (%s)",
1097 devname
, ovs_strerror(errno
));
1098 netdev_close(netdev
);
1106 error
= netdev_turn_flags_on(netdev
, NETDEV_PROMISC
, &sf
);
1108 for (i
= 0; i
< netdev_n_rxq(netdev
); i
++) {
1109 netdev_rxq_close(port
->rxq
[i
]);
1111 netdev_close(netdev
);
1119 ovs_refcount_init(&port
->ref_cnt
);
1120 cmap_insert(&dp
->ports
, &port
->node
, hash_port_no(port_no
));
1122 if (netdev_is_pmd(netdev
)) {
1123 dp_netdev_set_pmds_on_numa(dp
, netdev_get_numa_id(netdev
));
1124 dp_netdev_reload_pmds(dp
);
1126 seq_change(dp
->port_seq
);
1132 dpif_netdev_port_add(struct dpif
*dpif
, struct netdev
*netdev
,
1133 odp_port_t
*port_nop
)
1135 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1136 char namebuf
[NETDEV_VPORT_NAME_BUFSIZE
];
1137 const char *dpif_port
;
1141 ovs_mutex_lock(&dp
->port_mutex
);
1142 dpif_port
= netdev_vport_get_dpif_port(netdev
, namebuf
, sizeof namebuf
);
1143 if (*port_nop
!= ODPP_NONE
) {
1144 port_no
= *port_nop
;
1145 error
= dp_netdev_lookup_port(dp
, *port_nop
) ? EBUSY
: 0;
1147 port_no
= choose_port(dp
, dpif_port
);
1148 error
= port_no
== ODPP_NONE
? EFBIG
: 0;
1151 *port_nop
= port_no
;
1152 error
= do_add_port(dp
, dpif_port
, netdev_get_type(netdev
), port_no
);
1154 ovs_mutex_unlock(&dp
->port_mutex
);
1160 dpif_netdev_port_del(struct dpif
*dpif
, odp_port_t port_no
)
1162 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1165 ovs_mutex_lock(&dp
->port_mutex
);
1166 if (port_no
== ODPP_LOCAL
) {
1169 struct dp_netdev_port
*port
;
1171 error
= get_port_by_number(dp
, port_no
, &port
);
1173 do_del_port(dp
, port
);
1176 ovs_mutex_unlock(&dp
->port_mutex
);
1182 is_valid_port_number(odp_port_t port_no
)
1184 return port_no
!= ODPP_NONE
;
1187 static struct dp_netdev_port
*
1188 dp_netdev_lookup_port(const struct dp_netdev
*dp
, odp_port_t port_no
)
1190 struct dp_netdev_port
*port
;
1192 CMAP_FOR_EACH_WITH_HASH (port
, node
, hash_port_no(port_no
), &dp
->ports
) {
1193 if (port
->md
.in_port
.odp_port
== port_no
) {
1201 get_port_by_number(struct dp_netdev
*dp
,
1202 odp_port_t port_no
, struct dp_netdev_port
**portp
)
1204 if (!is_valid_port_number(port_no
)) {
1208 *portp
= dp_netdev_lookup_port(dp
, port_no
);
1209 return *portp
? 0 : ENOENT
;
1214 port_ref(struct dp_netdev_port
*port
)
1217 ovs_refcount_ref(&port
->ref_cnt
);
1222 port_try_ref(struct dp_netdev_port
*port
)
1225 return ovs_refcount_try_ref_rcu(&port
->ref_cnt
);
1232 port_unref(struct dp_netdev_port
*port
)
1234 if (port
&& ovs_refcount_unref_relaxed(&port
->ref_cnt
) == 1) {
1235 int n_rxq
= netdev_n_rxq(port
->netdev
);
1238 netdev_close(port
->netdev
);
1239 netdev_restore_flags(port
->sf
);
1241 for (i
= 0; i
< n_rxq
; i
++) {
1242 netdev_rxq_close(port
->rxq
[i
]);
1251 get_port_by_name(struct dp_netdev
*dp
,
1252 const char *devname
, struct dp_netdev_port
**portp
)
1253 OVS_REQUIRES(dp
->port_mutex
)
1255 struct dp_netdev_port
*port
;
1257 CMAP_FOR_EACH (port
, node
, &dp
->ports
) {
1258 if (!strcmp(netdev_get_name(port
->netdev
), devname
)) {
1267 get_n_pmd_threads_on_numa(struct dp_netdev
*dp
, int numa_id
)
1269 struct dp_netdev_pmd_thread
*pmd
;
1272 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
1273 if (pmd
->numa_id
== numa_id
) {
1281 /* Returns 'true' if there is a port with pmd netdev and the netdev
1282 * is on numa node 'numa_id'. */
1284 has_pmd_port_for_numa(struct dp_netdev
*dp
, int numa_id
)
1286 struct dp_netdev_port
*port
;
1288 CMAP_FOR_EACH (port
, node
, &dp
->ports
) {
1289 if (netdev_is_pmd(port
->netdev
)
1290 && netdev_get_numa_id(port
->netdev
) == numa_id
) {
1300 do_del_port(struct dp_netdev
*dp
, struct dp_netdev_port
*port
)
1301 OVS_REQUIRES(dp
->port_mutex
)
1303 cmap_remove(&dp
->ports
, &port
->node
,
1304 hash_odp_port(port
->md
.in_port
.odp_port
));
1305 seq_change(dp
->port_seq
);
1306 if (netdev_is_pmd(port
->netdev
)) {
1307 int numa_id
= netdev_get_numa_id(port
->netdev
);
1309 /* If there is no netdev on the numa node, deletes the pmd threads
1310 * for that numa. Else, just reloads the queues. */
1311 if (!has_pmd_port_for_numa(dp
, numa_id
)) {
1312 dp_netdev_del_pmds_on_numa(dp
, numa_id
);
1314 dp_netdev_reload_pmds(dp
);
1321 answer_port_query(const struct dp_netdev_port
*port
,
1322 struct dpif_port
*dpif_port
)
1324 dpif_port
->name
= xstrdup(netdev_get_name(port
->netdev
));
1325 dpif_port
->type
= xstrdup(port
->type
);
1326 dpif_port
->port_no
= port
->md
.in_port
.odp_port
;
1330 dpif_netdev_port_query_by_number(const struct dpif
*dpif
, odp_port_t port_no
,
1331 struct dpif_port
*dpif_port
)
1333 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1334 struct dp_netdev_port
*port
;
1337 error
= get_port_by_number(dp
, port_no
, &port
);
1338 if (!error
&& dpif_port
) {
1339 answer_port_query(port
, dpif_port
);
1346 dpif_netdev_port_query_by_name(const struct dpif
*dpif
, const char *devname
,
1347 struct dpif_port
*dpif_port
)
1349 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1350 struct dp_netdev_port
*port
;
1353 ovs_mutex_lock(&dp
->port_mutex
);
1354 error
= get_port_by_name(dp
, devname
, &port
);
1355 if (!error
&& dpif_port
) {
1356 answer_port_query(port
, dpif_port
);
1358 ovs_mutex_unlock(&dp
->port_mutex
);
1364 dp_netdev_flow_free(struct dp_netdev_flow
*flow
)
1366 dp_netdev_actions_free(dp_netdev_flow_get_actions(flow
));
1370 static void dp_netdev_flow_unref(struct dp_netdev_flow
*flow
)
1372 if (ovs_refcount_unref_relaxed(&flow
->ref_cnt
) == 1) {
1373 ovsrcu_postpone(dp_netdev_flow_free
, flow
);
1378 dp_netdev_flow_hash(const ovs_u128
*ufid
)
1380 return ufid
->u32
[0];
1384 dp_netdev_pmd_remove_flow(struct dp_netdev_pmd_thread
*pmd
,
1385 struct dp_netdev_flow
*flow
)
1386 OVS_REQUIRES(pmd
->flow_mutex
)
1388 struct cmap_node
*node
= CONST_CAST(struct cmap_node
*, &flow
->node
);
1390 dpcls_remove(&pmd
->cls
, &flow
->cr
);
1391 cmap_remove(&pmd
->flow_table
, node
, dp_netdev_flow_hash(&flow
->ufid
));
1394 dp_netdev_flow_unref(flow
);
1398 dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread
*pmd
)
1400 struct dp_netdev_flow
*netdev_flow
;
1402 ovs_mutex_lock(&pmd
->flow_mutex
);
1403 CMAP_FOR_EACH (netdev_flow
, node
, &pmd
->flow_table
) {
1404 dp_netdev_pmd_remove_flow(pmd
, netdev_flow
);
1406 ovs_mutex_unlock(&pmd
->flow_mutex
);
1410 dpif_netdev_flow_flush(struct dpif
*dpif
)
1412 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1413 struct dp_netdev_pmd_thread
*pmd
;
1415 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
1416 dp_netdev_pmd_flow_flush(pmd
);
1422 struct dp_netdev_port_state
{
1423 struct cmap_position position
;
1428 dpif_netdev_port_dump_start(const struct dpif
*dpif OVS_UNUSED
, void **statep
)
1430 *statep
= xzalloc(sizeof(struct dp_netdev_port_state
));
1435 dpif_netdev_port_dump_next(const struct dpif
*dpif
, void *state_
,
1436 struct dpif_port
*dpif_port
)
1438 struct dp_netdev_port_state
*state
= state_
;
1439 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1440 struct cmap_node
*node
;
1443 node
= cmap_next_position(&dp
->ports
, &state
->position
);
1445 struct dp_netdev_port
*port
;
1447 port
= CONTAINER_OF(node
, struct dp_netdev_port
, node
);
1450 state
->name
= xstrdup(netdev_get_name(port
->netdev
));
1451 dpif_port
->name
= state
->name
;
1452 dpif_port
->type
= port
->type
;
1453 dpif_port
->port_no
= port
->md
.in_port
.odp_port
;
1464 dpif_netdev_port_dump_done(const struct dpif
*dpif OVS_UNUSED
, void *state_
)
1466 struct dp_netdev_port_state
*state
= state_
;
1473 dpif_netdev_port_poll(const struct dpif
*dpif_
, char **devnamep OVS_UNUSED
)
1475 struct dpif_netdev
*dpif
= dpif_netdev_cast(dpif_
);
1476 uint64_t new_port_seq
;
1479 new_port_seq
= seq_read(dpif
->dp
->port_seq
);
1480 if (dpif
->last_port_seq
!= new_port_seq
) {
1481 dpif
->last_port_seq
= new_port_seq
;
1491 dpif_netdev_port_poll_wait(const struct dpif
*dpif_
)
1493 struct dpif_netdev
*dpif
= dpif_netdev_cast(dpif_
);
1495 seq_wait(dpif
->dp
->port_seq
, dpif
->last_port_seq
);
1498 static struct dp_netdev_flow
*
1499 dp_netdev_flow_cast(const struct dpcls_rule
*cr
)
1501 return cr
? CONTAINER_OF(cr
, struct dp_netdev_flow
, cr
) : NULL
;
1504 static bool dp_netdev_flow_ref(struct dp_netdev_flow
*flow
)
1506 return ovs_refcount_try_ref_rcu(&flow
->ref_cnt
);
1509 /* netdev_flow_key utilities.
1511 * netdev_flow_key is basically a miniflow. We use these functions
1512 * (netdev_flow_key_clone, netdev_flow_key_equal, ...) instead of the miniflow
1513 * functions (miniflow_clone_inline, miniflow_equal, ...), because:
1515 * - Since we are dealing exclusively with miniflows created by
1516 * miniflow_extract(), if the map is different the miniflow is different.
1517 * Therefore we can be faster by comparing the map and the miniflow in a
1519 * _ netdev_flow_key's miniflow has always inline values.
1520 * - These functions can be inlined by the compiler.
1522 * The following assertions make sure that what we're doing with miniflow is
1525 BUILD_ASSERT_DECL(offsetof(struct miniflow
, inline_values
)
1526 == sizeof(uint64_t));
1528 /* Given the number of bits set in the miniflow map, returns the size of the
1529 * 'netdev_flow_key.mf' */
1530 static inline uint32_t
1531 netdev_flow_key_size(uint32_t flow_u32s
)
1533 return offsetof(struct miniflow
, inline_values
) +
1534 MINIFLOW_VALUES_SIZE(flow_u32s
);
1538 netdev_flow_key_equal(const struct netdev_flow_key
*a
,
1539 const struct netdev_flow_key
*b
)
1541 /* 'b->len' may be not set yet. */
1542 return a
->hash
== b
->hash
&& !memcmp(&a
->mf
, &b
->mf
, a
->len
);
1545 /* Used to compare 'netdev_flow_key' in the exact match cache to a miniflow.
1546 * The maps are compared bitwise, so both 'key->mf' 'mf' must have been
1547 * generated by miniflow_extract. */
1549 netdev_flow_key_equal_mf(const struct netdev_flow_key
*key
,
1550 const struct miniflow
*mf
)
1552 return !memcmp(&key
->mf
, mf
, key
->len
);
1556 netdev_flow_key_clone(struct netdev_flow_key
*dst
,
1557 const struct netdev_flow_key
*src
)
1560 offsetof(struct netdev_flow_key
, mf
) + src
->len
);
1565 netdev_flow_key_from_flow(struct netdev_flow_key
*dst
,
1566 const struct flow
*src
)
1568 struct dp_packet packet
;
1569 uint64_t buf_stub
[512 / 8];
1571 miniflow_initialize(&dst
->mf
, dst
->buf
);
1573 dp_packet_use_stub(&packet
, buf_stub
, sizeof buf_stub
);
1574 pkt_metadata_from_flow(&packet
.md
, src
);
1575 flow_compose(&packet
, src
);
1576 miniflow_extract(&packet
, &dst
->mf
);
1577 dp_packet_uninit(&packet
);
1579 dst
->len
= netdev_flow_key_size(count_1bits(dst
->mf
.map
));
1580 dst
->hash
= 0; /* Not computed yet. */
1583 /* Initialize a netdev_flow_key 'mask' from 'match'. */
1585 netdev_flow_mask_init(struct netdev_flow_key
*mask
,
1586 const struct match
*match
)
1588 const uint64_t *mask_u64
= (const uint64_t *) &match
->wc
.masks
;
1589 uint64_t *dst
= mask
->mf
.inline_values
;
1590 uint64_t map
, mask_map
= 0;
1594 /* Only check masks that make sense for the flow. */
1595 map
= flow_wc_map(&match
->flow
);
1598 uint64_t rm1bit
= rightmost_1bit(map
);
1599 int i
= raw_ctz(map
);
1603 *dst
++ = mask_u64
[i
];
1604 hash
= hash_add64(hash
, mask_u64
[i
]);
1609 mask
->mf
.values_inline
= true;
1610 mask
->mf
.map
= mask_map
;
1612 hash
= hash_add64(hash
, mask_map
);
1614 n
= dst
- mask
->mf
.inline_values
;
1616 mask
->hash
= hash_finish(hash
, n
* 8);
1617 mask
->len
= netdev_flow_key_size(n
);
1620 /* Initializes 'dst' as a copy of 'src' masked with 'mask'. */
1622 netdev_flow_key_init_masked(struct netdev_flow_key
*dst
,
1623 const struct flow
*flow
,
1624 const struct netdev_flow_key
*mask
)
1626 uint64_t *dst_u64
= dst
->mf
.inline_values
;
1627 const uint64_t *mask_u64
= mask
->mf
.inline_values
;
1631 dst
->len
= mask
->len
;
1632 dst
->mf
.values_inline
= true;
1633 dst
->mf
.map
= mask
->mf
.map
;
1635 FLOW_FOR_EACH_IN_MAP(value
, flow
, mask
->mf
.map
) {
1636 *dst_u64
= value
& *mask_u64
++;
1637 hash
= hash_add64(hash
, *dst_u64
++);
1639 dst
->hash
= hash_finish(hash
, (dst_u64
- dst
->mf
.inline_values
) * 8);
1642 /* Iterate through all netdev_flow_key u64 values specified by 'MAP' */
1643 #define NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(VALUE, KEY, MAP) \
1644 for (struct mf_for_each_in_map_aux aux__ \
1645 = { (KEY)->mf.inline_values, (KEY)->mf.map, MAP }; \
1646 mf_get_next_in_map(&aux__, &(VALUE)); \
1649 /* Returns a hash value for the bits of 'key' where there are 1-bits in
1651 static inline uint32_t
1652 netdev_flow_key_hash_in_mask(const struct netdev_flow_key
*key
,
1653 const struct netdev_flow_key
*mask
)
1655 const uint64_t *p
= mask
->mf
.inline_values
;
1659 NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(key_u64
, key
, mask
->mf
.map
) {
1660 hash
= hash_add64(hash
, key_u64
& *p
++);
1663 return hash_finish(hash
, (p
- mask
->mf
.inline_values
) * 8);
1667 emc_entry_alive(struct emc_entry
*ce
)
1669 return ce
->flow
&& !ce
->flow
->dead
;
1673 emc_clear_entry(struct emc_entry
*ce
)
1676 dp_netdev_flow_unref(ce
->flow
);
1682 emc_change_entry(struct emc_entry
*ce
, struct dp_netdev_flow
*flow
,
1683 const struct netdev_flow_key
*key
)
1685 if (ce
->flow
!= flow
) {
1687 dp_netdev_flow_unref(ce
->flow
);
1690 if (dp_netdev_flow_ref(flow
)) {
1697 netdev_flow_key_clone(&ce
->key
, key
);
1702 emc_insert(struct emc_cache
*cache
, const struct netdev_flow_key
*key
,
1703 struct dp_netdev_flow
*flow
)
1705 struct emc_entry
*to_be_replaced
= NULL
;
1706 struct emc_entry
*current_entry
;
1708 EMC_FOR_EACH_POS_WITH_HASH(cache
, current_entry
, key
->hash
) {
1709 if (netdev_flow_key_equal(¤t_entry
->key
, key
)) {
1710 /* We found the entry with the 'mf' miniflow */
1711 emc_change_entry(current_entry
, flow
, NULL
);
1715 /* Replacement policy: put the flow in an empty (not alive) entry, or
1716 * in the first entry where it can be */
1718 || (emc_entry_alive(to_be_replaced
)
1719 && !emc_entry_alive(current_entry
))
1720 || current_entry
->key
.hash
< to_be_replaced
->key
.hash
) {
1721 to_be_replaced
= current_entry
;
1724 /* We didn't find the miniflow in the cache.
1725 * The 'to_be_replaced' entry is where the new flow will be stored */
1727 emc_change_entry(to_be_replaced
, flow
, key
);
1730 static inline struct dp_netdev_flow
*
1731 emc_lookup(struct emc_cache
*cache
, const struct netdev_flow_key
*key
)
1733 struct emc_entry
*current_entry
;
1735 EMC_FOR_EACH_POS_WITH_HASH(cache
, current_entry
, key
->hash
) {
1736 if (current_entry
->key
.hash
== key
->hash
1737 && emc_entry_alive(current_entry
)
1738 && netdev_flow_key_equal_mf(¤t_entry
->key
, &key
->mf
)) {
1740 /* We found the entry with the 'key->mf' miniflow */
1741 return current_entry
->flow
;
1748 static struct dp_netdev_flow
*
1749 dp_netdev_pmd_lookup_flow(const struct dp_netdev_pmd_thread
*pmd
,
1750 const struct netdev_flow_key
*key
)
1752 struct dp_netdev_flow
*netdev_flow
;
1753 struct dpcls_rule
*rule
;
1755 dpcls_lookup(&pmd
->cls
, key
, &rule
, 1);
1756 netdev_flow
= dp_netdev_flow_cast(rule
);
1761 static struct dp_netdev_flow
*
1762 dp_netdev_pmd_find_flow(const struct dp_netdev_pmd_thread
*pmd
,
1763 const ovs_u128
*ufidp
, const struct nlattr
*key
,
1766 struct dp_netdev_flow
*netdev_flow
;
1770 /* If a UFID is not provided, determine one based on the key. */
1771 if (!ufidp
&& key
&& key_len
1772 && !dpif_netdev_flow_from_nlattrs(key
, key_len
, &flow
)) {
1773 dpif_flow_hash(pmd
->dp
->dpif
, &flow
, sizeof flow
, &ufid
);
1778 CMAP_FOR_EACH_WITH_HASH (netdev_flow
, node
, dp_netdev_flow_hash(ufidp
),
1780 if (ovs_u128_equals(&netdev_flow
->ufid
, ufidp
)) {
1790 get_dpif_flow_stats(const struct dp_netdev_flow
*netdev_flow_
,
1791 struct dpif_flow_stats
*stats
)
1793 struct dp_netdev_flow
*netdev_flow
;
1794 unsigned long long n
;
1798 netdev_flow
= CONST_CAST(struct dp_netdev_flow
*, netdev_flow_
);
1800 atomic_read_relaxed(&netdev_flow
->stats
.packet_count
, &n
);
1801 stats
->n_packets
= n
;
1802 atomic_read_relaxed(&netdev_flow
->stats
.byte_count
, &n
);
1804 atomic_read_relaxed(&netdev_flow
->stats
.used
, &used
);
1806 atomic_read_relaxed(&netdev_flow
->stats
.tcp_flags
, &flags
);
1807 stats
->tcp_flags
= flags
;
1810 /* Converts to the dpif_flow format, using 'key_buf' and 'mask_buf' for
1811 * storing the netlink-formatted key/mask. 'key_buf' may be the same as
1812 * 'mask_buf'. Actions will be returned without copying, by relying on RCU to
1815 dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow
*netdev_flow
,
1816 struct ofpbuf
*key_buf
, struct ofpbuf
*mask_buf
,
1817 struct dpif_flow
*flow
, bool terse
)
1820 memset(flow
, 0, sizeof *flow
);
1822 struct flow_wildcards wc
;
1823 struct dp_netdev_actions
*actions
;
1826 miniflow_expand(&netdev_flow
->cr
.mask
->mf
, &wc
.masks
);
1829 offset
= key_buf
->size
;
1830 flow
->key
= ofpbuf_tail(key_buf
);
1831 odp_flow_key_from_flow(key_buf
, &netdev_flow
->flow
, &wc
.masks
,
1832 netdev_flow
->flow
.in_port
.odp_port
, true);
1833 flow
->key_len
= key_buf
->size
- offset
;
1836 offset
= mask_buf
->size
;
1837 flow
->mask
= ofpbuf_tail(mask_buf
);
1838 odp_flow_key_from_mask(mask_buf
, &wc
.masks
, &netdev_flow
->flow
,
1839 odp_to_u32(wc
.masks
.in_port
.odp_port
),
1841 flow
->mask_len
= mask_buf
->size
- offset
;
1844 actions
= dp_netdev_flow_get_actions(netdev_flow
);
1845 flow
->actions
= actions
->actions
;
1846 flow
->actions_len
= actions
->size
;
1849 flow
->ufid
= netdev_flow
->ufid
;
1850 flow
->ufid_present
= true;
1851 flow
->pmd_id
= netdev_flow
->pmd_id
;
1852 get_dpif_flow_stats(netdev_flow
, &flow
->stats
);
1856 dpif_netdev_mask_from_nlattrs(const struct nlattr
*key
, uint32_t key_len
,
1857 const struct nlattr
*mask_key
,
1858 uint32_t mask_key_len
, const struct flow
*flow
,
1862 enum odp_key_fitness fitness
;
1864 fitness
= odp_flow_key_to_mask(mask_key
, mask_key_len
, mask
, flow
);
1866 /* This should not happen: it indicates that
1867 * odp_flow_key_from_mask() and odp_flow_key_to_mask()
1868 * disagree on the acceptable form of a mask. Log the problem
1869 * as an error, with enough details to enable debugging. */
1870 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
1872 if (!VLOG_DROP_ERR(&rl
)) {
1876 odp_flow_format(key
, key_len
, mask_key
, mask_key_len
, NULL
, &s
,
1878 VLOG_ERR("internal error parsing flow mask %s (%s)",
1879 ds_cstr(&s
), odp_key_fitness_to_string(fitness
));
1886 enum mf_field_id id
;
1887 /* No mask key, unwildcard everything except fields whose
1888 * prerequisities are not met. */
1889 memset(mask
, 0x0, sizeof *mask
);
1891 for (id
= 0; id
< MFF_N_IDS
; ++id
) {
1892 /* Skip registers and metadata. */
1893 if (!(id
>= MFF_REG0
&& id
< MFF_REG0
+ FLOW_N_REGS
)
1894 && id
!= MFF_METADATA
) {
1895 const struct mf_field
*mf
= mf_from_id(id
);
1896 if (mf_are_prereqs_ok(mf
, flow
)) {
1897 mf_mask_field(mf
, mask
);
1903 /* Force unwildcard the in_port.
1905 * We need to do this even in the case where we unwildcard "everything"
1906 * above because "everything" only includes the 16-bit OpenFlow port number
1907 * mask->in_port.ofp_port, which only covers half of the 32-bit datapath
1908 * port number mask->in_port.odp_port. */
1909 mask
->in_port
.odp_port
= u32_to_odp(UINT32_MAX
);
1915 dpif_netdev_flow_from_nlattrs(const struct nlattr
*key
, uint32_t key_len
,
1920 if (odp_flow_key_to_flow(key
, key_len
, flow
)) {
1921 /* This should not happen: it indicates that odp_flow_key_from_flow()
1922 * and odp_flow_key_to_flow() disagree on the acceptable form of a
1923 * flow. Log the problem as an error, with enough details to enable
1925 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
1927 if (!VLOG_DROP_ERR(&rl
)) {
1931 odp_flow_format(key
, key_len
, NULL
, 0, NULL
, &s
, true);
1932 VLOG_ERR("internal error parsing flow key %s", ds_cstr(&s
));
1939 in_port
= flow
->in_port
.odp_port
;
1940 if (!is_valid_port_number(in_port
) && in_port
!= ODPP_NONE
) {
1948 dpif_netdev_flow_get(const struct dpif
*dpif
, const struct dpif_flow_get
*get
)
1950 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1951 struct dp_netdev_flow
*netdev_flow
;
1952 struct dp_netdev_pmd_thread
*pmd
;
1953 unsigned pmd_id
= get
->pmd_id
== PMD_ID_NULL
1954 ? NON_PMD_CORE_ID
: get
->pmd_id
;
1957 pmd
= dp_netdev_get_pmd(dp
, pmd_id
);
1962 netdev_flow
= dp_netdev_pmd_find_flow(pmd
, get
->ufid
, get
->key
,
1965 dp_netdev_flow_to_dpif_flow(netdev_flow
, get
->buffer
, get
->buffer
,
1970 dp_netdev_pmd_unref(pmd
);
1976 static struct dp_netdev_flow
*
1977 dp_netdev_flow_add(struct dp_netdev_pmd_thread
*pmd
,
1978 struct match
*match
, const ovs_u128
*ufid
,
1979 const struct nlattr
*actions
, size_t actions_len
)
1980 OVS_REQUIRES(pmd
->flow_mutex
)
1982 struct dp_netdev_flow
*flow
;
1983 struct netdev_flow_key mask
;
1985 netdev_flow_mask_init(&mask
, match
);
1986 /* Make sure wc does not have metadata. */
1987 ovs_assert(!(mask
.mf
.map
& (MINIFLOW_MAP(metadata
) | MINIFLOW_MAP(regs
))));
1989 /* Do not allocate extra space. */
1990 flow
= xmalloc(sizeof *flow
- sizeof flow
->cr
.flow
.mf
+ mask
.len
);
1991 memset(&flow
->stats
, 0, sizeof flow
->stats
);
1994 *CONST_CAST(unsigned *, &flow
->pmd_id
) = pmd
->core_id
;
1995 *CONST_CAST(struct flow
*, &flow
->flow
) = match
->flow
;
1996 *CONST_CAST(ovs_u128
*, &flow
->ufid
) = *ufid
;
1997 ovs_refcount_init(&flow
->ref_cnt
);
1998 ovsrcu_set(&flow
->actions
, dp_netdev_actions_create(actions
, actions_len
));
2000 netdev_flow_key_init_masked(&flow
->cr
.flow
, &match
->flow
, &mask
);
2001 dpcls_insert(&pmd
->cls
, &flow
->cr
, &mask
);
2003 cmap_insert(&pmd
->flow_table
, CONST_CAST(struct cmap_node
*, &flow
->node
),
2004 dp_netdev_flow_hash(&flow
->ufid
));
2006 if (OVS_UNLIKELY(VLOG_IS_DBG_ENABLED())) {
2008 struct ds ds
= DS_EMPTY_INITIALIZER
;
2010 match
.flow
= flow
->flow
;
2011 miniflow_expand(&flow
->cr
.mask
->mf
, &match
.wc
.masks
);
2013 ds_put_cstr(&ds
, "flow_add: ");
2014 odp_format_ufid(ufid
, &ds
);
2015 ds_put_cstr(&ds
, " ");
2016 match_format(&match
, &ds
, OFP_DEFAULT_PRIORITY
);
2017 ds_put_cstr(&ds
, ", actions:");
2018 format_odp_actions(&ds
, actions
, actions_len
);
2020 VLOG_DBG_RL(&upcall_rl
, "%s", ds_cstr(&ds
));
2029 dpif_netdev_flow_put(struct dpif
*dpif
, const struct dpif_flow_put
*put
)
2031 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
2032 struct dp_netdev_flow
*netdev_flow
;
2033 struct netdev_flow_key key
;
2034 struct dp_netdev_pmd_thread
*pmd
;
2037 unsigned pmd_id
= put
->pmd_id
== PMD_ID_NULL
2038 ? NON_PMD_CORE_ID
: put
->pmd_id
;
2041 error
= dpif_netdev_flow_from_nlattrs(put
->key
, put
->key_len
, &match
.flow
);
2045 error
= dpif_netdev_mask_from_nlattrs(put
->key
, put
->key_len
,
2046 put
->mask
, put
->mask_len
,
2047 &match
.flow
, &match
.wc
.masks
);
2052 pmd
= dp_netdev_get_pmd(dp
, pmd_id
);
2057 /* Must produce a netdev_flow_key for lookup.
2058 * This interface is no longer performance critical, since it is not used
2059 * for upcall processing any more. */
2060 netdev_flow_key_from_flow(&key
, &match
.flow
);
2065 dpif_flow_hash(dpif
, &match
.flow
, sizeof match
.flow
, &ufid
);
2068 ovs_mutex_lock(&pmd
->flow_mutex
);
2069 netdev_flow
= dp_netdev_pmd_lookup_flow(pmd
, &key
);
2071 if (put
->flags
& DPIF_FP_CREATE
) {
2072 if (cmap_count(&pmd
->flow_table
) < MAX_FLOWS
) {
2074 memset(put
->stats
, 0, sizeof *put
->stats
);
2076 dp_netdev_flow_add(pmd
, &match
, &ufid
, put
->actions
,
2086 if (put
->flags
& DPIF_FP_MODIFY
2087 && flow_equal(&match
.flow
, &netdev_flow
->flow
)) {
2088 struct dp_netdev_actions
*new_actions
;
2089 struct dp_netdev_actions
*old_actions
;
2091 new_actions
= dp_netdev_actions_create(put
->actions
,
2094 old_actions
= dp_netdev_flow_get_actions(netdev_flow
);
2095 ovsrcu_set(&netdev_flow
->actions
, new_actions
);
2098 get_dpif_flow_stats(netdev_flow
, put
->stats
);
2100 if (put
->flags
& DPIF_FP_ZERO_STATS
) {
2101 /* XXX: The userspace datapath uses thread local statistics
2102 * (for flows), which should be updated only by the owning
2103 * thread. Since we cannot write on stats memory here,
2104 * we choose not to support this flag. Please note:
2105 * - This feature is currently used only by dpctl commands with
2107 * - Should the need arise, this operation can be implemented
2108 * by keeping a base value (to be update here) for each
2109 * counter, and subtracting it before outputting the stats */
2113 ovsrcu_postpone(dp_netdev_actions_free
, old_actions
);
2114 } else if (put
->flags
& DPIF_FP_CREATE
) {
2117 /* Overlapping flow. */
2121 ovs_mutex_unlock(&pmd
->flow_mutex
);
2122 dp_netdev_pmd_unref(pmd
);
2128 dpif_netdev_flow_del(struct dpif
*dpif
, const struct dpif_flow_del
*del
)
2130 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
2131 struct dp_netdev_flow
*netdev_flow
;
2132 struct dp_netdev_pmd_thread
*pmd
;
2133 unsigned pmd_id
= del
->pmd_id
== PMD_ID_NULL
2134 ? NON_PMD_CORE_ID
: del
->pmd_id
;
2137 pmd
= dp_netdev_get_pmd(dp
, pmd_id
);
2142 ovs_mutex_lock(&pmd
->flow_mutex
);
2143 netdev_flow
= dp_netdev_pmd_find_flow(pmd
, del
->ufid
, del
->key
,
2147 get_dpif_flow_stats(netdev_flow
, del
->stats
);
2149 dp_netdev_pmd_remove_flow(pmd
, netdev_flow
);
2153 ovs_mutex_unlock(&pmd
->flow_mutex
);
2154 dp_netdev_pmd_unref(pmd
);
2159 struct dpif_netdev_flow_dump
{
2160 struct dpif_flow_dump up
;
2161 struct cmap_position poll_thread_pos
;
2162 struct cmap_position flow_pos
;
2163 struct dp_netdev_pmd_thread
*cur_pmd
;
2165 struct ovs_mutex mutex
;
2168 static struct dpif_netdev_flow_dump
*
2169 dpif_netdev_flow_dump_cast(struct dpif_flow_dump
*dump
)
2171 return CONTAINER_OF(dump
, struct dpif_netdev_flow_dump
, up
);
2174 static struct dpif_flow_dump
*
2175 dpif_netdev_flow_dump_create(const struct dpif
*dpif_
, bool terse
)
2177 struct dpif_netdev_flow_dump
*dump
;
2179 dump
= xzalloc(sizeof *dump
);
2180 dpif_flow_dump_init(&dump
->up
, dpif_
);
2181 dump
->up
.terse
= terse
;
2182 ovs_mutex_init(&dump
->mutex
);
2188 dpif_netdev_flow_dump_destroy(struct dpif_flow_dump
*dump_
)
2190 struct dpif_netdev_flow_dump
*dump
= dpif_netdev_flow_dump_cast(dump_
);
2192 ovs_mutex_destroy(&dump
->mutex
);
2197 struct dpif_netdev_flow_dump_thread
{
2198 struct dpif_flow_dump_thread up
;
2199 struct dpif_netdev_flow_dump
*dump
;
2200 struct odputil_keybuf keybuf
[FLOW_DUMP_MAX_BATCH
];
2201 struct odputil_keybuf maskbuf
[FLOW_DUMP_MAX_BATCH
];
static struct dpif_netdev_flow_dump_thread *
dpif_netdev_flow_dump_thread_cast(struct dpif_flow_dump_thread *thread)
{
    return CONTAINER_OF(thread, struct dpif_netdev_flow_dump_thread, up);
}
static struct dpif_flow_dump_thread *
dpif_netdev_flow_dump_thread_create(struct dpif_flow_dump *dump_)
{
    struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_);
    struct dpif_netdev_flow_dump_thread *thread;

    thread = xmalloc(sizeof *thread);
    dpif_flow_dump_thread_init(&thread->up, &dump->up);
    thread->dump = dump;
    return &thread->up;
}
static void
dpif_netdev_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_)
{
    struct dpif_netdev_flow_dump_thread *thread
        = dpif_netdev_flow_dump_thread_cast(thread_);

    free(thread);
}
static int
dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
                           struct dpif_flow *flows, int max_flows)
{
    struct dpif_netdev_flow_dump_thread *thread
        = dpif_netdev_flow_dump_thread_cast(thread_);
    struct dpif_netdev_flow_dump *dump = thread->dump;
    struct dp_netdev_flow *netdev_flows[FLOW_DUMP_MAX_BATCH];
    int n_flows = 0;
    int i;

    ovs_mutex_lock(&dump->mutex);
    if (!dump->status) {
        struct dpif_netdev *dpif = dpif_netdev_cast(thread->up.dpif);
        struct dp_netdev *dp = get_dp_netdev(&dpif->dpif);
        struct dp_netdev_pmd_thread *pmd = dump->cur_pmd;
        int flow_limit = MIN(max_flows, FLOW_DUMP_MAX_BATCH);

        /* On the first call to dump_next(), extracts the first pmd thread.
         * If there is no pmd thread, returns immediately. */
        if (!pmd) {
            pmd = dp_netdev_pmd_get_next(dp, &dump->poll_thread_pos);
            if (!pmd) {
                ovs_mutex_unlock(&dump->mutex);
                return n_flows;
            }
        }

        do {
            for (n_flows = 0; n_flows < flow_limit; n_flows++) {
                struct cmap_node *node;

                node = cmap_next_position(&pmd->flow_table, &dump->flow_pos);
                if (!node) {
                    break;
                }
                netdev_flows[n_flows] = CONTAINER_OF(node,
                                                     struct dp_netdev_flow,
                                                     node);
            }
            /* When finishing dumping the current pmd thread, moves to
             * the next. */
            if (n_flows < flow_limit) {
                memset(&dump->flow_pos, 0, sizeof dump->flow_pos);
                dp_netdev_pmd_unref(pmd);
                pmd = dp_netdev_pmd_get_next(dp, &dump->poll_thread_pos);
                if (!pmd) {
                    dump->status = EOF;
                    break;
                }
            }
            /* Keeps the reference for the next caller. */
            dump->cur_pmd = pmd;

            /* If the current dump is empty, do not exit the loop, since the
             * remaining pmds could have flows to be dumped.  Just dumps again
             * on the new 'pmd'. */
        } while (!n_flows);
    }
    ovs_mutex_unlock(&dump->mutex);

    for (i = 0; i < n_flows; i++) {
        struct odputil_keybuf *maskbuf = &thread->maskbuf[i];
        struct odputil_keybuf *keybuf = &thread->keybuf[i];
        struct dp_netdev_flow *netdev_flow = netdev_flows[i];
        struct dpif_flow *f = &flows[i];
        struct ofpbuf key, mask;

        ofpbuf_use_stack(&key, keybuf, sizeof *keybuf);
        ofpbuf_use_stack(&mask, maskbuf, sizeof *maskbuf);
        dp_netdev_flow_to_dpif_flow(netdev_flow, &key, &mask, f,
                                    dump->up.terse);
    }

    return n_flows;
}
static int
dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_pmd_thread *pmd;
    struct dp_packet *pp;

    if (dp_packet_size(execute->packet) < ETH_HEADER_LEN ||
        dp_packet_size(execute->packet) > UINT16_MAX) {
        return EINVAL;
    }

    /* Tries finding the 'pmd'.  If NULL is returned, that means
     * the current thread is a non-pmd thread and should use
     * dp_netdev_get_pmd(dp, NON_PMD_CORE_ID). */
    pmd = ovsthread_getspecific(dp->per_pmd_key);
    if (!pmd) {
        pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID);
    }

    /* If the current thread is non-pmd thread, acquires
     * the 'non_pmd_mutex'. */
    if (pmd->core_id == NON_PMD_CORE_ID) {
        ovs_mutex_lock(&dp->non_pmd_mutex);
        ovs_mutex_lock(&dp->port_mutex);
    }

    pp = execute->packet;
    dp_netdev_execute_actions(pmd, &pp, 1, false, execute->actions,
                              execute->actions_len);
    if (pmd->core_id == NON_PMD_CORE_ID) {
        dp_netdev_pmd_unref(pmd);
        ovs_mutex_unlock(&dp->port_mutex);
        ovs_mutex_unlock(&dp->non_pmd_mutex);
    }

    return 0;
}
static void
dpif_netdev_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops)
{
    size_t i;

    for (i = 0; i < n_ops; i++) {
        struct dpif_op *op = ops[i];

        switch (op->type) {
        case DPIF_OP_FLOW_PUT:
            op->error = dpif_netdev_flow_put(dpif, &op->u.flow_put);
            break;

        case DPIF_OP_FLOW_DEL:
            op->error = dpif_netdev_flow_del(dpif, &op->u.flow_del);
            break;

        case DPIF_OP_EXECUTE:
            op->error = dpif_netdev_execute(dpif, &op->u.execute);
            break;

        case DPIF_OP_FLOW_GET:
            op->error = dpif_netdev_flow_get(dpif, &op->u.flow_get);
            break;
        }
    }
}
/* Returns true if the configuration for rx queues or cpu mask
 * is changed. */
static bool
pmd_config_changed(const struct dp_netdev *dp, size_t rxqs, const char *cmask)
{
    if (dp->n_dpdk_rxqs != rxqs) {
        return true;
    } else {
        if (dp->pmd_cmask != NULL && cmask != NULL) {
            return strcmp(dp->pmd_cmask, cmask);
        } else {
            return (dp->pmd_cmask != NULL || cmask != NULL);
        }
    }
}
/* Resets pmd threads if the configuration for 'rxq's or cpu mask changes. */
static int
dpif_netdev_pmd_set(struct dpif *dpif, unsigned int n_rxqs, const char *cmask)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    if (pmd_config_changed(dp, n_rxqs, cmask)) {
        struct dp_netdev_port *port;

        dp_netdev_destroy_all_pmds(dp);

        CMAP_FOR_EACH (port, node, &dp->ports) {
            if (netdev_is_pmd(port->netdev)) {
                int i, err;

                /* Closes the existing 'rxq's. */
                for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
                    netdev_rxq_close(port->rxq[i]);
                    port->rxq[i] = NULL;
                }

                /* Sets the new rx queue config. */
                err = netdev_set_multiq(port->netdev,
                                        ovs_numa_get_n_cores() + 1,
                                        n_rxqs);
                if (err && (err != EOPNOTSUPP)) {
                    VLOG_ERR("Failed to set dpdk interface %s rx_queue to:"
                             " %u", netdev_get_name(port->netdev),
                             n_rxqs);
                    return err;
                }

                /* If the set_multiq() above succeeds, reopens the 'rxq's. */
                port->rxq = xrealloc(port->rxq, sizeof *port->rxq
                                     * netdev_n_rxq(port->netdev));
                for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
                    netdev_rxq_open(port->netdev, &port->rxq[i], i);
                }
            }
        }
        dp->n_dpdk_rxqs = n_rxqs;

        /* Reconfigures the cpu mask. */
        ovs_numa_set_cpu_mask(cmask);
        free(dp->pmd_cmask);
        dp->pmd_cmask = cmask ? xstrdup(cmask) : NULL;

        /* Restores the non-pmd. */
        dp_netdev_set_nonpmd(dp);
        /* Restores all pmd threads. */
        dp_netdev_reset_pmd_threads(dp);
    }

    return 0;
}
static int
dpif_netdev_queue_to_priority(const struct dpif *dpif OVS_UNUSED,
                              uint32_t queue_id, uint32_t *priority)
{
    *priority = queue_id;
    return 0;
}
/* Creates and returns a new 'struct dp_netdev_actions', whose actions are
 * a copy of the 'size' bytes of 'actions'. */
struct dp_netdev_actions *
dp_netdev_actions_create(const struct nlattr *actions, size_t size)
{
    struct dp_netdev_actions *netdev_actions;

    netdev_actions = xmalloc(sizeof *netdev_actions + size);
    memcpy(netdev_actions->actions, actions, size);
    netdev_actions->size = size;

    return netdev_actions;
}
struct dp_netdev_actions *
dp_netdev_flow_get_actions(const struct dp_netdev_flow *flow)
{
    return ovsrcu_get(struct dp_netdev_actions *, &flow->actions);
}
static void
dp_netdev_actions_free(struct dp_netdev_actions *actions)
{
    free(actions);
}
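
/* Usage sketch for the three helpers above (illustrative only): an actions
 * copy is immutable once published, so an update allocates a new copy,
 * publishes it with ovsrcu_set(), and defers freeing the old copy past the
 * RCU grace period, exactly as the flow_put path does:
 *
 *     struct dp_netdev_actions *new = dp_netdev_actions_create(acts, len);
 *     struct dp_netdev_actions *old = dp_netdev_flow_get_actions(flow);
 *
 *     ovsrcu_set(&flow->actions, new);
 *     ovsrcu_postpone(dp_netdev_actions_free, old);
 */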
static inline unsigned long long
cycles_counter(void)
{
#ifdef DPDK_NETDEV
    return rte_get_tsc_cycles();
#else
    return 0;
#endif
}
/* Fake mutex to make sure that the calls to cycles_count_* are balanced */
extern struct ovs_mutex cycles_counter_fake_mutex;

/* Start counting cycles.  Must be followed by 'cycles_count_end()' */
static inline void
cycles_count_start(struct dp_netdev_pmd_thread *pmd)
    OVS_ACQUIRES(&cycles_counter_fake_mutex)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    pmd->last_cycles = cycles_counter();
}

/* Stop counting cycles and add them to the counter 'type' */
static inline void
cycles_count_end(struct dp_netdev_pmd_thread *pmd,
                 enum pmd_cycles_counter_type type)
    OVS_RELEASES(&cycles_counter_fake_mutex)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    unsigned long long interval = cycles_counter() - pmd->last_cycles;

    non_atomic_ullong_add(&pmd->cycles.n[type], interval);
}
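
/* Usage sketch (illustrative): every cycles_count_start() must be paired
 * with a cycles_count_end() attributing the interval to one counter, or the
 * fake-mutex annotations above will flag the imbalance at compile time:
 *
 *     cycles_count_start(pmd);
 *     error = netdev_rxq_recv(rxq, packets, &cnt);
 *     cycles_count_end(pmd, PMD_CYCLES_POLLING);
 */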
static void
dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
                           struct dp_netdev_port *port,
                           struct netdev_rxq *rxq)
{
    struct dp_packet *packets[NETDEV_MAX_BURST];
    int error, cnt;

    cycles_count_start(pmd);
    error = netdev_rxq_recv(rxq, packets, &cnt);
    cycles_count_end(pmd, PMD_CYCLES_POLLING);
    if (!error) {
        int i;

        *recirc_depth_get() = 0;

        /* XXX: initialize md in netdev implementation. */
        for (i = 0; i < cnt; i++) {
            packets[i]->md = port->md;
        }
        cycles_count_start(pmd);
        dp_netdev_input(pmd, packets, cnt);
        cycles_count_end(pmd, PMD_CYCLES_PROCESSING);
    } else if (error != EAGAIN && error != EOPNOTSUPP) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
                    netdev_get_name(port->netdev), ovs_strerror(error));
    }
}
/* Return true if needs to revalidate datapath flows. */
static bool
dpif_netdev_run(struct dpif *dpif)
{
    struct dp_netdev_port *port;
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_pmd_thread *non_pmd = dp_netdev_get_pmd(dp,
                                                             NON_PMD_CORE_ID);
    uint64_t new_tnl_seq;

    ovs_mutex_lock(&dp->non_pmd_mutex);
    CMAP_FOR_EACH (port, node, &dp->ports) {
        if (!netdev_is_pmd(port->netdev)) {
            int i;

            for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
                dp_netdev_process_rxq_port(non_pmd, port, port->rxq[i]);
            }
        }
    }
    ovs_mutex_unlock(&dp->non_pmd_mutex);
    dp_netdev_pmd_unref(non_pmd);

    tnl_arp_cache_run();
    new_tnl_seq = seq_read(tnl_conf_seq);

    if (dp->last_tnl_conf_seq != new_tnl_seq) {
        dp->last_tnl_conf_seq = new_tnl_seq;
        return true;
    }
    return false;
}
static void
dpif_netdev_wait(struct dpif *dpif)
{
    struct dp_netdev_port *port;
    struct dp_netdev *dp = get_dp_netdev(dpif);

    ovs_mutex_lock(&dp_netdev_mutex);
    CMAP_FOR_EACH (port, node, &dp->ports) {
        if (!netdev_is_pmd(port->netdev)) {
            int i;

            for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
                netdev_rxq_wait(port->rxq[i]);
            }
        }
    }
    ovs_mutex_unlock(&dp_netdev_mutex);
    seq_wait(tnl_conf_seq, dp->last_tnl_conf_seq);
}
struct rxq_poll {
    struct dp_netdev_port *port;
    struct netdev_rxq *rx;
};
static int
pmd_load_queues(struct dp_netdev_pmd_thread *pmd,
                struct rxq_poll **ppoll_list, int poll_cnt)
{
    struct rxq_poll *poll_list = *ppoll_list;
    struct dp_netdev_port *port;
    int n_pmds_on_numa, index, i;

    /* Simple scheduler for netdev rx polling. */
    for (i = 0; i < poll_cnt; i++) {
        port_unref(poll_list[i].port);
    }

    poll_cnt = 0;
    n_pmds_on_numa = get_n_pmd_threads_on_numa(pmd->dp, pmd->numa_id);
    index = 0;

    CMAP_FOR_EACH (port, node, &pmd->dp->ports) {
        /* Calls port_try_ref() to prevent the main thread
         * from deleting the port. */
        if (port_try_ref(port)) {
            if (netdev_is_pmd(port->netdev)
                && netdev_get_numa_id(port->netdev) == pmd->numa_id) {
                int i;

                for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
                    if ((index % n_pmds_on_numa) == pmd->index) {
                        poll_list = xrealloc(poll_list,
                                        sizeof *poll_list * (poll_cnt + 1));

                        port_ref(port);
                        poll_list[poll_cnt].port = port;
                        poll_list[poll_cnt].rx = port->rxq[i];
                        poll_cnt++;
                    }
                    index++;
                }
            }
            /* Unrefs the port_try_ref(). */
            port_unref(port);
        }
    }

    *ppoll_list = poll_list;
    return poll_cnt;
}
static void *
pmd_thread_main(void *f_)
{
    struct dp_netdev_pmd_thread *pmd = f_;
    unsigned int lc = 0;
    struct rxq_poll *poll_list;
    unsigned int port_seq = PMD_INITIAL_SEQ;
    int poll_cnt;
    int i;

    poll_cnt = 0;
    poll_list = NULL;

    /* Stores the pmd thread's 'pmd' to 'per_pmd_key'. */
    ovsthread_setspecific(pmd->dp->per_pmd_key, pmd);
    pmd_thread_setaffinity_cpu(pmd->core_id);
reload:
    emc_cache_init(&pmd->flow_cache);
    poll_cnt = pmd_load_queues(pmd, &poll_list, poll_cnt);

    /* Signal here to make sure the pmd finishes
     * reloading the updated configuration. */
    dp_netdev_pmd_reload_done(pmd);

    for (;;) {
        for (i = 0; i < poll_cnt; i++) {
            dp_netdev_process_rxq_port(pmd, poll_list[i].port, poll_list[i].rx);
        }

        if (lc++ > 1024) {
            unsigned int seq;

            lc = 0;

            emc_cache_slow_sweep(&pmd->flow_cache);
            ovsrcu_quiesce();

            atomic_read_relaxed(&pmd->change_seq, &seq);
            if (seq != port_seq) {
                port_seq = seq;
                break;
            }
        }
    }

    emc_cache_uninit(&pmd->flow_cache);

    if (!latch_is_set(&pmd->exit_latch)) {
        goto reload;
    }

    for (i = 0; i < poll_cnt; i++) {
        port_unref(poll_list[i].port);
    }

    dp_netdev_pmd_reload_done(pmd);

    free(poll_list);
    return NULL;
}
static void
dp_netdev_disable_upcall(struct dp_netdev *dp)
    OVS_ACQUIRES(dp->upcall_rwlock)
{
    fat_rwlock_wrlock(&dp->upcall_rwlock);
}

static void
dpif_netdev_disable_upcall(struct dpif *dpif)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp_netdev_disable_upcall(dp);
}

static void
dp_netdev_enable_upcall(struct dp_netdev *dp)
    OVS_RELEASES(dp->upcall_rwlock)
{
    fat_rwlock_unlock(&dp->upcall_rwlock);
}

static void
dpif_netdev_enable_upcall(struct dpif *dpif)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp_netdev_enable_upcall(dp);
}
void
dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd)
{
    ovs_mutex_lock(&pmd->cond_mutex);
    xpthread_cond_signal(&pmd->cond);
    ovs_mutex_unlock(&pmd->cond_mutex);
}
/* Finds and refs the dp_netdev_pmd_thread on core 'core_id'.  Returns
 * the pointer if it succeeds, otherwise NULL.
 *
 * Caller must unref the returned reference.  */
static struct dp_netdev_pmd_thread *
dp_netdev_get_pmd(struct dp_netdev *dp, unsigned core_id)
{
    struct dp_netdev_pmd_thread *pmd;
    const struct cmap_node *pnode;

    pnode = cmap_find(&dp->poll_threads, hash_int(core_id, 0));
    if (!pnode) {
        return NULL;
    }
    pmd = CONTAINER_OF(pnode, struct dp_netdev_pmd_thread, node);

    return dp_netdev_pmd_try_ref(pmd) ? pmd : NULL;
}
/* Sets the 'struct dp_netdev_pmd_thread' for non-pmd threads. */
static void
dp_netdev_set_nonpmd(struct dp_netdev *dp)
{
    struct dp_netdev_pmd_thread *non_pmd;

    non_pmd = xzalloc(sizeof *non_pmd);
    dp_netdev_configure_pmd(non_pmd, dp, 0, NON_PMD_CORE_ID,
                            OVS_NUMA_UNSPEC);
}
/* Caller must have valid pointer to 'pmd'. */
static bool
dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd)
{
    return ovs_refcount_try_ref_rcu(&pmd->ref_cnt);
}

static void
dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd)
{
    if (pmd && ovs_refcount_unref(&pmd->ref_cnt) == 1) {
        ovsrcu_postpone(dp_netdev_destroy_pmd, pmd);
    }
}
/* Given cmap position 'pos', tries to ref the next node.  If try_ref()
 * fails, keeps checking for the next node until reaching the end of cmap.
 *
 * Caller must unref the returned reference. */
static struct dp_netdev_pmd_thread *
dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos)
{
    struct dp_netdev_pmd_thread *next;

    do {
        struct cmap_node *node;

        node = cmap_next_position(&dp->poll_threads, pos);
        next = node ? CONTAINER_OF(node, struct dp_netdev_pmd_thread, node)
            : NULL;
    } while (next && !dp_netdev_pmd_try_ref(next));

    return next;
}
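
/* Iteration sketch (illustrative): dp_netdev_get_pmd() and
 * dp_netdev_pmd_get_next() both return referenced pointers, so each
 * successful lookup must be paired with dp_netdev_pmd_unref():
 *
 *     struct cmap_position pos;
 *     struct dp_netdev_pmd_thread *pmd;
 *
 *     memset(&pos, 0, sizeof pos);
 *     while ((pmd = dp_netdev_pmd_get_next(dp, &pos))) {
 *         ... use 'pmd' ...
 *         dp_netdev_pmd_unref(pmd);
 *     }
 */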
static int
core_id_to_qid(unsigned core_id)
{
    if (core_id != NON_PMD_CORE_ID) {
        return core_id;
    }

    return ovs_numa_get_n_cores();
}
/* Configures the 'pmd' based on the input argument. */
static void
dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
                        int index, unsigned core_id, int numa_id)
{
    pmd->dp = dp;
    pmd->index = index;
    pmd->core_id = core_id;
    pmd->tx_qid = core_id_to_qid(core_id);
    pmd->numa_id = numa_id;

    ovs_refcount_init(&pmd->ref_cnt);
    latch_init(&pmd->exit_latch);
    atomic_init(&pmd->change_seq, PMD_INITIAL_SEQ);
    xpthread_cond_init(&pmd->cond, NULL);
    ovs_mutex_init(&pmd->cond_mutex);
    ovs_mutex_init(&pmd->flow_mutex);
    dpcls_init(&pmd->cls);
    cmap_init(&pmd->flow_table);
    /* init the 'flow_cache' since there is no
     * actual thread created for NON_PMD_CORE_ID. */
    if (core_id == NON_PMD_CORE_ID) {
        emc_cache_init(&pmd->flow_cache);
    }
    cmap_insert(&dp->poll_threads, CONST_CAST(struct cmap_node *, &pmd->node),
                hash_int(core_id, 0));
}
static void
dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd)
{
    dp_netdev_pmd_flow_flush(pmd);
    dpcls_destroy(&pmd->cls);
    cmap_destroy(&pmd->flow_table);
    ovs_mutex_destroy(&pmd->flow_mutex);
    latch_destroy(&pmd->exit_latch);
    xpthread_cond_destroy(&pmd->cond);
    ovs_mutex_destroy(&pmd->cond_mutex);
    free(pmd);
}
/* Stops the pmd thread, removes it from the 'dp->poll_threads',
 * and unrefs the struct. */
static void
dp_netdev_del_pmd(struct dp_netdev_pmd_thread *pmd)
{
    /* Uninit the 'flow_cache' since there is
     * no actual thread to uninit it for NON_PMD_CORE_ID. */
    if (pmd->core_id == NON_PMD_CORE_ID) {
        emc_cache_uninit(&pmd->flow_cache);
    } else {
        latch_set(&pmd->exit_latch);
        dp_netdev_reload_pmd__(pmd);
        ovs_numa_unpin_core(pmd->core_id);
        xpthread_join(pmd->thread, NULL);
    }
    cmap_remove(&pmd->dp->poll_threads, &pmd->node, hash_int(pmd->core_id, 0));
    dp_netdev_pmd_unref(pmd);
}
/* Destroys all pmd threads. */
static void
dp_netdev_destroy_all_pmds(struct dp_netdev *dp)
{
    struct dp_netdev_pmd_thread *pmd;

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        dp_netdev_del_pmd(pmd);
    }
}
/* Deletes all pmd threads on numa node 'numa_id'. */
static void
dp_netdev_del_pmds_on_numa(struct dp_netdev *dp, int numa_id)
{
    struct dp_netdev_pmd_thread *pmd;

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        if (pmd->numa_id == numa_id) {
            dp_netdev_del_pmd(pmd);
        }
    }
}
/* Checks the numa node id of 'netdev' and starts pmd threads for
 * the numa node. */
static void
dp_netdev_set_pmds_on_numa(struct dp_netdev *dp, int numa_id)
{
    int n_pmds;

    if (!ovs_numa_numa_id_is_valid(numa_id)) {
        VLOG_ERR("Cannot create pmd threads due to invalid numa id (%d)",
                 numa_id);
        return;
    }

    n_pmds = get_n_pmd_threads_on_numa(dp, numa_id);

    /* If there are already pmd threads created for the numa node
     * in which 'netdev' is on, do nothing.  Else, creates the
     * pmd threads for the numa node. */
    if (!n_pmds) {
        int can_have, n_unpinned, i;

        n_unpinned = ovs_numa_get_n_unpinned_cores_on_numa(numa_id);
        if (!n_unpinned) {
            VLOG_ERR("Cannot create pmd threads: out of unpinned "
                     "cores on numa node");
            return;
        }

        /* If cpu mask is specified, uses all unpinned cores, otherwise
         * tries creating NR_PMD_THREADS pmd threads. */
        can_have = dp->pmd_cmask ? n_unpinned : MIN(n_unpinned, NR_PMD_THREADS);
        for (i = 0; i < can_have; i++) {
            struct dp_netdev_pmd_thread *pmd = xzalloc(sizeof *pmd);
            unsigned core_id = ovs_numa_get_unpinned_core_on_numa(numa_id);

            dp_netdev_configure_pmd(pmd, dp, i, core_id, numa_id);
            /* Each thread will distribute all devices rx-queues among
             * themselves. */
            pmd->thread = ovs_thread_create("pmd", pmd_thread_main, pmd);
        }
        VLOG_INFO("Created %d pmd threads on numa node %d", can_have, numa_id);
    }
}
/* Called after pmd threads config change.  Restarts pmd threads with
 * new configuration. */
static void
dp_netdev_reset_pmd_threads(struct dp_netdev *dp)
{
    struct dp_netdev_port *port;

    CMAP_FOR_EACH (port, node, &dp->ports) {
        if (netdev_is_pmd(port->netdev)) {
            int numa_id = netdev_get_numa_id(port->netdev);

            dp_netdev_set_pmds_on_numa(dp, numa_id);
        }
    }
}
static char *
dpif_netdev_get_datapath_version(void)
{
    return xstrdup("<built-in>");
}
static void
dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow, int cnt, int size,
                    uint16_t tcp_flags, long long now)
{
    uint16_t flags;

    atomic_store_relaxed(&netdev_flow->stats.used, now);
    non_atomic_ullong_add(&netdev_flow->stats.packet_count, cnt);
    non_atomic_ullong_add(&netdev_flow->stats.byte_count, size);
    atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags);
    flags |= tcp_flags;
    atomic_store_relaxed(&netdev_flow->stats.tcp_flags, flags);
}
static void
dp_netdev_count_packet(struct dp_netdev_pmd_thread *pmd,
                       enum dp_stat_type type, int cnt)
{
    non_atomic_ullong_add(&pmd->stats.n[type], cnt);
}
static int
dp_netdev_upcall(struct dp_netdev_pmd_thread *pmd, struct dp_packet *packet_,
                 struct flow *flow, struct flow_wildcards *wc, ovs_u128 *ufid,
                 enum dpif_upcall_type type, const struct nlattr *userdata,
                 struct ofpbuf *actions, struct ofpbuf *put_actions)
{
    struct dp_netdev *dp = pmd->dp;

    if (OVS_UNLIKELY(!dp->upcall_cb)) {
        return ENODEV;
    }

    if (OVS_UNLIKELY(!VLOG_DROP_DBG(&upcall_rl))) {
        struct ds ds = DS_EMPTY_INITIALIZER;
        struct ofpbuf key;
        char *packet_str;

        ofpbuf_init(&key, 0);
        odp_flow_key_from_flow(&key, flow, &wc->masks, flow->in_port.odp_port,
                               true);
        packet_str = ofp_packet_to_string(dp_packet_data(packet_),
                                          dp_packet_size(packet_));

        odp_flow_key_format(key.data, key.size, &ds);

        VLOG_DBG("%s: %s upcall:\n%s\n%s", dp->name,
                 dpif_upcall_type_to_string(type), ds_cstr(&ds), packet_str);

        ofpbuf_uninit(&key);
        free(packet_str);
        ds_destroy(&ds);
    }

    return dp->upcall_cb(packet_, flow, ufid, pmd->core_id, type, userdata,
                         actions, wc, put_actions, dp->upcall_aux);
}
static inline uint32_t
dpif_netdev_packet_get_rss_hash(struct dp_packet *packet,
                                const struct miniflow *mf)
{
    uint32_t hash, recirc_depth;

    hash = dp_packet_get_rss_hash(packet);
    if (OVS_UNLIKELY(!hash)) {
        hash = miniflow_hash_5tuple(mf, 0);
        dp_packet_set_rss_hash(packet, hash);
    }

    /* The RSS hash must account for the recirculation depth to avoid
     * collisions in the exact match cache */
    recirc_depth = *recirc_depth_get_unsafe();
    if (OVS_UNLIKELY(recirc_depth)) {
        hash = hash_finish(hash, recirc_depth);
        dp_packet_set_rss_hash(packet, hash);
    }
    return hash;
}
struct packet_batch {
    unsigned int packet_count;
    unsigned int byte_count;
    uint16_t tcp_flags;

    struct dp_netdev_flow *flow;

    struct dp_packet *packets[NETDEV_MAX_BURST];
};
static inline void
packet_batch_update(struct packet_batch *batch, struct dp_packet *packet,
                    const struct miniflow *mf)
{
    batch->tcp_flags |= miniflow_get_tcp_flags(mf);
    batch->packets[batch->packet_count++] = packet;
    batch->byte_count += dp_packet_size(packet);
}
static inline void
packet_batch_init(struct packet_batch *batch, struct dp_netdev_flow *flow)
{
    flow->batch = batch;

    batch->flow = flow;
    batch->packet_count = 0;
    batch->byte_count = 0;
    batch->tcp_flags = 0;
}
static inline void
packet_batch_execute(struct packet_batch *batch,
                     struct dp_netdev_pmd_thread *pmd,
                     long long now)
{
    struct dp_netdev_actions *actions;
    struct dp_netdev_flow *flow = batch->flow;

    dp_netdev_flow_used(flow, batch->packet_count, batch->byte_count,
                        batch->tcp_flags, now);

    actions = dp_netdev_flow_get_actions(flow);

    dp_netdev_execute_actions(pmd, batch->packets, batch->packet_count, true,
                              actions->actions, actions->size);
}
static inline void
dp_netdev_queue_batches(struct dp_packet *pkt,
                        struct dp_netdev_flow *flow, const struct miniflow *mf,
                        struct packet_batch *batches, size_t *n_batches)
{
    struct packet_batch *batch = flow->batch;

    if (OVS_LIKELY(batch)) {
        packet_batch_update(batch, pkt, mf);
        return;
    }

    batch = &batches[(*n_batches)++];
    packet_batch_init(batch, flow);
    packet_batch_update(batch, pkt, mf);
}
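
/* Batching sketch (illustrative): per-flow batches amortize the action
 * execution cost across every packet of an input burst that hit the same
 * flow.  Packets are queued per flow, then each batch executes once:
 *
 *     struct packet_batch batches[NETDEV_MAX_BURST];
 *     size_t n_batches = 0;
 *
 *     dp_netdev_queue_batches(pkt, flow, mf, batches, &n_batches);
 *     ...
 *     packet_batch_execute(&batches[0], pmd, time_msec());
 */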
static inline void
dp_packet_swap(struct dp_packet **a, struct dp_packet **b)
{
    struct dp_packet *tmp = *a;

    *a = *b;
    *b = tmp;
}
/* Try to process all ('cnt') the 'packets' using only the exact match cache
 * 'flow_cache'.  If a flow is not found for a packet 'packets[i]', the
 * miniflow is copied into 'keys' and the packet pointer is moved to the
 * beginning of the 'packets' array.
 *
 * The function returns the number of packets that need to be processed in the
 * 'packets' array (they have been moved to the beginning of the vector).
 */
static inline size_t
emc_processing(struct dp_netdev_pmd_thread *pmd, struct dp_packet **packets,
               size_t cnt, struct netdev_flow_key *keys,
               struct packet_batch batches[], size_t *n_batches)
{
    struct emc_cache *flow_cache = &pmd->flow_cache;
    struct netdev_flow_key key;
    size_t i, notfound_cnt = 0;

    miniflow_initialize(&key.mf, key.buf);
    for (i = 0; i < cnt; i++) {
        struct dp_netdev_flow *flow;

        if (OVS_UNLIKELY(dp_packet_size(packets[i]) < ETH_HEADER_LEN)) {
            dp_packet_delete(packets[i]);
            continue;
        }

        miniflow_extract(packets[i], &key.mf);
        key.len = 0; /* Not computed yet. */
        key.hash = dpif_netdev_packet_get_rss_hash(packets[i], &key.mf);

        flow = emc_lookup(flow_cache, &key);
        if (OVS_LIKELY(flow)) {
            dp_netdev_queue_batches(packets[i], flow, &key.mf, batches,
                                    n_batches);
        } else {
            if (i != notfound_cnt) {
                dp_packet_swap(&packets[i], &packets[notfound_cnt]);
            }

            keys[notfound_cnt++] = key;
        }
    }

    dp_netdev_count_packet(pmd, DP_STAT_EXACT_HIT, cnt - notfound_cnt);

    return notfound_cnt;
}
static inline void
fast_path_processing(struct dp_netdev_pmd_thread *pmd,
                     struct dp_packet **packets, size_t cnt,
                     struct netdev_flow_key *keys,
                     struct packet_batch batches[], size_t *n_batches)
{
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = cnt;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct dpcls_rule *rules[PKT_ARRAY_SIZE];
    struct dp_netdev *dp = pmd->dp;
    struct emc_cache *flow_cache = &pmd->flow_cache;
    int miss_cnt = 0, lost_cnt = 0;
    bool any_miss;
    size_t i;

    for (i = 0; i < cnt; i++) {
        /* Key length is needed in all the cases, hash computed on demand. */
        keys[i].len = netdev_flow_key_size(count_1bits(keys[i].mf.map));
    }
    any_miss = !dpcls_lookup(&pmd->cls, keys, rules, cnt);
    if (OVS_UNLIKELY(any_miss) && !fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
        uint64_t actions_stub[512 / 8], slow_stub[512 / 8];
        struct ofpbuf actions, put_actions;
        ovs_u128 ufid;

        ofpbuf_use_stub(&actions, actions_stub, sizeof actions_stub);
        ofpbuf_use_stub(&put_actions, slow_stub, sizeof slow_stub);

        for (i = 0; i < cnt; i++) {
            struct dp_netdev_flow *netdev_flow;
            struct ofpbuf *add_actions;
            struct match match;
            int error;

            if (OVS_LIKELY(rules[i])) {
                continue;
            }

            /* It's possible that an earlier slow path execution installed
             * a rule covering this flow.  In this case, it's a lot cheaper
             * to catch it here than execute a miss. */
            netdev_flow = dp_netdev_pmd_lookup_flow(pmd, &keys[i]);
            if (netdev_flow) {
                rules[i] = &netdev_flow->cr;
                continue;
            }

            miss_cnt++;

            miniflow_expand(&keys[i].mf, &match.flow);

            ofpbuf_clear(&actions);
            ofpbuf_clear(&put_actions);

            dpif_flow_hash(dp->dpif, &match.flow, sizeof match.flow, &ufid);
            error = dp_netdev_upcall(pmd, packets[i], &match.flow, &match.wc,
                                     &ufid, DPIF_UC_MISS, NULL, &actions,
                                     &put_actions);
            if (OVS_UNLIKELY(error && error != ENOSPC)) {
                dp_packet_delete(packets[i]);
                lost_cnt++;
                continue;
            }

            /* We can't allow the packet batching in the next loop to execute
             * the actions.  Otherwise, if there are any slow path actions,
             * we'll send the packet up twice. */
            dp_netdev_execute_actions(pmd, &packets[i], 1, true,
                                      actions.data, actions.size);

            add_actions = put_actions.size ? &put_actions : &actions;
            if (OVS_LIKELY(error != ENOSPC)) {
                /* XXX: There's a race window where a flow covering this packet
                 * could have already been installed since we last did the flow
                 * lookup before upcall.  This could be solved by moving the
                 * mutex lock outside the loop, but that's an awful long time
                 * to be locking everyone out of making flow installs.  If we
                 * move to a per-core classifier, it would be reasonable. */
                ovs_mutex_lock(&pmd->flow_mutex);
                netdev_flow = dp_netdev_pmd_lookup_flow(pmd, &keys[i]);
                if (OVS_LIKELY(!netdev_flow)) {
                    netdev_flow = dp_netdev_flow_add(pmd, &match, &ufid,
                                                     add_actions->data,
                                                     add_actions->size);
                }
                ovs_mutex_unlock(&pmd->flow_mutex);

                emc_insert(flow_cache, &keys[i], netdev_flow);
            }
        }

        ofpbuf_uninit(&actions);
        ofpbuf_uninit(&put_actions);
        fat_rwlock_unlock(&dp->upcall_rwlock);
        dp_netdev_count_packet(pmd, DP_STAT_LOST, lost_cnt);
    } else if (OVS_UNLIKELY(any_miss)) {
        for (i = 0; i < cnt; i++) {
            if (OVS_UNLIKELY(!rules[i])) {
                dp_packet_delete(packets[i]);
                lost_cnt++;
                miss_cnt++;
            }
        }
    }

    for (i = 0; i < cnt; i++) {
        struct dp_packet *packet = packets[i];
        struct dp_netdev_flow *flow;

        if (OVS_UNLIKELY(!rules[i])) {
            continue;
        }

        flow = dp_netdev_flow_cast(rules[i]);

        emc_insert(flow_cache, &keys[i], flow);
        dp_netdev_queue_batches(packet, flow, &keys[i].mf, batches, n_batches);
    }

    dp_netdev_count_packet(pmd, DP_STAT_MASKED_HIT, cnt - miss_cnt);
    dp_netdev_count_packet(pmd, DP_STAT_MISS, miss_cnt);
    dp_netdev_count_packet(pmd, DP_STAT_LOST, lost_cnt);
}
static void
dp_netdev_input(struct dp_netdev_pmd_thread *pmd,
                struct dp_packet **packets, int cnt)
{
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = cnt;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct netdev_flow_key keys[PKT_ARRAY_SIZE];
    struct packet_batch batches[PKT_ARRAY_SIZE];
    long long now = time_msec();
    size_t newcnt, n_batches, i;

    n_batches = 0;
    newcnt = emc_processing(pmd, packets, cnt, keys, batches, &n_batches);
    if (OVS_UNLIKELY(newcnt)) {
        fast_path_processing(pmd, packets, newcnt, keys, batches, &n_batches);
    }

    for (i = 0; i < n_batches; i++) {
        batches[i].flow->batch = NULL;
    }

    for (i = 0; i < n_batches; i++) {
        packet_batch_execute(&batches[i], pmd, now);
    }
}
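
/* Note on the fast path above (summary, not original text): a packet burst
 * traverses up to three tiers, each slower but more general than the last:
 *
 *     1. emc_processing():       exact match cache, hash plus memcmp.
 *     2. fast_path_processing(): dpcls megaflow lookup, batched per subtable.
 *     3. dp_netdev_upcall():     full classification in ofproto, which
 *                                installs a new megaflow and EMC entry on
 *                                the way back down.
 */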
struct dp_netdev_execute_aux {
    struct dp_netdev_pmd_thread *pmd;
};
static int
dpif_netdev_register_upcall_cb(struct dpif *dpif, upcall_callback *cb,
                               void *aux)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp->upcall_aux = aux;
    dp->upcall_cb = cb;
    return 0;
}
static void
dp_netdev_drop_packets(struct dp_packet **packets, int cnt, bool may_steal)
{
    if (may_steal) {
        int i;

        for (i = 0; i < cnt; i++) {
            dp_packet_delete(packets[i]);
        }
    }
}
static int
push_tnl_action(const struct dp_netdev *dp,
                const struct nlattr *attr,
                struct dp_packet **packets, int cnt)
{
    struct dp_netdev_port *tun_port;
    const struct ovs_action_push_tnl *data;

    data = nl_attr_get(attr);

    tun_port = dp_netdev_lookup_port(dp, u32_to_odp(data->tnl_port));
    if (!tun_port) {
        return -EINVAL;
    }
    netdev_push_header(tun_port->netdev, packets, cnt, data);

    return 0;
}
static void
dp_netdev_clone_pkt_batch(struct dp_packet **dst_pkts,
                          struct dp_packet **src_pkts, int cnt)
{
    int i;

    for (i = 0; i < cnt; i++) {
        dst_pkts[i] = dp_packet_clone(src_pkts[i]);
    }
}
static void
dp_execute_cb(void *aux_, struct dp_packet **packets, int cnt,
              const struct nlattr *a, bool may_steal)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct dp_netdev_execute_aux *aux = aux_;
    uint32_t *depth = recirc_depth_get();
    struct dp_netdev_pmd_thread *pmd = aux->pmd;
    struct dp_netdev *dp = pmd->dp;
    int type = nl_attr_type(a);
    struct dp_netdev_port *p;
    int i;

    switch ((enum ovs_action_attr)type) {
    case OVS_ACTION_ATTR_OUTPUT:
        p = dp_netdev_lookup_port(dp, u32_to_odp(nl_attr_get_u32(a)));
        if (OVS_LIKELY(p)) {
            netdev_send(p->netdev, pmd->tx_qid, packets, cnt, may_steal);
            return;
        }
        break;

    case OVS_ACTION_ATTR_TUNNEL_PUSH:
        if (*depth < MAX_RECIRC_DEPTH) {
            struct dp_packet *tnl_pkt[NETDEV_MAX_BURST];
            int err;

            if (!may_steal) {
                dp_netdev_clone_pkt_batch(tnl_pkt, packets, cnt);
                packets = tnl_pkt;
            }

            err = push_tnl_action(dp, a, packets, cnt);
            if (!err) {
                (*depth)++;
                dp_netdev_input(pmd, packets, cnt);
                (*depth)--;
            } else {
                dp_netdev_drop_packets(tnl_pkt, cnt, !may_steal);
            }
            return;
        }
        break;

    case OVS_ACTION_ATTR_TUNNEL_POP:
        if (*depth < MAX_RECIRC_DEPTH) {
            odp_port_t portno = u32_to_odp(nl_attr_get_u32(a));

            p = dp_netdev_lookup_port(dp, portno);
            if (p) {
                struct dp_packet *tnl_pkt[NETDEV_MAX_BURST];
                int err;

                if (!may_steal) {
                    dp_netdev_clone_pkt_batch(tnl_pkt, packets, cnt);
                    packets = tnl_pkt;
                }

                err = netdev_pop_header(p->netdev, packets, cnt);
                if (!err) {

                    for (i = 0; i < cnt; i++) {
                        packets[i]->md.in_port.odp_port = portno;
                    }

                    (*depth)++;
                    dp_netdev_input(pmd, packets, cnt);
                    (*depth)--;
                } else {
                    dp_netdev_drop_packets(tnl_pkt, cnt, !may_steal);
                }
                return;
            }
        }
        break;

    case OVS_ACTION_ATTR_USERSPACE:
        if (!fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
            const struct nlattr *userdata;
            struct ofpbuf actions;
            struct flow flow;
            ovs_u128 ufid;

            userdata = nl_attr_find_nested(a, OVS_USERSPACE_ATTR_USERDATA);
            ofpbuf_init(&actions, 0);

            for (i = 0; i < cnt; i++) {
                int error;

                ofpbuf_clear(&actions);

                flow_extract(packets[i], &flow);
                dpif_flow_hash(dp->dpif, &flow, sizeof flow, &ufid);
                error = dp_netdev_upcall(pmd, packets[i], &flow, NULL, &ufid,
                                         DPIF_UC_ACTION, userdata, &actions,
                                         NULL);
                if (!error || error == ENOSPC) {
                    dp_netdev_execute_actions(pmd, &packets[i], 1, may_steal,
                                              actions.data, actions.size);
                } else if (may_steal) {
                    dp_packet_delete(packets[i]);
                }
            }
            ofpbuf_uninit(&actions);
            fat_rwlock_unlock(&dp->upcall_rwlock);

            return;
        }
        break;

    case OVS_ACTION_ATTR_RECIRC:
        if (*depth < MAX_RECIRC_DEPTH) {
            struct dp_packet *recirc_pkts[NETDEV_MAX_BURST];

            if (!may_steal) {
               dp_netdev_clone_pkt_batch(recirc_pkts, packets, cnt);
               packets = recirc_pkts;
            }

            for (i = 0; i < cnt; i++) {
                packets[i]->md.recirc_id = nl_attr_get_u32(a);
            }

            (*depth)++;
            dp_netdev_input(pmd, packets, cnt);
            (*depth)--;

            return;
        }

        VLOG_WARN("Packet dropped. Max recirculation depth exceeded.");
        break;

    case OVS_ACTION_ATTR_PUSH_VLAN:
    case OVS_ACTION_ATTR_POP_VLAN:
    case OVS_ACTION_ATTR_PUSH_MPLS:
    case OVS_ACTION_ATTR_POP_MPLS:
    case OVS_ACTION_ATTR_SET:
    case OVS_ACTION_ATTR_SET_MASKED:
    case OVS_ACTION_ATTR_SAMPLE:
    case OVS_ACTION_ATTR_HASH:
    case OVS_ACTION_ATTR_UNSPEC:
    case __OVS_ACTION_ATTR_MAX:
        OVS_NOT_REACHED();
    }

    dp_netdev_drop_packets(packets, cnt, may_steal);
}
static void
dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
                          struct dp_packet **packets, int cnt,
                          bool may_steal,
                          const struct nlattr *actions, size_t actions_len)
{
    struct dp_netdev_execute_aux aux = { pmd };

    odp_execute_actions(&aux, packets, cnt, may_steal, actions,
                        actions_len, dp_execute_cb);
}
const struct dpif_class dpif_netdev_class = {
    "netdev",
    dpif_netdev_init,
    dpif_netdev_enumerate,
    dpif_netdev_port_open_type,
    dpif_netdev_open,
    dpif_netdev_close,
    dpif_netdev_destroy,
    dpif_netdev_run,
    dpif_netdev_wait,
    dpif_netdev_get_stats,
    dpif_netdev_port_add,
    dpif_netdev_port_del,
    dpif_netdev_port_query_by_number,
    dpif_netdev_port_query_by_name,
    NULL,                       /* port_get_pid */
    dpif_netdev_port_dump_start,
    dpif_netdev_port_dump_next,
    dpif_netdev_port_dump_done,
    dpif_netdev_port_poll,
    dpif_netdev_port_poll_wait,
    dpif_netdev_flow_flush,
    dpif_netdev_flow_dump_create,
    dpif_netdev_flow_dump_destroy,
    dpif_netdev_flow_dump_thread_create,
    dpif_netdev_flow_dump_thread_destroy,
    dpif_netdev_flow_dump_next,
    dpif_netdev_operate,
    NULL,                       /* recv_set */
    NULL,                       /* handlers_set */
    dpif_netdev_pmd_set,
    dpif_netdev_queue_to_priority,
    NULL,                       /* recv */
    NULL,                       /* recv_wait */
    NULL,                       /* recv_purge */
    dpif_netdev_register_upcall_cb,
    dpif_netdev_enable_upcall,
    dpif_netdev_disable_upcall,
    dpif_netdev_get_datapath_version,
};
static void
dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED,
                              const char *argv[], void *aux OVS_UNUSED)
{
    struct dp_netdev_port *old_port;
    struct dp_netdev_port *new_port;
    struct dp_netdev *dp;
    odp_port_t port_no;

    ovs_mutex_lock(&dp_netdev_mutex);
    dp = shash_find_data(&dp_netdevs, argv[1]);
    if (!dp || !dpif_netdev_class_is_dummy(dp->class)) {
        ovs_mutex_unlock(&dp_netdev_mutex);
        unixctl_command_reply_error(conn, "unknown datapath or not a dummy");
        return;
    }
    ovs_refcount_ref(&dp->ref_cnt);
    ovs_mutex_unlock(&dp_netdev_mutex);

    ovs_mutex_lock(&dp->port_mutex);
    if (get_port_by_name(dp, argv[2], &old_port)) {
        unixctl_command_reply_error(conn, "unknown port");
        goto exit;
    }

    port_no = u32_to_odp(atoi(argv[3]));
    if (!port_no || port_no == ODPP_NONE) {
        unixctl_command_reply_error(conn, "bad port number");
        goto exit;
    }
    if (dp_netdev_lookup_port(dp, port_no)) {
        unixctl_command_reply_error(conn, "port number already in use");
        goto exit;
    }

    /* Remove old port. */
    cmap_remove(&dp->ports, &old_port->node,
                hash_port_no(old_port->md.in_port.odp_port));
    ovsrcu_postpone(free, old_port);

    /* Insert new port (cmap semantics mean we cannot re-insert 'old_port'). */
    new_port = xmemdup(old_port, sizeof *old_port);
    new_port->md.in_port.odp_port = port_no;
    cmap_insert(&dp->ports, &new_port->node, hash_port_no(port_no));

    seq_change(dp->port_seq);
    unixctl_command_reply(conn, NULL);

exit:
    ovs_mutex_unlock(&dp->port_mutex);
    dp_netdev_unref(dp);
}
static void
dpif_dummy_delete_port(struct unixctl_conn *conn, int argc OVS_UNUSED,
                       const char *argv[], void *aux OVS_UNUSED)
{
    struct dp_netdev_port *port;
    struct dp_netdev *dp;

    ovs_mutex_lock(&dp_netdev_mutex);
    dp = shash_find_data(&dp_netdevs, argv[1]);
    if (!dp || !dpif_netdev_class_is_dummy(dp->class)) {
        ovs_mutex_unlock(&dp_netdev_mutex);
        unixctl_command_reply_error(conn, "unknown datapath or not a dummy");
        return;
    }
    ovs_refcount_ref(&dp->ref_cnt);
    ovs_mutex_unlock(&dp_netdev_mutex);

    ovs_mutex_lock(&dp->port_mutex);
    if (get_port_by_name(dp, argv[2], &port)) {
        unixctl_command_reply_error(conn, "unknown port");
    } else if (port->md.in_port.odp_port == ODPP_LOCAL) {
        unixctl_command_reply_error(conn, "can't delete local port");
    } else {
        do_del_port(dp, port);
        unixctl_command_reply(conn, NULL);
    }
    ovs_mutex_unlock(&dp->port_mutex);

    dp_netdev_unref(dp);
}
static void
dpif_dummy_register__(const char *type)
{
    struct dpif_class *class;

    class = xmalloc(sizeof *class);
    *class = dpif_netdev_class;
    class->type = xstrdup(type);
    dp_register_provider(class);
}
void
dpif_dummy_register(bool override)
{
    if (override) {
        struct sset types;
        const char *type;

        sset_init(&types);
        dp_enumerate_types(&types);
        SSET_FOR_EACH (type, &types) {
            if (!dp_unregister_provider(type)) {
                dpif_dummy_register__(type);
            }
        }
        sset_destroy(&types);
    }

    dpif_dummy_register__("dummy");

    unixctl_command_register("dpif-dummy/change-port-number",
                             "dp port new-number",
                             3, 3, dpif_dummy_change_port_number, NULL);
    unixctl_command_register("dpif-dummy/delete-port", "dp port",
                             2, 2, dpif_dummy_delete_port, NULL);
}
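
/* Example invocations of the commands registered above (illustrative; 'dp0'
 * and 'p1' are placeholder datapath and port names):
 *
 *     ovs-appctl dpif-dummy/change-port-number dp0 p1 5
 *     ovs-appctl dpif-dummy/delete-port dp0 p1
 */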
/* Datapath Classifier. */

/* A set of rules that all have the same fields wildcarded. */
struct dpcls_subtable {
    /* The fields are only used by writers. */
    struct cmap_node cmap_node OVS_GUARDED; /* Within dpcls 'subtables_map'. */

    /* These fields are accessed by readers. */
    struct cmap rules;           /* Contains "struct dpcls_rule"s. */
    struct netdev_flow_key mask; /* Wildcards for fields (const). */
    /* 'mask' must be the last field, additional space is allocated here. */
};
/* Initializes 'cls' as a classifier that initially contains no classification
 * rules. */
void
dpcls_init(struct dpcls *cls)
{
    cmap_init(&cls->subtables_map);
    pvector_init(&cls->subtables);
}
static void
dpcls_destroy_subtable(struct dpcls *cls, struct dpcls_subtable *subtable)
{
    pvector_remove(&cls->subtables, subtable);
    cmap_remove(&cls->subtables_map, &subtable->cmap_node,
                subtable->mask.hash);
    cmap_destroy(&subtable->rules);
    ovsrcu_postpone(free, subtable);
}
/* Destroys 'cls'.  Rules within 'cls', if any, are not freed; this is the
 * caller's responsibility.
 * May only be called after all the readers have been terminated. */
void
dpcls_destroy(struct dpcls *cls)
{
    if (cls) {
        struct dpcls_subtable *subtable;

        CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
            dpcls_destroy_subtable(cls, subtable);
        }
        cmap_destroy(&cls->subtables_map);
        pvector_destroy(&cls->subtables);
    }
}
static struct dpcls_subtable *
dpcls_create_subtable(struct dpcls *cls, const struct netdev_flow_key *mask)
{
    struct dpcls_subtable *subtable;

    /* Need to add one. */
    subtable = xmalloc(sizeof *subtable
                       - sizeof subtable->mask.mf + mask->len);
    cmap_init(&subtable->rules);
    netdev_flow_key_clone(&subtable->mask, mask);
    cmap_insert(&cls->subtables_map, &subtable->cmap_node, mask->hash);
    pvector_insert(&cls->subtables, subtable, 0);
    pvector_publish(&cls->subtables);

    return subtable;
}
static inline struct dpcls_subtable *
dpcls_find_subtable(struct dpcls *cls, const struct netdev_flow_key *mask)
{
    struct dpcls_subtable *subtable;

    CMAP_FOR_EACH_WITH_HASH (subtable, cmap_node, mask->hash,
                             &cls->subtables_map) {
        if (netdev_flow_key_equal(&subtable->mask, mask)) {
            return subtable;
        }
    }
    return dpcls_create_subtable(cls, mask);
}
/* Inserts 'rule' into 'cls'. */
void
dpcls_insert(struct dpcls *cls, struct dpcls_rule *rule,
             const struct netdev_flow_key *mask)
{
    struct dpcls_subtable *subtable = dpcls_find_subtable(cls, mask);

    rule->mask = &subtable->mask;
    cmap_insert(&subtable->rules, &rule->cmap_node, rule->flow.hash);
}
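
/* Lifecycle sketch (illustrative): a 'dpcls_rule' is keyed by a masked flow
 * plus the mask that selects its subtable, so insertion and the matching
 * removal below operate on the same subtable:
 *
 *     dpcls_insert(cls, &flow->cr, &mask);   // creates the subtable if needed
 *     ...
 *     dpcls_remove(cls, &flow->cr);          // destroys an emptied subtable
 */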
/* Removes 'rule' from 'cls', also destructing the 'rule'. */
void
dpcls_remove(struct dpcls *cls, struct dpcls_rule *rule)
{
    struct dpcls_subtable *subtable;

    ovs_assert(rule->mask);

    INIT_CONTAINER(subtable, rule->mask, mask);

    if (cmap_remove(&subtable->rules, &rule->cmap_node, rule->flow.hash)
        == 0) {
        dpcls_destroy_subtable(cls, subtable);
        pvector_publish(&cls->subtables);
    }
}
/* Returns true if 'target' satisfies 'key' in 'mask', that is, if for each
 * 1-bit in 'mask' the values in 'key' and 'target' are the same.
 *
 * Note: 'key' and 'mask' have the same mask, and 'key' is already masked. */
static inline bool
dpcls_rule_matches_key(const struct dpcls_rule *rule,
                       const struct netdev_flow_key *target)
{
    const uint64_t *keyp = rule->flow.mf.inline_values;
    const uint64_t *maskp = rule->mask->mf.inline_values;
    uint64_t target_u64;

    NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(target_u64, target, rule->flow.mf.map) {
        if (OVS_UNLIKELY((target_u64 & *maskp++) != *keyp++)) {
            return false;
        }
    }
    return true;
}
/* For each miniflow in 'keys' performs a classifier lookup writing the result
 * into the corresponding slot in 'rules'.  If a particular entry in 'keys' is
 * NULL it is skipped.
 *
 * This function is optimized for use in the userspace datapath and therefore
 * does not implement a lot of features available in the standard
 * classifier_lookup() function.  Specifically, it does not implement
 * priorities, instead returning any rule which matches the flow.
 *
 * Returns true if all flows found a corresponding rule. */
static bool
dpcls_lookup(const struct dpcls *cls, const struct netdev_flow_key keys[],
             struct dpcls_rule **rules, const size_t cnt)
{
    /* The batch size 16 was experimentally found faster than 8 or 32. */
    typedef uint16_t map_type;
#define MAP_BITS (sizeof(map_type) * CHAR_BIT)

#if !defined(__CHECKER__) && !defined(_WIN32)
    const int N_MAPS = DIV_ROUND_UP(cnt, MAP_BITS);
#else
    enum { N_MAPS = DIV_ROUND_UP(NETDEV_MAX_BURST, MAP_BITS) };
#endif
    map_type maps[N_MAPS];
    struct dpcls_subtable *subtable;

    memset(maps, 0xff, sizeof maps);
    if (cnt % MAP_BITS) {
        maps[N_MAPS - 1] >>= MAP_BITS - cnt % MAP_BITS; /* Clear extra bits. */
    }
    memset(rules, 0, cnt * sizeof *rules);

    PVECTOR_FOR_EACH (subtable, &cls->subtables) {
        const struct netdev_flow_key *mkeys = keys;
        struct dpcls_rule **mrules = rules;
        map_type remains = 0;
        int m;

        BUILD_ASSERT_DECL(sizeof remains == sizeof *maps);

        for (m = 0; m < N_MAPS; m++, mkeys += MAP_BITS, mrules += MAP_BITS) {
            uint32_t hashes[MAP_BITS];
            const struct cmap_node *nodes[MAP_BITS];
            unsigned long map = maps[m];
            int i;

            if (!map) {
                continue; /* Skip empty maps. */
            }

            /* Compute hashes for the remaining keys. */
            ULONG_FOR_EACH_1(i, map) {
                hashes[i] = netdev_flow_key_hash_in_mask(&mkeys[i],
                                                         &subtable->mask);
            }
            /* Lookup. */
            map = cmap_find_batch(&subtable->rules, map, hashes, nodes);
            /* Check results. */
            ULONG_FOR_EACH_1(i, map) {
                struct dpcls_rule *rule;

                CMAP_NODE_FOR_EACH (rule, cmap_node, nodes[i]) {
                    if (OVS_LIKELY(dpcls_rule_matches_key(rule, &mkeys[i]))) {
                        mrules[i] = rule;
                        goto next;
                    }
                }
                ULONG_SET0(map, i); /* Did not match. */
            next:
                ;                   /* Keep Sparse happy. */
            }
            maps[m] &= ~map;        /* Clear the found rules. */
            remains |= maps[m];
        }
        if (!remains) {
            return true;            /* All found. */
        }
    }
    return false;                   /* Some misses. */
}
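
/* Usage sketch (illustrative), mirroring fast_path_processing() above:
 *
 *     struct dpcls_rule *rules[NETDEV_MAX_BURST];
 *
 *     if (!dpcls_lookup(&pmd->cls, keys, rules, cnt)) {
 *         // Some 'rules[i]' are NULL: those keys missed in every subtable
 *         // and must take the upcall path.
 *     }
 */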