/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dpif-netdev.h"

#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/ioctl.h>

#include "dpif-provider.h"
#include "dynamic-string.h"
#include "fat-rwlock.h"
#include "meta-flow.h"
#include "netdev-dpdk.h"
#include "netdev-vport.h"
#include "odp-execute.h"
#include "ofp-print.h"
#include "packet-dpif.h"
#include "poll-loop.h"
#include "tnl-arp-cache.h"

VLOG_DEFINE_THIS_MODULE(dpif_netdev);
#define FLOW_DUMP_MAX_BATCH 50

/* Use per thread recirc_depth to prevent recirculation loop. */
#define MAX_RECIRC_DEPTH 5
DEFINE_STATIC_PER_THREAD_DATA(uint32_t, recirc_depth, 0)

/* Configuration parameters. */
enum { MAX_FLOWS = 65536 };     /* Maximum number of flows in flow table. */

/* Protects against changes to 'dp_netdevs'. */
static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dp_netdev's. */
static struct shash dp_netdevs OVS_GUARDED_BY(dp_netdev_mutex)
    = SHASH_INITIALIZER(&dp_netdevs);

static struct vlog_rate_limit upcall_rl = VLOG_RATE_LIMIT_INIT(600, 600);
/* Stores a miniflow with inline values */
struct netdev_flow_key {
    uint32_t hash;      /* Hash function differs for different users. */
    uint32_t len;       /* Length of the following miniflow (incl. map). */
    struct miniflow mf; /* Miniflow whose values live inline in 'buf'. */
    uint32_t buf[FLOW_MAX_PACKET_U32S - MINI_N_INLINE];
};
/* Exact match cache for frequently used flows
 *
 * The cache uses a 32-bit hash of the packet (which can be the RSS hash) to
 * search its entries for a miniflow that matches exactly the miniflow of the
 * packet. It stores the 'dpcls_rule' (rule) that matches the miniflow.
 *
 * A cache entry holds a reference to its 'dp_netdev_flow'.
 *
 * A miniflow with a given hash can be in one of EM_FLOW_HASH_SEGS different
 * entries. The 32-bit hash is split into EM_FLOW_HASH_SEGS values (each of
 * them is EM_FLOW_HASH_SHIFT bits wide and the remainder is thrown away). Each
 * value is the index of a cache entry where the miniflow could be.
 *
 * Each pmd_thread has its own private exact match cache.
 * If dp_netdev_input is not called from a pmd thread, a mutex is used.
 */

#define EM_FLOW_HASH_SHIFT 10
#define EM_FLOW_HASH_ENTRIES (1u << EM_FLOW_HASH_SHIFT)
#define EM_FLOW_HASH_MASK (EM_FLOW_HASH_ENTRIES - 1)
#define EM_FLOW_HASH_SEGS 2
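
/* A minimal sketch (not part of the original code) of how the constants
 * above determine where a flow may live in the cache: the 32-bit hash is
 * consumed EM_FLOW_HASH_SHIFT bits at a time, yielding EM_FLOW_HASH_SEGS
 * candidate slot indices.  With the values above, bits 0..9 and bits 10..19
 * of the hash each select one of the 1024 entries. */
static inline void
emc_candidate_positions_sketch(uint32_t hash,
                               uint32_t pos[EM_FLOW_HASH_SEGS])
{
    int seg;

    for (seg = 0; seg < EM_FLOW_HASH_SEGS; seg++) {
        pos[seg] = hash & EM_FLOW_HASH_MASK; /* Index of one candidate. */
        hash >>= EM_FLOW_HASH_SHIFT;         /* Move to the next segment. */
    }
}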
struct emc_entry {
    struct dp_netdev_flow *flow;
    struct netdev_flow_key key;   /* key.hash used for emc hash value. */
};

struct emc_cache {
    struct emc_entry entries[EM_FLOW_HASH_ENTRIES];
};
/* Iterate in the exact match cache through every entry that might contain a
 * miniflow with hash 'HASH'. */
#define EMC_FOR_EACH_POS_WITH_HASH(EMC, CURRENT_ENTRY, HASH)                 \
    for (uint32_t i__ = 0, srch_hash__ = (HASH);                             \
         (CURRENT_ENTRY) = &(EMC)->entries[srch_hash__ & EM_FLOW_HASH_MASK], \
         i__ < EM_FLOW_HASH_SEGS;                                            \
         i__++, srch_hash__ >>= EM_FLOW_HASH_SHIFT)
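
/* Illustrative use of the iterator above (a sketch, not original code):
 * scan the candidate slots for 'hash' and return the first currently empty
 * one, or NULL if every candidate is occupied. */
static inline struct emc_entry *
emc_find_empty_slot_sketch(struct emc_cache *cache, uint32_t hash)
{
    struct emc_entry *entry;

    EMC_FOR_EACH_POS_WITH_HASH(cache, entry, hash) {
        if (!entry->flow) {
            return entry;
        }
    }
    return NULL;
}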
/* Simple non-wildcarding single-priority classifier. */
struct dpcls {
    struct cmap subtables_map;
    struct pvector subtables;
};

/* A rule to be inserted to the classifier. */
struct dpcls_rule {
    struct cmap_node cmap_node;   /* Within struct dpcls_subtable 'rules'. */
    struct netdev_flow_key *mask; /* Subtable's mask. */
    struct netdev_flow_key flow;  /* Matching key. */
    /* 'flow' must be the last field, additional space is allocated here. */
};
static void dpcls_init(struct dpcls *);
static void dpcls_destroy(struct dpcls *);
static void dpcls_insert(struct dpcls *, struct dpcls_rule *,
                         const struct netdev_flow_key *mask);
static void dpcls_remove(struct dpcls *, struct dpcls_rule *);
static bool dpcls_lookup(const struct dpcls *cls,
                         const struct netdev_flow_key keys[],
                         struct dpcls_rule **rules, size_t cnt);
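
/* A sketch of the classifier's call sequence, for orientation only (the
 * real call sites are dp_netdev_flow_add(), dp_netdev_remove_flow() and
 * dp_netdev_lookup_flow() below; 'rule', 'mask' and 'key' stand for storage
 * the caller owns). */
static inline void
dpcls_usage_sketch(struct dpcls *cls, struct dpcls_rule *rule,
                   const struct netdev_flow_key *mask,
                   const struct netdev_flow_key *key)
{
    struct dpcls_rule *result;

    dpcls_insert(cls, rule, mask);      /* 'rule->flow' is already masked. */
    if (dpcls_lookup(cls, key, &result, 1)) {
        /* 'result' now points at the matching rule, here 'rule'. */
    }
    dpcls_remove(cls, rule);
}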
/* Datapath based on the network device interface from netdev.h.
 *
 * Some members, marked 'const', are immutable. Accessing other members
 * requires synchronization, as noted in more detail below.
 *
 * Acquisition order is, from outermost to innermost:
 *
 *    dp_netdev_mutex (global)
 */
struct dp_netdev {
    const struct dpif_class *const class;
    const char *const name;
    struct ovs_refcount ref_cnt;
    atomic_flag destroyed;

    /* Flows.
     *
     * Writers of 'flow_table' must take the 'flow_mutex'. Corresponding
     * changes to 'cls' must be made while still holding the 'flow_mutex'. */
    struct ovs_mutex flow_mutex;
    struct dpcls cls;
    struct cmap flow_table OVS_GUARDED; /* Flow table. */

    /* Statistics.
     *
     * ovsthread_stats is internally synchronized. */
    struct ovsthread_stats stats; /* Contains 'struct dp_netdev_stats *'. */

    /* Ports.
     *
     * Protected by RCU. Take the mutex to add or remove ports. */
    struct ovs_mutex port_mutex;
    struct cmap ports;
    struct seq *port_seq;       /* Incremented whenever a port changes. */

    /* Protects access to ofproto-dpif-upcall interface during revalidator
     * thread synchronization. */
    struct fat_rwlock upcall_rwlock;
    upcall_callback *upcall_cb; /* Callback function for executing upcalls. */
    void *upcall_aux;

    /* Stores all 'struct dp_netdev_pmd_thread's. */
    struct cmap poll_threads;

    /* Protects the access of the 'struct dp_netdev_pmd_thread'
     * instance for non-pmd thread. */
    struct ovs_mutex non_pmd_mutex;

    /* Each pmd thread will store its pointer to
     * 'struct dp_netdev_pmd_thread' in 'per_pmd_key'. */
    ovsthread_key_t per_pmd_key;

    /* Number of rx queues for each dpdk interface and the cpu mask
     * for pin of pmd threads. */
    size_t n_dpdk_rxqs;
    char *pmd_cmask;
    uint64_t last_tnl_conf_seq;
};
static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp,
                                                    odp_port_t port_no);

enum dp_stat_type {
    DP_STAT_HIT,    /* Packets that matched in the flow table. */
    DP_STAT_MISS,   /* Packets that did not match. */
    DP_STAT_LOST,   /* Packets not passed up to the client. */
    DP_N_STATS
};

/* Contained by struct dp_netdev's 'stats' member. */
struct dp_netdev_stats {
    struct ovs_mutex mutex;     /* Protects 'n'. */

    /* Indexed by DP_STAT_*, protected by 'mutex'. */
    unsigned long long int n[DP_N_STATS] OVS_GUARDED;
};

/* A port in a netdev-based datapath. */
struct dp_netdev_port {
    struct cmap_node node;      /* Node in dp_netdev's 'ports'. */
    odp_port_t port_no;
    struct netdev *netdev;
    struct netdev_saved_flags *sf;
    struct netdev_rxq **rxq;
    struct ovs_refcount ref_cnt;
    char *type;                 /* Port type as requested by user. */
};
/* A flow in dp_netdev's 'flow_table'.
 *
 * Except near the beginning or ending of its lifespan, rule 'rule' belongs to
 * its dp_netdev's classifier. The text below calls this classifier 'cls'.
 *
 * The thread safety rules described here for "struct dp_netdev_flow" are
 * motivated by two goals:
 *
 *    - Prevent threads that read members of "struct dp_netdev_flow" from
 *      reading bad data due to changes by some thread concurrently modifying
 *      those members.
 *
 *    - Prevent two threads making changes to members of a given "struct
 *      dp_netdev_flow" from interfering with each other.
 *
 * A flow 'flow' may be accessed without a risk of being freed during an RCU
 * grace period. Code that needs to hold onto a flow for a while
 * should try incrementing 'flow->ref_cnt' with dp_netdev_flow_ref().
 *
 * 'flow->ref_cnt' protects 'flow' from being freed. It doesn't protect the
 * flow from being deleted from 'cls' and it doesn't protect members of 'flow'
 * from modification.
 *
 * Some members, marked 'const', are immutable. Accessing other members
 * requires synchronization, as noted in more detail below.
 */
struct dp_netdev_flow {
    bool dead;

    /* Hash table index by unmasked flow. */
    const struct cmap_node node; /* In owning dp_netdev's 'flow_table'. */
    const struct flow flow;      /* Unmasked flow that created this entry. */

    /* Number of references.
     * The classifier owns one reference.
     * Any thread trying to keep a rule from being freed should hold its own
     * reference. */
    struct ovs_refcount ref_cnt;

    /* Statistics.
     *
     * Reading or writing these members requires 'mutex'. */
    struct ovsthread_stats stats; /* Contains "struct dp_netdev_flow_stats". */

    /* Actions. */
    OVSRCU_TYPE(struct dp_netdev_actions *) actions;

    /* Packet classification. */
    struct dpcls_rule cr;        /* In owning dp_netdev's 'cls'. */
    /* 'cr' must be the last member. */
};
static void dp_netdev_flow_unref(struct dp_netdev_flow *);
static bool dp_netdev_flow_ref(struct dp_netdev_flow *);
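
/* Sketch (not part of the original code) of the reference pattern the
 * comment above describes: a thread that wants to use 'flow' beyond the
 * current RCU grace period takes its own reference, and must pair it with
 * dp_netdev_flow_unref() when done. */
static inline struct dp_netdev_flow *
dp_netdev_flow_hold_sketch(struct dp_netdev_flow *flow)
{
    if (flow && dp_netdev_flow_ref(flow)) {
        return flow;    /* Caller now owns one reference. */
    }
    return NULL;        /* Flow is being destroyed; do not keep it. */
}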
/* Contained by struct dp_netdev_flow's 'stats' member. */
struct dp_netdev_flow_stats {
    struct ovs_mutex mutex;         /* Guards all the other members. */

    long long int used OVS_GUARDED; /* Last used time, in monotonic msecs. */
    long long int packet_count OVS_GUARDED; /* Number of packets matched. */
    long long int byte_count OVS_GUARDED;   /* Number of bytes matched. */
    uint16_t tcp_flags OVS_GUARDED; /* Bitwise-OR of seen tcp_flags values. */
};

/* A set of datapath actions within a "struct dp_netdev_flow".
 *
 * A struct dp_netdev_actions 'actions' is protected with RCU. */
struct dp_netdev_actions {
    /* These members are immutable: they do not change during the struct's
     * lifetime. */
    struct nlattr *actions;     /* Sequence of OVS_ACTION_ATTR_* attributes. */
    unsigned int size;          /* Size of 'actions', in bytes. */
};

struct dp_netdev_actions *dp_netdev_actions_create(const struct nlattr *,
                                                   size_t);
struct dp_netdev_actions *dp_netdev_flow_get_actions(
    const struct dp_netdev_flow *);
static void dp_netdev_actions_free(struct dp_netdev_actions *);
/* PMD: Poll mode drivers. A PMD accesses devices via polling to eliminate
 * the performance overhead of interrupt processing. Therefore netdev cannot
 * implement rx-wait for these devices. dpif-netdev needs to poll these
 * devices to check their receive buffers, and the pmd-thread does that
 * polling for the devices assigned to it.
 *
 * DPDK uses a PMD for accessing the NIC.
 *
 * Note, the instance with cpu core id NON_PMD_CORE_ID is reserved for
 * I/O of all non-pmd threads. There will be no actual thread created
 * for the instance.
 **/
struct dp_netdev_pmd_thread {
    struct dp_netdev *dp;
    struct cmap_node node;          /* In 'dp->poll_threads'. */
    pthread_cond_t cond;            /* For synchronizing pmd thread reload. */
    struct ovs_mutex cond_mutex;    /* Mutex for condition variable. */

    /* Per thread exact-match cache. Note, the instance for cpu core
     * NON_PMD_CORE_ID can be accessed by multiple threads, and thus needs
     * to be protected (e.g. by 'dp_netdev_mutex'). All other instances
     * are only accessed by their own pmd threads. */
    struct emc_cache flow_cache;
    struct latch exit_latch;        /* For terminating the pmd thread. */
    atomic_uint change_seq;         /* For reloading pmd ports. */
    int index;                      /* Idx of this pmd thread among pmd
                                     * threads on same numa node. */
    int core_id;                    /* CPU core id of this pmd thread. */
    int numa_id;                    /* numa node id of this pmd thread. */
};
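
/* Added for orientation: the reload handshake implied by 'cond',
 * 'cond_mutex' and 'change_seq' works roughly as follows (see
 * dp_netdev_reload_pmd__() and dp_netdev_pmd_reload_done() below for the
 * actual code): a control thread increments 'change_seq' and sleeps on
 * 'cond'; the pmd thread notices the new sequence number in its poll loop,
 * reloads its queue assignments, and signals 'cond' to wake the control
 * thread again. */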
#define PMD_INITIAL_SEQ 1

/* Interface to netdev-based datapath. */
struct dpif_netdev {
    struct dpif dpif;
    struct dp_netdev *dp;
    uint64_t last_port_seq;
};

static int get_port_by_number(struct dp_netdev *dp, odp_port_t port_no,
                              struct dp_netdev_port **portp);
static int get_port_by_name(struct dp_netdev *dp, const char *devname,
                            struct dp_netdev_port **portp);
static void dp_netdev_free(struct dp_netdev *)
    OVS_REQUIRES(dp_netdev_mutex);
static void dp_netdev_flow_flush(struct dp_netdev *);
static int do_add_port(struct dp_netdev *dp, const char *devname,
                       const char *type, odp_port_t port_no)
    OVS_REQUIRES(dp->port_mutex);
static void do_del_port(struct dp_netdev *dp, struct dp_netdev_port *)
    OVS_REQUIRES(dp->port_mutex);
static int dpif_netdev_open(const struct dpif_class *, const char *name,
                            bool create, struct dpif **);
static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
                                      struct dpif_packet **, int c,
                                      bool may_steal,
                                      const struct nlattr *actions,
                                      size_t actions_len);
static void dp_netdev_input(struct dp_netdev_pmd_thread *,
                            struct dpif_packet **, int cnt);

static void dp_netdev_disable_upcall(struct dp_netdev *);
void dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd,
                                    struct dp_netdev *dp, int index,
                                    int core_id, int numa_id);
static void dp_netdev_set_nonpmd(struct dp_netdev *dp);
static struct dp_netdev_pmd_thread *dp_netdev_get_nonpmd(struct dp_netdev *dp);
static void dp_netdev_destroy_all_pmds(struct dp_netdev *dp);
static void dp_netdev_del_pmds_on_numa(struct dp_netdev *dp, int numa_id);
static void dp_netdev_set_pmds_on_numa(struct dp_netdev *dp, int numa_id);
static void dp_netdev_reset_pmd_threads(struct dp_netdev *dp);

static void emc_clear_entry(struct emc_entry *ce);
static void
emc_cache_init(struct emc_cache *flow_cache)
{
    int i;

    BUILD_ASSERT(offsetof(struct miniflow, inline_values) == sizeof(uint64_t));

    for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
        flow_cache->entries[i].flow = NULL;
        flow_cache->entries[i].key.hash = 0;
        flow_cache->entries[i].key.len
            = offsetof(struct miniflow, inline_values);
        miniflow_initialize(&flow_cache->entries[i].key.mf,
                            flow_cache->entries[i].key.buf);
    }
}

static void
emc_cache_uninit(struct emc_cache *flow_cache)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
        emc_clear_entry(&flow_cache->entries[i]);
    }
}
static struct dpif_netdev *
dpif_netdev_cast(const struct dpif *dpif)
{
    ovs_assert(dpif->dpif_class->open == dpif_netdev_open);
    return CONTAINER_OF(dpif, struct dpif_netdev, dpif);
}

static struct dp_netdev *
get_dp_netdev(const struct dpif *dpif)
{
    return dpif_netdev_cast(dpif)->dp;
}

static int
dpif_netdev_enumerate(struct sset *all_dps,
                      const struct dpif_class *dpif_class)
{
    struct shash_node *node;

    ovs_mutex_lock(&dp_netdev_mutex);
    SHASH_FOR_EACH(node, &dp_netdevs) {
        struct dp_netdev *dp = node->data;
        if (dpif_class != dp->class) {
            /* 'dp_netdevs' contains both "netdev" and "dummy" dpifs.
             * If the class doesn't match, skip this dpif. */
            continue;
        }
        sset_add(all_dps, node->name);
    }
    ovs_mutex_unlock(&dp_netdev_mutex);

    return 0;
}

static bool
dpif_netdev_class_is_dummy(const struct dpif_class *class)
{
    return class != &dpif_netdev_class;
}
static const char *
dpif_netdev_port_open_type(const struct dpif_class *class, const char *type)
{
    return strcmp(type, "internal") ? type
                  : dpif_netdev_class_is_dummy(class) ? "dummy"
                  : "tap";
}
static struct dpif *
create_dpif_netdev(struct dp_netdev *dp)
{
    uint16_t netflow_id = hash_string(dp->name, 0);
    struct dpif_netdev *dpif;

    ovs_refcount_ref(&dp->ref_cnt);

    dpif = xmalloc(sizeof *dpif);
    dpif_init(&dpif->dpif, dp->class, dp->name, netflow_id >> 8, netflow_id);
    dpif->dp = dp;
    dpif->last_port_seq = seq_read(dp->port_seq);

    return &dpif->dpif;
}
/* Choose an unused, non-zero port number and return it on success.
 * Return ODPP_NONE on failure. */
static odp_port_t
choose_port(struct dp_netdev *dp, const char *name)
    OVS_REQUIRES(dp->port_mutex)
{
    uint32_t port_no;

    if (dp->class != &dpif_netdev_class) {
        const char *p;
        int start_no = 0;

        /* If the port name begins with "br", start the number search at
         * 100 to make writing tests easier. */
        if (!strncmp(name, "br", 2)) {
            start_no = 100;
        }

        /* If the port name contains a number, try to assign that port number.
         * This can make writing unit tests easier because port numbers are
         * predictable. */
        for (p = name; *p != '\0'; p++) {
            if (isdigit((unsigned char) *p)) {
                port_no = start_no + strtol(p, NULL, 10);
                if (port_no > 0 && port_no != odp_to_u32(ODPP_NONE)
                    && !dp_netdev_lookup_port(dp, u32_to_odp(port_no))) {
                    return u32_to_odp(port_no);
                }
                break;
            }
        }
    }

    for (port_no = 1; port_no <= UINT16_MAX; port_no++) {
        if (!dp_netdev_lookup_port(dp, u32_to_odp(port_no))) {
            return u32_to_odp(port_no);
        }
    }

    return ODPP_NONE;
}
static int
create_dp_netdev(const char *name, const struct dpif_class *class,
                 struct dp_netdev **dpp)
    OVS_REQUIRES(dp_netdev_mutex)
{
    struct dp_netdev *dp;
    int error;

    dp = xzalloc(sizeof *dp);
    shash_add(&dp_netdevs, name, dp);

    *CONST_CAST(const struct dpif_class **, &dp->class) = class;
    *CONST_CAST(const char **, &dp->name) = xstrdup(name);
    ovs_refcount_init(&dp->ref_cnt);
    atomic_flag_clear(&dp->destroyed);

    ovs_mutex_init(&dp->flow_mutex);
    dpcls_init(&dp->cls);
    cmap_init(&dp->flow_table);

    ovsthread_stats_init(&dp->stats);

    ovs_mutex_init(&dp->port_mutex);
    cmap_init(&dp->ports);
    dp->port_seq = seq_create();
    fat_rwlock_init(&dp->upcall_rwlock);

    /* Disable upcalls by default. */
    dp_netdev_disable_upcall(dp);
    dp->upcall_aux = NULL;
    dp->upcall_cb = NULL;

    cmap_init(&dp->poll_threads);
    ovs_mutex_init_recursive(&dp->non_pmd_mutex);
    ovsthread_key_create(&dp->per_pmd_key, NULL);

    /* Reserves the core NON_PMD_CORE_ID for all non-pmd threads. */
    ovs_numa_try_pin_core_specific(NON_PMD_CORE_ID);
    dp_netdev_set_nonpmd(dp);
    dp->n_dpdk_rxqs = NR_QUEUE;

    ovs_mutex_lock(&dp->port_mutex);
    error = do_add_port(dp, name, "internal", ODPP_LOCAL);
    ovs_mutex_unlock(&dp->port_mutex);
    if (error) {
        dp_netdev_free(dp);
        return error;
    }

    dp->last_tnl_conf_seq = seq_read(tnl_conf_seq);
    *dpp = dp;
    return 0;
}
static int
dpif_netdev_open(const struct dpif_class *class, const char *name,
                 bool create, struct dpif **dpifp)
{
    struct dp_netdev *dp;
    int error;

    ovs_mutex_lock(&dp_netdev_mutex);
    dp = shash_find_data(&dp_netdevs, name);
    if (!dp) {
        error = create ? create_dp_netdev(name, class, &dp) : ENODEV;
    } else {
        error = (dp->class != class ? EINVAL
                 : create ? EEXIST
                 : 0);
    }
    if (!error) {
        *dpifp = create_dpif_netdev(dp);
    }
    ovs_mutex_unlock(&dp_netdev_mutex);

    return error;
}
static void
dp_netdev_destroy_upcall_lock(struct dp_netdev *dp)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    /* Check that upcalls are disabled, i.e. that the rwlock is taken */
    ovs_assert(fat_rwlock_tryrdlock(&dp->upcall_rwlock));

    /* Before freeing a lock we should release it */
    fat_rwlock_unlock(&dp->upcall_rwlock);
    fat_rwlock_destroy(&dp->upcall_rwlock);
}
/* Requires dp_netdev_mutex so that we can't get a new reference to 'dp'
 * through the 'dp_netdevs' shash while freeing 'dp'. */
static void
dp_netdev_free(struct dp_netdev *dp)
    OVS_REQUIRES(dp_netdev_mutex)
{
    struct dp_netdev_port *port;
    struct dp_netdev_stats *bucket;
    int i;

    shash_find_and_delete(&dp_netdevs, dp->name);

    dp_netdev_destroy_all_pmds(dp);
    cmap_destroy(&dp->poll_threads);
    ovs_mutex_destroy(&dp->non_pmd_mutex);
    ovsthread_key_delete(dp->per_pmd_key);

    dp_netdev_flow_flush(dp);
    ovs_mutex_lock(&dp->port_mutex);
    CMAP_FOR_EACH (port, node, &dp->ports) {
        do_del_port(dp, port);
    }
    ovs_mutex_unlock(&dp->port_mutex);

    OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &dp->stats) {
        ovs_mutex_destroy(&bucket->mutex);
        free_cacheline(bucket);
    }
    ovsthread_stats_destroy(&dp->stats);

    dpcls_destroy(&dp->cls);
    cmap_destroy(&dp->flow_table);
    ovs_mutex_destroy(&dp->flow_mutex);
    seq_destroy(dp->port_seq);
    cmap_destroy(&dp->ports);

    /* Upcalls must be disabled at this point */
    dp_netdev_destroy_upcall_lock(dp);

    free(dp->pmd_cmask);
    free(CONST_CAST(char *, dp->name));
    free(dp);
}
static void
dp_netdev_unref(struct dp_netdev *dp)
{
    if (dp) {
        /* Take dp_netdev_mutex so that, if dp->ref_cnt falls to zero, we
         * can't get a new reference to 'dp' through the 'dp_netdevs'
         * shash. */
        ovs_mutex_lock(&dp_netdev_mutex);
        if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) {
            dp_netdev_free(dp);
        }
        ovs_mutex_unlock(&dp_netdev_mutex);
    }
}

static void
dpif_netdev_close(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    dp_netdev_unref(dp);
    free(dpif);
}

static int
dpif_netdev_destroy(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    if (!atomic_flag_test_and_set(&dp->destroyed)) {
        if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) {
            /* Can't happen: 'dpif' still owns a reference to 'dp'. */
            OVS_NOT_REACHED();
        }
    }

    return 0;
}
static int
dpif_netdev_get_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_stats *bucket;
    size_t i;

    stats->n_flows = cmap_count(&dp->flow_table);

    stats->n_hit = stats->n_missed = stats->n_lost = 0;
    OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &dp->stats) {
        ovs_mutex_lock(&bucket->mutex);
        stats->n_hit += bucket->n[DP_STAT_HIT];
        stats->n_missed += bucket->n[DP_STAT_MISS];
        stats->n_lost += bucket->n[DP_STAT_LOST];
        ovs_mutex_unlock(&bucket->mutex);
    }
    stats->n_masks = UINT32_MAX;
    stats->n_mask_hit = UINT64_MAX;

    return 0;
}
static void
dp_netdev_reload_pmd__(struct dp_netdev_pmd_thread *pmd)
{
    int old_seq;

    if (pmd->core_id == NON_PMD_CORE_ID) {
        return;
    }

    ovs_mutex_lock(&pmd->cond_mutex);
    atomic_add_relaxed(&pmd->change_seq, 1, &old_seq);
    ovs_mutex_cond_wait(&pmd->cond, &pmd->cond_mutex);
    ovs_mutex_unlock(&pmd->cond_mutex);
}

/* Causes all pmd threads to reload their tx/rx devices.
 * Must be called after adding/removing ports. */
static void
dp_netdev_reload_pmds(struct dp_netdev *dp)
{
    struct dp_netdev_pmd_thread *pmd;

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        dp_netdev_reload_pmd__(pmd);
    }
}

static uint32_t
hash_port_no(odp_port_t port_no)
{
    return hash_int(odp_to_u32(port_no), 0);
}
static int
do_add_port(struct dp_netdev *dp, const char *devname, const char *type,
            odp_port_t port_no)
    OVS_REQUIRES(dp->port_mutex)
{
    struct netdev_saved_flags *sf;
    struct dp_netdev_port *port;
    struct netdev *netdev;
    enum netdev_flags flags;
    const char *open_type;
    int error;
    int i;

    /* XXX reject devices already in some dp_netdev. */

    /* Open and validate network device. */
    open_type = dpif_netdev_port_open_type(dp->class, type);
    error = netdev_open(devname, open_type, &netdev);
    if (error) {
        return error;
    }
    /* XXX reject non-Ethernet devices */

    netdev_get_flags(netdev, &flags);
    if (flags & NETDEV_LOOPBACK) {
        VLOG_ERR("%s: cannot add a loopback device", devname);
        netdev_close(netdev);
        return EINVAL;
    }

    if (netdev_is_pmd(netdev)) {
        int n_cores = ovs_numa_get_n_cores();

        if (n_cores == OVS_CORE_UNSPEC) {
            VLOG_ERR("%s, cannot get cpu core info", devname);
            return ENOENT;
        }
        /* There can only be ovs_numa_get_n_cores() pmd threads,
         * so creates a txq for each. */
        error = netdev_set_multiq(netdev, n_cores, dp->n_dpdk_rxqs);
        if (error && (error != EOPNOTSUPP)) {
            VLOG_ERR("%s, cannot set multiq", devname);
            return errno;
        }
    }
    port = xzalloc(sizeof *port);
    port->port_no = port_no;
    port->netdev = netdev;
    port->rxq = xmalloc(sizeof *port->rxq * netdev_n_rxq(netdev));
    port->type = xstrdup(type);
    for (i = 0; i < netdev_n_rxq(netdev); i++) {
        error = netdev_rxq_open(netdev, &port->rxq[i], i);
        if (error
            && !(error == EOPNOTSUPP && dpif_netdev_class_is_dummy(dp->class))) {
            VLOG_ERR("%s: cannot receive packets on this network device (%s)",
                     devname, ovs_strerror(errno));
            netdev_close(netdev);
            free(port->type);
            free(port->rxq);
            free(port);
            return error;
        }
    }

    error = netdev_turn_flags_on(netdev, NETDEV_PROMISC, &sf);
    if (error) {
        for (i = 0; i < netdev_n_rxq(netdev); i++) {
            netdev_rxq_close(port->rxq[i]);
        }
        netdev_close(netdev);
        free(port->type);
        free(port->rxq);
        free(port);
        return error;
    }
    port->sf = sf;

    ovs_refcount_init(&port->ref_cnt);
    cmap_insert(&dp->ports, &port->node, hash_port_no(port_no));

    if (netdev_is_pmd(netdev)) {
        dp_netdev_set_pmds_on_numa(dp, netdev_get_numa_id(netdev));
        dp_netdev_reload_pmds(dp);
    }
    seq_change(dp->port_seq);

    return 0;
}
static int
dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev,
                     odp_port_t *port_nop)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
    const char *dpif_port;
    odp_port_t port_no;
    int error;

    ovs_mutex_lock(&dp->port_mutex);
    dpif_port = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
    if (*port_nop != ODPP_NONE) {
        port_no = *port_nop;
        error = dp_netdev_lookup_port(dp, *port_nop) ? EBUSY : 0;
    } else {
        port_no = choose_port(dp, dpif_port);
        error = port_no == ODPP_NONE ? EFBIG : 0;
    }
    if (!error) {
        *port_nop = port_no;
        error = do_add_port(dp, dpif_port, netdev_get_type(netdev), port_no);
    }
    ovs_mutex_unlock(&dp->port_mutex);

    return error;
}
*dpif
, odp_port_t port_no
)
920 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
923 ovs_mutex_lock(&dp
->port_mutex
);
924 if (port_no
== ODPP_LOCAL
) {
927 struct dp_netdev_port
*port
;
929 error
= get_port_by_number(dp
, port_no
, &port
);
931 do_del_port(dp
, port
);
934 ovs_mutex_unlock(&dp
->port_mutex
);
940 is_valid_port_number(odp_port_t port_no
)
942 return port_no
!= ODPP_NONE
;
static struct dp_netdev_port *
dp_netdev_lookup_port(const struct dp_netdev *dp, odp_port_t port_no)
{
    struct dp_netdev_port *port;

    CMAP_FOR_EACH_WITH_HASH (port, node, hash_port_no(port_no), &dp->ports) {
        if (port->port_no == port_no) {
            return port;
        }
    }
    return NULL;
}

static int
get_port_by_number(struct dp_netdev *dp,
                   odp_port_t port_no, struct dp_netdev_port **portp)
{
    if (!is_valid_port_number(port_no)) {
        *portp = NULL;
        return EINVAL;
    } else {
        *portp = dp_netdev_lookup_port(dp, port_no);
        return *portp ? 0 : ENOENT;
    }
}
static void
port_ref(struct dp_netdev_port *port)
{
    if (port) {
        ovs_refcount_ref(&port->ref_cnt);
    }
}

static bool
port_try_ref(struct dp_netdev_port *port)
{
    if (port) {
        return ovs_refcount_try_ref_rcu(&port->ref_cnt);
    }

    return false;
}

static void
port_unref(struct dp_netdev_port *port)
{
    if (port && ovs_refcount_unref_relaxed(&port->ref_cnt) == 1) {
        int n_rxq = netdev_n_rxq(port->netdev);
        int i;

        netdev_close(port->netdev);
        netdev_restore_flags(port->sf);

        for (i = 0; i < n_rxq; i++) {
            netdev_rxq_close(port->rxq[i]);
        }
        free(port->rxq);
        free(port->type);
        free(port);
    }
}
static int
get_port_by_name(struct dp_netdev *dp,
                 const char *devname, struct dp_netdev_port **portp)
    OVS_REQUIRES(dp->port_mutex)
{
    struct dp_netdev_port *port;

    CMAP_FOR_EACH (port, node, &dp->ports) {
        if (!strcmp(netdev_get_name(port->netdev), devname)) {
            *portp = port;
            return 0;
        }
    }
    return ENOENT;
}

static int
get_n_pmd_threads_on_numa(struct dp_netdev *dp, int numa_id)
{
    struct dp_netdev_pmd_thread *pmd;
    int n_pmds = 0;

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        if (pmd->numa_id == numa_id) {
            n_pmds++;
        }
    }

    return n_pmds;
}

/* Returns 'true' if there is a port with pmd netdev and the netdev
 * is on numa node 'numa_id'. */
static bool
has_pmd_port_for_numa(struct dp_netdev *dp, int numa_id)
{
    struct dp_netdev_port *port;

    CMAP_FOR_EACH (port, node, &dp->ports) {
        if (netdev_is_pmd(port->netdev)
            && netdev_get_numa_id(port->netdev) == numa_id) {
            return true;
        }
    }

    return false;
}
static void
do_del_port(struct dp_netdev *dp, struct dp_netdev_port *port)
    OVS_REQUIRES(dp->port_mutex)
{
    cmap_remove(&dp->ports, &port->node, hash_odp_port(port->port_no));
    seq_change(dp->port_seq);
    if (netdev_is_pmd(port->netdev)) {
        int numa_id = netdev_get_numa_id(port->netdev);

        /* If there is no netdev on the numa node, deletes the pmd threads
         * for that numa. Else, just reloads the queues. */
        if (!has_pmd_port_for_numa(dp, numa_id)) {
            dp_netdev_del_pmds_on_numa(dp, numa_id);
        }
        dp_netdev_reload_pmds(dp);
    }

    port_unref(port);
}
static void
answer_port_query(const struct dp_netdev_port *port,
                  struct dpif_port *dpif_port)
{
    dpif_port->name = xstrdup(netdev_get_name(port->netdev));
    dpif_port->type = xstrdup(port->type);
    dpif_port->port_no = port->port_no;
}

static int
dpif_netdev_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
                                 struct dpif_port *dpif_port)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_port *port;
    int error;

    error = get_port_by_number(dp, port_no, &port);
    if (!error && dpif_port) {
        answer_port_query(port, dpif_port);
    }

    return error;
}

static int
dpif_netdev_port_query_by_name(const struct dpif *dpif, const char *devname,
                               struct dpif_port *dpif_port)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_port *port;
    int error;

    ovs_mutex_lock(&dp->port_mutex);
    error = get_port_by_name(dp, devname, &port);
    if (!error && dpif_port) {
        answer_port_query(port, dpif_port);
    }
    ovs_mutex_unlock(&dp->port_mutex);

    return error;
}
static void
dp_netdev_flow_free(struct dp_netdev_flow *flow)
{
    struct dp_netdev_flow_stats *bucket;
    size_t i;

    OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &flow->stats) {
        ovs_mutex_destroy(&bucket->mutex);
        free_cacheline(bucket);
    }
    ovsthread_stats_destroy(&flow->stats);

    dp_netdev_actions_free(dp_netdev_flow_get_actions(flow));
    free(flow);
}

static void dp_netdev_flow_unref(struct dp_netdev_flow *flow)
{
    if (ovs_refcount_unref_relaxed(&flow->ref_cnt) == 1) {
        ovsrcu_postpone(dp_netdev_flow_free, flow);
    }
}

static void
dp_netdev_remove_flow(struct dp_netdev *dp, struct dp_netdev_flow *flow)
    OVS_REQUIRES(dp->flow_mutex)
{
    struct cmap_node *node = CONST_CAST(struct cmap_node *, &flow->node);

    dpcls_remove(&dp->cls, &flow->cr);
    cmap_remove(&dp->flow_table, node, flow_hash(&flow->flow, 0));
    flow->dead = true;

    dp_netdev_flow_unref(flow);
}
static void
dp_netdev_flow_flush(struct dp_netdev *dp)
{
    struct dp_netdev_flow *netdev_flow;

    ovs_mutex_lock(&dp->flow_mutex);
    CMAP_FOR_EACH (netdev_flow, node, &dp->flow_table) {
        dp_netdev_remove_flow(dp, netdev_flow);
    }
    ovs_mutex_unlock(&dp->flow_mutex);
}

static int
dpif_netdev_flow_flush(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    dp_netdev_flow_flush(dp);
    return 0;
}

struct dp_netdev_port_state {
    struct cmap_position position;
    char *name;
};

static int
dpif_netdev_port_dump_start(const struct dpif *dpif OVS_UNUSED, void **statep)
{
    *statep = xzalloc(sizeof(struct dp_netdev_port_state));
    return 0;
}
static int
dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,
                           struct dpif_port *dpif_port)
{
    struct dp_netdev_port_state *state = state_;
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct cmap_node *node;
    int retval;

    node = cmap_next_position(&dp->ports, &state->position);
    if (node) {
        struct dp_netdev_port *port;

        port = CONTAINER_OF(node, struct dp_netdev_port, node);

        free(state->name);
        state->name = xstrdup(netdev_get_name(port->netdev));
        dpif_port->name = state->name;
        dpif_port->type = port->type;
        dpif_port->port_no = port->port_no;

        retval = 0;
    } else {
        retval = EOF;
    }

    return retval;
}

static int
dpif_netdev_port_dump_done(const struct dpif *dpif OVS_UNUSED, void *state_)
{
    struct dp_netdev_port_state *state = state_;

    free(state->name);
    free(state);
    return 0;
}

static int
dpif_netdev_port_poll(const struct dpif *dpif_, char **devnamep OVS_UNUSED)
{
    struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);
    uint64_t new_port_seq;
    int error;

    new_port_seq = seq_read(dpif->dp->port_seq);
    if (dpif->last_port_seq != new_port_seq) {
        dpif->last_port_seq = new_port_seq;
        error = ENOBUFS;
    } else {
        error = EAGAIN;
    }

    return error;
}

static void
dpif_netdev_port_poll_wait(const struct dpif *dpif_)
{
    struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);

    seq_wait(dpif->dp->port_seq, dpif->last_port_seq);
}
static struct dp_netdev_flow *
dp_netdev_flow_cast(const struct dpcls_rule *cr)
{
    return cr ? CONTAINER_OF(cr, struct dp_netdev_flow, cr) : NULL;
}

static bool dp_netdev_flow_ref(struct dp_netdev_flow *flow)
{
    return ovs_refcount_try_ref_rcu(&flow->ref_cnt);
}
/* netdev_flow_key utilities.
 *
 * netdev_flow_key is basically a miniflow. We use these functions
 * (netdev_flow_key_clone, netdev_flow_key_equal, ...) instead of the miniflow
 * functions (miniflow_clone_inline, miniflow_equal, ...), because:
 *
 * - Since we are dealing exclusively with miniflows created by
 *   miniflow_extract(), if the map is different the miniflow is different.
 *   Therefore we can be faster by comparing the map and the miniflow in a
 *   single memcmp().
 * - netdev_flow_key's miniflow always has inline values.
 * - These functions can be inlined by the compiler.
 *
 * The following assertions make sure that what we're doing with miniflow is
 * safe. */
BUILD_ASSERT_DECL(offsetof(struct miniflow, inline_values)
                  == sizeof(uint64_t));
/* Given the number of bits set in the miniflow map, returns the size of the
 * 'netdev_flow_key.mf' */
static inline uint32_t
netdev_flow_key_size(uint32_t flow_u32s)
{
    return offsetof(struct miniflow, inline_values)
           + MINIFLOW_VALUES_SIZE(flow_u32s);
}
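
/* Worked example (added for illustration, assuming MINIFLOW_VALUES_SIZE()
 * counts 4 bytes per u32 value): with 8 bits set in the map, the key takes
 * the 8-byte miniflow header plus 8 * 4 bytes of inline values, so
 * netdev_flow_key_size(8) == 40. */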
static inline bool
netdev_flow_key_equal(const struct netdev_flow_key *a,
                      const struct netdev_flow_key *b)
{
    /* 'b->len' may be not set yet. */
    return a->hash == b->hash && !memcmp(&a->mf, &b->mf, a->len);
}

/* Used to compare 'netdev_flow_key' in the exact match cache to a miniflow.
 * The maps are compared bitwise, so both 'key->mf' and 'mf' must have been
 * generated by miniflow_extract. */
static inline bool
netdev_flow_key_equal_mf(const struct netdev_flow_key *key,
                         const struct miniflow *mf)
{
    return !memcmp(&key->mf, mf, key->len);
}

static inline void
netdev_flow_key_clone(struct netdev_flow_key *dst,
                      const struct netdev_flow_key *src)
{
    memcpy(dst, src,
           offsetof(struct netdev_flow_key, mf) + src->len);
}
static inline void
netdev_flow_key_from_flow(struct netdev_flow_key *dst,
                          const struct flow *src)
{
    struct ofpbuf packet;
    uint64_t buf_stub[512 / 8];
    struct pkt_metadata md = pkt_metadata_from_flow(src);

    miniflow_initialize(&dst->mf, dst->buf);

    ofpbuf_use_stub(&packet, buf_stub, sizeof buf_stub);
    flow_compose(&packet, src);
    miniflow_extract(&packet, &md, &dst->mf);
    ofpbuf_uninit(&packet);

    dst->len = netdev_flow_key_size(count_1bits(dst->mf.map));
    dst->hash = 0; /* Not computed yet. */
}
/* Initialize a netdev_flow_key 'mask' from 'match'. */
static inline void
netdev_flow_mask_init(struct netdev_flow_key *mask,
                      const struct match *match)
{
    const uint32_t *mask_u32 = (const uint32_t *) &match->wc.masks;
    uint32_t *dst = mask->mf.inline_values;
    uint64_t map, mask_map = 0;
    uint32_t hash = 0;
    int n;

    /* Only check masks that make sense for the flow. */
    map = flow_wc_map(&match->flow);

    while (map) {
        uint64_t rm1bit = rightmost_1bit(map);
        int i = raw_ctz(map);

        if (mask_u32[i]) {
            mask_map |= rm1bit;
            *dst++ = mask_u32[i];
            hash = hash_add(hash, mask_u32[i]);
        }
        map -= rm1bit;
    }

    mask->mf.values_inline = true;
    mask->mf.map = mask_map;

    hash = hash_add(hash, mask_map);
    hash = hash_add(hash, mask_map >> 32);

    n = dst - mask->mf.inline_values;

    mask->hash = hash_finish(hash, n * 4);
    mask->len = netdev_flow_key_size(n);
}
/* Initializes 'dst' as a copy of 'flow' masked with 'mask'. */
static inline void
netdev_flow_key_init_masked(struct netdev_flow_key *dst,
                            const struct flow *flow,
                            const struct netdev_flow_key *mask)
{
    uint32_t *dst_u32 = dst->mf.inline_values;
    const uint32_t *mask_u32 = mask->mf.inline_values;
    uint32_t hash = 0;
    uint32_t value;

    dst->len = mask->len;
    dst->mf.values_inline = true;
    dst->mf.map = mask->mf.map;

    FLOW_FOR_EACH_IN_MAP(value, flow, mask->mf.map) {
        *dst_u32 = value & *mask_u32++;
        hash = hash_add(hash, *dst_u32++);
    }
    dst->hash = hash_finish(hash, (dst_u32 - dst->mf.inline_values) * 4);
}

/* Iterate through all netdev_flow_key u32 values specified by 'MAP' */
#define NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(VALUE, KEY, MAP)            \
    for (struct mf_for_each_in_map_aux aux__                        \
             = { (KEY)->mf.inline_values, (KEY)->mf.map, MAP };     \
         mf_get_next_in_map(&aux__, &(VALUE));                      \
        )
/* Returns a hash value for the bits of 'key' where there are 1-bits in
 * 'mask'. */
static inline uint32_t
netdev_flow_key_hash_in_mask(const struct netdev_flow_key *key,
                             const struct netdev_flow_key *mask)
{
    const uint32_t *p = mask->mf.inline_values;
    uint32_t hash = 0;
    uint32_t key_u32;

    NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(key_u32, key, mask->mf.map) {
        hash = hash_add(hash, key_u32 & *p++);
    }

    return hash_finish(hash, (p - mask->mf.inline_values) * 4);
}

static inline bool
emc_entry_alive(struct emc_entry *ce)
{
    return ce->flow && !ce->flow->dead;
}
static void
emc_clear_entry(struct emc_entry *ce)
{
    if (ce->flow) {
        dp_netdev_flow_unref(ce->flow);
        ce->flow = NULL;
    }
}

static inline void
emc_change_entry(struct emc_entry *ce, struct dp_netdev_flow *flow,
                 const struct netdev_flow_key *key)
{
    if (ce->flow != flow) {
        if (ce->flow) {
            dp_netdev_flow_unref(ce->flow);
        }

        if (dp_netdev_flow_ref(flow)) {
            ce->flow = flow;
        } else {
            ce->flow = NULL;
        }
    }
    if (key) {
        netdev_flow_key_clone(&ce->key, key);
    }
}
static inline void
emc_insert(struct emc_cache *cache, const struct netdev_flow_key *key,
           struct dp_netdev_flow *flow)
{
    struct emc_entry *to_be_replaced = NULL;
    struct emc_entry *current_entry;

    EMC_FOR_EACH_POS_WITH_HASH(cache, current_entry, key->hash) {
        if (netdev_flow_key_equal(&current_entry->key, key)) {
            /* We found the entry with the 'mf' miniflow */
            emc_change_entry(current_entry, flow, NULL);
            return;
        }

        /* Replacement policy: put the flow in an empty (not alive) entry, or
         * in the first entry where it can be */
        if (!to_be_replaced
            || (emc_entry_alive(to_be_replaced)
                && !emc_entry_alive(current_entry))
            || current_entry->key.hash < to_be_replaced->key.hash) {
            to_be_replaced = current_entry;
        }
    }
    /* We didn't find the miniflow in the cache.
     * The 'to_be_replaced' entry is where the new flow will be stored */

    emc_change_entry(to_be_replaced, flow, key);
}

static inline struct dp_netdev_flow *
emc_lookup(struct emc_cache *cache, const struct netdev_flow_key *key)
{
    struct emc_entry *current_entry;

    EMC_FOR_EACH_POS_WITH_HASH(cache, current_entry, key->hash) {
        if (current_entry->key.hash == key->hash
            && emc_entry_alive(current_entry)
            && netdev_flow_key_equal_mf(&current_entry->key, &key->mf)) {

            /* We found the entry with the 'key->mf' miniflow */
            return current_entry->flow;
        }
    }

    return NULL;
}
static struct dp_netdev_flow *
dp_netdev_lookup_flow(const struct dp_netdev *dp,
                      const struct netdev_flow_key *key)
{
    struct dp_netdev_flow *netdev_flow;
    struct dpcls_rule *rule;

    dpcls_lookup(&dp->cls, key, &rule, 1);
    netdev_flow = dp_netdev_flow_cast(rule);

    return netdev_flow;
}

static struct dp_netdev_flow *
dp_netdev_find_flow(const struct dp_netdev *dp, const struct flow *flow)
{
    struct dp_netdev_flow *netdev_flow;

    CMAP_FOR_EACH_WITH_HASH (netdev_flow, node, flow_hash(flow, 0),
                             &dp->flow_table) {
        if (flow_equal(&netdev_flow->flow, flow)) {
            return netdev_flow;
        }
    }

    return NULL;
}
*netdev_flow
,
1534 struct dpif_flow_stats
*stats
)
1536 struct dp_netdev_flow_stats
*bucket
;
1539 memset(stats
, 0, sizeof *stats
);
1540 OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket
, i
, &netdev_flow
->stats
) {
1541 ovs_mutex_lock(&bucket
->mutex
);
1542 stats
->n_packets
+= bucket
->packet_count
;
1543 stats
->n_bytes
+= bucket
->byte_count
;
1544 stats
->used
= MAX(stats
->used
, bucket
->used
);
1545 stats
->tcp_flags
|= bucket
->tcp_flags
;
1546 ovs_mutex_unlock(&bucket
->mutex
);
static void
dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow *netdev_flow,
                            struct ofpbuf *buffer, struct dpif_flow *flow)
{
    struct flow_wildcards wc;
    struct dp_netdev_actions *actions;

    miniflow_expand(&netdev_flow->cr.mask->mf, &wc.masks);
    odp_flow_key_from_mask(buffer, &wc.masks, &netdev_flow->flow,
                           odp_to_u32(wc.masks.in_port.odp_port),
                           SIZE_MAX, true);
    flow->mask = ofpbuf_data(buffer);
    flow->mask_len = ofpbuf_size(buffer);

    actions = dp_netdev_flow_get_actions(netdev_flow);
    flow->actions = actions->actions;
    flow->actions_len = actions->size;

    get_dpif_flow_stats(netdev_flow, &flow->stats);
}
static int
dpif_netdev_mask_from_nlattrs(const struct nlattr *key, uint32_t key_len,
                              const struct nlattr *mask_key,
                              uint32_t mask_key_len, const struct flow *flow,
                              struct flow *mask)
{
    if (mask_key_len) {
        enum odp_key_fitness fitness;

        fitness = odp_flow_key_to_mask(mask_key, mask_key_len, mask, flow);
        if (fitness) {
            /* This should not happen: it indicates that
             * odp_flow_key_from_mask() and odp_flow_key_to_mask()
             * disagree on the acceptable form of a mask. Log the problem
             * as an error, with enough details to enable debugging. */
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

            if (!VLOG_DROP_ERR(&rl)) {
                struct ds s;

                ds_init(&s);
                odp_flow_format(key, key_len, mask_key, mask_key_len, NULL, &s,
                                true);
                VLOG_ERR("internal error parsing flow mask %s (%s)",
                         ds_cstr(&s), odp_key_fitness_to_string(fitness));
                ds_destroy(&s);
            }

            return EINVAL;
        }
    } else {
        enum mf_field_id id;
        /* No mask key, unwildcard everything except fields whose
         * prerequisites are not met. */
        memset(mask, 0x0, sizeof *mask);

        for (id = 0; id < MFF_N_IDS; ++id) {
            /* Skip registers and metadata. */
            if (!(id >= MFF_REG0 && id < MFF_REG0 + FLOW_N_REGS)
                && id != MFF_METADATA) {
                const struct mf_field *mf = mf_from_id(id);
                if (mf_are_prereqs_ok(mf, flow)) {
                    mf_mask_field(mf, mask);
                }
            }
        }
    }

    /* Force unwildcard the in_port.
     *
     * We need to do this even in the case where we unwildcard "everything"
     * above because "everything" only includes the 16-bit OpenFlow port number
     * mask->in_port.ofp_port, which only covers half of the 32-bit datapath
     * port number mask->in_port.odp_port. */
    mask->in_port.odp_port = u32_to_odp(UINT32_MAX);

    return 0;
}
static int
dpif_netdev_flow_from_nlattrs(const struct nlattr *key, uint32_t key_len,
                              struct flow *flow)
{
    odp_port_t in_port;

    if (odp_flow_key_to_flow(key, key_len, flow)) {
        /* This should not happen: it indicates that odp_flow_key_from_flow()
         * and odp_flow_key_to_flow() disagree on the acceptable form of a
         * flow. Log the problem as an error, with enough details to enable
         * debugging. */
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        if (!VLOG_DROP_ERR(&rl)) {
            struct ds s;

            ds_init(&s);
            odp_flow_format(key, key_len, NULL, 0, NULL, &s, true);
            VLOG_ERR("internal error parsing flow key %s", ds_cstr(&s));
            ds_destroy(&s);
        }

        return EINVAL;
    }

    in_port = flow->in_port.odp_port;
    if (!is_valid_port_number(in_port) && in_port != ODPP_NONE) {
        return EINVAL;
    }

    return 0;
}
static int
dpif_netdev_flow_get(const struct dpif *dpif, const struct dpif_flow_get *get)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_flow *netdev_flow;
    struct flow key;
    int error;

    error = dpif_netdev_flow_from_nlattrs(get->key, get->key_len, &key);
    if (error) {
        return error;
    }

    netdev_flow = dp_netdev_find_flow(dp, &key);

    if (netdev_flow) {
        dp_netdev_flow_to_dpif_flow(netdev_flow, get->buffer, get->flow);
    } else {
        error = ENOENT;
    }

    return error;
}
*
1688 dp_netdev_flow_add(struct dp_netdev
*dp
, struct match
*match
,
1689 const struct nlattr
*actions
, size_t actions_len
)
1690 OVS_REQUIRES(dp
->flow_mutex
)
1692 struct dp_netdev_flow
*flow
;
1693 struct netdev_flow_key mask
;
1695 netdev_flow_mask_init(&mask
, match
);
1696 /* Make sure wc does not have metadata. */
1697 ovs_assert(!(mask
.mf
.map
& (MINIFLOW_MAP(metadata
) | MINIFLOW_MAP(regs
))));
1699 /* Do not allocate extra space. */
1700 flow
= xmalloc(sizeof *flow
- sizeof flow
->cr
.flow
.mf
+ mask
.len
);
1702 *CONST_CAST(struct flow
*, &flow
->flow
) = match
->flow
;
1703 ovs_refcount_init(&flow
->ref_cnt
);
1704 ovsthread_stats_init(&flow
->stats
);
1705 ovsrcu_set(&flow
->actions
, dp_netdev_actions_create(actions
, actions_len
));
1707 cmap_insert(&dp
->flow_table
,
1708 CONST_CAST(struct cmap_node
*, &flow
->node
),
1709 flow_hash(&flow
->flow
, 0));
1710 netdev_flow_key_init_masked(&flow
->cr
.flow
, &match
->flow
, &mask
);
1711 dpcls_insert(&dp
->cls
, &flow
->cr
, &mask
);
1713 if (OVS_UNLIKELY(VLOG_IS_DBG_ENABLED())) {
1715 struct ds ds
= DS_EMPTY_INITIALIZER
;
1717 match
.flow
= flow
->flow
;
1718 miniflow_expand(&flow
->cr
.mask
->mf
, &match
.wc
.masks
);
1720 ds_put_cstr(&ds
, "flow_add: ");
1721 match_format(&match
, &ds
, OFP_DEFAULT_PRIORITY
);
1722 ds_put_cstr(&ds
, ", actions:");
1723 format_odp_actions(&ds
, actions
, actions_len
);
1725 VLOG_DBG_RL(&upcall_rl
, "%s", ds_cstr(&ds
));
1734 clear_stats(struct dp_netdev_flow
*netdev_flow
)
1736 struct dp_netdev_flow_stats
*bucket
;
1739 OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket
, i
, &netdev_flow
->stats
) {
1740 ovs_mutex_lock(&bucket
->mutex
);
1742 bucket
->packet_count
= 0;
1743 bucket
->byte_count
= 0;
1744 bucket
->tcp_flags
= 0;
1745 ovs_mutex_unlock(&bucket
->mutex
);
static int
dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_flow *netdev_flow;
    struct netdev_flow_key key;
    struct match match;
    int error;

    error = dpif_netdev_flow_from_nlattrs(put->key, put->key_len, &match.flow);
    if (error) {
        return error;
    }
    error = dpif_netdev_mask_from_nlattrs(put->key, put->key_len,
                                          put->mask, put->mask_len,
                                          &match.flow, &match.wc.masks);
    if (error) {
        return error;
    }

    /* Must produce a netdev_flow_key for lookup.
     * This interface is no longer performance critical, since it is not used
     * for upcall processing any more. */
    netdev_flow_key_from_flow(&key, &match.flow);

    ovs_mutex_lock(&dp->flow_mutex);
    netdev_flow = dp_netdev_lookup_flow(dp, &key);
    if (!netdev_flow) {
        if (put->flags & DPIF_FP_CREATE) {
            if (cmap_count(&dp->flow_table) < MAX_FLOWS) {
                if (put->stats) {
                    memset(put->stats, 0, sizeof *put->stats);
                }
                dp_netdev_flow_add(dp, &match, put->actions, put->actions_len);
                error = 0;
            } else {
                error = EFBIG;
            }
        } else {
            error = ENOENT;
        }
    } else {
        if (put->flags & DPIF_FP_MODIFY
            && flow_equal(&match.flow, &netdev_flow->flow)) {
            struct dp_netdev_actions *new_actions;
            struct dp_netdev_actions *old_actions;

            new_actions = dp_netdev_actions_create(put->actions,
                                                   put->actions_len);

            old_actions = dp_netdev_flow_get_actions(netdev_flow);
            ovsrcu_set(&netdev_flow->actions, new_actions);

            if (put->stats) {
                get_dpif_flow_stats(netdev_flow, put->stats);
            }
            if (put->flags & DPIF_FP_ZERO_STATS) {
                clear_stats(netdev_flow);
            }

            ovsrcu_postpone(dp_netdev_actions_free, old_actions);
        } else if (put->flags & DPIF_FP_CREATE) {
            error = EEXIST;
        } else {
            /* Overlapping flow. */
            error = EINVAL;
        }
    }
    ovs_mutex_unlock(&dp->flow_mutex);

    return error;
}
static int
dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_flow *netdev_flow;
    struct flow key;
    int error;

    error = dpif_netdev_flow_from_nlattrs(del->key, del->key_len, &key);
    if (error) {
        return error;
    }

    ovs_mutex_lock(&dp->flow_mutex);
    netdev_flow = dp_netdev_find_flow(dp, &key);
    if (netdev_flow) {
        if (del->stats) {
            get_dpif_flow_stats(netdev_flow, del->stats);
        }
        dp_netdev_remove_flow(dp, netdev_flow);
    } else {
        error = ENOENT;
    }
    ovs_mutex_unlock(&dp->flow_mutex);

    return error;
}
struct dpif_netdev_flow_dump {
    struct dpif_flow_dump up;
    struct cmap_position pos;
    int status;
    struct ovs_mutex mutex;
};

static struct dpif_netdev_flow_dump *
dpif_netdev_flow_dump_cast(struct dpif_flow_dump *dump)
{
    return CONTAINER_OF(dump, struct dpif_netdev_flow_dump, up);
}

static struct dpif_flow_dump *
dpif_netdev_flow_dump_create(const struct dpif *dpif_)
{
    struct dpif_netdev_flow_dump *dump;

    dump = xmalloc(sizeof *dump);
    dpif_flow_dump_init(&dump->up, dpif_);
    memset(&dump->pos, 0, sizeof dump->pos);
    dump->status = 0;
    ovs_mutex_init(&dump->mutex);

    return &dump->up;
}

static int
dpif_netdev_flow_dump_destroy(struct dpif_flow_dump *dump_)
{
    struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_);

    ovs_mutex_destroy(&dump->mutex);
    free(dump);
    return 0;
}

struct dpif_netdev_flow_dump_thread {
    struct dpif_flow_dump_thread up;
    struct dpif_netdev_flow_dump *dump;
    struct odputil_keybuf keybuf[FLOW_DUMP_MAX_BATCH];
    struct odputil_keybuf maskbuf[FLOW_DUMP_MAX_BATCH];
};

static struct dpif_netdev_flow_dump_thread *
dpif_netdev_flow_dump_thread_cast(struct dpif_flow_dump_thread *thread)
{
    return CONTAINER_OF(thread, struct dpif_netdev_flow_dump_thread, up);
}

static struct dpif_flow_dump_thread *
dpif_netdev_flow_dump_thread_create(struct dpif_flow_dump *dump_)
{
    struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_);
    struct dpif_netdev_flow_dump_thread *thread;

    thread = xmalloc(sizeof *thread);
    dpif_flow_dump_thread_init(&thread->up, &dump->up);
    thread->dump = dump;
    return &thread->up;
}

static void
dpif_netdev_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_)
{
    struct dpif_netdev_flow_dump_thread *thread
        = dpif_netdev_flow_dump_thread_cast(thread_);

    free(thread);
}
static int
dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
                           struct dpif_flow *flows, int max_flows)
{
    struct dpif_netdev_flow_dump_thread *thread
        = dpif_netdev_flow_dump_thread_cast(thread_);
    struct dpif_netdev_flow_dump *dump = thread->dump;
    struct dpif_netdev *dpif = dpif_netdev_cast(thread->up.dpif);
    struct dp_netdev_flow *netdev_flows[FLOW_DUMP_MAX_BATCH];
    struct dp_netdev *dp = get_dp_netdev(&dpif->dpif);
    int n_flows = 0;
    int i;

    ovs_mutex_lock(&dump->mutex);
    if (!dump->status) {
        for (n_flows = 0; n_flows < MIN(max_flows, FLOW_DUMP_MAX_BATCH);
             n_flows++) {
            struct cmap_node *node;

            node = cmap_next_position(&dp->flow_table, &dump->pos);
            if (!node) {
                dump->status = EOF;
                break;
            }
            netdev_flows[n_flows] = CONTAINER_OF(node, struct dp_netdev_flow,
                                                 node);
        }
    }
    ovs_mutex_unlock(&dump->mutex);

    for (i = 0; i < n_flows; i++) {
        struct odputil_keybuf *maskbuf = &thread->maskbuf[i];
        struct odputil_keybuf *keybuf = &thread->keybuf[i];
        struct dp_netdev_flow *netdev_flow = netdev_flows[i];
        struct dpif_flow *f = &flows[i];
        struct dp_netdev_actions *dp_actions;
        struct flow_wildcards wc;
        struct ofpbuf buf;

        miniflow_expand(&netdev_flow->cr.mask->mf, &wc.masks);

        /* Key. */
        ofpbuf_use_stack(&buf, keybuf, sizeof *keybuf);
        odp_flow_key_from_flow(&buf, &netdev_flow->flow, &wc.masks,
                               netdev_flow->flow.in_port.odp_port, true);
        f->key = ofpbuf_data(&buf);
        f->key_len = ofpbuf_size(&buf);

        /* Mask. */
        ofpbuf_use_stack(&buf, maskbuf, sizeof *maskbuf);
        odp_flow_key_from_mask(&buf, &wc.masks, &netdev_flow->flow,
                               odp_to_u32(wc.masks.in_port.odp_port),
                               SIZE_MAX, true);
        f->mask = ofpbuf_data(&buf);
        f->mask_len = ofpbuf_size(&buf);

        /* Actions. */
        dp_actions = dp_netdev_flow_get_actions(netdev_flow);
        f->actions = dp_actions->actions;
        f->actions_len = dp_actions->size;

        /* Stats. */
        get_dpif_flow_stats(netdev_flow, &f->stats);
    }

    return n_flows;
}
static int
dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_pmd_thread *pmd;
    struct dpif_packet packet, *pp;

    if (ofpbuf_size(execute->packet) < ETH_HEADER_LEN ||
        ofpbuf_size(execute->packet) > UINT16_MAX) {
        return EINVAL;
    }

    packet.ofpbuf = *execute->packet;
    packet.md = execute->md;
    pp = &packet;

    /* Tries finding the 'pmd'. If NULL is returned, that means
     * the current thread is a non-pmd thread and should use
     * dp_netdev_get_nonpmd(). */
    pmd = ovsthread_getspecific(dp->per_pmd_key);
    if (!pmd) {
        pmd = dp_netdev_get_nonpmd(dp);
    }

    /* If the current thread is non-pmd thread, acquires
     * the 'non_pmd_mutex'. */
    if (pmd->core_id == NON_PMD_CORE_ID) {
        ovs_mutex_lock(&dp->non_pmd_mutex);
    }
    dp_netdev_execute_actions(pmd, &pp, 1, false, execute->actions,
                              execute->actions_len);
    if (pmd->core_id == NON_PMD_CORE_ID) {
        ovs_mutex_unlock(&dp->non_pmd_mutex);
    }

    /* Even though may_steal is set to false, some actions could modify or
     * reallocate the ofpbuf memory. We need to pass those changes to the
     * caller. */
    *execute->packet = packet.ofpbuf;
    execute->md = packet.md;

    return 0;
}
static void
dpif_netdev_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops)
{
    size_t i;

    for (i = 0; i < n_ops; i++) {
        struct dpif_op *op = ops[i];

        switch (op->type) {
        case DPIF_OP_FLOW_PUT:
            op->error = dpif_netdev_flow_put(dpif, &op->u.flow_put);
            break;

        case DPIF_OP_FLOW_DEL:
            op->error = dpif_netdev_flow_del(dpif, &op->u.flow_del);
            break;

        case DPIF_OP_EXECUTE:
            op->error = dpif_netdev_execute(dpif, &op->u.execute);
            break;

        case DPIF_OP_FLOW_GET:
            op->error = dpif_netdev_flow_get(dpif, &op->u.flow_get);
            break;
        }
    }
}
/* Returns true if the configuration for rx queues or cpu mask
 * is changed. */
static bool
pmd_config_changed(const struct dp_netdev *dp, size_t rxqs, const char *cmask)
{
    if (dp->n_dpdk_rxqs != rxqs) {
        return true;
    } else {
        if (dp->pmd_cmask != NULL && cmask != NULL) {
            return strcmp(dp->pmd_cmask, cmask);
        } else {
            return (dp->pmd_cmask != NULL || cmask != NULL);
        }
    }
}
/* Resets pmd threads if the configuration for 'rxq's or cpu mask changes. */
static int
dpif_netdev_pmd_set(struct dpif *dpif, unsigned int n_rxqs, const char *cmask)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    if (pmd_config_changed(dp, n_rxqs, cmask)) {
        struct dp_netdev_port *port;

        dp_netdev_destroy_all_pmds(dp);

        CMAP_FOR_EACH (port, node, &dp->ports) {
            if (netdev_is_pmd(port->netdev)) {
                int i, err;

                /* Closes the existing 'rxq's. */
                for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
                    netdev_rxq_close(port->rxq[i]);
                    port->rxq[i] = NULL;
                }

                /* Sets the new rx queue config. */
                err = netdev_set_multiq(port->netdev, ovs_numa_get_n_cores(),
                                        n_rxqs);
                if (err && (err != EOPNOTSUPP)) {
                    VLOG_ERR("Failed to set dpdk interface %s rx_queue to:"
                             " %u", netdev_get_name(port->netdev),
                             n_rxqs);
                    return err;
                }

                /* If the set_multiq() above succeeds, reopens the 'rxq's. */
                port->rxq = xrealloc(port->rxq, sizeof *port->rxq
                                                * netdev_n_rxq(port->netdev));
                for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
                    netdev_rxq_open(port->netdev, &port->rxq[i], i);
                }
            }
        }
        dp->n_dpdk_rxqs = n_rxqs;

        /* Reconfigures the cpu mask. */
        ovs_numa_set_cpu_mask(cmask);
        free(dp->pmd_cmask);
        dp->pmd_cmask = cmask ? xstrdup(cmask) : NULL;

        /* Restores the non-pmd. */
        dp_netdev_set_nonpmd(dp);
        /* Restores all pmd threads. */
        dp_netdev_reset_pmd_threads(dp);
    }

    return 0;
}
static int
dpif_netdev_queue_to_priority(const struct dpif *dpif OVS_UNUSED,
                              uint32_t queue_id, uint32_t *priority)
{
    *priority = queue_id;
    return 0;
}

/* Creates and returns a new 'struct dp_netdev_actions', with a reference
 * count of 1, whose actions are a copy of the 'size' bytes of 'actions'. */
struct dp_netdev_actions *
dp_netdev_actions_create(const struct nlattr *actions, size_t size)
{
    struct dp_netdev_actions *netdev_actions;

    netdev_actions = xmalloc(sizeof *netdev_actions);
    netdev_actions->actions = xmemdup(actions, size);
    netdev_actions->size = size;

    return netdev_actions;
}

struct dp_netdev_actions *
dp_netdev_flow_get_actions(const struct dp_netdev_flow *flow)
{
    return ovsrcu_get(struct dp_netdev_actions *, &flow->actions);
}

static void
dp_netdev_actions_free(struct dp_netdev_actions *actions)
{
    free(actions->actions);
    free(actions);
}
static void
dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
                           struct dp_netdev_port *port,
                           struct netdev_rxq *rxq)
{
    struct dpif_packet *packets[NETDEV_MAX_RX_BATCH];
    int error, cnt;

    error = netdev_rxq_recv(rxq, packets, &cnt);
    if (!error) {
        int i;

        *recirc_depth_get() = 0;

        /* XXX: initialize md in netdev implementation. */
        for (i = 0; i < cnt; i++) {
            packets[i]->md = PKT_METADATA_INITIALIZER(port->port_no);
        }
        dp_netdev_input(pmd, packets, cnt);
    } else if (error != EAGAIN && error != EOPNOTSUPP) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
                    netdev_get_name(port->netdev), ovs_strerror(error));
    }
}
/* Return true if needs to revalidate datapath flows. */
static bool
dpif_netdev_run(struct dpif *dpif)
{
    struct dp_netdev_port *port;
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_pmd_thread *non_pmd = dp_netdev_get_nonpmd(dp);
    uint64_t new_tnl_seq;

    ovs_mutex_lock(&dp->non_pmd_mutex);
    CMAP_FOR_EACH (port, node, &dp->ports) {
        if (!netdev_is_pmd(port->netdev)) {
            int i;

            for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
                dp_netdev_process_rxq_port(non_pmd, port, port->rxq[i]);
            }
        }
    }
    ovs_mutex_unlock(&dp->non_pmd_mutex);
    tnl_arp_cache_run();
    new_tnl_seq = seq_read(tnl_conf_seq);

    if (dp->last_tnl_conf_seq != new_tnl_seq) {
        dp->last_tnl_conf_seq = new_tnl_seq;
        return true;
    }

    return false;
}

static void
dpif_netdev_wait(struct dpif *dpif)
{
    struct dp_netdev_port *port;
    struct dp_netdev *dp = get_dp_netdev(dpif);

    ovs_mutex_lock(&dp_netdev_mutex);
    CMAP_FOR_EACH (port, node, &dp->ports) {
        if (!netdev_is_pmd(port->netdev)) {
            int i;

            for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
                netdev_rxq_wait(port->rxq[i]);
            }
        }
    }
    ovs_mutex_unlock(&dp_netdev_mutex);
    seq_wait(tnl_conf_seq, dp->last_tnl_conf_seq);
}
2248 struct dp_netdev_port
*port
;
2249 struct netdev_rxq
*rx
;
static int
pmd_load_queues(struct dp_netdev_pmd_thread *pmd,
                struct rxq_poll **ppoll_list, int poll_cnt)
{
    struct rxq_poll *poll_list = *ppoll_list;
    struct dp_netdev_port *port;
    int n_pmds_on_numa, index, i;

    /* Simple scheduler for netdev rx polling. */
    for (i = 0; i < poll_cnt; i++) {
        port_unref(poll_list[i].port);
    }

    poll_cnt = 0;
    n_pmds_on_numa = get_n_pmd_threads_on_numa(pmd->dp, pmd->numa_id);
    index = 0;

    CMAP_FOR_EACH (port, node, &pmd->dp->ports) {
        /* Calls port_try_ref() to prevent the main thread
         * from deleting the port. */
        if (port_try_ref(port)) {
            if (netdev_is_pmd(port->netdev)
                && netdev_get_numa_id(port->netdev) == pmd->numa_id) {
                int i;

                for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
                    if ((index % n_pmds_on_numa) == pmd->index) {
                        poll_list = xrealloc(poll_list,
                                        sizeof *poll_list * (poll_cnt + 1));

                        port_ref(port);
                        poll_list[poll_cnt].port = port;
                        poll_list[poll_cnt].rx = port->rxq[i];
                        poll_cnt++;
                    }
                    index++;
                }
            }
            /* Unrefs the port_try_ref(). */
            port_unref(port);
        }
    }

    *ppoll_list = poll_list;
    return poll_cnt;
}
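
/* Main loop of a pmd thread: busy-polls the assigned rx queues and, every
 * thousand iterations or so, quiesces and checks 'change_seq' to see whether
 * the main thread has requested a reload.  On reload the flow cache and poll
 * list are rebuilt; the thread exits only once 'exit_latch' is set. */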
static void *
pmd_thread_main(void *f_)
{
    struct dp_netdev_pmd_thread *pmd = f_;
    unsigned int lc = 0;
    struct rxq_poll *poll_list;
    unsigned int port_seq = PMD_INITIAL_SEQ;
    int poll_cnt;
    int i;

    poll_cnt = 0;
    poll_list = NULL;

    /* Stores the pmd thread's 'pmd' to 'per_pmd_key'. */
    ovsthread_setspecific(pmd->dp->per_pmd_key, pmd);
    pmd_thread_setaffinity_cpu(pmd->core_id);
reload:
    emc_cache_init(&pmd->flow_cache);
    poll_cnt = pmd_load_queues(pmd, &poll_list, poll_cnt);

    /* Signal here to make sure the pmd finishes
     * reloading the updated configuration. */
    dp_netdev_pmd_reload_done(pmd);

    for (;;) {
        int i;

        for (i = 0; i < poll_cnt; i++) {
            dp_netdev_process_rxq_port(pmd, poll_list[i].port,
                                       poll_list[i].rx);
        }

        if (lc++ > 1024) {
            unsigned int seq;

            lc = 0;

            ovsrcu_quiesce();

            atomic_read_relaxed(&pmd->change_seq, &seq);
            if (seq != port_seq) {
                port_seq = seq;
                break;
            }
        }
    }

    emc_cache_uninit(&pmd->flow_cache);

    if (!latch_is_set(&pmd->exit_latch)) {
        goto reload;
    }

    for (i = 0; i < poll_cnt; i++) {
        port_unref(poll_list[i].port);
    }

    dp_netdev_pmd_reload_done(pmd);

    free(poll_list);
    return NULL;
}
static void
dp_netdev_disable_upcall(struct dp_netdev *dp)
    OVS_ACQUIRES(dp->upcall_rwlock)
{
    fat_rwlock_wrlock(&dp->upcall_rwlock);
}

static void
dpif_netdev_disable_upcall(struct dpif *dpif)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    dp_netdev_disable_upcall(dp);
}

static void
dp_netdev_enable_upcall(struct dp_netdev *dp)
    OVS_RELEASES(dp->upcall_rwlock)
{
    fat_rwlock_unlock(&dp->upcall_rwlock);
}

static void
dpif_netdev_enable_upcall(struct dpif *dpif)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    dp_netdev_enable_upcall(dp);
}
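
/* Wakes up the thread that requested a pmd reload (it is expected to be
 * waiting on 'cond' in dp_netdev_reload_pmd__()), confirming that this pmd
 * has finished applying the new configuration. */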
static void
dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd)
{
    ovs_mutex_lock(&pmd->cond_mutex);
    xpthread_cond_signal(&pmd->cond);
    ovs_mutex_unlock(&pmd->cond_mutex);
}
/* Returns the pointer to the dp_netdev_pmd_thread for non-pmd threads. */
static struct dp_netdev_pmd_thread *
dp_netdev_get_nonpmd(struct dp_netdev *dp)
{
    struct dp_netdev_pmd_thread *pmd;
    const struct cmap_node *pnode;

    pnode = cmap_find(&dp->poll_threads, hash_int(NON_PMD_CORE_ID, 0));
    ovs_assert(pnode);
    pmd = CONTAINER_OF(pnode, struct dp_netdev_pmd_thread, node);

    return pmd;
}
/* Sets the 'struct dp_netdev_pmd_thread' for non-pmd threads. */
static void
dp_netdev_set_nonpmd(struct dp_netdev *dp)
{
    struct dp_netdev_pmd_thread *non_pmd;

    non_pmd = xzalloc(sizeof *non_pmd);
    dp_netdev_configure_pmd(non_pmd, dp, 0, NON_PMD_CORE_ID,
                            OVS_NUMA_UNSPEC);
}
/* Configures the 'pmd' based on the input argument. */
static void
dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
                        int index, int core_id, int numa_id)
{
    pmd->dp = dp;
    pmd->index = index;
    pmd->core_id = core_id;
    pmd->numa_id = numa_id;
    latch_init(&pmd->exit_latch);
    atomic_init(&pmd->change_seq, PMD_INITIAL_SEQ);
    xpthread_cond_init(&pmd->cond, NULL);
    ovs_mutex_init(&pmd->cond_mutex);
    /* Initialize 'flow_cache' here, since there is no actual thread
     * created for NON_PMD_CORE_ID. */
    if (core_id == NON_PMD_CORE_ID) {
        emc_cache_init(&pmd->flow_cache);
    }
    cmap_insert(&dp->poll_threads, CONST_CAST(struct cmap_node *, &pmd->node),
                hash_int(core_id, 0));
}
/* Stops the pmd thread, removes it from the 'dp->poll_threads',
 * and destroys the struct. */
static void
dp_netdev_del_pmd(struct dp_netdev_pmd_thread *pmd)
{
    /* Uninitialize 'flow_cache' here, since there is no actual
     * thread to do it. */
    if (pmd->core_id == NON_PMD_CORE_ID) {
        emc_cache_uninit(&pmd->flow_cache);
    } else {
        latch_set(&pmd->exit_latch);
        dp_netdev_reload_pmd__(pmd);
        ovs_numa_unpin_core(pmd->core_id);
        xpthread_join(pmd->thread, NULL);
    }

    cmap_remove(&pmd->dp->poll_threads, &pmd->node, hash_int(pmd->core_id, 0));
    latch_destroy(&pmd->exit_latch);
    xpthread_cond_destroy(&pmd->cond);
    ovs_mutex_destroy(&pmd->cond_mutex);
    free(pmd);
}
/* Destroys all pmd threads. */
static void
dp_netdev_destroy_all_pmds(struct dp_netdev *dp)
{
    struct dp_netdev_pmd_thread *pmd;

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        dp_netdev_del_pmd(pmd);
    }
}
/* Deletes all pmd threads on numa node 'numa_id'. */
static void
dp_netdev_del_pmds_on_numa(struct dp_netdev *dp, int numa_id)
{
    struct dp_netdev_pmd_thread *pmd;

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        if (pmd->numa_id == numa_id) {
            dp_netdev_del_pmd(pmd);
        }
    }
}
/* Checks the numa node id of 'netdev' and starts pmd threads for
 * that numa node. */
static void
dp_netdev_set_pmds_on_numa(struct dp_netdev *dp, int numa_id)
{
    int n_pmds;

    if (!ovs_numa_numa_id_is_valid(numa_id)) {
        VLOG_ERR("Cannot create pmd threads due to invalid numa id (%d)",
                 numa_id);
        return;
    }

    n_pmds = get_n_pmd_threads_on_numa(dp, numa_id);

    /* If there are already pmd threads created for the numa node
     * that 'netdev' is on, do nothing.  Otherwise, create the
     * pmd threads for the numa node. */
    if (!n_pmds) {
        int can_have, n_unpinned, i;

        n_unpinned = ovs_numa_get_n_unpinned_cores_on_numa(numa_id);
        if (!n_unpinned) {
            VLOG_ERR("Cannot create pmd threads: no unpinned cores left "
                     "on numa node %d", numa_id);
            return;
        }

        /* If a cpu mask is specified, use all the unpinned cores;
         * otherwise try to create NR_PMD_THREADS pmd threads. */
        can_have = dp->pmd_cmask ? n_unpinned : MIN(n_unpinned, NR_PMD_THREADS);
        for (i = 0; i < can_have; i++) {
            struct dp_netdev_pmd_thread *pmd = xzalloc(sizeof *pmd);
            int core_id = ovs_numa_get_unpinned_core_on_numa(numa_id);

            dp_netdev_configure_pmd(pmd, dp, i, core_id, numa_id);
            /* Each pmd thread will distribute all devices' rx queues
             * among themselves. */
            pmd->thread = ovs_thread_create("pmd", pmd_thread_main, pmd);
        }
        VLOG_INFO("Created %d pmd threads on numa node %d", can_have, numa_id);
    }
}
static void *
dp_netdev_flow_stats_new_cb(void)
{
    struct dp_netdev_flow_stats *bucket = xzalloc_cacheline(sizeof *bucket);

    ovs_mutex_init(&bucket->mutex);
    return bucket;
}
/* Called after a pmd threads config change.  Restarts the pmd threads with
 * the new configuration. */
static void
dp_netdev_reset_pmd_threads(struct dp_netdev *dp)
{
    struct dp_netdev_port *port;

    CMAP_FOR_EACH (port, node, &dp->ports) {
        if (netdev_is_pmd(port->netdev)) {
            int numa_id = netdev_get_numa_id(port->netdev);

            dp_netdev_set_pmds_on_numa(dp, numa_id);
        }
    }
}
static char *
dpif_netdev_get_datapath_version(void)
{
    return xstrdup("<built-in>");
}
static void
dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow,
                    int cnt, int size, uint16_t tcp_flags)
{
    long long int now = time_msec();
    struct dp_netdev_flow_stats *bucket;

    bucket = ovsthread_stats_bucket_get(&netdev_flow->stats,
                                        dp_netdev_flow_stats_new_cb);

    ovs_mutex_lock(&bucket->mutex);
    bucket->used = MAX(now, bucket->used);
    bucket->packet_count += cnt;
    bucket->byte_count += size;
    bucket->tcp_flags |= tcp_flags;
    ovs_mutex_unlock(&bucket->mutex);
}
static void *
dp_netdev_stats_new_cb(void)
{
    struct dp_netdev_stats *bucket = xzalloc_cacheline(sizeof *bucket);

    ovs_mutex_init(&bucket->mutex);
    return bucket;
}
static void
dp_netdev_count_packet(struct dp_netdev *dp, enum dp_stat_type type, int cnt)
{
    struct dp_netdev_stats *bucket;

    bucket = ovsthread_stats_bucket_get(&dp->stats, dp_netdev_stats_new_cb);
    ovs_mutex_lock(&bucket->mutex);
    bucket->n[type] += cnt;
    ovs_mutex_unlock(&bucket->mutex);
}
static int
dp_netdev_upcall(struct dp_netdev *dp, struct dpif_packet *packet_,
                 struct flow *flow, struct flow_wildcards *wc,
                 enum dpif_upcall_type type, const struct nlattr *userdata,
                 struct ofpbuf *actions, struct ofpbuf *put_actions)
{
    struct ofpbuf *packet = &packet_->ofpbuf;

    if (type == DPIF_UC_MISS) {
        dp_netdev_count_packet(dp, DP_STAT_MISS, 1);
    }

    if (OVS_UNLIKELY(!dp->upcall_cb)) {
        return ENODEV;
    }

    if (OVS_UNLIKELY(!VLOG_DROP_DBG(&upcall_rl))) {
        struct ds ds = DS_EMPTY_INITIALIZER;
        struct ofpbuf key;
        char *packet_str;

        ofpbuf_init(&key, 0);
        odp_flow_key_from_flow(&key, flow, &wc->masks, flow->in_port.odp_port,
                               true);

        packet_str = ofp_packet_to_string(ofpbuf_data(packet),
                                          ofpbuf_size(packet));

        odp_flow_key_format(ofpbuf_data(&key), ofpbuf_size(&key), &ds);

        VLOG_DBG("%s: %s upcall:\n%s\n%s", dp->name,
                 dpif_upcall_type_to_string(type), ds_cstr(&ds), packet_str);

        ofpbuf_uninit(&key);
        free(packet_str);
        ds_destroy(&ds);
    }

    return dp->upcall_cb(packet, flow, type, userdata, actions, wc,
                         put_actions, dp->upcall_aux);
}
static inline uint32_t
dpif_netdev_packet_get_dp_hash(struct dpif_packet *packet,
                               const struct miniflow *mf)
{
    uint32_t hash;

    hash = dpif_packet_get_dp_hash(packet);
    if (OVS_UNLIKELY(!hash)) {
        hash = miniflow_hash_5tuple(mf, 0);
        dpif_packet_set_dp_hash(packet, hash);
    }
    return hash;
}
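
/* A group of packets from the same rx batch that matched the same flow.
 * Collecting them lets the datapath update the flow's statistics once and
 * execute its actions once for the whole group, instead of per packet. */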
struct packet_batch {
    unsigned int packet_count;
    unsigned int byte_count;
    uint16_t tcp_flags;

    struct dp_netdev_flow *flow;

    struct dpif_packet *packets[NETDEV_MAX_RX_BATCH];
};
static inline void
packet_batch_update(struct packet_batch *batch, struct dpif_packet *packet,
                    const struct miniflow *mf)
{
    batch->tcp_flags |= miniflow_get_tcp_flags(mf);
    batch->packets[batch->packet_count++] = packet;
    batch->byte_count += ofpbuf_size(&packet->ofpbuf);
}
static inline void
packet_batch_init(struct packet_batch *batch, struct dp_netdev_flow *flow)
{
    batch->flow = flow;

    batch->packet_count = 0;
    batch->byte_count = 0;
    batch->tcp_flags = 0;
}
static inline void
packet_batch_execute(struct packet_batch *batch,
                     struct dp_netdev_pmd_thread *pmd)
{
    struct dp_netdev_actions *actions;
    struct dp_netdev_flow *flow = batch->flow;

    dp_netdev_flow_used(batch->flow, batch->packet_count, batch->byte_count,
                        batch->tcp_flags);

    actions = dp_netdev_flow_get_actions(flow);

    dp_netdev_execute_actions(pmd, batch->packets, batch->packet_count, true,
                              actions->actions, actions->size);

    dp_netdev_count_packet(pmd->dp, DP_STAT_HIT, batch->packet_count);
}
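
/* Adds 'pkt' to an existing batch for 'flow' in 'batches', or starts a new
 * batch if there is room.  Returns false if 'flow' is NULL or if all
 * 'max_batches' slots are already taken, in which case the caller must handle
 * the packet itself. */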
static inline bool
dp_netdev_queue_batches(struct dpif_packet *pkt,
                        struct dp_netdev_flow *flow, const struct miniflow *mf,
                        struct packet_batch *batches, size_t *n_batches,
                        size_t max_batches)
{
    struct packet_batch *batch = NULL;
    int j;

    if (OVS_UNLIKELY(!flow)) {
        return false;
    }
    /* XXX: This O(n^2) algorithm makes sense if we're operating under the
     * assumption that the number of distinct flows (and therefore the
     * number of distinct batches) is quite small.  If this turns out not
     * to be the case, it may make sense to pre-sort based on the
     * netdev_flow pointer.  That done, we can get the appropriate batching
     * in O(n * log(n)) instead. */
    for (j = *n_batches - 1; j >= 0; j--) {
        if (batches[j].flow == flow) {
            batch = &batches[j];
            packet_batch_update(batch, pkt, mf);
            return true;
        }
    }
    if (OVS_UNLIKELY(*n_batches >= max_batches)) {
        return false;
    }

    batch = &batches[(*n_batches)++];
    packet_batch_init(batch, flow);
    packet_batch_update(batch, pkt, mf);
    return true;
}
static inline void
dpif_packet_swap(struct dpif_packet **a, struct dpif_packet **b)
{
    struct dpif_packet *tmp = *a;

    *a = *b;
    *b = tmp;
}
/* Tries to process all ('cnt') of the 'packets' using only the exact match
 * cache 'flow_cache'.  If a flow is not found for a packet 'packets[i]', or
 * if there is no matching batch for the packet's flow, the miniflow is copied
 * into 'keys' and the packet pointer is moved to the beginning of the
 * 'packets' array.
 *
 * Returns the number of packets that still need to be processed in the
 * 'packets' array (they have been moved to the beginning of the vector). */
static inline size_t
emc_processing(struct dp_netdev_pmd_thread *pmd, struct dpif_packet **packets,
               size_t cnt, struct netdev_flow_key *keys)
{
    struct netdev_flow_key key;
    struct packet_batch batches[4];
    struct emc_cache *flow_cache = &pmd->flow_cache;
    size_t n_batches, i;
    size_t notfound_cnt = 0;

    n_batches = 0;
    miniflow_initialize(&key.mf, key.buf);
    for (i = 0; i < cnt; i++) {
        struct dp_netdev_flow *flow;

        if (OVS_UNLIKELY(ofpbuf_size(&packets[i]->ofpbuf) < ETH_HEADER_LEN)) {
            dpif_packet_delete(packets[i]);
            continue;
        }

        miniflow_extract(&packets[i]->ofpbuf, &packets[i]->md, &key.mf);
        key.len = 0; /* Not computed yet. */
        key.hash = dpif_netdev_packet_get_dp_hash(packets[i], &key.mf);

        flow = emc_lookup(flow_cache, &key);
        if (OVS_UNLIKELY(!dp_netdev_queue_batches(packets[i], flow, &key.mf,
                                                  batches, &n_batches,
                                                  ARRAY_SIZE(batches)))) {
            if (i != notfound_cnt) {
                dpif_packet_swap(&packets[i], &packets[notfound_cnt]);
            }

            keys[notfound_cnt++] = key;
        }
    }

    for (i = 0; i < n_batches; i++) {
        packet_batch_execute(&batches[i], pmd);
    }

    return notfound_cnt;
}
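
/* Handles the packets that missed the exact match cache: looks them up in the
 * datapath classifier, performs an upcall (and possibly installs a new flow)
 * for any remaining misses while upcalls are enabled, inserts the results
 * into the exact match cache, and finally executes the matched flows'
 * actions in batches. */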
static inline void
fast_path_processing(struct dp_netdev_pmd_thread *pmd,
                     struct dpif_packet **packets, size_t cnt,
                     struct netdev_flow_key *keys)
{
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = cnt;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_RX_BATCH };
#endif
    struct packet_batch batches[PKT_ARRAY_SIZE];
    struct dpcls_rule *rules[PKT_ARRAY_SIZE];
    struct dp_netdev *dp = pmd->dp;
    struct emc_cache *flow_cache = &pmd->flow_cache;
    size_t n_batches, i;
    bool any_miss;

    for (i = 0; i < cnt; i++) {
        /* Key length is needed in all the cases, hash computed on demand. */
        keys[i].len = netdev_flow_key_size(count_1bits(keys[i].mf.map));
    }
    any_miss = !dpcls_lookup(&dp->cls, keys, rules, cnt);
    if (OVS_UNLIKELY(any_miss) && !fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
        uint64_t actions_stub[512 / 8], slow_stub[512 / 8];
        struct ofpbuf actions, put_actions;
        struct match match;

        ofpbuf_use_stub(&actions, actions_stub, sizeof actions_stub);
        ofpbuf_use_stub(&put_actions, slow_stub, sizeof slow_stub);

        for (i = 0; i < cnt; i++) {
            struct dp_netdev_flow *netdev_flow;
            struct ofpbuf *add_actions;
            int error;

            if (OVS_LIKELY(rules[i])) {
                continue;
            }

            /* It's possible that an earlier slow path execution installed
             * a rule covering this flow.  In this case, it's a lot cheaper
             * to catch it here than execute a miss. */
            netdev_flow = dp_netdev_lookup_flow(dp, &keys[i]);
            if (netdev_flow) {
                rules[i] = &netdev_flow->cr;
                continue;
            }

            miniflow_expand(&keys[i].mf, &match.flow);

            ofpbuf_clear(&actions);
            ofpbuf_clear(&put_actions);

            error = dp_netdev_upcall(dp, packets[i], &match.flow, &match.wc,
                                     DPIF_UC_MISS, NULL, &actions,
                                     &put_actions);
            if (OVS_UNLIKELY(error && error != ENOSPC)) {
                continue;
            }

            /* We can't allow the packet batching in the next loop to execute
             * the actions.  Otherwise, if there are any slow path actions,
             * we'll send the packet up twice. */
            dp_netdev_execute_actions(pmd, &packets[i], 1, true,
                                      ofpbuf_data(&actions),
                                      ofpbuf_size(&actions));

            add_actions = ofpbuf_size(&put_actions)
                ? &put_actions
                : &actions;

            if (OVS_LIKELY(error != ENOSPC)) {
                /* XXX: There's a race window where a flow covering this packet
                 * could have already been installed since we last did the flow
                 * lookup before upcall.  This could be solved by moving the
                 * mutex lock outside the loop, but that's an awful long time
                 * to be locking everyone out of making flow installs.  If we
                 * move to a per-core classifier, it would be reasonable. */
                ovs_mutex_lock(&dp->flow_mutex);
                netdev_flow = dp_netdev_lookup_flow(dp, &keys[i]);
                if (OVS_LIKELY(!netdev_flow)) {
                    netdev_flow = dp_netdev_flow_add(dp, &match,
                                                     ofpbuf_data(add_actions),
                                                     ofpbuf_size(add_actions));
                }
                ovs_mutex_unlock(&dp->flow_mutex);

                emc_insert(flow_cache, &keys[i], netdev_flow);
            }
        }

        ofpbuf_uninit(&actions);
        ofpbuf_uninit(&put_actions);
        fat_rwlock_unlock(&dp->upcall_rwlock);
    } else if (OVS_UNLIKELY(any_miss)) {
        int dropped_cnt = 0;

        for (i = 0; i < cnt; i++) {
            if (OVS_UNLIKELY(!rules[i])) {
                dpif_packet_delete(packets[i]);
                dropped_cnt++;
            }
        }

        dp_netdev_count_packet(dp, DP_STAT_LOST, dropped_cnt);
    }

    n_batches = 0;
    for (i = 0; i < cnt; i++) {
        struct dpif_packet *packet = packets[i];
        struct dp_netdev_flow *flow;

        if (OVS_UNLIKELY(!rules[i])) {
            continue;
        }

        flow = dp_netdev_flow_cast(rules[i]);

        emc_insert(flow_cache, &keys[i], flow);
        dp_netdev_queue_batches(packet, flow, &keys[i].mf, batches,
                                &n_batches, ARRAY_SIZE(batches));
    }

    for (i = 0; i < n_batches; i++) {
        packet_batch_execute(&batches[i], pmd);
    }
}
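
/* Entry point for packet processing: tries the exact match cache first via
 * emc_processing(), then sends whatever remains through the datapath
 * classifier in fast_path_processing(). */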
static void
dp_netdev_input(struct dp_netdev_pmd_thread *pmd,
                struct dpif_packet **packets, int cnt)
{
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = cnt;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_RX_BATCH };
#endif
    struct netdev_flow_key keys[PKT_ARRAY_SIZE];
    size_t newcnt;

    newcnt = emc_processing(pmd, packets, cnt, keys);
    if (OVS_UNLIKELY(newcnt)) {
        fast_path_processing(pmd, packets, newcnt, keys);
    }
}
struct dp_netdev_execute_aux {
    struct dp_netdev_pmd_thread *pmd;
};
static void
dpif_netdev_register_upcall_cb(struct dpif *dpif, upcall_callback *cb,
                               void *aux)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    dp->upcall_aux = aux;
    dp->upcall_cb = cb;
}
static void
dp_netdev_drop_packets(struct dpif_packet **packets, int cnt, bool may_steal)
{
    if (may_steal) {
        int i;

        for (i = 0; i < cnt; i++) {
            dpif_packet_delete(packets[i]);
        }
    }
}
static int
push_tnl_action(const struct dp_netdev *dp,
                const struct nlattr *attr,
                struct dpif_packet **packets, int cnt)
{
    struct dp_netdev_port *tun_port;
    const struct ovs_action_push_tnl *data;

    data = nl_attr_get(attr);

    tun_port = dp_netdev_lookup_port(dp, u32_to_odp(data->tnl_port));
    if (!tun_port) {
        return -EINVAL;
    }
    netdev_push_header(tun_port->netdev, packets, cnt, data);

    return 0;
}
static void
dp_netdev_clone_pkt_batch(struct dpif_packet **tnl_pkt,
                          struct dpif_packet **packets, int cnt)
{
    int i;

    for (i = 0; i < cnt; i++) {
        tnl_pkt[i] = dpif_packet_clone(packets[i]);
    }
}
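
/* Callback passed to odp_execute_actions(): executes a single datapath action
 * 'a' on the 'cnt' 'packets'.  Output, tunnel push/pop, userspace, hash, and
 * recirculation actions are handled here; the remaining action types are
 * expected to have been handled by odp_execute_actions() itself. */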
static void
dp_execute_cb(void *aux_, struct dpif_packet **packets, int cnt,
              const struct nlattr *a, bool may_steal)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct dp_netdev_execute_aux *aux = aux_;
    uint32_t *depth = recirc_depth_get();
    struct dp_netdev_pmd_thread *pmd = aux->pmd;
    struct dp_netdev *dp = pmd->dp;
    int type = nl_attr_type(a);
    struct dp_netdev_port *p;
    int i;

    switch ((enum ovs_action_attr)type) {
    case OVS_ACTION_ATTR_OUTPUT:
        p = dp_netdev_lookup_port(dp, u32_to_odp(nl_attr_get_u32(a)));
        if (OVS_LIKELY(p)) {
            netdev_send(p->netdev, pmd->core_id, packets, cnt, may_steal);
            return;
        }
        break;

    case OVS_ACTION_ATTR_TUNNEL_PUSH:
        if (*depth < MAX_RECIRC_DEPTH) {
            struct dpif_packet *tnl_pkt[NETDEV_MAX_RX_BATCH];
            int err;

            if (!may_steal) {
                dp_netdev_clone_pkt_batch(tnl_pkt, packets, cnt);
                packets = tnl_pkt;
            }

            err = push_tnl_action(dp, a, packets, cnt);
            if (!err) {
                (*depth)++;
                dp_netdev_input(pmd, packets, cnt);
                (*depth)--;
            } else {
                dp_netdev_drop_packets(tnl_pkt, cnt, !may_steal);
            }
            return;
        }
        break;

    case OVS_ACTION_ATTR_TUNNEL_POP:
        if (*depth < MAX_RECIRC_DEPTH) {
            odp_port_t portno = u32_to_odp(nl_attr_get_u32(a));

            p = dp_netdev_lookup_port(dp, portno);
            if (p) {
                struct dpif_packet *tnl_pkt[NETDEV_MAX_RX_BATCH];
                int err;

                if (!may_steal) {
                    dp_netdev_clone_pkt_batch(tnl_pkt, packets, cnt);
                    packets = tnl_pkt;
                }

                err = netdev_pop_header(p->netdev, packets, cnt);
                if (!err) {
                    for (i = 0; i < cnt; i++) {
                        packets[i]->md.in_port.odp_port = portno;
                    }

                    (*depth)++;
                    dp_netdev_input(pmd, packets, cnt);
                    (*depth)--;
                } else {
                    dp_netdev_drop_packets(tnl_pkt, cnt, !may_steal);
                }
                return;
            }
        }
        break;

    case OVS_ACTION_ATTR_USERSPACE:
        if (!fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
            const struct nlattr *userdata;
            struct ofpbuf actions;
            struct flow flow;

            userdata = nl_attr_find_nested(a, OVS_USERSPACE_ATTR_USERDATA);
            ofpbuf_init(&actions, 0);

            for (i = 0; i < cnt; i++) {
                int error;

                ofpbuf_clear(&actions);

                flow_extract(&packets[i]->ofpbuf, &packets[i]->md, &flow);
                error = dp_netdev_upcall(dp, packets[i], &flow, NULL,
                                         DPIF_UC_ACTION, userdata, &actions,
                                         NULL);
                if (!error || error == ENOSPC) {
                    dp_netdev_execute_actions(pmd, &packets[i], 1, may_steal,
                                              ofpbuf_data(&actions),
                                              ofpbuf_size(&actions));
                } else if (may_steal) {
                    dpif_packet_delete(packets[i]);
                }
            }
            ofpbuf_uninit(&actions);
            fat_rwlock_unlock(&dp->upcall_rwlock);

            return;
        }
        break;

    case OVS_ACTION_ATTR_HASH: {
        const struct ovs_action_hash *hash_act;
        uint32_t hash;

        hash_act = nl_attr_get(a);

        for (i = 0; i < cnt; i++) {
            if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
                /* Hash need not be symmetric, nor does it need to include
                 * L2 fields. */
                hash = hash_2words(dpif_packet_get_dp_hash(packets[i]),
                                   hash_act->hash_basis);
            } else {
                VLOG_WARN("Unknown hash algorithm specified "
                          "for the hash action.");
                hash = 2;
            }

            if (!hash) {
                hash = 1; /* 0 is not valid. */
            }

            dpif_packet_set_dp_hash(packets[i], hash);
        }
        return;
    }

    case OVS_ACTION_ATTR_RECIRC:
        if (*depth < MAX_RECIRC_DEPTH) {
            (*depth)++;
            for (i = 0; i < cnt; i++) {
                struct dpif_packet *recirc_pkt;

                recirc_pkt = (may_steal) ? packets[i]
                                         : dpif_packet_clone(packets[i]);

                recirc_pkt->md.recirc_id = nl_attr_get_u32(a);

                /* Hash is private to each packet. */
                recirc_pkt->md.dp_hash = dpif_packet_get_dp_hash(packets[i]);

                dp_netdev_input(pmd, &recirc_pkt, 1);
            }
            (*depth)--;

            return;
        }

        VLOG_WARN("Packet dropped. Max recirculation depth exceeded.");
        break;

    case OVS_ACTION_ATTR_PUSH_VLAN:
    case OVS_ACTION_ATTR_POP_VLAN:
    case OVS_ACTION_ATTR_PUSH_MPLS:
    case OVS_ACTION_ATTR_POP_MPLS:
    case OVS_ACTION_ATTR_SET:
    case OVS_ACTION_ATTR_SET_MASKED:
    case OVS_ACTION_ATTR_SAMPLE:
    case OVS_ACTION_ATTR_UNSPEC:
    case __OVS_ACTION_ATTR_MAX:
        OVS_NOT_REACHED();
    }

    dp_netdev_drop_packets(packets, cnt, may_steal);
}
static void
dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
                          struct dpif_packet **packets, int cnt,
                          bool may_steal,
                          const struct nlattr *actions, size_t actions_len)
{
    struct dp_netdev_execute_aux aux = { pmd };

    odp_execute_actions(&aux, packets, cnt, may_steal, actions,
                        actions_len, dp_execute_cb);
}
const struct dpif_class dpif_netdev_class = {
    "netdev",
    dpif_netdev_enumerate,
    dpif_netdev_port_open_type,
    dpif_netdev_open,
    dpif_netdev_close,
    dpif_netdev_destroy,
    dpif_netdev_run,
    dpif_netdev_wait,
    dpif_netdev_get_stats,
    dpif_netdev_port_add,
    dpif_netdev_port_del,
    dpif_netdev_port_query_by_number,
    dpif_netdev_port_query_by_name,
    NULL,                       /* port_get_pid */
    dpif_netdev_port_dump_start,
    dpif_netdev_port_dump_next,
    dpif_netdev_port_dump_done,
    dpif_netdev_port_poll,
    dpif_netdev_port_poll_wait,
    dpif_netdev_flow_flush,
    dpif_netdev_flow_dump_create,
    dpif_netdev_flow_dump_destroy,
    dpif_netdev_flow_dump_thread_create,
    dpif_netdev_flow_dump_thread_destroy,
    dpif_netdev_flow_dump_next,
    dpif_netdev_operate,
    NULL,                       /* recv_set */
    NULL,                       /* handlers_set */
    dpif_netdev_pmd_set,
    dpif_netdev_queue_to_priority,
    NULL,                       /* recv */
    NULL,                       /* recv_wait */
    NULL,                       /* recv_purge */
    dpif_netdev_register_upcall_cb,
    dpif_netdev_enable_upcall,
    dpif_netdev_disable_upcall,
    dpif_netdev_get_datapath_version,
};
static void
dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED,
                              const char *argv[], void *aux OVS_UNUSED)
{
    struct dp_netdev_port *old_port;
    struct dp_netdev_port *new_port;
    struct dp_netdev *dp;
    odp_port_t port_no;

    ovs_mutex_lock(&dp_netdev_mutex);
    dp = shash_find_data(&dp_netdevs, argv[1]);
    if (!dp || !dpif_netdev_class_is_dummy(dp->class)) {
        ovs_mutex_unlock(&dp_netdev_mutex);
        unixctl_command_reply_error(conn, "unknown datapath or not a dummy");
        return;
    }
    ovs_refcount_ref(&dp->ref_cnt);
    ovs_mutex_unlock(&dp_netdev_mutex);

    ovs_mutex_lock(&dp->port_mutex);
    if (get_port_by_name(dp, argv[2], &old_port)) {
        unixctl_command_reply_error(conn, "unknown port");
        goto exit;
    }

    port_no = u32_to_odp(atoi(argv[3]));
    if (!port_no || port_no == ODPP_NONE) {
        unixctl_command_reply_error(conn, "bad port number");
        goto exit;
    }
    if (dp_netdev_lookup_port(dp, port_no)) {
        unixctl_command_reply_error(conn, "port number already in use");
        goto exit;
    }

    /* Remove old port. */
    cmap_remove(&dp->ports, &old_port->node, hash_port_no(old_port->port_no));
    ovsrcu_postpone(free, old_port);

    /* Insert new port (cmap semantics mean we cannot re-insert 'old_port'). */
    new_port = xmemdup(old_port, sizeof *old_port);
    new_port->port_no = port_no;
    cmap_insert(&dp->ports, &new_port->node, hash_port_no(port_no));

    seq_change(dp->port_seq);
    unixctl_command_reply(conn, NULL);

exit:
    ovs_mutex_unlock(&dp->port_mutex);
    dp_netdev_unref(dp);
}
static void
dpif_dummy_delete_port(struct unixctl_conn *conn, int argc OVS_UNUSED,
                       const char *argv[], void *aux OVS_UNUSED)
{
    struct dp_netdev_port *port;
    struct dp_netdev *dp;

    ovs_mutex_lock(&dp_netdev_mutex);
    dp = shash_find_data(&dp_netdevs, argv[1]);
    if (!dp || !dpif_netdev_class_is_dummy(dp->class)) {
        ovs_mutex_unlock(&dp_netdev_mutex);
        unixctl_command_reply_error(conn, "unknown datapath or not a dummy");
        return;
    }
    ovs_refcount_ref(&dp->ref_cnt);
    ovs_mutex_unlock(&dp_netdev_mutex);

    ovs_mutex_lock(&dp->port_mutex);
    if (get_port_by_name(dp, argv[2], &port)) {
        unixctl_command_reply_error(conn, "unknown port");
    } else if (port->port_no == ODPP_LOCAL) {
        unixctl_command_reply_error(conn, "can't delete local port");
    } else {
        do_del_port(dp, port);
        unixctl_command_reply(conn, NULL);
    }
    ovs_mutex_unlock(&dp->port_mutex);

    dp_netdev_unref(dp);
}
static void
dpif_dummy_register__(const char *type)
{
    struct dpif_class *class;

    class = xmalloc(sizeof *class);
    *class = dpif_netdev_class;
    class->type = xstrdup(type);
    dp_register_provider(class);
}
void
dpif_dummy_register(bool override)
{
    if (override) {
        struct sset types;
        const char *type;

        sset_init(&types);
        dp_enumerate_types(&types);
        SSET_FOR_EACH (type, &types) {
            if (!dp_unregister_provider(type)) {
                dpif_dummy_register__(type);
            }
        }
        sset_destroy(&types);
    }

    dpif_dummy_register__("dummy");

    unixctl_command_register("dpif-dummy/change-port-number",
                             "dp port new-number",
                             3, 3, dpif_dummy_change_port_number, NULL);
    unixctl_command_register("dpif-dummy/delete-port", "dp port",
                             2, 2, dpif_dummy_delete_port, NULL);
}
/* Datapath Classifier. */

/* A set of rules that all have the same fields wildcarded. */
struct dpcls_subtable {
    /* This field is only used by writers. */
    struct cmap_node cmap_node OVS_GUARDED; /* Within dpcls 'subtables_map'. */

    /* These fields are accessed by readers. */
    struct cmap rules;           /* Contains "struct dpcls_rule"s. */
    struct netdev_flow_key mask; /* Wildcards for fields (const). */
    /* 'mask' must be the last field, additional space is allocated here. */
};
/* Initializes 'cls' as a classifier that initially contains no classification
 * rules. */
static void
dpcls_init(struct dpcls *cls)
{
    cmap_init(&cls->subtables_map);
    pvector_init(&cls->subtables);
}
static void
dpcls_destroy_subtable(struct dpcls *cls, struct dpcls_subtable *subtable)
{
    pvector_remove(&cls->subtables, subtable);
    cmap_remove(&cls->subtables_map, &subtable->cmap_node,
                subtable->mask.hash);
    cmap_destroy(&subtable->rules);
    ovsrcu_postpone(free, subtable);
}
/* Destroys 'cls'.  Rules within 'cls', if any, are not freed; this is the
 * caller's responsibility.
 * May only be called after all the readers have been terminated. */
static void
dpcls_destroy(struct dpcls *cls)
{
    if (cls) {
        struct dpcls_subtable *subtable;

        CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
            dpcls_destroy_subtable(cls, subtable);
        }
        cmap_destroy(&cls->subtables_map);
        pvector_destroy(&cls->subtables);
    }
}
static struct dpcls_subtable *
dpcls_create_subtable(struct dpcls *cls, const struct netdev_flow_key *mask)
{
    struct dpcls_subtable *subtable;

    /* Need to add one. */
    subtable = xmalloc(sizeof *subtable
                       - sizeof subtable->mask.mf + mask->len);
    cmap_init(&subtable->rules);
    netdev_flow_key_clone(&subtable->mask, mask);
    cmap_insert(&cls->subtables_map, &subtable->cmap_node, mask->hash);
    pvector_insert(&cls->subtables, subtable, 0);
    pvector_publish(&cls->subtables);

    return subtable;
}
static inline struct dpcls_subtable *
dpcls_find_subtable(struct dpcls *cls, const struct netdev_flow_key *mask)
{
    struct dpcls_subtable *subtable;

    CMAP_FOR_EACH_WITH_HASH (subtable, cmap_node, mask->hash,
                             &cls->subtables_map) {
        if (netdev_flow_key_equal(&subtable->mask, mask)) {
            return subtable;
        }
    }
    return dpcls_create_subtable(cls, mask);
}
/* Inserts 'rule' into 'cls'. */
static void
dpcls_insert(struct dpcls *cls, struct dpcls_rule *rule,
             const struct netdev_flow_key *mask)
{
    struct dpcls_subtable *subtable = dpcls_find_subtable(cls, mask);

    rule->mask = &subtable->mask;
    cmap_insert(&subtable->rules, &rule->cmap_node, rule->flow.hash);
}
/* Removes 'rule' from 'cls', destroying the subtable if it becomes empty.
 * The rule itself is not freed. */
static void
dpcls_remove(struct dpcls *cls, struct dpcls_rule *rule)
{
    struct dpcls_subtable *subtable;

    ovs_assert(rule->mask);

    INIT_CONTAINER(subtable, rule->mask, mask);

    if (cmap_remove(&subtable->rules, &rule->cmap_node, rule->flow.hash)
        == 0) {
        dpcls_destroy_subtable(cls, subtable);
        pvector_publish(&cls->subtables);
    }
}
/* Returns true if 'target' satisfies 'key' in 'mask', that is, if for each
 * 1-bit in 'mask' the corresponding bits in 'key' and 'target' are the same.
 *
 * Note: 'key' and 'mask' have the same mask, and 'key' is already masked. */
static inline bool
dpcls_rule_matches_key(const struct dpcls_rule *rule,
                       const struct netdev_flow_key *target)
{
    const uint32_t *keyp = rule->flow.mf.inline_values;
    const uint32_t *maskp = rule->mask->mf.inline_values;
    uint32_t target_u32;

    NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(target_u32, target, rule->flow.mf.map) {
        if (OVS_UNLIKELY((target_u32 & *maskp++) != *keyp++)) {
            return false;
        }
    }
    return true;
}
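
/* Implementation note for dpcls_lookup() below: the keys that still need a
 * match are tracked in bitmaps ('maps'), one bit per key.  Subtables are
 * visited one at a time, and every hit clears the corresponding bit, so each
 * later subtable only hashes and looks up the keys that are still unmatched;
 * the function returns early once no bits remain set. */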
/* For each miniflow in 'keys' performs a classifier lookup, writing the
 * result into the corresponding slot in 'rules'.
 *
 * This function is optimized for use in the userspace datapath and therefore
 * does not implement a lot of features available in the standard
 * classifier_lookup() function.  Specifically, it does not implement
 * priorities, instead returning any rule which matches the flow.
 *
 * Returns true if all flows found a corresponding rule. */
static bool
dpcls_lookup(const struct dpcls *cls, const struct netdev_flow_key keys[],
             struct dpcls_rule **rules, const size_t cnt)
{
    /* The batch size 16 was experimentally found faster than 8 or 32. */
    typedef uint16_t map_type;
#define MAP_BITS (sizeof(map_type) * CHAR_BIT)

#if !defined(__CHECKER__) && !defined(_WIN32)
    const int N_MAPS = DIV_ROUND_UP(cnt, MAP_BITS);
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { N_MAPS = DIV_ROUND_UP(NETDEV_MAX_RX_BATCH, MAP_BITS) };
#endif
    map_type maps[N_MAPS];
    struct dpcls_subtable *subtable;

    memset(maps, 0xff, sizeof maps);
    if (cnt % MAP_BITS) {
        maps[N_MAPS - 1] >>= MAP_BITS - cnt % MAP_BITS; /* Clear extra bits. */
    }
    memset(rules, 0, cnt * sizeof *rules);

    PVECTOR_FOR_EACH (subtable, &cls->subtables) {
        const struct netdev_flow_key *mkeys = keys;
        struct dpcls_rule **mrules = rules;
        map_type remains = 0;
        int m;

        BUILD_ASSERT_DECL(sizeof remains == sizeof *maps);

        for (m = 0; m < N_MAPS; m++, mkeys += MAP_BITS, mrules += MAP_BITS) {
            uint32_t hashes[MAP_BITS];
            const struct cmap_node *nodes[MAP_BITS];
            unsigned long map = maps[m];
            int i;

            if (!map) {
                continue; /* Skip empty maps. */
            }

            /* Compute hashes for the remaining keys. */
            ULONG_FOR_EACH_1(i, map) {
                hashes[i] = netdev_flow_key_hash_in_mask(&mkeys[i],
                                                         &subtable->mask);
            }
            /* Lookup. */
            map = cmap_find_batch(&subtable->rules, map, hashes, nodes);
            /* Check results. */
            ULONG_FOR_EACH_1(i, map) {
                struct dpcls_rule *rule;

                CMAP_NODE_FOR_EACH (rule, cmap_node, nodes[i]) {
                    if (OVS_LIKELY(dpcls_rule_matches_key(rule, &mkeys[i]))) {
                        mrules[i] = rule;
                        goto next;
                    }
                }
                ULONG_SET0(map, i); /* Did not match. */
            next:
                ; /* Keep Sparse happy. */
            }
            maps[m] &= ~map; /* Clear the found rules. */
            remains |= maps[m];
        }
        if (!remains) {
            return true; /* All found. */
        }
    }
    return false; /* Some misses. */
}