/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "dpif-netdev.h"

#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/ioctl.h>

#include "dp-packet.h"
#include "dpif-provider.h"
#include "dynamic-string.h"
#include "fat-rwlock.h"
#include "meta-flow.h"
#include "netdev-dpdk.h"
#include "netdev-vport.h"
#include "odp-execute.h"
#include "ofp-print.h"
#include "poll-loop.h"
#include "tnl-arp-cache.h"
#include "openvswitch/vlog.h"
VLOG_DEFINE_THIS_MODULE(dpif_netdev);

#define FLOW_DUMP_MAX_BATCH 50
/* Use per thread recirc_depth to prevent recirculation loop. */
#define MAX_RECIRC_DEPTH 5
DEFINE_STATIC_PER_THREAD_DATA(uint32_t, recirc_depth, 0)

/* Configuration parameters. */
enum { MAX_FLOWS = 65536 };     /* Maximum number of flows in flow table. */

/* Protects against changes to 'dp_netdevs'. */
static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dp_netdev's. */
static struct shash dp_netdevs OVS_GUARDED_BY(dp_netdev_mutex)
    = SHASH_INITIALIZER(&dp_netdevs);

static struct vlog_rate_limit upcall_rl = VLOG_RATE_LIMIT_INIT(600, 600);
/* Stores a miniflow with inline values */
struct netdev_flow_key {
    uint32_t hash;      /* Hash function differs for different users. */
    uint32_t len;       /* Length of the following miniflow (incl. map). */
    struct miniflow mf;
    uint64_t buf[FLOW_MAX_PACKET_U64S - MINI_N_INLINE];
};
/* Exact match cache for frequently used flows
 *
 * The cache uses a 32-bit hash of the packet (which can be the RSS hash) to
 * search its entries for a miniflow that matches exactly the miniflow of the
 * packet. It stores the 'dpcls_rule' (rule) that matches the miniflow.
 *
 * A cache entry holds a reference to its 'dp_netdev_flow'.
 *
 * A miniflow with a given hash can be in one of EM_FLOW_HASH_SEGS different
 * entries. The 32-bit hash is split into EM_FLOW_HASH_SEGS values (each of
 * them is EM_FLOW_HASH_SHIFT bits wide and the remainder is thrown away). Each
 * value is the index of a cache entry where the miniflow could be.
 *
 * Thread-safety
 * =============
 *
 * Each pmd_thread has its own private exact match cache.
 * If dp_netdev_input is not called from a pmd thread, a mutex is used.
 */

#define EM_FLOW_HASH_SHIFT 10
#define EM_FLOW_HASH_ENTRIES (1u << EM_FLOW_HASH_SHIFT)
#define EM_FLOW_HASH_MASK (EM_FLOW_HASH_ENTRIES - 1)
#define EM_FLOW_HASH_SEGS 2

struct emc_entry {
    struct dp_netdev_flow *flow;
    struct netdev_flow_key key;   /* key.hash used for emc hash value. */
};

struct emc_cache {
    struct emc_entry entries[EM_FLOW_HASH_ENTRIES];
    int sweep_idx;                /* For emc_cache_slow_sweep(). */
};
/* Iterate in the exact match cache through every entry that might contain a
 * miniflow with hash 'HASH'. */
#define EMC_FOR_EACH_POS_WITH_HASH(EMC, CURRENT_ENTRY, HASH)                 \
    for (uint32_t i__ = 0, srch_hash__ = (HASH);                             \
         (CURRENT_ENTRY) = &(EMC)->entries[srch_hash__ & EM_FLOW_HASH_MASK], \
             i__ < EM_FLOW_HASH_SEGS;                                        \
         i__++, srch_hash__ >>= EM_FLOW_HASH_SHIFT)
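
/* Illustrative sketch, not part of the original code: a standalone rendering
 * of the slot selection that EMC_FOR_EACH_POS_WITH_HASH performs.  With
 * EM_FLOW_HASH_SHIFT of 10 and EM_FLOW_HASH_SEGS of 2, a 32-bit hash yields
 * two candidate indices, one from bits 0..9 and one from bits 10..19; the
 * remaining bits are ignored.  The helper name is hypothetical. */
static inline void
emc_candidate_slots_sketch(uint32_t hash, uint32_t slots[EM_FLOW_HASH_SEGS])
{
    for (int seg = 0; seg < EM_FLOW_HASH_SEGS; seg++) {
        slots[seg] = hash & EM_FLOW_HASH_MASK; /* Low EM_FLOW_HASH_SHIFT bits. */
        hash >>= EM_FLOW_HASH_SHIFT;           /* Move to the next segment. */
    }
}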
/* Simple non-wildcarding single-priority classifier. */
struct dpcls {
    struct cmap subtables_map;
    struct pvector subtables;
};
/* A rule to be inserted to the classifier. */
struct dpcls_rule {
    struct cmap_node cmap_node;   /* Within struct dpcls_subtable 'rules'. */
    struct netdev_flow_key *mask; /* Subtable's mask. */
    struct netdev_flow_key flow;  /* Matching key. */
    /* 'flow' must be the last field, additional space is allocated here. */
};
static void dpcls_init(struct dpcls *);
static void dpcls_destroy(struct dpcls *);
static void dpcls_insert(struct dpcls *, struct dpcls_rule *,
                         const struct netdev_flow_key *mask);
static void dpcls_remove(struct dpcls *, struct dpcls_rule *);
static bool dpcls_lookup(const struct dpcls *cls,
                         const struct netdev_flow_key keys[],
                         struct dpcls_rule **rules, size_t cnt);
/* Datapath based on the network device interface from netdev.h.
 *
 * Thread-safety
 * =============
 *
 * Some members, marked 'const', are immutable.  Accessing other members
 * requires synchronization, as noted in more detail below.
 *
 * Acquisition order is, from outermost to innermost:
 *
 *    dp_netdev_mutex (global)
 *    port_mutex
 */
struct dp_netdev {
    const struct dpif_class *const class;
    const char *const name;
    struct dpif *dpif;
    struct ovs_refcount ref_cnt;
    atomic_flag destroyed;

    /* Ports.
     *
     * Protected by RCU.  Take the mutex to add or remove ports. */
    struct ovs_mutex port_mutex;
    struct cmap ports;
    struct seq *port_seq;       /* Incremented whenever a port changes. */

    /* Protects access to ofproto-dpif-upcall interface during revalidator
     * thread synchronization. */
    struct fat_rwlock upcall_rwlock;
    upcall_callback *upcall_cb;  /* Callback function for executing upcalls. */
    void *upcall_aux;

    /* Stores all 'struct dp_netdev_pmd_thread's. */
    struct cmap poll_threads;
    /* Protects the access of the 'struct dp_netdev_pmd_thread'
     * instance for non-pmd thread. */
    struct ovs_mutex non_pmd_mutex;

    /* Each pmd thread will store its pointer to
     * 'struct dp_netdev_pmd_thread' in 'per_pmd_key'. */
    ovsthread_key_t per_pmd_key;

    /* Number of rx queues for each dpdk interface and the cpu mask
     * for pin of pmd threads. */
    size_t n_dpdk_rxqs;
    char *pmd_cmask;
    uint64_t last_tnl_conf_seq;
};
static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp,
                                                    odp_port_t);

enum dp_stat_type {
    DP_STAT_HIT,                /* Packets that matched in the flow table. */
    DP_STAT_MISS,               /* Packets that did not match. */
    DP_STAT_LOST,               /* Packets not passed up to the client. */
    DP_N_STATS
};
/* A port in a netdev-based datapath. */
struct dp_netdev_port {
    struct cmap_node node;      /* Node in dp_netdev's 'ports'. */
    odp_port_t port_no;
    struct netdev *netdev;
    struct netdev_saved_flags *sf;
    struct netdev_rxq **rxq;
    struct ovs_refcount ref_cnt;
    char *type;                 /* Port type as requested by user. */
};
/* Contained by struct dp_netdev_flow's 'stats' member.  */
struct dp_netdev_flow_stats {
    atomic_llong used;             /* Last used time, in monotonic msecs. */
    atomic_ullong packet_count;    /* Number of packets matched. */
    atomic_ullong byte_count;      /* Number of bytes matched. */
    atomic_uint16_t tcp_flags;     /* Bitwise-OR of seen tcp_flags values. */
};
/* A flow in 'dp_netdev_pmd_thread's 'flow_table'.
 *
 * Thread-safety
 * =============
 *
 * Except near the beginning or ending of its lifespan, rule 'rule' belongs to
 * its pmd thread's classifier.  The text below calls this classifier 'cls'.
 *
 * Motivation
 * ----------
 *
 * The thread safety rules described here for "struct dp_netdev_flow" are
 * motivated by two goals:
 *
 *    - Prevent threads that read members of "struct dp_netdev_flow" from
 *      reading bad data due to changes by some thread concurrently modifying
 *      those members.
 *
 *    - Prevent two threads making changes to members of a given "struct
 *      dp_netdev_flow" from interfering with each other.
 *
 * Rules
 * -----
 *
 * A flow 'flow' may be accessed without a risk of being freed during an RCU
 * grace period.  Code that needs to hold onto a flow for a while
 * should try incrementing 'flow->ref_cnt' with dp_netdev_flow_ref().
 *
 * 'flow->ref_cnt' protects 'flow' from being freed.  It doesn't protect the
 * flow from being deleted from 'cls' and it doesn't protect members of 'flow'
 * from modification.
 *
 * Some members, marked 'const', are immutable.  Accessing other members
 * requires synchronization, as noted in more detail below.
 */
struct dp_netdev_flow {
    bool dead;

    /* Hash table index by unmasked flow. */
    const struct cmap_node node; /* In owning dp_netdev_pmd_thread's */
                                 /* 'flow_table'. */
    const ovs_u128 ufid;         /* Unique flow identifier. */
    const struct flow flow;      /* Unmasked flow that created this entry. */
    const int pmd_id;            /* The 'core_id' of pmd thread owning this */
                                 /* flow. */

    /* Number of references.
     * The classifier owns one reference.
     * Any thread trying to keep a rule from being freed should hold its own
     * reference. */
    struct ovs_refcount ref_cnt;

    /* Statistics. */
    struct dp_netdev_flow_stats stats;

    /* Actions. */
    OVSRCU_TYPE(struct dp_netdev_actions *) actions;

    /* Packet classification. */
    struct dpcls_rule cr;        /* In owning dp_netdev's 'cls'. */
    /* 'cr' must be the last member. */
};
static void dp_netdev_flow_unref(struct dp_netdev_flow *);
static bool dp_netdev_flow_ref(struct dp_netdev_flow *);
static int dpif_netdev_flow_from_nlattrs(const struct nlattr *, uint32_t,
                                         struct flow *);
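
/* Illustrative sketch, not part of the original code: the reference-holding
 * pattern described in the comment above struct dp_netdev_flow.  The helper
 * name is hypothetical. */
static inline struct dp_netdev_flow *
dp_netdev_flow_try_hold_sketch(struct dp_netdev_flow *flow)
{
    /* dp_netdev_flow_ref() returns false if 'flow' is already being
     * destroyed; in that case the caller must not keep the pointer. */
    return dp_netdev_flow_ref(flow) ? flow : NULL;
}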
/* A set of datapath actions within a "struct dp_netdev_flow".
 *
 * Thread-safety
 * =============
 *
 * A struct dp_netdev_actions 'actions' is protected with RCU. */
struct dp_netdev_actions {
    /* These members are immutable: they do not change during the struct's
     * lifetime. */
    struct nlattr *actions;     /* Sequence of OVS_ACTION_ATTR_* attributes. */
    unsigned int size;          /* Size of 'actions', in bytes. */
};

struct dp_netdev_actions *dp_netdev_actions_create(const struct nlattr *,
                                                   size_t);
struct dp_netdev_actions *dp_netdev_flow_get_actions(
    const struct dp_netdev_flow *);
static void dp_netdev_actions_free(struct dp_netdev_actions *);
/* Contained by struct dp_netdev_pmd_thread's 'stats' member.  */
struct dp_netdev_pmd_stats {
    /* Indexed by DP_STAT_*. */
    atomic_ullong n[DP_N_STATS];
};
/* PMD: Poll mode drivers.  PMD accesses devices via polling to eliminate
 * the performance overhead of interrupt processing.  Therefore netdev can
 * not implement rx-wait for these devices.  dpif-netdev needs to poll
 * these devices to check for received packets.  pmd-thread does polling for
 * devices assigned to itself.
 *
 * DPDK uses PMD for accessing the NIC.
 *
 * Note, instance with cpu core id NON_PMD_CORE_ID will be reserved for
 * I/O of all non-pmd threads.  There will be no actual thread created
 * for the instance.
 *
 * Each struct has its own flow table and classifier.  Packets received
 * from managed ports are looked up in the corresponding pmd thread's
 * flow table, and are executed with the found actions. */
struct dp_netdev_pmd_thread {
    struct dp_netdev *dp;
    struct ovs_refcount ref_cnt;    /* Every reference must be refcount'ed. */
    struct cmap_node node;          /* In 'dp->poll_threads'. */

    pthread_cond_t cond;            /* For synchronizing pmd thread reload. */
    struct ovs_mutex cond_mutex;    /* Mutex for condition variable. */

    /* Per thread exact-match cache.  Note, the instance for cpu core
     * NON_PMD_CORE_ID can be accessed by multiple threads, and thusly
     * need to be protected (e.g. by 'dp_netdev_mutex').  All other
     * instances will only be accessed by its own pmd thread. */
    struct emc_cache flow_cache;

    /* Classifier and Flow-Table.
     *
     * Writers of 'flow_table' must take the 'flow_mutex'.  Corresponding
     * changes to 'cls' must be made while still holding the 'flow_mutex'. */
    struct ovs_mutex flow_mutex;
    struct dpcls cls;
    struct cmap flow_table OVS_GUARDED; /* Flow table. */

    /* Statistics. */
    struct dp_netdev_pmd_stats stats;

    struct latch exit_latch;        /* For terminating the pmd thread. */
    atomic_uint change_seq;         /* For reloading pmd ports. */
    int index;                      /* Idx of this pmd thread among pmd */
                                    /* threads on same numa node. */
    int core_id;                    /* CPU core id of this pmd thread. */
    int numa_id;                    /* numa node id of this pmd thread. */
};

#define PMD_INITIAL_SEQ 1
/* Interface to netdev-based datapath. */
struct dpif_netdev {
    struct dpif dpif;
    struct dp_netdev *dp;
    uint64_t last_port_seq;
};
static int get_port_by_number(struct dp_netdev *dp, odp_port_t port_no,
                              struct dp_netdev_port **portp);
static int get_port_by_name(struct dp_netdev *dp, const char *devname,
                            struct dp_netdev_port **portp);
static void dp_netdev_free(struct dp_netdev *)
    OVS_REQUIRES(dp_netdev_mutex);
static int do_add_port(struct dp_netdev *dp, const char *devname,
                       const char *type, odp_port_t port_no)
    OVS_REQUIRES(dp->port_mutex);
static void do_del_port(struct dp_netdev *dp, struct dp_netdev_port *)
    OVS_REQUIRES(dp->port_mutex);
static int dpif_netdev_open(const struct dpif_class *, const char *name,
                            bool create, struct dpif **);
static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
                                      struct dp_packet **, int c,
                                      bool may_steal,
                                      const struct nlattr *actions,
                                      size_t actions_len);
static void dp_netdev_input(struct dp_netdev_pmd_thread *,
                            struct dp_packet **, int cnt);

static void dp_netdev_disable_upcall(struct dp_netdev *);
void dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd,
                                    struct dp_netdev *dp, int index,
                                    int core_id, int numa_id);
static void dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_set_nonpmd(struct dp_netdev *dp);
static struct dp_netdev_pmd_thread *dp_netdev_get_pmd(struct dp_netdev *dp,
                                                      int core_id);
static struct dp_netdev_pmd_thread *
dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos);
static void dp_netdev_destroy_all_pmds(struct dp_netdev *dp);
static void dp_netdev_del_pmds_on_numa(struct dp_netdev *dp, int numa_id);
static void dp_netdev_set_pmds_on_numa(struct dp_netdev *dp, int numa_id);
static void dp_netdev_reset_pmd_threads(struct dp_netdev *dp);
static bool dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd);

static inline bool emc_entry_alive(struct emc_entry *ce);
static void emc_clear_entry(struct emc_entry *ce);
static void
emc_cache_init(struct emc_cache *flow_cache)
{
    int i;

    BUILD_ASSERT(offsetof(struct miniflow, inline_values) == sizeof(uint64_t));

    flow_cache->sweep_idx = 0;
    for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
        flow_cache->entries[i].flow = NULL;
        flow_cache->entries[i].key.hash = 0;
        flow_cache->entries[i].key.len
            = offsetof(struct miniflow, inline_values);
        miniflow_initialize(&flow_cache->entries[i].key.mf,
                            flow_cache->entries[i].key.buf);
    }
}
static void
emc_cache_uninit(struct emc_cache *flow_cache)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
        emc_clear_entry(&flow_cache->entries[i]);
    }
}
/* Check and clear dead flow references slowly (one entry at each
 * invocation).  */
static void
emc_cache_slow_sweep(struct emc_cache *flow_cache)
{
    struct emc_entry *entry = &flow_cache->entries[flow_cache->sweep_idx];

    if (!emc_entry_alive(entry)) {
        emc_clear_entry(entry);
    }
    flow_cache->sweep_idx = (flow_cache->sweep_idx + 1) & EM_FLOW_HASH_MASK;
}
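
/* Illustrative sketch, not part of the original code: a pmd thread's main
 * loop is expected to sweep one cache entry per iteration, keeping the
 * per-iteration cleanup cost constant.  The helper name is hypothetical. */
static inline void
pmd_sweep_once_sketch(struct dp_netdev_pmd_thread *pmd)
{
    emc_cache_slow_sweep(&pmd->flow_cache);
}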
static struct dpif_netdev *
dpif_netdev_cast(const struct dpif *dpif)
{
    ovs_assert(dpif->dpif_class->open == dpif_netdev_open);
    return CONTAINER_OF(dpif, struct dpif_netdev, dpif);
}
static struct dp_netdev *
get_dp_netdev(const struct dpif *dpif)
{
    return dpif_netdev_cast(dpif)->dp;
}
static int
dpif_netdev_enumerate(struct sset *all_dps,
                      const struct dpif_class *dpif_class)
{
    struct shash_node *node;

    ovs_mutex_lock(&dp_netdev_mutex);
    SHASH_FOR_EACH(node, &dp_netdevs) {
        struct dp_netdev *dp = node->data;
        if (dpif_class != dp->class) {
            /* 'dp_netdevs' contains both "netdev" and "dummy" dpifs.
             * If the class doesn't match, skip this dpif. */
            continue;
        }
        sset_add(all_dps, node->name);
    }
    ovs_mutex_unlock(&dp_netdev_mutex);

    return 0;
}
static bool
dpif_netdev_class_is_dummy(const struct dpif_class *class)
{
    return class != &dpif_netdev_class;
}

static const char *
dpif_netdev_port_open_type(const struct dpif_class *class, const char *type)
{
    return strcmp(type, "internal") ? type
                  : dpif_netdev_class_is_dummy(class) ? "dummy"
                  : "tap";
}
static struct dpif *
create_dpif_netdev(struct dp_netdev *dp)
{
    uint16_t netflow_id = hash_string(dp->name, 0);
    struct dpif_netdev *dpif;

    ovs_refcount_ref(&dp->ref_cnt);

    dpif = xmalloc(sizeof *dpif);
    dpif_init(&dpif->dpif, dp->class, dp->name, netflow_id >> 8, netflow_id);
    dpif->dp = dp;
    dpif->last_port_seq = seq_read(dp->port_seq);

    return &dpif->dpif;
}
/* Choose an unused, non-zero port number and return it on success.
 * Return ODPP_NONE on failure. */
static odp_port_t
choose_port(struct dp_netdev *dp, const char *name)
    OVS_REQUIRES(dp->port_mutex)
{
    uint32_t port_no;

    if (dp->class != &dpif_netdev_class) {
        const char *p;
        int start_no = 0;

        /* If the port name begins with "br", start the number search at
         * 100 to make writing tests easier. */
        if (!strncmp(name, "br", 2)) {
            start_no = 100;
        }

        /* If the port name contains a number, try to assign that port number.
         * This can make writing unit tests easier because port numbers are
         * predictable. */
        for (p = name; *p != '\0'; p++) {
            if (isdigit((unsigned char) *p)) {
                port_no = start_no + strtol(p, NULL, 10);
                if (port_no > 0 && port_no != odp_to_u32(ODPP_NONE)
                    && !dp_netdev_lookup_port(dp, u32_to_odp(port_no))) {
                    return u32_to_odp(port_no);
                }
                break;
            }
        }
    }

    for (port_no = 1; port_no <= UINT16_MAX; port_no++) {
        if (!dp_netdev_lookup_port(dp, u32_to_odp(port_no))) {
            return u32_to_odp(port_no);
        }
    }

    return ODPP_NONE;
}
static int
create_dp_netdev(const char *name, const struct dpif_class *class,
                 struct dp_netdev **dpp)
    OVS_REQUIRES(dp_netdev_mutex)
{
    struct dp_netdev *dp;
    int error;

    dp = xzalloc(sizeof *dp);
    shash_add(&dp_netdevs, name, dp);

    *CONST_CAST(const struct dpif_class **, &dp->class) = class;
    *CONST_CAST(const char **, &dp->name) = xstrdup(name);
    ovs_refcount_init(&dp->ref_cnt);
    atomic_flag_clear(&dp->destroyed);

    ovs_mutex_init(&dp->port_mutex);
    cmap_init(&dp->ports);
    dp->port_seq = seq_create();
    fat_rwlock_init(&dp->upcall_rwlock);

    /* Disable upcalls by default. */
    dp_netdev_disable_upcall(dp);
    dp->upcall_aux = NULL;
    dp->upcall_cb = NULL;

    cmap_init(&dp->poll_threads);
    ovs_mutex_init_recursive(&dp->non_pmd_mutex);
    ovsthread_key_create(&dp->per_pmd_key, NULL);

    /* Reserves the core NON_PMD_CORE_ID for all non-pmd threads. */
    ovs_numa_try_pin_core_specific(NON_PMD_CORE_ID);
    dp_netdev_set_nonpmd(dp);
    dp->n_dpdk_rxqs = NR_QUEUE;

    ovs_mutex_lock(&dp->port_mutex);
    error = do_add_port(dp, name, "internal", ODPP_LOCAL);
    ovs_mutex_unlock(&dp->port_mutex);
    if (error) {
        dp_netdev_free(dp);
        return error;
    }

    dp->last_tnl_conf_seq = seq_read(tnl_conf_seq);
    *dpp = dp;
    return 0;
}
static int
dpif_netdev_open(const struct dpif_class *class, const char *name,
                 bool create, struct dpif **dpifp)
{
    struct dp_netdev *dp;
    int error;

    ovs_mutex_lock(&dp_netdev_mutex);
    dp = shash_find_data(&dp_netdevs, name);
    if (!dp) {
        error = create ? create_dp_netdev(name, class, &dp) : ENODEV;
    } else {
        error = (dp->class != class ? EINVAL
                 : create ? EEXIST
                 : 0);
    }
    if (!error) {
        *dpifp = create_dpif_netdev(dp);
        dp->dpif = *dpifp;
    }
    ovs_mutex_unlock(&dp_netdev_mutex);

    return error;
}
static void
dp_netdev_destroy_upcall_lock(struct dp_netdev *dp)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    /* Check that upcalls are disabled, i.e. that the rwlock is taken */
    ovs_assert(fat_rwlock_tryrdlock(&dp->upcall_rwlock));

    /* Before freeing a lock we should release it */
    fat_rwlock_unlock(&dp->upcall_rwlock);
    fat_rwlock_destroy(&dp->upcall_rwlock);
}
/* Requires dp_netdev_mutex so that we can't get a new reference to 'dp'
 * through the 'dp_netdevs' shash while freeing 'dp'. */
static void
dp_netdev_free(struct dp_netdev *dp)
    OVS_REQUIRES(dp_netdev_mutex)
{
    struct dp_netdev_port *port;

    shash_find_and_delete(&dp_netdevs, dp->name);

    dp_netdev_destroy_all_pmds(dp);
    cmap_destroy(&dp->poll_threads);
    ovs_mutex_destroy(&dp->non_pmd_mutex);
    ovsthread_key_delete(dp->per_pmd_key);

    ovs_mutex_lock(&dp->port_mutex);
    CMAP_FOR_EACH (port, node, &dp->ports) {
        do_del_port(dp, port);
    }
    ovs_mutex_unlock(&dp->port_mutex);

    seq_destroy(dp->port_seq);
    cmap_destroy(&dp->ports);

    /* Upcalls must be disabled at this point */
    dp_netdev_destroy_upcall_lock(dp);

    free(dp->pmd_cmask);
    free(CONST_CAST(char *, dp->name));
    free(dp);
}
static void
dp_netdev_unref(struct dp_netdev *dp)
{
    if (dp) {
        /* Take dp_netdev_mutex so that, if dp->ref_cnt falls to zero, we can't
         * get a new reference to 'dp' through the 'dp_netdevs' shash. */
        ovs_mutex_lock(&dp_netdev_mutex);
        if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) {
            dp_netdev_free(dp);
        }
        ovs_mutex_unlock(&dp_netdev_mutex);
    }
}
static void
dpif_netdev_close(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    dp_netdev_unref(dp);
    free(dpif);
}

static int
dpif_netdev_destroy(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    if (!atomic_flag_test_and_set(&dp->destroyed)) {
        if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) {
            /* Can't happen: 'dpif' still owns a reference to 'dp'. */
            OVS_NOT_REACHED();
        }
    }

    return 0;
}
/* Add 'n' to the atomic variable 'var' non-atomically and using relaxed
 * load/store semantics.  While the increment is not atomic, the load and
 * store operations are, making it impossible to read inconsistent values.
 *
 * This is used to update thread local stats counters. */
static void
non_atomic_ullong_add(atomic_ullong *var, unsigned long long n)
{
    unsigned long long tmp;

    atomic_read_relaxed(var, &tmp);
    tmp += n;
    atomic_store_relaxed(var, tmp);
}
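
/* Illustrative sketch, not part of the original code: bumping a per-thread
 * datapath counter with the helper above.  Only the owning pmd thread writes
 * its own 'stats', so the non-atomic read-modify-write is safe here.  The
 * helper name is hypothetical. */
static inline void
pmd_count_hits_sketch(struct dp_netdev_pmd_thread *pmd, int cnt)
{
    non_atomic_ullong_add(&pmd->stats.n[DP_STAT_HIT], cnt);
}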
static int
dpif_netdev_get_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_pmd_thread *pmd;

    stats->n_flows = stats->n_hit = stats->n_missed = stats->n_lost = 0;
    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        unsigned long long n;
        stats->n_flows += cmap_count(&pmd->flow_table);

        atomic_read_relaxed(&pmd->stats.n[DP_STAT_HIT], &n);
        stats->n_hit += n;
        atomic_read_relaxed(&pmd->stats.n[DP_STAT_MISS], &n);
        stats->n_missed += n;
        atomic_read_relaxed(&pmd->stats.n[DP_STAT_LOST], &n);
        stats->n_lost += n;
    }
    stats->n_masks = UINT32_MAX;
    stats->n_mask_hit = UINT64_MAX;

    return 0;
}
static void
dp_netdev_reload_pmd__(struct dp_netdev_pmd_thread *pmd)
{
    int old_seq;

    if (pmd->core_id == NON_PMD_CORE_ID) {
        return;
    }

    ovs_mutex_lock(&pmd->cond_mutex);
    atomic_add_relaxed(&pmd->change_seq, 1, &old_seq);
    ovs_mutex_cond_wait(&pmd->cond, &pmd->cond_mutex);
    ovs_mutex_unlock(&pmd->cond_mutex);
}
/* Causes all pmd threads to reload their tx/rx devices.
 * Must be called after adding/removing ports. */
static void
dp_netdev_reload_pmds(struct dp_netdev *dp)
{
    struct dp_netdev_pmd_thread *pmd;

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        dp_netdev_reload_pmd__(pmd);
    }
}
static uint32_t
hash_port_no(odp_port_t port_no)
{
    return hash_int(odp_to_u32(port_no), 0);
}
822 do_add_port(struct dp_netdev
*dp
, const char *devname
, const char *type
,
824 OVS_REQUIRES(dp
->port_mutex
)
826 struct netdev_saved_flags
*sf
;
827 struct dp_netdev_port
*port
;
828 struct netdev
*netdev
;
829 enum netdev_flags flags
;
830 const char *open_type
;
834 /* XXX reject devices already in some dp_netdev. */
836 /* Open and validate network device. */
837 open_type
= dpif_netdev_port_open_type(dp
->class, type
);
838 error
= netdev_open(devname
, open_type
, &netdev
);
842 /* XXX reject non-Ethernet devices */
844 netdev_get_flags(netdev
, &flags
);
845 if (flags
& NETDEV_LOOPBACK
) {
846 VLOG_ERR("%s: cannot add a loopback device", devname
);
847 netdev_close(netdev
);
851 if (netdev_is_pmd(netdev
)) {
852 int n_cores
= ovs_numa_get_n_cores();
854 if (n_cores
== OVS_CORE_UNSPEC
) {
855 VLOG_ERR("%s, cannot get cpu core info", devname
);
858 /* There can only be ovs_numa_get_n_cores() pmd threads,
859 * so creates a txq for each. */
860 error
= netdev_set_multiq(netdev
, n_cores
, dp
->n_dpdk_rxqs
);
861 if (error
&& (error
!= EOPNOTSUPP
)) {
862 VLOG_ERR("%s, cannot set multiq", devname
);
866 port
= xzalloc(sizeof *port
);
867 port
->port_no
= port_no
;
868 port
->netdev
= netdev
;
869 port
->rxq
= xmalloc(sizeof *port
->rxq
* netdev_n_rxq(netdev
));
870 port
->type
= xstrdup(type
);
871 for (i
= 0; i
< netdev_n_rxq(netdev
); i
++) {
872 error
= netdev_rxq_open(netdev
, &port
->rxq
[i
], i
);
874 && !(error
== EOPNOTSUPP
&& dpif_netdev_class_is_dummy(dp
->class))) {
875 VLOG_ERR("%s: cannot receive packets on this network device (%s)",
876 devname
, ovs_strerror(errno
));
877 netdev_close(netdev
);
885 error
= netdev_turn_flags_on(netdev
, NETDEV_PROMISC
, &sf
);
887 for (i
= 0; i
< netdev_n_rxq(netdev
); i
++) {
888 netdev_rxq_close(port
->rxq
[i
]);
890 netdev_close(netdev
);
898 ovs_refcount_init(&port
->ref_cnt
);
899 cmap_insert(&dp
->ports
, &port
->node
, hash_port_no(port_no
));
901 if (netdev_is_pmd(netdev
)) {
902 dp_netdev_set_pmds_on_numa(dp
, netdev_get_numa_id(netdev
));
903 dp_netdev_reload_pmds(dp
);
905 seq_change(dp
->port_seq
);
911 dpif_netdev_port_add(struct dpif
*dpif
, struct netdev
*netdev
,
912 odp_port_t
*port_nop
)
914 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
915 char namebuf
[NETDEV_VPORT_NAME_BUFSIZE
];
916 const char *dpif_port
;
920 ovs_mutex_lock(&dp
->port_mutex
);
921 dpif_port
= netdev_vport_get_dpif_port(netdev
, namebuf
, sizeof namebuf
);
922 if (*port_nop
!= ODPP_NONE
) {
924 error
= dp_netdev_lookup_port(dp
, *port_nop
) ? EBUSY
: 0;
926 port_no
= choose_port(dp
, dpif_port
);
927 error
= port_no
== ODPP_NONE
? EFBIG
: 0;
931 error
= do_add_port(dp
, dpif_port
, netdev_get_type(netdev
), port_no
);
933 ovs_mutex_unlock(&dp
->port_mutex
);
939 dpif_netdev_port_del(struct dpif
*dpif
, odp_port_t port_no
)
941 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
944 ovs_mutex_lock(&dp
->port_mutex
);
945 if (port_no
== ODPP_LOCAL
) {
948 struct dp_netdev_port
*port
;
950 error
= get_port_by_number(dp
, port_no
, &port
);
952 do_del_port(dp
, port
);
955 ovs_mutex_unlock(&dp
->port_mutex
);
961 is_valid_port_number(odp_port_t port_no
)
963 return port_no
!= ODPP_NONE
;
966 static struct dp_netdev_port
*
967 dp_netdev_lookup_port(const struct dp_netdev
*dp
, odp_port_t port_no
)
969 struct dp_netdev_port
*port
;
971 CMAP_FOR_EACH_WITH_HASH (port
, node
, hash_port_no(port_no
), &dp
->ports
) {
972 if (port
->port_no
== port_no
) {
980 get_port_by_number(struct dp_netdev
*dp
,
981 odp_port_t port_no
, struct dp_netdev_port
**portp
)
983 if (!is_valid_port_number(port_no
)) {
987 *portp
= dp_netdev_lookup_port(dp
, port_no
);
988 return *portp
? 0 : ENOENT
;
993 port_ref(struct dp_netdev_port
*port
)
996 ovs_refcount_ref(&port
->ref_cnt
);
1001 port_try_ref(struct dp_netdev_port
*port
)
1004 return ovs_refcount_try_ref_rcu(&port
->ref_cnt
);
1011 port_unref(struct dp_netdev_port
*port
)
1013 if (port
&& ovs_refcount_unref_relaxed(&port
->ref_cnt
) == 1) {
1014 int n_rxq
= netdev_n_rxq(port
->netdev
);
1017 netdev_close(port
->netdev
);
1018 netdev_restore_flags(port
->sf
);
1020 for (i
= 0; i
< n_rxq
; i
++) {
1021 netdev_rxq_close(port
->rxq
[i
]);
1030 get_port_by_name(struct dp_netdev
*dp
,
1031 const char *devname
, struct dp_netdev_port
**portp
)
1032 OVS_REQUIRES(dp
->port_mutex
)
1034 struct dp_netdev_port
*port
;
1036 CMAP_FOR_EACH (port
, node
, &dp
->ports
) {
1037 if (!strcmp(netdev_get_name(port
->netdev
), devname
)) {
1046 get_n_pmd_threads_on_numa(struct dp_netdev
*dp
, int numa_id
)
1048 struct dp_netdev_pmd_thread
*pmd
;
1051 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
1052 if (pmd
->numa_id
== numa_id
) {
1060 /* Returns 'true' if there is a port with pmd netdev and the netdev
1061 * is on numa node 'numa_id'. */
1063 has_pmd_port_for_numa(struct dp_netdev
*dp
, int numa_id
)
1065 struct dp_netdev_port
*port
;
1067 CMAP_FOR_EACH (port
, node
, &dp
->ports
) {
1068 if (netdev_is_pmd(port
->netdev
)
1069 && netdev_get_numa_id(port
->netdev
) == numa_id
) {
1079 do_del_port(struct dp_netdev
*dp
, struct dp_netdev_port
*port
)
1080 OVS_REQUIRES(dp
->port_mutex
)
1082 cmap_remove(&dp
->ports
, &port
->node
, hash_odp_port(port
->port_no
));
1083 seq_change(dp
->port_seq
);
1084 if (netdev_is_pmd(port
->netdev
)) {
1085 int numa_id
= netdev_get_numa_id(port
->netdev
);
1087 /* If there is no netdev on the numa node, deletes the pmd threads
1088 * for that numa. Else, just reloads the queues. */
1089 if (!has_pmd_port_for_numa(dp
, numa_id
)) {
1090 dp_netdev_del_pmds_on_numa(dp
, numa_id
);
1092 dp_netdev_reload_pmds(dp
);
1099 answer_port_query(const struct dp_netdev_port
*port
,
1100 struct dpif_port
*dpif_port
)
1102 dpif_port
->name
= xstrdup(netdev_get_name(port
->netdev
));
1103 dpif_port
->type
= xstrdup(port
->type
);
1104 dpif_port
->port_no
= port
->port_no
;
1108 dpif_netdev_port_query_by_number(const struct dpif
*dpif
, odp_port_t port_no
,
1109 struct dpif_port
*dpif_port
)
1111 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1112 struct dp_netdev_port
*port
;
1115 error
= get_port_by_number(dp
, port_no
, &port
);
1116 if (!error
&& dpif_port
) {
1117 answer_port_query(port
, dpif_port
);
1124 dpif_netdev_port_query_by_name(const struct dpif
*dpif
, const char *devname
,
1125 struct dpif_port
*dpif_port
)
1127 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1128 struct dp_netdev_port
*port
;
1131 ovs_mutex_lock(&dp
->port_mutex
);
1132 error
= get_port_by_name(dp
, devname
, &port
);
1133 if (!error
&& dpif_port
) {
1134 answer_port_query(port
, dpif_port
);
1136 ovs_mutex_unlock(&dp
->port_mutex
);
1142 dp_netdev_flow_free(struct dp_netdev_flow
*flow
)
1144 dp_netdev_actions_free(dp_netdev_flow_get_actions(flow
));
1148 static void dp_netdev_flow_unref(struct dp_netdev_flow
*flow
)
1150 if (ovs_refcount_unref_relaxed(&flow
->ref_cnt
) == 1) {
1151 ovsrcu_postpone(dp_netdev_flow_free
, flow
);
1156 dp_netdev_flow_hash(const ovs_u128
*ufid
)
1158 return ufid
->u32
[0];
1162 dp_netdev_pmd_remove_flow(struct dp_netdev_pmd_thread
*pmd
,
1163 struct dp_netdev_flow
*flow
)
1164 OVS_REQUIRES(pmd
->flow_mutex
)
1166 struct cmap_node
*node
= CONST_CAST(struct cmap_node
*, &flow
->node
);
1168 dpcls_remove(&pmd
->cls
, &flow
->cr
);
1169 cmap_remove(&pmd
->flow_table
, node
, dp_netdev_flow_hash(&flow
->ufid
));
1172 dp_netdev_flow_unref(flow
);
1176 dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread
*pmd
)
1178 struct dp_netdev_flow
*netdev_flow
;
1180 ovs_mutex_lock(&pmd
->flow_mutex
);
1181 CMAP_FOR_EACH (netdev_flow
, node
, &pmd
->flow_table
) {
1182 dp_netdev_pmd_remove_flow(pmd
, netdev_flow
);
1184 ovs_mutex_unlock(&pmd
->flow_mutex
);
1188 dpif_netdev_flow_flush(struct dpif
*dpif
)
1190 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1191 struct dp_netdev_pmd_thread
*pmd
;
1193 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
1194 dp_netdev_pmd_flow_flush(pmd
);
1200 struct dp_netdev_port_state
{
1201 struct cmap_position position
;
1206 dpif_netdev_port_dump_start(const struct dpif
*dpif OVS_UNUSED
, void **statep
)
1208 *statep
= xzalloc(sizeof(struct dp_netdev_port_state
));
1213 dpif_netdev_port_dump_next(const struct dpif
*dpif
, void *state_
,
1214 struct dpif_port
*dpif_port
)
1216 struct dp_netdev_port_state
*state
= state_
;
1217 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1218 struct cmap_node
*node
;
1221 node
= cmap_next_position(&dp
->ports
, &state
->position
);
1223 struct dp_netdev_port
*port
;
1225 port
= CONTAINER_OF(node
, struct dp_netdev_port
, node
);
1228 state
->name
= xstrdup(netdev_get_name(port
->netdev
));
1229 dpif_port
->name
= state
->name
;
1230 dpif_port
->type
= port
->type
;
1231 dpif_port
->port_no
= port
->port_no
;
1242 dpif_netdev_port_dump_done(const struct dpif
*dpif OVS_UNUSED
, void *state_
)
1244 struct dp_netdev_port_state
*state
= state_
;
1251 dpif_netdev_port_poll(const struct dpif
*dpif_
, char **devnamep OVS_UNUSED
)
1253 struct dpif_netdev
*dpif
= dpif_netdev_cast(dpif_
);
1254 uint64_t new_port_seq
;
1257 new_port_seq
= seq_read(dpif
->dp
->port_seq
);
1258 if (dpif
->last_port_seq
!= new_port_seq
) {
1259 dpif
->last_port_seq
= new_port_seq
;
1269 dpif_netdev_port_poll_wait(const struct dpif
*dpif_
)
1271 struct dpif_netdev
*dpif
= dpif_netdev_cast(dpif_
);
1273 seq_wait(dpif
->dp
->port_seq
, dpif
->last_port_seq
);
1276 static struct dp_netdev_flow
*
1277 dp_netdev_flow_cast(const struct dpcls_rule
*cr
)
1279 return cr
? CONTAINER_OF(cr
, struct dp_netdev_flow
, cr
) : NULL
;
1282 static bool dp_netdev_flow_ref(struct dp_netdev_flow
*flow
)
1284 return ovs_refcount_try_ref_rcu(&flow
->ref_cnt
);
1287 /* netdev_flow_key utilities.
1289 * netdev_flow_key is basically a miniflow. We use these functions
1290 * (netdev_flow_key_clone, netdev_flow_key_equal, ...) instead of the miniflow
1291 * functions (miniflow_clone_inline, miniflow_equal, ...), because:
1293 * - Since we are dealing exclusively with miniflows created by
1294 * miniflow_extract(), if the map is different the miniflow is different.
1295 * Therefore we can be faster by comparing the map and the miniflow in a
1297 * _ netdev_flow_key's miniflow has always inline values.
1298 * - These functions can be inlined by the compiler.
1300 * The following assertions make sure that what we're doing with miniflow is
1303 BUILD_ASSERT_DECL(offsetof(struct miniflow
, inline_values
)
1304 == sizeof(uint64_t));
1306 /* Given the number of bits set in the miniflow map, returns the size of the
1307 * 'netdev_flow_key.mf' */
1308 static inline uint32_t
1309 netdev_flow_key_size(uint32_t flow_u32s
)
1311 return offsetof(struct miniflow
, inline_values
) +
1312 MINIFLOW_VALUES_SIZE(flow_u32s
);
1316 netdev_flow_key_equal(const struct netdev_flow_key
*a
,
1317 const struct netdev_flow_key
*b
)
1319 /* 'b->len' may be not set yet. */
1320 return a
->hash
== b
->hash
&& !memcmp(&a
->mf
, &b
->mf
, a
->len
);
1323 /* Used to compare 'netdev_flow_key' in the exact match cache to a miniflow.
1324 * The maps are compared bitwise, so both 'key->mf' 'mf' must have been
1325 * generated by miniflow_extract. */
1327 netdev_flow_key_equal_mf(const struct netdev_flow_key
*key
,
1328 const struct miniflow
*mf
)
1330 return !memcmp(&key
->mf
, mf
, key
->len
);
1334 netdev_flow_key_clone(struct netdev_flow_key
*dst
,
1335 const struct netdev_flow_key
*src
)
1338 offsetof(struct netdev_flow_key
, mf
) + src
->len
);
1343 netdev_flow_key_from_flow(struct netdev_flow_key
*dst
,
1344 const struct flow
*src
)
1346 struct dp_packet packet
;
1347 uint64_t buf_stub
[512 / 8];
1349 miniflow_initialize(&dst
->mf
, dst
->buf
);
1351 dp_packet_use_stub(&packet
, buf_stub
, sizeof buf_stub
);
1352 pkt_metadata_from_flow(&packet
.md
, src
);
1353 flow_compose(&packet
, src
);
1354 miniflow_extract(&packet
, &dst
->mf
);
1355 dp_packet_uninit(&packet
);
1357 dst
->len
= netdev_flow_key_size(count_1bits(dst
->mf
.map
));
1358 dst
->hash
= 0; /* Not computed yet. */
1361 /* Initialize a netdev_flow_key 'mask' from 'match'. */
1363 netdev_flow_mask_init(struct netdev_flow_key
*mask
,
1364 const struct match
*match
)
1366 const uint64_t *mask_u64
= (const uint64_t *) &match
->wc
.masks
;
1367 uint64_t *dst
= mask
->mf
.inline_values
;
1368 uint64_t map
, mask_map
= 0;
1372 /* Only check masks that make sense for the flow. */
1373 map
= flow_wc_map(&match
->flow
);
1376 uint64_t rm1bit
= rightmost_1bit(map
);
1377 int i
= raw_ctz(map
);
1381 *dst
++ = mask_u64
[i
];
1382 hash
= hash_add64(hash
, mask_u64
[i
]);
1387 mask
->mf
.values_inline
= true;
1388 mask
->mf
.map
= mask_map
;
1390 hash
= hash_add64(hash
, mask_map
);
1392 n
= dst
- mask
->mf
.inline_values
;
1394 mask
->hash
= hash_finish(hash
, n
* 8);
1395 mask
->len
= netdev_flow_key_size(n
);
1398 /* Initializes 'dst' as a copy of 'src' masked with 'mask'. */
1400 netdev_flow_key_init_masked(struct netdev_flow_key
*dst
,
1401 const struct flow
*flow
,
1402 const struct netdev_flow_key
*mask
)
1404 uint64_t *dst_u64
= dst
->mf
.inline_values
;
1405 const uint64_t *mask_u64
= mask
->mf
.inline_values
;
1409 dst
->len
= mask
->len
;
1410 dst
->mf
.values_inline
= true;
1411 dst
->mf
.map
= mask
->mf
.map
;
1413 FLOW_FOR_EACH_IN_MAP(value
, flow
, mask
->mf
.map
) {
1414 *dst_u64
= value
& *mask_u64
++;
1415 hash
= hash_add64(hash
, *dst_u64
++);
1417 dst
->hash
= hash_finish(hash
, (dst_u64
- dst
->mf
.inline_values
) * 8);
1420 /* Iterate through all netdev_flow_key u64 values specified by 'MAP' */
1421 #define NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(VALUE, KEY, MAP) \
1422 for (struct mf_for_each_in_map_aux aux__ \
1423 = { (KEY)->mf.inline_values, (KEY)->mf.map, MAP }; \
1424 mf_get_next_in_map(&aux__, &(VALUE)); \
1427 /* Returns a hash value for the bits of 'key' where there are 1-bits in
1429 static inline uint32_t
1430 netdev_flow_key_hash_in_mask(const struct netdev_flow_key
*key
,
1431 const struct netdev_flow_key
*mask
)
1433 const uint64_t *p
= mask
->mf
.inline_values
;
1437 NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(key_u64
, key
, mask
->mf
.map
) {
1438 hash
= hash_add64(hash
, key_u64
& *p
++);
1441 return hash_finish(hash
, (p
- mask
->mf
.inline_values
) * 8);
1445 emc_entry_alive(struct emc_entry
*ce
)
1447 return ce
->flow
&& !ce
->flow
->dead
;
1451 emc_clear_entry(struct emc_entry
*ce
)
1454 dp_netdev_flow_unref(ce
->flow
);
1460 emc_change_entry(struct emc_entry
*ce
, struct dp_netdev_flow
*flow
,
1461 const struct netdev_flow_key
*key
)
1463 if (ce
->flow
!= flow
) {
1465 dp_netdev_flow_unref(ce
->flow
);
1468 if (dp_netdev_flow_ref(flow
)) {
1475 netdev_flow_key_clone(&ce
->key
, key
);
1480 emc_insert(struct emc_cache
*cache
, const struct netdev_flow_key
*key
,
1481 struct dp_netdev_flow
*flow
)
1483 struct emc_entry
*to_be_replaced
= NULL
;
1484 struct emc_entry
*current_entry
;
1486 EMC_FOR_EACH_POS_WITH_HASH(cache
, current_entry
, key
->hash
) {
1487 if (netdev_flow_key_equal(¤t_entry
->key
, key
)) {
1488 /* We found the entry with the 'mf' miniflow */
1489 emc_change_entry(current_entry
, flow
, NULL
);
1493 /* Replacement policy: put the flow in an empty (not alive) entry, or
1494 * in the first entry where it can be */
1496 || (emc_entry_alive(to_be_replaced
)
1497 && !emc_entry_alive(current_entry
))
1498 || current_entry
->key
.hash
< to_be_replaced
->key
.hash
) {
1499 to_be_replaced
= current_entry
;
1502 /* We didn't find the miniflow in the cache.
1503 * The 'to_be_replaced' entry is where the new flow will be stored */
1505 emc_change_entry(to_be_replaced
, flow
, key
);
1508 static inline struct dp_netdev_flow
*
1509 emc_lookup(struct emc_cache
*cache
, const struct netdev_flow_key
*key
)
1511 struct emc_entry
*current_entry
;
1513 EMC_FOR_EACH_POS_WITH_HASH(cache
, current_entry
, key
->hash
) {
1514 if (current_entry
->key
.hash
== key
->hash
1515 && emc_entry_alive(current_entry
)
1516 && netdev_flow_key_equal_mf(¤t_entry
->key
, &key
->mf
)) {
1518 /* We found the entry with the 'key->mf' miniflow */
1519 return current_entry
->flow
;
1526 static struct dp_netdev_flow
*
1527 dp_netdev_pmd_lookup_flow(const struct dp_netdev_pmd_thread
*pmd
,
1528 const struct netdev_flow_key
*key
)
1530 struct dp_netdev_flow
*netdev_flow
;
1531 struct dpcls_rule
*rule
;
1533 dpcls_lookup(&pmd
->cls
, key
, &rule
, 1);
1534 netdev_flow
= dp_netdev_flow_cast(rule
);
1539 static struct dp_netdev_flow
*
1540 dp_netdev_pmd_find_flow(const struct dp_netdev_pmd_thread
*pmd
,
1541 const ovs_u128
*ufidp
, const struct nlattr
*key
,
1544 struct dp_netdev_flow
*netdev_flow
;
1548 /* If a UFID is not provided, determine one based on the key. */
1549 if (!ufidp
&& key
&& key_len
1550 && !dpif_netdev_flow_from_nlattrs(key
, key_len
, &flow
)) {
1551 dpif_flow_hash(pmd
->dp
->dpif
, &flow
, sizeof flow
, &ufid
);
1556 CMAP_FOR_EACH_WITH_HASH (netdev_flow
, node
, dp_netdev_flow_hash(ufidp
),
1558 if (ovs_u128_equal(&netdev_flow
->ufid
, ufidp
)) {
1568 get_dpif_flow_stats(const struct dp_netdev_flow
*netdev_flow_
,
1569 struct dpif_flow_stats
*stats
)
1571 struct dp_netdev_flow
*netdev_flow
;
1572 unsigned long long n
;
1576 netdev_flow
= CONST_CAST(struct dp_netdev_flow
*, netdev_flow_
);
1578 atomic_read_relaxed(&netdev_flow
->stats
.packet_count
, &n
);
1579 stats
->n_packets
= n
;
1580 atomic_read_relaxed(&netdev_flow
->stats
.byte_count
, &n
);
1582 atomic_read_relaxed(&netdev_flow
->stats
.used
, &used
);
1584 atomic_read_relaxed(&netdev_flow
->stats
.tcp_flags
, &flags
);
1585 stats
->tcp_flags
= flags
;
1588 /* Converts to the dpif_flow format, using 'key_buf' and 'mask_buf' for
1589 * storing the netlink-formatted key/mask. 'key_buf' may be the same as
1590 * 'mask_buf'. Actions will be returned without copying, by relying on RCU to
1593 dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow
*netdev_flow
,
1594 struct ofpbuf
*key_buf
, struct ofpbuf
*mask_buf
,
1595 struct dpif_flow
*flow
, bool terse
)
1598 memset(flow
, 0, sizeof *flow
);
1600 struct flow_wildcards wc
;
1601 struct dp_netdev_actions
*actions
;
1604 miniflow_expand(&netdev_flow
->cr
.mask
->mf
, &wc
.masks
);
1607 offset
= key_buf
->size
;
1608 flow
->key
= ofpbuf_tail(key_buf
);
1609 odp_flow_key_from_flow(key_buf
, &netdev_flow
->flow
, &wc
.masks
,
1610 netdev_flow
->flow
.in_port
.odp_port
, true);
1611 flow
->key_len
= key_buf
->size
- offset
;
1614 offset
= mask_buf
->size
;
1615 flow
->mask
= ofpbuf_tail(mask_buf
);
1616 odp_flow_key_from_mask(mask_buf
, &wc
.masks
, &netdev_flow
->flow
,
1617 odp_to_u32(wc
.masks
.in_port
.odp_port
),
1619 flow
->mask_len
= mask_buf
->size
- offset
;
1622 actions
= dp_netdev_flow_get_actions(netdev_flow
);
1623 flow
->actions
= actions
->actions
;
1624 flow
->actions_len
= actions
->size
;
1627 flow
->ufid
= netdev_flow
->ufid
;
1628 flow
->ufid_present
= true;
1629 flow
->pmd_id
= netdev_flow
->pmd_id
;
1630 get_dpif_flow_stats(netdev_flow
, &flow
->stats
);
1634 dpif_netdev_mask_from_nlattrs(const struct nlattr
*key
, uint32_t key_len
,
1635 const struct nlattr
*mask_key
,
1636 uint32_t mask_key_len
, const struct flow
*flow
,
1640 enum odp_key_fitness fitness
;
1642 fitness
= odp_flow_key_to_mask(mask_key
, mask_key_len
, mask
, flow
);
1644 /* This should not happen: it indicates that
1645 * odp_flow_key_from_mask() and odp_flow_key_to_mask()
1646 * disagree on the acceptable form of a mask. Log the problem
1647 * as an error, with enough details to enable debugging. */
1648 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
1650 if (!VLOG_DROP_ERR(&rl
)) {
1654 odp_flow_format(key
, key_len
, mask_key
, mask_key_len
, NULL
, &s
,
1656 VLOG_ERR("internal error parsing flow mask %s (%s)",
1657 ds_cstr(&s
), odp_key_fitness_to_string(fitness
));
1664 enum mf_field_id id
;
1665 /* No mask key, unwildcard everything except fields whose
1666 * prerequisities are not met. */
1667 memset(mask
, 0x0, sizeof *mask
);
1669 for (id
= 0; id
< MFF_N_IDS
; ++id
) {
1670 /* Skip registers and metadata. */
1671 if (!(id
>= MFF_REG0
&& id
< MFF_REG0
+ FLOW_N_REGS
)
1672 && id
!= MFF_METADATA
) {
1673 const struct mf_field
*mf
= mf_from_id(id
);
1674 if (mf_are_prereqs_ok(mf
, flow
)) {
1675 mf_mask_field(mf
, mask
);
1681 /* Force unwildcard the in_port.
1683 * We need to do this even in the case where we unwildcard "everything"
1684 * above because "everything" only includes the 16-bit OpenFlow port number
1685 * mask->in_port.ofp_port, which only covers half of the 32-bit datapath
1686 * port number mask->in_port.odp_port. */
1687 mask
->in_port
.odp_port
= u32_to_odp(UINT32_MAX
);
1693 dpif_netdev_flow_from_nlattrs(const struct nlattr
*key
, uint32_t key_len
,
1698 if (odp_flow_key_to_flow(key
, key_len
, flow
)) {
1699 /* This should not happen: it indicates that odp_flow_key_from_flow()
1700 * and odp_flow_key_to_flow() disagree on the acceptable form of a
1701 * flow. Log the problem as an error, with enough details to enable
1703 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
1705 if (!VLOG_DROP_ERR(&rl
)) {
1709 odp_flow_format(key
, key_len
, NULL
, 0, NULL
, &s
, true);
1710 VLOG_ERR("internal error parsing flow key %s", ds_cstr(&s
));
1717 in_port
= flow
->in_port
.odp_port
;
1718 if (!is_valid_port_number(in_port
) && in_port
!= ODPP_NONE
) {
1726 dpif_netdev_flow_get(const struct dpif
*dpif
, const struct dpif_flow_get
*get
)
1728 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1729 struct dp_netdev_flow
*netdev_flow
;
1730 struct dp_netdev_pmd_thread
*pmd
;
1731 int pmd_id
= get
->pmd_id
== PMD_ID_NULL
? NON_PMD_CORE_ID
: get
->pmd_id
;
1734 pmd
= dp_netdev_get_pmd(dp
, pmd_id
);
1739 netdev_flow
= dp_netdev_pmd_find_flow(pmd
, get
->ufid
, get
->key
,
1742 dp_netdev_flow_to_dpif_flow(netdev_flow
, get
->buffer
, get
->buffer
,
1747 dp_netdev_pmd_unref(pmd
);
1753 static struct dp_netdev_flow
*
1754 dp_netdev_flow_add(struct dp_netdev_pmd_thread
*pmd
,
1755 struct match
*match
, const ovs_u128
*ufid
,
1756 const struct nlattr
*actions
, size_t actions_len
)
1757 OVS_REQUIRES(pmd
->flow_mutex
)
1759 struct dp_netdev_flow
*flow
;
1760 struct netdev_flow_key mask
;
1762 netdev_flow_mask_init(&mask
, match
);
1763 /* Make sure wc does not have metadata. */
1764 ovs_assert(!(mask
.mf
.map
& (MINIFLOW_MAP(metadata
) | MINIFLOW_MAP(regs
))));
1766 /* Do not allocate extra space. */
1767 flow
= xmalloc(sizeof *flow
- sizeof flow
->cr
.flow
.mf
+ mask
.len
);
1768 memset(&flow
->stats
, 0, sizeof flow
->stats
);
1770 *CONST_CAST(int *, &flow
->pmd_id
) = pmd
->core_id
;
1771 *CONST_CAST(struct flow
*, &flow
->flow
) = match
->flow
;
1772 *CONST_CAST(ovs_u128
*, &flow
->ufid
) = *ufid
;
1773 ovs_refcount_init(&flow
->ref_cnt
);
1774 ovsrcu_set(&flow
->actions
, dp_netdev_actions_create(actions
, actions_len
));
1776 netdev_flow_key_init_masked(&flow
->cr
.flow
, &match
->flow
, &mask
);
1777 dpcls_insert(&pmd
->cls
, &flow
->cr
, &mask
);
1779 cmap_insert(&pmd
->flow_table
, CONST_CAST(struct cmap_node
*, &flow
->node
),
1780 dp_netdev_flow_hash(&flow
->ufid
));
1782 if (OVS_UNLIKELY(VLOG_IS_DBG_ENABLED())) {
1784 struct ds ds
= DS_EMPTY_INITIALIZER
;
1786 match
.flow
= flow
->flow
;
1787 miniflow_expand(&flow
->cr
.mask
->mf
, &match
.wc
.masks
);
1789 ds_put_cstr(&ds
, "flow_add: ");
1790 odp_format_ufid(ufid
, &ds
);
1791 ds_put_cstr(&ds
, " ");
1792 match_format(&match
, &ds
, OFP_DEFAULT_PRIORITY
);
1793 ds_put_cstr(&ds
, ", actions:");
1794 format_odp_actions(&ds
, actions
, actions_len
);
1796 VLOG_DBG_RL(&upcall_rl
, "%s", ds_cstr(&ds
));
1805 dpif_netdev_flow_put(struct dpif
*dpif
, const struct dpif_flow_put
*put
)
1807 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1808 struct dp_netdev_flow
*netdev_flow
;
1809 struct netdev_flow_key key
;
1810 struct dp_netdev_pmd_thread
*pmd
;
1813 int pmd_id
= put
->pmd_id
== PMD_ID_NULL
? NON_PMD_CORE_ID
: put
->pmd_id
;
1816 error
= dpif_netdev_flow_from_nlattrs(put
->key
, put
->key_len
, &match
.flow
);
1820 error
= dpif_netdev_mask_from_nlattrs(put
->key
, put
->key_len
,
1821 put
->mask
, put
->mask_len
,
1822 &match
.flow
, &match
.wc
.masks
);
1827 pmd
= dp_netdev_get_pmd(dp
, pmd_id
);
1832 /* Must produce a netdev_flow_key for lookup.
1833 * This interface is no longer performance critical, since it is not used
1834 * for upcall processing any more. */
1835 netdev_flow_key_from_flow(&key
, &match
.flow
);
1840 dpif_flow_hash(dpif
, &match
.flow
, sizeof match
.flow
, &ufid
);
1843 ovs_mutex_lock(&pmd
->flow_mutex
);
1844 netdev_flow
= dp_netdev_pmd_lookup_flow(pmd
, &key
);
1846 if (put
->flags
& DPIF_FP_CREATE
) {
1847 if (cmap_count(&pmd
->flow_table
) < MAX_FLOWS
) {
1849 memset(put
->stats
, 0, sizeof *put
->stats
);
1851 dp_netdev_flow_add(pmd
, &match
, &ufid
, put
->actions
,
1861 if (put
->flags
& DPIF_FP_MODIFY
1862 && flow_equal(&match
.flow
, &netdev_flow
->flow
)) {
1863 struct dp_netdev_actions
*new_actions
;
1864 struct dp_netdev_actions
*old_actions
;
1866 new_actions
= dp_netdev_actions_create(put
->actions
,
1869 old_actions
= dp_netdev_flow_get_actions(netdev_flow
);
1870 ovsrcu_set(&netdev_flow
->actions
, new_actions
);
1873 get_dpif_flow_stats(netdev_flow
, put
->stats
);
1875 if (put
->flags
& DPIF_FP_ZERO_STATS
) {
1876 /* XXX: The userspace datapath uses thread local statistics
1877 * (for flows), which should be updated only by the owning
1878 * thread. Since we cannot write on stats memory here,
1879 * we choose not to support this flag. Please note:
1880 * - This feature is currently used only by dpctl commands with
1882 * - Should the need arise, this operation can be implemented
1883 * by keeping a base value (to be update here) for each
1884 * counter, and subtracting it before outputting the stats */
1888 ovsrcu_postpone(dp_netdev_actions_free
, old_actions
);
1889 } else if (put
->flags
& DPIF_FP_CREATE
) {
1892 /* Overlapping flow. */
1896 ovs_mutex_unlock(&pmd
->flow_mutex
);
1897 dp_netdev_pmd_unref(pmd
);
1903 dpif_netdev_flow_del(struct dpif
*dpif
, const struct dpif_flow_del
*del
)
1905 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1906 struct dp_netdev_flow
*netdev_flow
;
1907 struct dp_netdev_pmd_thread
*pmd
;
1908 int pmd_id
= del
->pmd_id
== PMD_ID_NULL
? NON_PMD_CORE_ID
: del
->pmd_id
;
1911 pmd
= dp_netdev_get_pmd(dp
, pmd_id
);
1916 ovs_mutex_lock(&pmd
->flow_mutex
);
1917 netdev_flow
= dp_netdev_pmd_find_flow(pmd
, del
->ufid
, del
->key
,
1921 get_dpif_flow_stats(netdev_flow
, del
->stats
);
1923 dp_netdev_pmd_remove_flow(pmd
, netdev_flow
);
1927 ovs_mutex_unlock(&pmd
->flow_mutex
);
1928 dp_netdev_pmd_unref(pmd
);
1933 struct dpif_netdev_flow_dump
{
1934 struct dpif_flow_dump up
;
1935 struct cmap_position poll_thread_pos
;
1936 struct cmap_position flow_pos
;
1937 struct dp_netdev_pmd_thread
*cur_pmd
;
1939 struct ovs_mutex mutex
;
1942 static struct dpif_netdev_flow_dump
*
1943 dpif_netdev_flow_dump_cast(struct dpif_flow_dump
*dump
)
1945 return CONTAINER_OF(dump
, struct dpif_netdev_flow_dump
, up
);
1948 static struct dpif_flow_dump
*
1949 dpif_netdev_flow_dump_create(const struct dpif
*dpif_
, bool terse
)
1951 struct dpif_netdev_flow_dump
*dump
;
1953 dump
= xzalloc(sizeof *dump
);
1954 dpif_flow_dump_init(&dump
->up
, dpif_
);
1955 dump
->up
.terse
= terse
;
1956 ovs_mutex_init(&dump
->mutex
);
1962 dpif_netdev_flow_dump_destroy(struct dpif_flow_dump
*dump_
)
1964 struct dpif_netdev_flow_dump
*dump
= dpif_netdev_flow_dump_cast(dump_
);
1966 ovs_mutex_destroy(&dump
->mutex
);
1971 struct dpif_netdev_flow_dump_thread
{
1972 struct dpif_flow_dump_thread up
;
1973 struct dpif_netdev_flow_dump
*dump
;
1974 struct odputil_keybuf keybuf
[FLOW_DUMP_MAX_BATCH
];
1975 struct odputil_keybuf maskbuf
[FLOW_DUMP_MAX_BATCH
];
1978 static struct dpif_netdev_flow_dump_thread
*
1979 dpif_netdev_flow_dump_thread_cast(struct dpif_flow_dump_thread
*thread
)
1981 return CONTAINER_OF(thread
, struct dpif_netdev_flow_dump_thread
, up
);
1984 static struct dpif_flow_dump_thread
*
1985 dpif_netdev_flow_dump_thread_create(struct dpif_flow_dump
*dump_
)
1987 struct dpif_netdev_flow_dump
*dump
= dpif_netdev_flow_dump_cast(dump_
);
1988 struct dpif_netdev_flow_dump_thread
*thread
;
1990 thread
= xmalloc(sizeof *thread
);
1991 dpif_flow_dump_thread_init(&thread
->up
, &dump
->up
);
1992 thread
->dump
= dump
;
1997 dpif_netdev_flow_dump_thread_destroy(struct dpif_flow_dump_thread
*thread_
)
1999 struct dpif_netdev_flow_dump_thread
*thread
2000 = dpif_netdev_flow_dump_thread_cast(thread_
);
2006 dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread
*thread_
,
2007 struct dpif_flow
*flows
, int max_flows
)
2009 struct dpif_netdev_flow_dump_thread
*thread
2010 = dpif_netdev_flow_dump_thread_cast(thread_
);
2011 struct dpif_netdev_flow_dump
*dump
= thread
->dump
;
2012 struct dp_netdev_flow
*netdev_flows
[FLOW_DUMP_MAX_BATCH
];
2016 ovs_mutex_lock(&dump
->mutex
);
2017 if (!dump
->status
) {
2018 struct dpif_netdev
*dpif
= dpif_netdev_cast(thread
->up
.dpif
);
2019 struct dp_netdev
*dp
= get_dp_netdev(&dpif
->dpif
);
2020 struct dp_netdev_pmd_thread
*pmd
= dump
->cur_pmd
;
2021 int flow_limit
= MIN(max_flows
, FLOW_DUMP_MAX_BATCH
);
2023 /* First call to dump_next(), extracts the first pmd thread.
2024 * If there is no pmd thread, returns immediately. */
2026 pmd
= dp_netdev_pmd_get_next(dp
, &dump
->poll_thread_pos
);
2028 ovs_mutex_unlock(&dump
->mutex
);
2035 for (n_flows
= 0; n_flows
< flow_limit
; n_flows
++) {
2036 struct cmap_node
*node
;
2038 node
= cmap_next_position(&pmd
->flow_table
, &dump
->flow_pos
);
2042 netdev_flows
[n_flows
] = CONTAINER_OF(node
,
2043 struct dp_netdev_flow
,
2046 /* When finishing dumping the current pmd thread, moves to
2048 if (n_flows
< flow_limit
) {
2049 memset(&dump
->flow_pos
, 0, sizeof dump
->flow_pos
);
2050 dp_netdev_pmd_unref(pmd
);
2051 pmd
= dp_netdev_pmd_get_next(dp
, &dump
->poll_thread_pos
);
2057 /* Keeps the reference to next caller. */
2058 dump
->cur_pmd
= pmd
;
2060 /* If the current dump is empty, do not exit the loop, since the
2061 * remaining pmds could have flows to be dumped. Just dumps again
2062 * on the new 'pmd'. */
2065 ovs_mutex_unlock(&dump
->mutex
);
2067 for (i
= 0; i
< n_flows
; i
++) {
2068 struct odputil_keybuf
*maskbuf
= &thread
->maskbuf
[i
];
2069 struct odputil_keybuf
*keybuf
= &thread
->keybuf
[i
];
2070 struct dp_netdev_flow
*netdev_flow
= netdev_flows
[i
];
2071 struct dpif_flow
*f
= &flows
[i
];
2072 struct ofpbuf key
, mask
;
2074 ofpbuf_use_stack(&key
, keybuf
, sizeof *keybuf
);
2075 ofpbuf_use_stack(&mask
, maskbuf
, sizeof *maskbuf
);
2076 dp_netdev_flow_to_dpif_flow(netdev_flow
, &key
, &mask
, f
,
2084 dpif_netdev_execute(struct dpif
*dpif
, struct dpif_execute
*execute
)
2085 OVS_NO_THREAD_SAFETY_ANALYSIS
2087 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
2088 struct dp_netdev_pmd_thread
*pmd
;
2089 struct dp_packet
*pp
;
2091 if (dp_packet_size(execute
->packet
) < ETH_HEADER_LEN
||
2092 dp_packet_size(execute
->packet
) > UINT16_MAX
) {
2096 /* Tries finding the 'pmd'. If NULL is returned, that means
2097 * the current thread is a non-pmd thread and should use
2098 * dp_netdev_get_pmd(dp, NON_PMD_CORE_ID). */
2099 pmd
= ovsthread_getspecific(dp
->per_pmd_key
);
2101 pmd
= dp_netdev_get_pmd(dp
, NON_PMD_CORE_ID
);
2104 /* If the current thread is non-pmd thread, acquires
2105 * the 'non_pmd_mutex'. */
2106 if (pmd
->core_id
== NON_PMD_CORE_ID
) {
2107 ovs_mutex_lock(&dp
->non_pmd_mutex
);
2108 ovs_mutex_lock(&dp
->port_mutex
);
2111 pp
= execute
->packet
;
2112 dp_netdev_execute_actions(pmd
, &pp
, 1, false, execute
->actions
,
2113 execute
->actions_len
);
2114 if (pmd
->core_id
== NON_PMD_CORE_ID
) {
2115 dp_netdev_pmd_unref(pmd
);
2116 ovs_mutex_unlock(&dp
->port_mutex
);
2117 ovs_mutex_unlock(&dp
->non_pmd_mutex
);
2124 dpif_netdev_operate(struct dpif
*dpif
, struct dpif_op
**ops
, size_t n_ops
)
2128 for (i
= 0; i
< n_ops
; i
++) {
2129 struct dpif_op
*op
= ops
[i
];
2132 case DPIF_OP_FLOW_PUT
:
2133 op
->error
= dpif_netdev_flow_put(dpif
, &op
->u
.flow_put
);
2136 case DPIF_OP_FLOW_DEL
:
2137 op
->error
= dpif_netdev_flow_del(dpif
, &op
->u
.flow_del
);
2140 case DPIF_OP_EXECUTE
:
2141 op
->error
= dpif_netdev_execute(dpif
, &op
->u
.execute
);
2144 case DPIF_OP_FLOW_GET
:
2145 op
->error
= dpif_netdev_flow_get(dpif
, &op
->u
.flow_get
);
2151 /* Returns true if the configuration for rx queues or cpu mask
2154 pmd_config_changed(const struct dp_netdev
*dp
, size_t rxqs
, const char *cmask
)
2156 if (dp
->n_dpdk_rxqs
!= rxqs
) {
2159 if (dp
->pmd_cmask
!= NULL
&& cmask
!= NULL
) {
2160 return strcmp(dp
->pmd_cmask
, cmask
);
2162 return (dp
->pmd_cmask
!= NULL
|| cmask
!= NULL
);
2167 /* Resets pmd threads if the configuration for 'rxq's or cpu mask changes. */
2169 dpif_netdev_pmd_set(struct dpif
*dpif
, unsigned int n_rxqs
, const char *cmask
)
2171 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
2173 if (pmd_config_changed(dp
, n_rxqs
, cmask
)) {
2174 struct dp_netdev_port
*port
;
2176 dp_netdev_destroy_all_pmds(dp
);
2178 CMAP_FOR_EACH (port
, node
, &dp
->ports
) {
2179 if (netdev_is_pmd(port
->netdev
)) {
2182 /* Closes the existing 'rxq's. */
2183 for (i
= 0; i
< netdev_n_rxq(port
->netdev
); i
++) {
2184 netdev_rxq_close(port
->rxq
[i
]);
2185 port
->rxq
[i
] = NULL
;
2188 /* Sets the new rx queue config. */
2189 err
= netdev_set_multiq(port
->netdev
, ovs_numa_get_n_cores(),
2191 if (err
&& (err
!= EOPNOTSUPP
)) {
2192 VLOG_ERR("Failed to set dpdk interface %s rx_queue to:"
2193 " %u", netdev_get_name(port
->netdev
),
2198 /* If the set_multiq() above succeeds, reopens the 'rxq's. */
2199 port
->rxq
= xrealloc(port
->rxq
, sizeof *port
->rxq
2200 * netdev_n_rxq(port
->netdev
));
2201 for (i
= 0; i
< netdev_n_rxq(port
->netdev
); i
++) {
2202 netdev_rxq_open(port
->netdev
, &port
->rxq
[i
], i
);
2206 dp
->n_dpdk_rxqs
= n_rxqs
;
2208 /* Reconfigures the cpu mask. */
2209 ovs_numa_set_cpu_mask(cmask
);
2210 free(dp
->pmd_cmask
);
2211 dp
->pmd_cmask
= cmask
? xstrdup(cmask
) : NULL
;
2213 /* Restores the non-pmd. */
2214 dp_netdev_set_nonpmd(dp
);
2215 /* Restores all pmd threads. */
2216 dp_netdev_reset_pmd_threads(dp
);
2223 dpif_netdev_queue_to_priority(const struct dpif
*dpif OVS_UNUSED
,
2224 uint32_t queue_id
, uint32_t *priority
)
2226 *priority
= queue_id
;
2231 /* Creates and returns a new 'struct dp_netdev_actions', with a reference count
2232 * of 1, whose actions are a copy of the 'ofpacts_len' bytes of
2234 struct dp_netdev_actions *
2235 dp_netdev_actions_create(const struct nlattr *actions, size_t size)
2237 struct dp_netdev_actions *netdev_actions;
2239 netdev_actions = xmalloc(sizeof *netdev_actions);
2240 netdev_actions->actions = xmemdup(actions, size);
2241 netdev_actions->size = size;
2243 return netdev_actions;
2246 struct dp_netdev_actions *
2247 dp_netdev_flow_get_actions(const struct dp_netdev_flow *flow)
2249 return ovsrcu_get(struct dp_netdev_actions *, &flow->actions);
2253 dp_netdev_actions_free(struct dp_netdev_actions *actions)
2255 free(actions->actions);
2261 dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
2262 struct dp_netdev_port *port,
2263 struct netdev_rxq *rxq)
2265 struct dp_packet *packets[NETDEV_MAX_RX_BATCH];
2268 error = netdev_rxq_recv(rxq, packets, &cnt);
2272 *recirc_depth_get() = 0;
2274 /* XXX: initialize md in netdev implementation. */
2275 for (i = 0; i < cnt; i++) {
2276 packets[i]->md = PKT_METADATA_INITIALIZER(port->port_no);
2278 dp_netdev_input(pmd, packets, cnt);
2279 } else if (error != EAGAIN && error != EOPNOTSUPP) {
2280 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2282 VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
2283 netdev_get_name(port->netdev), ovs_strerror(error));
2287 /* Returns true if the datapath flows need to be revalidated. */
2289 dpif_netdev_run(struct dpif *dpif)
2291 struct dp_netdev_port *port;
2292 struct dp_netdev *dp = get_dp_netdev(dpif);
2293 struct dp_netdev_pmd_thread *non_pmd = dp_netdev_get_pmd(dp,
2295 uint64_t new_tnl_seq;
2297 ovs_mutex_lock(&dp->non_pmd_mutex);
2298 CMAP_FOR_EACH (port, node, &dp->ports) {
2299 if (!netdev_is_pmd(port->netdev)) {
2302 for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
2303 dp_netdev_process_rxq_port(non_pmd, port, port->rxq[i]);
2307 ovs_mutex_unlock(&dp->non_pmd_mutex);
2308 dp_netdev_pmd_unref(non_pmd);
2310 tnl_arp_cache_run();
2311 new_tnl_seq = seq_read(tnl_conf_seq);
2313 if (dp->last_tnl_conf_seq != new_tnl_seq) {
2314 dp->last_tnl_conf_seq = new_tnl_seq;
2321 dpif_netdev_wait(struct dpif *dpif)
2323 struct dp_netdev_port *port;
2324 struct dp_netdev *dp = get_dp_netdev(dpif);
2326 ovs_mutex_lock(&dp_netdev_mutex);
2327 CMAP_FOR_EACH (port, node, &dp->ports) {
2328 if (!netdev_is_pmd(port->netdev)) {
2331 for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
2332 netdev_rxq_wait(port->rxq[i]);
2336 ovs_mutex_unlock(&dp_netdev_mutex);
2337 seq_wait(tnl_conf_seq, dp->last_tnl_conf_seq);
2341 struct dp_netdev_port *port;
2342 struct netdev_rxq *rx;
2346 pmd_load_queues(struct dp_netdev_pmd_thread *pmd,
2347 struct rxq_poll **ppoll_list, int poll_cnt)
2349 struct rxq_poll *poll_list = *ppoll_list;
2350 struct dp_netdev_port *port;
2351 int n_pmds_on_numa, index, i;
2353 /* Simple scheduler for netdev rx polling. */
2354 for (i = 0; i < poll_cnt; i++) {
2355 port_unref(poll_list[i].port);
2359 n_pmds_on_numa = get_n_pmd_threads_on_numa(pmd->dp, pmd->numa_id);
2362 CMAP_FOR_EACH (port, node, &pmd->dp->ports) {
2363 /* Calls port_try_ref() to prevent the main thread
2364 * from deleting the port. */
2365 if (port_try_ref(port)) {
2366 if (netdev_is_pmd(port->netdev)
2367 && netdev_get_numa_id(port->netdev) == pmd->numa_id) {
2370 for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
2371 if ((index % n_pmds_on_numa) == pmd->index) {
2372 poll_list = xrealloc(poll_list,
2373 sizeof *poll_list * (poll_cnt + 1));
2376 poll_list[poll_cnt].port = port;
2377 poll_list[poll_cnt].rx = port->rxq[i];
2383 /* Unrefs the port_try_ref(). */
2388 *ppoll_list = poll_list;
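/* For illustration (assuming 'index' above advances once per rx queue): with
 * two pmd threads on the numa node, indexes 0 and 1, and a pmd port with four
 * rx queues, the modulo test hands rxq 0 and rxq 2 to the pmd with index 0
 * and rxq 1 and rxq 3 to the pmd with index 1, so each queue ends up polled
 * by exactly one thread. */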
2393 pmd_thread_main(void *f_)
2395 struct dp_netdev_pmd_thread *pmd = f_;
2396 unsigned int lc = 0;
2397 struct rxq_poll *poll_list;
2398 unsigned int port_seq = PMD_INITIAL_SEQ;
2405 /* Stores the pmd thread's 'pmd' to 'per_pmd_key'. */
2406 ovsthread_setspecific(pmd->dp->per_pmd_key, pmd);
2407 pmd_thread_setaffinity_cpu(pmd->core_id);
2409 emc_cache_init(&pmd->flow_cache);
2410 poll_cnt = pmd_load_queues(pmd, &poll_list, poll_cnt);
2412 /* Signal here to make sure the pmd finishes
2413 * reloading the updated configuration. */
2414 dp_netdev_pmd_reload_done(pmd);
2419 for (i = 0; i < poll_cnt; i++) {
2420 dp_netdev_process_rxq_port(pmd, poll_list[i].port, poll_list[i].rx);
2428 emc_cache_slow_sweep(&pmd->flow_cache);
2431 atomic_read_relaxed(&pmd->change_seq, &seq);
2432 if (seq != port_seq) {
2439 emc_cache_uninit(&pmd->flow_cache);
2441 if (!latch_is_set(&pmd->exit_latch)) {
2445 for (i = 0; i < poll_cnt; i++) {
2446 port_unref(poll_list[i].port);
2449 dp_netdev_pmd_reload_done(pmd);
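/* The loop above alternates between polling every rx queue in 'poll_list'
 * and occasionally running emc_cache_slow_sweep(), so stale exact match
 * entries age out a few at a time instead of in one full scan.  When
 * 'change_seq' no longer equals 'port_seq' the thread reloads its queue list
 * and acknowledges through dp_netdev_pmd_reload_done(); it only terminates
 * once 'exit_latch' has been set. */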
2456 dp_netdev_disable_upcall(struct dp_netdev *dp)
2457 OVS_ACQUIRES(dp->upcall_rwlock)
2459 fat_rwlock_wrlock(&dp->upcall_rwlock);
2463 dpif_netdev_disable_upcall(struct dpif *dpif)
2464 OVS_NO_THREAD_SAFETY_ANALYSIS
2466 struct dp_netdev *dp = get_dp_netdev(dpif);
2467 dp_netdev_disable_upcall(dp);
2471 dp_netdev_enable_upcall(struct dp_netdev *dp)
2472 OVS_RELEASES(dp->upcall_rwlock)
2474 fat_rwlock_unlock(&dp->upcall_rwlock);
2478 dpif_netdev_enable_upcall(struct dpif *dpif)
2479 OVS_NO_THREAD_SAFETY_ANALYSIS
2481 struct dp_netdev *dp = get_dp_netdev(dpif);
2482 dp_netdev_enable_upcall(dp);
2486 dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd)
2488 ovs_mutex_lock(&pmd->cond_mutex);
2489 xpthread_cond_signal(&pmd->cond);
2490 ovs_mutex_unlock(&pmd->cond_mutex);
2493 /* Finds and refs the dp_netdev_pmd_thread on core 'core_id'. Returns
2494 * the pointer on success, otherwise NULL.
2496 * Caller must unref the returned reference. */
2497 static struct dp_netdev_pmd_thread *
2498 dp_netdev_get_pmd(struct dp_netdev *dp, int core_id)
2500 struct dp_netdev_pmd_thread *pmd;
2501 const struct cmap_node *pnode;
2503 pnode = cmap_find(&dp->poll_threads, hash_int(core_id, 0));
2507 pmd = CONTAINER_OF(pnode, struct dp_netdev_pmd_thread, node);
2509 return dp_netdev_pmd_try_ref(pmd) ? pmd : NULL;
2512 /* Sets the 'struct dp_netdev_pmd_thread' for non-pmd threads. */
2514 dp_netdev_set_nonpmd(struct dp_netdev *dp)
2516 struct dp_netdev_pmd_thread *non_pmd;
2518 non_pmd = xzalloc(sizeof *non_pmd);
2519 dp_netdev_configure_pmd(non_pmd, dp, 0, NON_PMD_CORE_ID,
2523 /* Caller must have a valid pointer to 'pmd'. */
2525 dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd)
2527 return ovs_refcount_try_ref_rcu(&pmd->ref_cnt);
2531 dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd)
2533 if (pmd && ovs_refcount_unref(&pmd->ref_cnt) == 1) {
2534 ovsrcu_postpone(dp_netdev_destroy_pmd, pmd);
2538 /* Given cmap position 'pos', tries to ref the next node. If try_ref()
2539 * fails, keeps checking the next node until reaching the end of the cmap.
2541 * Caller must unref the returned reference. */
2542 static struct dp_netdev_pmd_thread *
2543 dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos)
2545 struct dp_netdev_pmd_thread *next;
2548 struct cmap_node *node;
2550 node = cmap_next_position(&dp->poll_threads, pos);
2551 next = node ? CONTAINER_OF(node, struct dp_netdev_pmd_thread, node)
2553 } while (next && !dp_netdev_pmd_try_ref(next));
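/* A sketch of the intended usage of the refcounting helpers above:
 *
 *     struct dp_netdev_pmd_thread *pmd = dp_netdev_get_pmd(dp, core_id);
 *     if (pmd) {
 *         ...use 'pmd'...
 *         dp_netdev_pmd_unref(pmd);
 *     }
 *
 * The last unref does not free the struct directly: destruction is deferred
 * with ovsrcu_postpone(), so concurrent readers walking 'dp->poll_threads'
 * never see a freed pmd. */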
2558 /* Configures the 'pmd' based on the input argument. */
2560 dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
2561 int index, int core_id, int numa_id)
2565 pmd->core_id = core_id;
2566 pmd->numa_id = numa_id;
2568 ovs_refcount_init(&pmd->ref_cnt);
2569 latch_init(&pmd->exit_latch);
2570 atomic_init(&pmd->change_seq, PMD_INITIAL_SEQ);
2571 xpthread_cond_init(&pmd->cond, NULL);
2572 ovs_mutex_init(&pmd->cond_mutex);
2573 ovs_mutex_init(&pmd->flow_mutex);
2574 dpcls_init(&pmd->cls);
2575 cmap_init(&pmd->flow_table);
2576 /* Initializes the 'flow_cache' here since there is no
2577 * actual thread created for NON_PMD_CORE_ID. */
2578 if (core_id == NON_PMD_CORE_ID) {
2579 emc_cache_init(&pmd->flow_cache);
2581 cmap_insert(&dp->poll_threads, CONST_CAST(struct cmap_node *, &pmd->node),
2582 hash_int(core_id, 0));
2586 dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd)
2588 dp_netdev_pmd_flow_flush(pmd);
2589 dpcls_destroy(&pmd->cls);
2590 cmap_destroy(&pmd->flow_table);
2591 ovs_mutex_destroy(&pmd->flow_mutex);
2592 latch_destroy(&pmd->exit_latch);
2593 xpthread_cond_destroy(&pmd->cond);
2594 ovs_mutex_destroy(&pmd->cond_mutex);
2598 /* Stops the pmd thread, removes it from the 'dp->poll_threads',
2599 * and unrefs the struct. */
2601 dp_netdev_del_pmd(struct dp_netdev_pmd_thread *pmd)
2603 /* Uninitializes the 'flow_cache' here since there is
2604 * no actual thread to uninit it for NON_PMD_CORE_ID. */
2605 if (pmd->core_id == NON_PMD_CORE_ID) {
2606 emc_cache_uninit(&pmd->flow_cache);
2608 latch_set(&pmd->exit_latch);
2609 dp_netdev_reload_pmd__(pmd);
2610 ovs_numa_unpin_core(pmd->core_id);
2611 xpthread_join(pmd->thread, NULL);
2613 cmap_remove(&pmd->dp->poll_threads, &pmd->node, hash_int(pmd->core_id, 0));
2614 dp_netdev_pmd_unref(pmd);
2617 /* Destroys all pmd threads. */
2619 dp_netdev_destroy_all_pmds(struct dp_netdev *dp)
2621 struct dp_netdev_pmd_thread *pmd;
2623 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
2624 dp_netdev_del_pmd(pmd);
2628 /* Deletes all pmd threads on numa node 'numa_id'. */
2630 dp_netdev_del_pmds_on_numa(struct dp_netdev *dp, int numa_id)
2632 struct dp_netdev_pmd_thread *pmd;
2634 CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
2635 if (pmd->numa_id == numa_id) {
2636 dp_netdev_del_pmd(pmd);
2641 /* Checks the numa node id of 'netdev' and starts pmd threads for
2644 dp_netdev_set_pmds_on_numa(struct dp_netdev *dp, int numa_id)
2648 if (!ovs_numa_numa_id_is_valid(numa_id)) {
2649 VLOG_ERR("Cannot create pmd threads due to numa id (%d)"
2650 " being invalid", numa_id);
2654 n_pmds = get_n_pmd_threads_on_numa(dp, numa_id);
2656 /* If there are already pmd threads created for the numa node
2657 * on which 'netdev' is, do nothing. Otherwise, creates the
2658 * pmd threads for the numa node. */
2660 int can_have, n_unpinned, i;
2662 n_unpinned = ovs_numa_get_n_unpinned_cores_on_numa(numa_id);
2664 VLOG_ERR("Cannot create pmd threads due to lack of unpinned "
2665 "cores on numa node");
2669 /* If cpu mask is specified, uses all unpinned cores, otherwise
2670 * tries creating NR_PMD_THREADS pmd threads. */
2671 can_have = dp->pmd_cmask ? n_unpinned : MIN(n_unpinned, NR_PMD_THREADS);
2672 for (i = 0; i < can_have; i++) {
2673 struct dp_netdev_pmd_thread *pmd = xzalloc(sizeof *pmd);
2674 int core_id = ovs_numa_get_unpinned_core_on_numa(numa_id);
2676 dp_netdev_configure_pmd(pmd, dp, i, core_id, numa_id);
2677 /* Each thread will distribute all devices' rx-queues among
2679 pmd->thread = ovs_thread_create("pmd", pmd_thread_main, pmd);
2681 VLOG_INFO("Created %d pmd threads on numa node %d", can_have, numa_id);
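/* Example of the 'can_have' computation above: with 6 unpinned cores on the
 * node, a configured cpu mask yields 6 pmd threads, while an unset mask caps
 * the count at MIN(6, NR_PMD_THREADS) (the NR_PMD_THREADS default is defined
 * elsewhere in this file). */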
2686 /* Called after pmd threads config change. Restarts pmd threads with
2687 * new configuration. */
2689 dp_netdev_reset_pmd_threads(struct dp_netdev *dp)
2691 struct dp_netdev_port *port;
2693 CMAP_FOR_EACH (port, node, &dp->ports) {
2694 if (netdev_is_pmd(port->netdev)) {
2695 int numa_id = netdev_get_numa_id(port->netdev);
2697 dp_netdev_set_pmds_on_numa(dp, numa_id);
2703 dpif_netdev_get_datapath_version(void)
2705 return xstrdup("<built-in>");
2709 dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow, int cnt, int size,
2712 long long now = time_msec();
2715 atomic_store_relaxed(&netdev_flow->stats.used, now);
2716 non_atomic_ullong_add(&netdev_flow->stats.packet_count, cnt);
2717 non_atomic_ullong_add(&netdev_flow->stats.byte_count, size);
2718 atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags);
2720 atomic_store_relaxed(&netdev_flow->stats.tcp_flags, flags);
2724 dp_netdev_count_packet(struct dp_netdev_pmd_thread *pmd,
2725 enum dp_stat_type type, int cnt)
2727 non_atomic_ullong_add(&pmd->stats.n[type], cnt);
2731 dp_netdev_upcall(struct dp_netdev_pmd_thread *pmd, struct dp_packet *packet_,
2732 struct flow *flow, struct flow_wildcards *wc, ovs_u128 *ufid,
2733 enum dpif_upcall_type type, const struct nlattr *userdata,
2734 struct ofpbuf *actions, struct ofpbuf *put_actions)
2736 struct dp_netdev *dp = pmd->dp;
2738 if (OVS_UNLIKELY(!dp->upcall_cb)) {
2742 if (OVS_UNLIKELY(!VLOG_DROP_DBG(&upcall_rl))) {
2743 struct ds ds = DS_EMPTY_INITIALIZER;
2747 ofpbuf_init(&key, 0);
2748 odp_flow_key_from_flow(&key, flow, &wc->masks, flow->in_port.odp_port,
2750 packet_str = ofp_packet_to_string(dp_packet_data(packet_),
2751 dp_packet_size(packet_));
2753 odp_flow_key_format(key.data, key.size, &ds);
2755 VLOG_DBG("%s: %s upcall:\n%s\n%s", dp->name,
2756 dpif_upcall_type_to_string(type), ds_cstr(&ds), packet_str);
2758 ofpbuf_uninit(&key);
2764 return dp->upcall_cb(packet_, flow, ufid, pmd->core_id, type, userdata,
2765 actions, wc, put_actions, dp->upcall_aux);
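/* The upcall above is a plain function call into 'dp->upcall_cb' (registered
 * through dpif_netdev_register_upcall_cb() further below) rather than a
 * round trip to a kernel datapath; if no callback is registered the miss
 * cannot be handled here, and the debug dump of the flow key and packet is
 * rate limited by 'upcall_rl'. */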
2768 static inline uint32_t
2769 dpif_netdev_packet_get_dp_hash(struct dp_packet *packet,
2770 const struct miniflow *mf)
2774 hash = dp_packet_get_dp_hash(packet);
2775 if (OVS_UNLIKELY(!hash)) {
2776 hash = miniflow_hash_5tuple(mf, 0);
2777 dp_packet_set_dp_hash(packet, hash);
2782 struct packet_batch {
2783 unsigned int packet_count;
2784 unsigned int byte_count;
2787 struct dp_netdev_flow *flow;
2789 struct dp_packet *packets[NETDEV_MAX_RX_BATCH];
2793 packet_batch_update(struct packet_batch *batch, struct dp_packet *packet,
2794 const struct miniflow *mf)
2796 batch->tcp_flags |= miniflow_get_tcp_flags(mf);
2797 batch->packets[batch->packet_count++] = packet;
2798 batch->byte_count += dp_packet_size(packet);
2802 packet_batch_init(struct packet_batch *batch, struct dp_netdev_flow *flow)
2806 batch->packet_count = 0;
2807 batch->byte_count = 0;
2808 batch->tcp_flags = 0;
2812 packet_batch_execute(struct packet_batch *batch,
2813 struct dp_netdev_pmd_thread *pmd)
2815 struct dp_netdev_actions *actions;
2816 struct dp_netdev_flow *flow = batch->flow;
2818 dp_netdev_flow_used(batch->flow, batch->packet_count, batch->byte_count,
2821 actions = dp_netdev_flow_get_actions(flow);
2823 dp_netdev_execute_actions(pmd, batch->packets, batch->packet_count, true,
2824 actions->actions, actions->size);
2826 dp_netdev_count_packet(pmd, DP_STAT_HIT, batch->packet_count);
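/* A packet_batch groups packets of one rx burst that hit the same
 * dp_netdev_flow: 'tcp_flags' is OR'ed across the batch, and
 * packet_batch_execute() charges the flow's statistics and runs its action
 * list once per batch instead of once per packet. */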
2830 dp_netdev_queue_batches(struct dp_packet *pkt,
2831 struct dp_netdev_flow *flow, const struct miniflow *mf,
2832 struct packet_batch *batches, size_t *n_batches,
2835 struct packet_batch *batch = NULL;
2838 if (OVS_UNLIKELY(!flow)) {
2841 /* XXX: This O(n^2) algorithm makes sense if we're operating under the
2842 * assumption that the number of distinct flows (and therefore the
2843 * number of distinct batches) is quite small. If this turns out not
2844 * to be the case, it may make sense to pre-sort based on the
2845 * netdev_flow pointer. That done, we can get the appropriate batching
2846 * in O(n * log(n)) instead. */
2847 for (j = *n_batches - 1; j >= 0; j--) {
2848 if (batches[j].flow == flow) {
2849 batch = &batches[j];
2850 packet_batch_update(batch, pkt, mf);
2854 if (OVS_UNLIKELY(*n_batches >= max_batches)) {
2858 batch = &batches[(*n_batches)++];
2859 packet_batch_init(batch, flow);
2860 packet_batch_update(batch, pkt, mf);
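/* Rough cost of the linear scan above: with 'n' packets spread over 'k'
 * distinct flows it performs O(n * k) pointer comparisons, which is cheap in
 * the expected case of only a handful of flows per rx batch; the pre-sorting
 * suggested in the XXX comment would only start to pay off if 'k' approached
 * the batch size. */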
2865 dp_packet_swap(struct dp_packet **a, struct dp_packet **b)
2867 struct dp_packet *tmp = *a;
2872 /* Tries to process all of the 'cnt' 'packets' using only the exact match cache
2873 * 'flow_cache'. If a flow is not found for a packet 'packets[i]', or if there
2874 * is no matching batch for a packet's flow, the miniflow is copied into 'keys'
2875 * and the packet pointer is moved to the beginning of the 'packets' array.
2877 * The function returns the number of packets that need to be processed in the
2878 * 'packets' array (they have been moved to the beginning of the vector).
2880 static inline size_t
2881 emc_processing(struct dp_netdev_pmd_thread *pmd, struct dp_packet **packets,
2882 size_t cnt, struct netdev_flow_key *keys)
2884 struct netdev_flow_key key;
2885 struct packet_batch batches[4];
2886 struct emc_cache *flow_cache = &pmd->flow_cache;
2887 size_t n_batches, i;
2888 size_t notfound_cnt = 0;
2891 miniflow_initialize(&key.mf, key.buf);
2892 for (i = 0; i < cnt; i++) {
2893 struct dp_netdev_flow *flow;
2895 if (OVS_UNLIKELY(dp_packet_size(packets[i]) < ETH_HEADER_LEN)) {
2896 dp_packet_delete(packets[i]);
2900 miniflow_extract(packets[i], &key.mf);
2901 key.len = 0; /* Not computed yet. */
2902 key.hash = dpif_netdev_packet_get_dp_hash(packets[i], &key.mf);
2904 flow = emc_lookup(flow_cache, &key);
2905 if (OVS_UNLIKELY(!dp_netdev_queue_batches(packets[i], flow, &key.mf,
2906 batches, &n_batches,
2907 ARRAY_SIZE(batches)))) {
2908 if (i != notfound_cnt) {
2909 dp_packet_swap(&packets[i], &packets[notfound_cnt]);
2912 keys[notfound_cnt++] = key;
2916 for (i = 0; i < n_batches; i++) {
2917 packet_batch_execute(&batches[i], pmd);
2920 return notfound_cnt;
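/* In effect emc_processing() partitions the rx batch in place: packets with
 * an exact match hit are batched and executed right away, while misses are
 * swapped to the front of 'packets' with their miniflows saved in 'keys', so
 * fast_path_processing() receives a dense array of exactly 'notfound_cnt'
 * leftovers. */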
2924 fast_path_processing(struct dp_netdev_pmd_thread *pmd,
2925 struct dp_packet **packets, size_t cnt,
2926 struct netdev_flow_key *keys)
2928 #if !defined(__CHECKER__) && !defined(_WIN32)
2929 const size_t PKT_ARRAY_SIZE = cnt;
2931 /* Sparse or MSVC doesn't like variable length arrays. */
2932 enum { PKT_ARRAY_SIZE = NETDEV_MAX_RX_BATCH };
2934 struct packet_batch batches[PKT_ARRAY_SIZE];
2935 struct dpcls_rule *rules[PKT_ARRAY_SIZE];
2936 struct dp_netdev *dp = pmd->dp;
2937 struct emc_cache *flow_cache = &pmd->flow_cache;
2938 size_t n_batches, i;
2941 for (i = 0; i < cnt; i++) {
2942 /* Key length is needed in all the cases, hash computed on demand. */
2943 keys[i].len = netdev_flow_key_size(count_1bits(keys[i].mf.map));
2945 any_miss = !dpcls_lookup(&pmd->cls, keys, rules, cnt);
2946 if (OVS_UNLIKELY(any_miss) && !fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
2947 uint64_t actions_stub[512 / 8], slow_stub[512 / 8];
2948 struct ofpbuf actions, put_actions;
2949 int miss_cnt = 0, lost_cnt = 0;
2952 ofpbuf_use_stub(&actions, actions_stub, sizeof actions_stub);
2953 ofpbuf_use_stub(&put_actions, slow_stub, sizeof slow_stub);
2955 for (i = 0; i < cnt; i++) {
2956 struct dp_netdev_flow *netdev_flow;
2957 struct ofpbuf *add_actions;
2961 if (OVS_LIKELY(rules[i])) {
2965 /* It's possible that an earlier slow path execution installed
2966 * a rule covering this flow. In this case, it's a lot cheaper
2967 * to catch it here than execute a miss. */
2968 netdev_flow = dp_netdev_pmd_lookup_flow(pmd, &keys[i]);
2970 rules[i] = &netdev_flow->cr;
2976 miniflow_expand(&keys[i].mf, &match.flow);
2978 ofpbuf_clear(&actions);
2979 ofpbuf_clear(&put_actions);
2981 dpif_flow_hash(dp->dpif, &match.flow, sizeof match.flow, &ufid);
2982 error = dp_netdev_upcall(pmd, packets[i], &match.flow, &match.wc,
2983 &ufid, DPIF_UC_MISS, NULL, &actions,
2985 if (OVS_UNLIKELY(error && error != ENOSPC)) {
2986 dp_packet_delete(packets[i]);
2991 /* We can't allow the packet batching in the next loop to execute
2992 * the actions. Otherwise, if there are any slow path actions,
2993 * we'll send the packet up twice. */
2994 dp_netdev_execute_actions(pmd, &packets[i], 1, true,
2995 actions.data, actions.size);
2997 add_actions = put_actions.size ? &put_actions : &actions;
2998 if (OVS_LIKELY(error != ENOSPC)) {
2999 /* XXX: There's a race window where a flow covering this packet
3000 * could have already been installed since we last did the flow
3001 * lookup before upcall. This could be solved by moving the
3002 * mutex lock outside the loop, but that's an awfully long time
3003 * to be locking everyone out of making flow installs. If we
3004 * move to a per-core classifier, it would be reasonable. */
3005 ovs_mutex_lock(&pmd->flow_mutex);
3006 netdev_flow = dp_netdev_pmd_lookup_flow(pmd, &keys[i]);
3007 if (OVS_LIKELY(!netdev_flow)) {
3008 netdev_flow = dp_netdev_flow_add(pmd, &match, &ufid,
3012 ovs_mutex_unlock(&pmd->flow_mutex);
3014 emc_insert(flow_cache, &keys[i], netdev_flow);
3018 ofpbuf_uninit(&actions);
3019 ofpbuf_uninit(&put_actions);
3020 fat_rwlock_unlock(&dp->upcall_rwlock);
3021 dp_netdev_count_packet(pmd, DP_STAT_MISS, miss_cnt);
3022 dp_netdev_count_packet(pmd, DP_STAT_LOST, lost_cnt);
3023 } else if (OVS_UNLIKELY(any_miss)) {
3024 int dropped_cnt = 0;
3026 for (i = 0; i < cnt; i++) {
3027 if (OVS_UNLIKELY(!rules[i])) {
3028 dp_packet_delete(packets[i]);
3033 dp_netdev_count_packet(pmd, DP_STAT_MISS, dropped_cnt);
3034 dp_netdev_count_packet(pmd, DP_STAT_LOST, dropped_cnt);
3038 for (i = 0; i < cnt; i++) {
3039 struct dp_packet *packet = packets[i];
3040 struct dp_netdev_flow *flow;
3042 if (OVS_UNLIKELY(!rules[i])) {
3046 flow = dp_netdev_flow_cast(rules[i]);
3048 emc_insert(flow_cache, &keys[i], flow);
3049 dp_netdev_queue_batches(packet, flow, &keys[i].mf, batches,
3050 &n_batches, ARRAY_SIZE(batches));
3053 for (i = 0; i < n_batches; i++) {
3054 packet_batch_execute(&batches[i], pmd);
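/* Summary of the miss handling above: a dpcls hit goes straight to
 * emc_insert() and batching; a miss taken while the upcall lock is available
 * goes through dp_netdev_upcall(), re-checks the classifier under
 * 'flow_mutex' before dp_netdev_flow_add() (the race described in the XXX
 * comment), and is executed individually so it is not run twice by the
 * batches; with upcalls disabled the missed packets are deleted and counted
 * as DP_STAT_MISS and DP_STAT_LOST. */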
3059 dp_netdev_input(struct dp_netdev_pmd_thread *pmd,
3060 struct dp_packet **packets, int cnt)
3062 #if !defined(__CHECKER__) && !defined(_WIN32)
3063 const size_t PKT_ARRAY_SIZE = cnt;
3065 /* Sparse or MSVC doesn't like variable length arrays. */
3066 enum { PKT_ARRAY_SIZE = NETDEV_MAX_RX_BATCH };
3068 struct netdev_flow_key keys[PKT_ARRAY_SIZE];
3071 newcnt = emc_processing(pmd, packets, cnt, keys);
3072 if (OVS_UNLIKELY(newcnt)) {
3073 fast_path_processing(pmd, packets, newcnt, keys);
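/* dp_netdev_input() is thus a two-tier lookup: the per-pmd exact match cache
 * first, then the per-pmd dpcls (and possibly an upcall) only for the packets
 * the cache could not place; each tier is slower but more general than the
 * previous one. */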
3077 struct dp_netdev_execute_aux {
3078 struct dp_netdev_pmd_thread *pmd;
3082 dpif_netdev_register_upcall_cb(struct dpif *dpif, upcall_callback *cb,
3085 struct dp_netdev *dp = get_dp_netdev(dpif);
3086 dp->upcall_aux = aux;
3091 dp_netdev_drop_packets(struct dp_packet **packets, int cnt, bool may_steal)
3096 for (i = 0; i < cnt; i++) {
3097 dp_packet_delete(packets[i]);
3103 push_tnl_action(const struct dp_netdev *dp,
3104 const struct nlattr *attr,
3105 struct dp_packet **packets, int cnt)
3107 struct dp_netdev_port *tun_port;
3108 const struct ovs_action_push_tnl *data;
3110 data = nl_attr_get(attr);
3112 tun_port = dp_netdev_lookup_port(dp, u32_to_odp(data->tnl_port));
3116 netdev_push_header(tun_port->netdev, packets, cnt, data);
3122 dp_netdev_clone_pkt_batch(struct dp_packet **tnl_pkt,
3123 struct dp_packet **packets, int cnt)
3127 for (i = 0; i < cnt; i++) {
3128 tnl_pkt[i] = dp_packet_clone(packets[i]);
3133 dp_execute_cb(void *aux_, struct dp_packet **packets, int cnt,
3134 const struct nlattr *a, bool may_steal)
3135 OVS_NO_THREAD_SAFETY_ANALYSIS
3137 struct dp_netdev_execute_aux *aux = aux_;
3138 uint32_t *depth = recirc_depth_get();
3139 struct dp_netdev_pmd_thread *pmd = aux->pmd;
3140 struct dp_netdev *dp = pmd->dp;
3141 int type = nl_attr_type(a);
3142 struct dp_netdev_port *p;
3145 switch ((enum ovs_action_attr)type) {
3146 case OVS_ACTION_ATTR_OUTPUT:
3147 p = dp_netdev_lookup_port(dp, u32_to_odp(nl_attr_get_u32(a)));
3148 if (OVS_LIKELY(p)) {
3149 netdev_send(p->netdev, pmd->core_id, packets, cnt, may_steal);
3154 case OVS_ACTION_ATTR_TUNNEL_PUSH:
3155 if (*depth < MAX_RECIRC_DEPTH) {
3156 struct dp_packet *tnl_pkt[NETDEV_MAX_RX_BATCH];
3160 dp_netdev_clone_pkt_batch(tnl_pkt, packets, cnt);
3164 err = push_tnl_action(dp, a, packets, cnt);
3167 dp_netdev_input(pmd, packets, cnt);
3170 dp_netdev_drop_packets(tnl_pkt, cnt, !may_steal);
3176 case OVS_ACTION_ATTR_TUNNEL_POP:
3177 if (*depth < MAX_RECIRC_DEPTH) {
3178 odp_port_t portno = u32_to_odp(nl_attr_get_u32(a));
3180 p = dp_netdev_lookup_port(dp, portno);
3182 struct dp_packet *tnl_pkt[NETDEV_MAX_RX_BATCH];
3186 dp_netdev_clone_pkt_batch(tnl_pkt, packets, cnt);
3190 err = netdev_pop_header(p->netdev, packets, cnt);
3193 for (i = 0; i < cnt; i++) {
3194 packets[i]->md.in_port.odp_port = portno;
3198 dp_netdev_input(pmd, packets, cnt);
3201 dp_netdev_drop_packets(tnl_pkt, cnt, !may_steal);
3208 case OVS_ACTION_ATTR_USERSPACE:
3209 if (!fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
3210 const struct nlattr *userdata;
3211 struct ofpbuf actions;
3215 userdata = nl_attr_find_nested(a, OVS_USERSPACE_ATTR_USERDATA);
3216 ofpbuf_init(&actions, 0);
3218 for (i = 0; i < cnt; i++) {
3221 ofpbuf_clear(&actions);
3223 flow_extract(packets[i], &flow);
3224 dpif_flow_hash(dp->dpif, &flow, sizeof flow, &ufid);
3225 error = dp_netdev_upcall(pmd, packets[i], &flow, NULL, &ufid,
3226 DPIF_UC_ACTION, userdata, &actions,
3228 if (!error || error == ENOSPC) {
3229 dp_netdev_execute_actions(pmd, &packets[i], 1, may_steal,
3230 actions.data, actions.size);
3231 } else if (may_steal) {
3232 dp_packet_delete(packets[i]);
3235 ofpbuf_uninit(&actions);
3236 fat_rwlock_unlock(&dp->upcall_rwlock);
3242 case OVS_ACTION_ATTR_RECIRC:
3243 if (*depth < MAX_RECIRC_DEPTH) {
3246 for (i = 0; i < cnt; i++) {
3247 struct dp_packet *recirc_pkt;
3249 recirc_pkt = (may_steal) ? packets[i]
3250 : dp_packet_clone(packets[i]);
3252 recirc_pkt->md.recirc_id = nl_attr_get_u32(a);
3254 /* Hash is private to each packet */
3255 recirc_pkt->md.dp_hash = dp_packet_get_dp_hash(packets[i]);
3257 dp_netdev_input(pmd, &recirc_pkt, 1);
3264 VLOG_WARN("Packet dropped. Max recirculation depth exceeded.");
3267 case OVS_ACTION_ATTR_PUSH_VLAN:
3268 case OVS_ACTION_ATTR_POP_VLAN:
3269 case OVS_ACTION_ATTR_PUSH_MPLS:
3270 case OVS_ACTION_ATTR_POP_MPLS:
3271 case OVS_ACTION_ATTR_SET:
3272 case OVS_ACTION_ATTR_SET_MASKED:
3273 case OVS_ACTION_ATTR_SAMPLE:
3274 case OVS_ACTION_ATTR_HASH:
3275 case OVS_ACTION_ATTR_UNSPEC:
3276 case __OVS_ACTION_ATTR_MAX:
3280 dp_netdev_drop_packets(packets, cnt, may_steal);
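/* The depth checks above bound re-entry into dp_netdev_input():
 * TUNNEL_PUSH, TUNNEL_POP and RECIRC all feed packets back through the
 * datapath, and '*depth' limits that nesting to MAX_RECIRC_DEPTH before the
 * packets are dropped with the warning above. */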
3284 dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
3285 struct dp_packet **packets, int cnt,
3287 const struct nlattr *actions, size_t actions_len)
3289 struct dp_netdev_execute_aux aux = { pmd };
3291 odp_execute_actions(&aux, packets, cnt, may_steal, actions,
3292 actions_len, dp_execute_cb);
3295 const struct dpif_class dpif_netdev_class = {
3297 dpif_netdev_enumerate,
3298 dpif_netdev_port_open_type,
3301 dpif_netdev_destroy,
3304 dpif_netdev_get_stats,
3305 dpif_netdev_port_add,
3306 dpif_netdev_port_del,
3307 dpif_netdev_port_query_by_number,
3308 dpif_netdev_port_query_by_name,
3309 NULL, /* port_get_pid */
3310 dpif_netdev_port_dump_start,
3311 dpif_netdev_port_dump_next,
3312 dpif_netdev_port_dump_done,
3313 dpif_netdev_port_poll,
3314 dpif_netdev_port_poll_wait,
3315 dpif_netdev_flow_flush,
3316 dpif_netdev_flow_dump_create,
3317 dpif_netdev_flow_dump_destroy,
3318 dpif_netdev_flow_dump_thread_create,
3319 dpif_netdev_flow_dump_thread_destroy,
3320 dpif_netdev_flow_dump_next,
3321 dpif_netdev_operate,
3322 NULL, /* recv_set */
3323 NULL, /* handlers_set */
3324 dpif_netdev_pmd_set,
3325 dpif_netdev_queue_to_priority,
3327 NULL, /* recv_wait */
3328 NULL, /* recv_purge */
3329 dpif_netdev_register_upcall_cb,
3330 dpif_netdev_enable_upcall,
3331 dpif_netdev_disable_upcall,
3332 dpif_netdev_get_datapath_version,
3336 dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED,
3337 const char *argv[], void *aux OVS_UNUSED)
3339 struct dp_netdev_port *old_port;
3340 struct dp_netdev_port *new_port;
3341 struct dp_netdev *dp;
3344 ovs_mutex_lock(&dp_netdev_mutex);
3345 dp = shash_find_data(&dp_netdevs, argv[1]);
3346 if (!dp || !dpif_netdev_class_is_dummy(dp->class)) {
3347 ovs_mutex_unlock(&dp_netdev_mutex);
3348 unixctl_command_reply_error(conn, "unknown datapath or not a dummy");
3351 ovs_refcount_ref(&dp->ref_cnt);
3352 ovs_mutex_unlock(&dp_netdev_mutex);
3354 ovs_mutex_lock(&dp->port_mutex);
3355 if (get_port_by_name(dp, argv[2], &old_port)) {
3356 unixctl_command_reply_error(conn, "unknown port");
3360 port_no = u32_to_odp(atoi(argv[3]));
3361 if (!port_no || port_no == ODPP_NONE) {
3362 unixctl_command_reply_error(conn, "bad port number");
3365 if (dp_netdev_lookup_port(dp, port_no)) {
3366 unixctl_command_reply_error(conn, "port number already in use");
3370 /* Remove old port. */
3371 cmap_remove(&dp->ports, &old_port->node, hash_port_no(old_port->port_no));
3372 ovsrcu_postpone(free, old_port);
3374 /* Insert new port (cmap semantics mean we cannot re-insert 'old_port'). */
3375 new_port = xmemdup(old_port, sizeof *old_port);
3376 new_port->port_no = port_no;
3377 cmap_insert(&dp->ports, &new_port->node, hash_port_no(port_no));
3379 seq_change(dp->port_seq);
3380 unixctl_command_reply(conn, NULL);
3383 ovs_mutex_unlock(&dp->port_mutex);
3384 dp_netdev_unref(dp);
3388 dpif_dummy_delete_port(struct unixctl_conn *conn, int argc OVS_UNUSED,
3389 const char *argv[], void *aux OVS_UNUSED)
3391 struct dp_netdev_port *port;
3392 struct dp_netdev *dp;
3394 ovs_mutex_lock(&dp_netdev_mutex);
3395 dp = shash_find_data(&dp_netdevs, argv[1]);
3396 if (!dp || !dpif_netdev_class_is_dummy(dp->class)) {
3397 ovs_mutex_unlock(&dp_netdev_mutex);
3398 unixctl_command_reply_error(conn, "unknown datapath or not a dummy");
3401 ovs_refcount_ref(&dp->ref_cnt);
3402 ovs_mutex_unlock(&dp_netdev_mutex);
3404 ovs_mutex_lock(&dp->port_mutex);
3405 if (get_port_by_name(dp, argv[2], &port)) {
3406 unixctl_command_reply_error(conn, "unknown port");
3407 } else if (port->port_no == ODPP_LOCAL) {
3408 unixctl_command_reply_error(conn, "can't delete local port");
3410 do_del_port(dp, port);
3411 unixctl_command_reply(conn, NULL);
3413 ovs_mutex_unlock(&dp->port_mutex);
3415 dp_netdev_unref(dp);
3419 dpif_dummy_register__(const char *type)
3421 struct dpif_class *class;
3423 class = xmalloc(sizeof *class);
3424 *class = dpif_netdev_class;
3425 class->type = xstrdup(type);
3426 dp_register_provider(class);
3430 dpif_dummy_register(bool override)
3437 dp_enumerate_types(&types);
3438 SSET_FOR_EACH (type, &types) {
3439 if (!dp_unregister_provider(type)) {
3440 dpif_dummy_register__(type);
3443 sset_destroy(&types);
3446 dpif_dummy_register__("dummy");
3448 unixctl_command_register("dpif-dummy/change-port-number",
3449 "dp port new-number",
3450 3, 3, dpif_dummy_change_port_number, NULL);
3451 unixctl_command_register("dpif-dummy/delete-port", "dp port",
3452 2, 2, dpif_dummy_delete_port, NULL);
3455 /* Datapath Classifier. */
3457 /* A set of rules that all have the same fields wildcarded. */
3458 struct dpcls_subtable {
3459 /* The fields are only used by writers. */
3460 struct cmap_node cmap_node OVS_GUARDED; /* Within dpcls 'subtables_map'. */
3462 /* These fields are accessed by readers. */
3463 struct cmap rules; /* Contains "struct dpcls_rule"s. */
3464 struct netdev_flow_key mask; /* Wildcards for fields (const). */
3465 /* 'mask' must be the last field, additional space is allocated here. */
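/* Design note, illustrated by the functions below: the classifier keeps one
 * dpcls_subtable per distinct wildcard mask.  Rules are stored pre-masked
 * and hashed on their masked flow within their subtable, so a lookup hashes
 * a packet's key once per subtable and then only needs an equality check
 * over the masked words. */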
3468 /* Initializes 'cls' as a classifier that initially contains no classification
3471 dpcls_init(struct dpcls *cls)
3473 cmap_init(&cls->subtables_map);
3474 pvector_init(&cls->subtables);
3478 dpcls_destroy_subtable(struct dpcls *cls, struct dpcls_subtable *subtable)
3480 pvector_remove(&cls->subtables, subtable);
3481 cmap_remove(&cls->subtables_map, &subtable->cmap_node,
3482 subtable->mask.hash);
3483 cmap_destroy(&subtable->rules);
3484 ovsrcu_postpone(free, subtable);
3487 /* Destroys 'cls'. Rules within 'cls', if any, are not freed; this is the
3488 * caller's responsibility.
3489 * May only be called after all the readers have been terminated. */
3491 dpcls_destroy(struct dpcls *cls)
3494 struct dpcls_subtable *subtable;
3496 CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
3497 dpcls_destroy_subtable(cls, subtable);
3499 cmap_destroy(&cls->subtables_map);
3500 pvector_destroy(&cls->subtables);
3504 static struct dpcls_subtable *
3505 dpcls_create_subtable(struct dpcls *cls, const struct netdev_flow_key *mask)
3507 struct dpcls_subtable *subtable;
3509 /* Need to add one. */
3510 subtable = xmalloc(sizeof *subtable
3511 - sizeof subtable->mask.mf + mask->len);
3512 cmap_init(&subtable->rules);
3513 netdev_flow_key_clone(&subtable->mask, mask);
3514 cmap_insert(&cls->subtables_map, &subtable->cmap_node, mask->hash);
3515 pvector_insert(&cls->subtables, subtable, 0);
3516 pvector_publish(&cls->subtables);
3521 static inline struct dpcls_subtable *
3522 dpcls_find_subtable(struct dpcls *cls, const struct netdev_flow_key *mask)
3524 struct dpcls_subtable *subtable;
3526 CMAP_FOR_EACH_WITH_HASH (subtable, cmap_node, mask->hash,
3527 &cls->subtables_map) {
3528 if (netdev_flow_key_equal(&subtable->mask, mask)) {
3532 return dpcls_create_subtable(cls, mask);
3535 /* Insert 'rule' into 'cls'. */
3537 dpcls_insert(struct dpcls *cls, struct dpcls_rule *rule,
3538 const struct netdev_flow_key *mask)
3540 struct dpcls_subtable *subtable = dpcls_find_subtable(cls, mask);
3542 rule->mask = &subtable->mask;
3543 cmap_insert(&subtable->rules, &rule->cmap_node, rule->flow.hash);
3546 /* Removes 'rule' from 'cls', also destructing the 'rule'. */
3548 dpcls_remove(struct dpcls *cls, struct dpcls_rule *rule)
3550 struct dpcls_subtable *subtable;
3552 ovs_assert(rule->mask);
3554 INIT_CONTAINER(subtable, rule->mask, mask);
3556 if (cmap_remove(&subtable->rules, &rule->cmap_node, rule->flow.hash)
3558 dpcls_destroy_subtable(cls, subtable);
3559 pvector_publish(&cls->subtables);
3563 /* Returns true if 'target' satisfies 'key' in 'mask', that is, if for each 1-bit
3564 * in 'mask' the corresponding values in 'key' and 'target' are the same.
3566 * Note: 'key' and 'mask' have the same mask, and 'key' is already masked. */
3568 dpcls_rule_matches_key(const struct dpcls_rule *rule,
3569 const struct netdev_flow_key *target)
3571 const uint64_t *keyp = rule->flow.mf.inline_values;
3572 const uint64_t *maskp = rule->mask->mf.inline_values;
3573 uint64_t target_u64;
3575 NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(target_u64, target, rule->flow.mf.map) {
3576 if (OVS_UNLIKELY((target_u64 & *maskp++) != *keyp++)) {
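/* Because the rule's flow is stored already masked, the test above is simply
 * (target & mask) == key, 64 bits at a time.  For example, a rule that masks
 * only the low 16 bits of a word stores key = value & 0xffff and matches any
 * target whose low 16 bits equal that value, whatever the upper bits are. */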
3583 /* For each miniflow in 'flows' performs a classifier lookup, writing the result
3584 * into the corresponding slot in 'rules'. If a particular entry in 'flows' is
3585 * NULL it is skipped.
3587 * This function is optimized for use in the userspace datapath and therefore
3588 * does not implement a lot of features available in the standard
3589 * classifier_lookup() function. Specifically, it does not implement
3590 * priorities, instead returning any rule which matches the flow.
3592 * Returns true if all flows found a corresponding rule. */
3594 dpcls_lookup(const struct dpcls *cls, const struct netdev_flow_key keys[],
3595 struct dpcls_rule **rules, const size_t cnt)
3597 /* The batch size 16 was experimentally found faster than 8 or 32. */
3598 typedef uint16_t map_type;
3599 #define MAP_BITS (sizeof(map_type) * CHAR_BIT)
3601 #if !defined(__CHECKER__) && !defined(_WIN32)
3602 const int N_MAPS = DIV_ROUND_UP(cnt, MAP_BITS);
3604 enum { N_MAPS = DIV_ROUND_UP(NETDEV_MAX_RX_BATCH, MAP_BITS) };
3606 map_type maps[N_MAPS];
3607 struct dpcls_subtable *subtable;
3609 memset(maps, 0xff, sizeof maps);
3610 if (cnt % MAP_BITS) {
3611 maps[N_MAPS - 1] >>= MAP_BITS - cnt % MAP_BITS; /* Clear extra bits. */
3613 memset(rules, 0, cnt * sizeof *rules);
3615 PVECTOR_FOR_EACH (subtable, &cls->subtables) {
3616 const struct netdev_flow_key *mkeys = keys;
3617 struct dpcls_rule **mrules = rules;
3618 map_type remains = 0;
3621 BUILD_ASSERT_DECL(sizeof remains == sizeof *maps);
3623 for (m = 0; m < N_MAPS; m++, mkeys += MAP_BITS, mrules += MAP_BITS) {
3624 uint32_t hashes[MAP_BITS];
3625 const struct cmap_node *nodes[MAP_BITS];
3626 unsigned long map = maps[m];
3630 continue; /* Skip empty maps. */
3633 /* Compute hashes for the remaining keys. */
3634 ULONG_FOR_EACH_1(i, map) {
3635 hashes[i] = netdev_flow_key_hash_in_mask(&mkeys[i],
3639 map = cmap_find_batch(&subtable->rules, map, hashes, nodes);
3640 /* Check results. */
3641 ULONG_FOR_EACH_1(i, map) {
3642 struct dpcls_rule *rule;
3644 CMAP_NODE_FOR_EACH (rule, cmap_node, nodes[i]) {
3645 if (OVS_LIKELY(dpcls_rule_matches_key(rule, &mkeys[i]))) {
3650 ULONG_SET0(map, i); /* Did not match. */
3652 ; /* Keep Sparse happy. */
3654 maps[m] &= ~map; /* Clear the found rules. */
3658 return true; /* All found. */
3661 return false; /* Some misses. */
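/* The lookup above works on bitmaps of outstanding keys: 'maps' starts with
 * one bit per input key, each subtable pass computes hashes only for the
 * still unmatched keys, probes them with a single cmap_find_batch() call and
 * clears the bits of the keys it resolved.  Subtables are visited in pvector
 * order, and once every bit is cleared the function reports that all flows
 * found a rule. */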