2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2016 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 #include "dpif-netdev.h"
25 #include <netinet/in.h>
29 #include <sys/ioctl.h>
30 #include <sys/socket.h>
38 #include "dp-packet.h"
40 #include "dpif-provider.h"
42 #include "fat-rwlock.h"
47 #include "netdev-dpdk.h"
48 #include "netdev-vport.h"
50 #include "odp-execute.h"
52 #include "openvswitch/dynamic-string.h"
53 #include "openvswitch/list.h"
54 #include "openvswitch/match.h"
55 #include "openvswitch/ofp-print.h"
56 #include "openvswitch/ofpbuf.h"
57 #include "openvswitch/vlog.h"
61 #include "poll-loop.h"
68 #include "tnl-neigh-cache.h"
69 #include "tnl-ports.h"
73 VLOG_DEFINE_THIS_MODULE(dpif_netdev
);
75 #define FLOW_DUMP_MAX_BATCH 50
76 /* Use per thread recirc_depth to prevent recirculation loop. */
77 #define MAX_RECIRC_DEPTH 5
78 DEFINE_STATIC_PER_THREAD_DATA(uint32_t, recirc_depth
, 0)
80 /* Configuration parameters. */
81 enum { MAX_FLOWS
= 65536 }; /* Maximum number of flows in flow table. */
83 /* Protects against changes to 'dp_netdevs'. */
84 static struct ovs_mutex dp_netdev_mutex
= OVS_MUTEX_INITIALIZER
;
86 /* Contains all 'struct dp_netdev's. */
87 static struct shash dp_netdevs
OVS_GUARDED_BY(dp_netdev_mutex
)
88 = SHASH_INITIALIZER(&dp_netdevs
);
90 static struct vlog_rate_limit upcall_rl
= VLOG_RATE_LIMIT_INIT(600, 600);
92 static struct odp_support dp_netdev_support
= {
93 .max_mpls_depth
= SIZE_MAX
,
97 /* Stores a miniflow with inline values */
99 struct netdev_flow_key
{
100 uint32_t hash
; /* Hash function differs for different users. */
101 uint32_t len
; /* Length of the following miniflow (incl. map). */
103 uint64_t buf
[FLOW_MAX_PACKET_U64S
];
106 /* Exact match cache for frequently used flows
108 * The cache uses a 32-bit hash of the packet (which can be the RSS hash) to
109 * search its entries for a miniflow that matches exactly the miniflow of the
110 * packet. It stores the 'dpcls_rule' (rule) that matches the miniflow.
112 * A cache entry holds a reference to its 'dp_netdev_flow'.
114 * A miniflow with a given hash can be in one of EM_FLOW_HASH_SEGS different
115 * entries. The 32-bit hash is split into EM_FLOW_HASH_SEGS values (each of
116 * them is EM_FLOW_HASH_SHIFT bits wide and the remainder is thrown away). Each
117 * value is the index of a cache entry where the miniflow could be.
123 * Each pmd_thread has its own private exact match cache.
124 * If dp_netdev_input is not called from a pmd thread, a mutex is used.
127 #define EM_FLOW_HASH_SHIFT 13
128 #define EM_FLOW_HASH_ENTRIES (1u << EM_FLOW_HASH_SHIFT)
129 #define EM_FLOW_HASH_MASK (EM_FLOW_HASH_ENTRIES - 1)
130 #define EM_FLOW_HASH_SEGS 2
133 struct dp_netdev_flow
*flow
;
134 struct netdev_flow_key key
; /* key.hash used for emc hash value. */
138 struct emc_entry entries
[EM_FLOW_HASH_ENTRIES
];
139 int sweep_idx
; /* For emc_cache_slow_sweep(). */
142 /* Iterate in the exact match cache through every entry that might contain a
143 * miniflow with hash 'HASH'. */
144 #define EMC_FOR_EACH_POS_WITH_HASH(EMC, CURRENT_ENTRY, HASH) \
145 for (uint32_t i__ = 0, srch_hash__ = (HASH); \
146 (CURRENT_ENTRY) = &(EMC)->entries[srch_hash__ & EM_FLOW_HASH_MASK], \
147 i__ < EM_FLOW_HASH_SEGS; \
148 i__++, srch_hash__ >>= EM_FLOW_HASH_SHIFT)
150 /* Simple non-wildcarding single-priority classifier. */
153 struct cmap subtables_map
;
154 struct pvector subtables
;
157 /* A rule to be inserted to the classifier. */
159 struct cmap_node cmap_node
; /* Within struct dpcls_subtable 'rules'. */
160 struct netdev_flow_key
*mask
; /* Subtable's mask. */
161 struct netdev_flow_key flow
; /* Matching key. */
162 /* 'flow' must be the last field, additional space is allocated here. */
165 static void dpcls_init(struct dpcls
*);
166 static void dpcls_destroy(struct dpcls
*);
167 static void dpcls_insert(struct dpcls
*, struct dpcls_rule
*,
168 const struct netdev_flow_key
*mask
);
169 static void dpcls_remove(struct dpcls
*, struct dpcls_rule
*);
170 static bool dpcls_lookup(const struct dpcls
*cls
,
171 const struct netdev_flow_key keys
[],
172 struct dpcls_rule
**rules
, size_t cnt
);
174 /* Datapath based on the network device interface from netdev.h.
180 * Some members, marked 'const', are immutable. Accessing other members
181 * requires synchronization, as noted in more detail below.
183 * Acquisition order is, from outermost to innermost:
185 * dp_netdev_mutex (global)
189 const struct dpif_class
*const class;
190 const char *const name
;
192 struct ovs_refcount ref_cnt
;
193 atomic_flag destroyed
;
197 * Protected by RCU. Take the mutex to add or remove ports. */
198 struct ovs_mutex port_mutex
;
200 struct seq
*port_seq
; /* Incremented whenever a port changes. */
202 /* Protects access to ofproto-dpif-upcall interface during revalidator
203 * thread synchronization. */
204 struct fat_rwlock upcall_rwlock
;
205 upcall_callback
*upcall_cb
; /* Callback function for executing upcalls. */
208 /* Callback function for notifying the purging of dp flows (during
209 * reseting pmd deletion). */
210 dp_purge_callback
*dp_purge_cb
;
213 /* Stores all 'struct dp_netdev_pmd_thread's. */
214 struct cmap poll_threads
;
216 /* Protects the access of the 'struct dp_netdev_pmd_thread'
217 * instance for non-pmd thread. */
218 struct ovs_mutex non_pmd_mutex
;
220 /* Each pmd thread will store its pointer to
221 * 'struct dp_netdev_pmd_thread' in 'per_pmd_key'. */
222 ovsthread_key_t per_pmd_key
;
224 /* Cpu mask for pin of pmd threads. */
226 uint64_t last_tnl_conf_seq
;
229 static struct dp_netdev_port
*dp_netdev_lookup_port(const struct dp_netdev
*dp
,
233 DP_STAT_EXACT_HIT
, /* Packets that had an exact match (emc). */
234 DP_STAT_MASKED_HIT
, /* Packets that matched in the flow table. */
235 DP_STAT_MISS
, /* Packets that did not match. */
236 DP_STAT_LOST
, /* Packets not passed up to the client. */
240 enum pmd_cycles_counter_type
{
241 PMD_CYCLES_POLLING
, /* Cycles spent polling NICs. */
242 PMD_CYCLES_PROCESSING
, /* Cycles spent processing packets */
246 /* A port in a netdev-based datapath. */
247 struct dp_netdev_port
{
249 struct netdev
*netdev
;
250 struct cmap_node node
; /* Node in dp_netdev's 'ports'. */
251 struct netdev_saved_flags
*sf
;
252 unsigned n_rxq
; /* Number of elements in 'rxq' */
253 struct netdev_rxq
**rxq
;
254 char *type
; /* Port type as requested by user. */
255 int latest_requested_n_rxq
; /* Latest requested from netdev number
259 /* Contained by struct dp_netdev_flow's 'stats' member. */
260 struct dp_netdev_flow_stats
{
261 atomic_llong used
; /* Last used time, in monotonic msecs. */
262 atomic_ullong packet_count
; /* Number of packets matched. */
263 atomic_ullong byte_count
; /* Number of bytes matched. */
264 atomic_uint16_t tcp_flags
; /* Bitwise-OR of seen tcp_flags values. */
267 /* A flow in 'dp_netdev_pmd_thread's 'flow_table'.
273 * Except near the beginning or ending of its lifespan, rule 'rule' belongs to
274 * its pmd thread's classifier. The text below calls this classifier 'cls'.
279 * The thread safety rules described here for "struct dp_netdev_flow" are
280 * motivated by two goals:
282 * - Prevent threads that read members of "struct dp_netdev_flow" from
283 * reading bad data due to changes by some thread concurrently modifying
286 * - Prevent two threads making changes to members of a given "struct
287 * dp_netdev_flow" from interfering with each other.
293 * A flow 'flow' may be accessed without a risk of being freed during an RCU
294 * grace period. Code that needs to hold onto a flow for a while
295 * should try incrementing 'flow->ref_cnt' with dp_netdev_flow_ref().
297 * 'flow->ref_cnt' protects 'flow' from being freed. It doesn't protect the
298 * flow from being deleted from 'cls' and it doesn't protect members of 'flow'
301 * Some members, marked 'const', are immutable. Accessing other members
302 * requires synchronization, as noted in more detail below.
304 struct dp_netdev_flow
{
305 const struct flow flow
; /* Unmasked flow that created this entry. */
306 /* Hash table index by unmasked flow. */
307 const struct cmap_node node
; /* In owning dp_netdev_pmd_thread's */
309 const ovs_u128 ufid
; /* Unique flow identifier. */
310 const unsigned pmd_id
; /* The 'core_id' of pmd thread owning this */
313 /* Number of references.
314 * The classifier owns one reference.
315 * Any thread trying to keep a rule from being freed should hold its own
317 struct ovs_refcount ref_cnt
;
322 struct dp_netdev_flow_stats stats
;
325 OVSRCU_TYPE(struct dp_netdev_actions
*) actions
;
327 /* While processing a group of input packets, the datapath uses the next
328 * member to store a pointer to the output batch for the flow. It is
329 * reset after the batch has been sent out (See dp_netdev_queue_batches(),
330 * packet_batch_per_flow_init() and packet_batch_per_flow_execute()). */
331 struct packet_batch_per_flow
*batch
;
333 /* Packet classification. */
334 struct dpcls_rule cr
; /* In owning dp_netdev's 'cls'. */
335 /* 'cr' must be the last member. */
338 static void dp_netdev_flow_unref(struct dp_netdev_flow
*);
339 static bool dp_netdev_flow_ref(struct dp_netdev_flow
*);
340 static int dpif_netdev_flow_from_nlattrs(const struct nlattr
*, uint32_t,
343 /* A set of datapath actions within a "struct dp_netdev_flow".
349 * A struct dp_netdev_actions 'actions' is protected with RCU. */
350 struct dp_netdev_actions
{
351 /* These members are immutable: they do not change during the struct's
353 unsigned int size
; /* Size of 'actions', in bytes. */
354 struct nlattr actions
[]; /* Sequence of OVS_ACTION_ATTR_* attributes. */
357 struct dp_netdev_actions
*dp_netdev_actions_create(const struct nlattr
*,
359 struct dp_netdev_actions
*dp_netdev_flow_get_actions(
360 const struct dp_netdev_flow
*);
361 static void dp_netdev_actions_free(struct dp_netdev_actions
*);
363 /* Contained by struct dp_netdev_pmd_thread's 'stats' member. */
364 struct dp_netdev_pmd_stats
{
365 /* Indexed by DP_STAT_*. */
366 atomic_ullong n
[DP_N_STATS
];
369 /* Contained by struct dp_netdev_pmd_thread's 'cycle' member. */
370 struct dp_netdev_pmd_cycles
{
371 /* Indexed by PMD_CYCLES_*. */
372 atomic_ullong n
[PMD_N_CYCLES
];
375 /* Contained by struct dp_netdev_pmd_thread's 'poll_list' member. */
377 struct dp_netdev_port
*port
;
378 struct netdev_rxq
*rx
;
379 struct ovs_list node
;
382 /* PMD: Poll modes drivers. PMD accesses devices via polling to eliminate
383 * the performance overhead of interrupt processing. Therefore netdev can
384 * not implement rx-wait for these devices. dpif-netdev needs to poll
385 * these device to check for recv buffer. pmd-thread does polling for
386 * devices assigned to itself.
388 * DPDK used PMD for accessing NIC.
390 * Note, instance with cpu core id NON_PMD_CORE_ID will be reserved for
391 * I/O of all non-pmd threads. There will be no actual thread created
394 * Each struct has its own flow table and classifier. Packets received
395 * from managed ports are looked up in the corresponding pmd thread's
396 * flow table, and are executed with the found actions.
398 struct dp_netdev_pmd_thread
{
399 struct dp_netdev
*dp
;
400 struct ovs_refcount ref_cnt
; /* Every reference must be refcount'ed. */
401 struct cmap_node node
; /* In 'dp->poll_threads'. */
403 pthread_cond_t cond
; /* For synchronizing pmd thread reload. */
404 struct ovs_mutex cond_mutex
; /* Mutex for condition variable. */
406 /* Per thread exact-match cache. Note, the instance for cpu core
407 * NON_PMD_CORE_ID can be accessed by multiple threads, and thusly
408 * need to be protected (e.g. by 'dp_netdev_mutex'). All other
409 * instances will only be accessed by its own pmd thread. */
410 struct emc_cache flow_cache
;
412 /* Classifier and Flow-Table.
414 * Writers of 'flow_table' must take the 'flow_mutex'. Corresponding
415 * changes to 'cls' must be made while still holding the 'flow_mutex'.
417 struct ovs_mutex flow_mutex
;
419 struct cmap flow_table OVS_GUARDED
; /* Flow table. */
422 struct dp_netdev_pmd_stats stats
;
424 /* Cycles counters */
425 struct dp_netdev_pmd_cycles cycles
;
427 /* Used to count cicles. See 'cycles_counter_end()' */
428 unsigned long long last_cycles
;
430 struct latch exit_latch
; /* For terminating the pmd thread. */
431 atomic_uint change_seq
; /* For reloading pmd ports. */
433 unsigned core_id
; /* CPU core id of this pmd thread. */
434 int numa_id
; /* numa node id of this pmd thread. */
435 atomic_int tx_qid
; /* Queue id used by this pmd thread to
436 * send packets on all netdevs */
438 struct ovs_mutex poll_mutex
; /* Mutex for poll_list. */
439 /* List of rx queues to poll. */
440 struct ovs_list poll_list OVS_GUARDED
;
441 int poll_cnt
; /* Number of elemints in poll_list. */
443 /* Only a pmd thread can write on its own 'cycles' and 'stats'.
444 * The main thread keeps 'stats_zero' and 'cycles_zero' as base
445 * values and subtracts them from 'stats' and 'cycles' before
446 * reporting to the user */
447 unsigned long long stats_zero
[DP_N_STATS
];
448 uint64_t cycles_zero
[PMD_N_CYCLES
];
451 #define PMD_INITIAL_SEQ 1
453 /* Interface to netdev-based datapath. */
456 struct dp_netdev
*dp
;
457 uint64_t last_port_seq
;
460 static int get_port_by_number(struct dp_netdev
*dp
, odp_port_t port_no
,
461 struct dp_netdev_port
**portp
);
462 static int get_port_by_name(struct dp_netdev
*dp
, const char *devname
,
463 struct dp_netdev_port
**portp
);
464 static void dp_netdev_free(struct dp_netdev
*)
465 OVS_REQUIRES(dp_netdev_mutex
);
466 static int do_add_port(struct dp_netdev
*dp
, const char *devname
,
467 const char *type
, odp_port_t port_no
)
468 OVS_REQUIRES(dp
->port_mutex
);
469 static void do_del_port(struct dp_netdev
*dp
, struct dp_netdev_port
*)
470 OVS_REQUIRES(dp
->port_mutex
);
471 static int dpif_netdev_open(const struct dpif_class
*, const char *name
,
472 bool create
, struct dpif
**);
473 static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread
*pmd
,
474 struct dp_packet_batch
*,
476 const struct nlattr
*actions
,
478 static void dp_netdev_input(struct dp_netdev_pmd_thread
*,
479 struct dp_packet_batch
*, odp_port_t port_no
);
480 static void dp_netdev_recirculate(struct dp_netdev_pmd_thread
*,
481 struct dp_packet_batch
*);
483 static void dp_netdev_disable_upcall(struct dp_netdev
*);
484 static void dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread
*pmd
);
485 static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread
*pmd
,
486 struct dp_netdev
*dp
, unsigned core_id
,
488 static void dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread
*pmd
);
489 static void dp_netdev_set_nonpmd(struct dp_netdev
*dp
);
490 static struct dp_netdev_pmd_thread
*dp_netdev_get_pmd(struct dp_netdev
*dp
,
492 static struct dp_netdev_pmd_thread
*
493 dp_netdev_pmd_get_next(struct dp_netdev
*dp
, struct cmap_position
*pos
);
494 static void dp_netdev_destroy_all_pmds(struct dp_netdev
*dp
);
495 static void dp_netdev_del_pmds_on_numa(struct dp_netdev
*dp
, int numa_id
);
496 static void dp_netdev_set_pmds_on_numa(struct dp_netdev
*dp
, int numa_id
);
497 static void dp_netdev_pmd_clear_poll_list(struct dp_netdev_pmd_thread
*pmd
);
498 static void dp_netdev_del_port_from_all_pmds(struct dp_netdev
*dp
,
499 struct dp_netdev_port
*port
);
501 dp_netdev_add_port_to_pmds(struct dp_netdev
*dp
, struct dp_netdev_port
*port
);
503 dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread
*pmd
,
504 struct dp_netdev_port
*port
, struct netdev_rxq
*rx
);
505 static struct dp_netdev_pmd_thread
*
506 dp_netdev_less_loaded_pmd_on_numa(struct dp_netdev
*dp
, int numa_id
);
507 static void dp_netdev_reset_pmd_threads(struct dp_netdev
*dp
);
508 static bool dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread
*pmd
);
509 static void dp_netdev_pmd_unref(struct dp_netdev_pmd_thread
*pmd
);
510 static void dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread
*pmd
);
512 static inline bool emc_entry_alive(struct emc_entry
*ce
);
513 static void emc_clear_entry(struct emc_entry
*ce
);
516 emc_cache_init(struct emc_cache
*flow_cache
)
520 flow_cache
->sweep_idx
= 0;
521 for (i
= 0; i
< ARRAY_SIZE(flow_cache
->entries
); i
++) {
522 flow_cache
->entries
[i
].flow
= NULL
;
523 flow_cache
->entries
[i
].key
.hash
= 0;
524 flow_cache
->entries
[i
].key
.len
= sizeof(struct miniflow
);
525 flowmap_init(&flow_cache
->entries
[i
].key
.mf
.map
);
530 emc_cache_uninit(struct emc_cache
*flow_cache
)
534 for (i
= 0; i
< ARRAY_SIZE(flow_cache
->entries
); i
++) {
535 emc_clear_entry(&flow_cache
->entries
[i
]);
539 /* Check and clear dead flow references slowly (one entry at each
542 emc_cache_slow_sweep(struct emc_cache
*flow_cache
)
544 struct emc_entry
*entry
= &flow_cache
->entries
[flow_cache
->sweep_idx
];
546 if (!emc_entry_alive(entry
)) {
547 emc_clear_entry(entry
);
549 flow_cache
->sweep_idx
= (flow_cache
->sweep_idx
+ 1) & EM_FLOW_HASH_MASK
;
552 /* Returns true if 'dpif' is a netdev or dummy dpif, false otherwise. */
554 dpif_is_netdev(const struct dpif
*dpif
)
556 return dpif
->dpif_class
->open
== dpif_netdev_open
;
559 static struct dpif_netdev
*
560 dpif_netdev_cast(const struct dpif
*dpif
)
562 ovs_assert(dpif_is_netdev(dpif
));
563 return CONTAINER_OF(dpif
, struct dpif_netdev
, dpif
);
566 static struct dp_netdev
*
567 get_dp_netdev(const struct dpif
*dpif
)
569 return dpif_netdev_cast(dpif
)->dp
;
573 PMD_INFO_SHOW_STATS
, /* Show how cpu cycles are spent. */
574 PMD_INFO_CLEAR_STATS
, /* Set the cycles count to 0. */
575 PMD_INFO_SHOW_RXQ
/* Show poll-lists of pmd threads. */
579 pmd_info_show_stats(struct ds
*reply
,
580 struct dp_netdev_pmd_thread
*pmd
,
581 unsigned long long stats
[DP_N_STATS
],
582 uint64_t cycles
[PMD_N_CYCLES
])
584 unsigned long long total_packets
= 0;
585 uint64_t total_cycles
= 0;
588 /* These loops subtracts reference values ('*_zero') from the counters.
589 * Since loads and stores are relaxed, it might be possible for a '*_zero'
590 * value to be more recent than the current value we're reading from the
591 * counter. This is not a big problem, since these numbers are not
592 * supposed to be too accurate, but we should at least make sure that
593 * the result is not negative. */
594 for (i
= 0; i
< DP_N_STATS
; i
++) {
595 if (stats
[i
] > pmd
->stats_zero
[i
]) {
596 stats
[i
] -= pmd
->stats_zero
[i
];
601 if (i
!= DP_STAT_LOST
) {
602 /* Lost packets are already included in DP_STAT_MISS */
603 total_packets
+= stats
[i
];
607 for (i
= 0; i
< PMD_N_CYCLES
; i
++) {
608 if (cycles
[i
] > pmd
->cycles_zero
[i
]) {
609 cycles
[i
] -= pmd
->cycles_zero
[i
];
614 total_cycles
+= cycles
[i
];
617 ds_put_cstr(reply
, (pmd
->core_id
== NON_PMD_CORE_ID
)
618 ? "main thread" : "pmd thread");
620 if (pmd
->numa_id
!= OVS_NUMA_UNSPEC
) {
621 ds_put_format(reply
, " numa_id %d", pmd
->numa_id
);
623 if (pmd
->core_id
!= OVS_CORE_UNSPEC
&& pmd
->core_id
!= NON_PMD_CORE_ID
) {
624 ds_put_format(reply
, " core_id %u", pmd
->core_id
);
626 ds_put_cstr(reply
, ":\n");
629 "\temc hits:%llu\n\tmegaflow hits:%llu\n"
630 "\tmiss:%llu\n\tlost:%llu\n",
631 stats
[DP_STAT_EXACT_HIT
], stats
[DP_STAT_MASKED_HIT
],
632 stats
[DP_STAT_MISS
], stats
[DP_STAT_LOST
]);
634 if (total_cycles
== 0) {
639 "\tpolling cycles:%"PRIu64
" (%.02f%%)\n"
640 "\tprocessing cycles:%"PRIu64
" (%.02f%%)\n",
641 cycles
[PMD_CYCLES_POLLING
],
642 cycles
[PMD_CYCLES_POLLING
] / (double)total_cycles
* 100,
643 cycles
[PMD_CYCLES_PROCESSING
],
644 cycles
[PMD_CYCLES_PROCESSING
] / (double)total_cycles
* 100);
646 if (total_packets
== 0) {
651 "\tavg cycles per packet: %.02f (%"PRIu64
"/%llu)\n",
652 total_cycles
/ (double)total_packets
,
653 total_cycles
, total_packets
);
656 "\tavg processing cycles per packet: "
657 "%.02f (%"PRIu64
"/%llu)\n",
658 cycles
[PMD_CYCLES_PROCESSING
] / (double)total_packets
,
659 cycles
[PMD_CYCLES_PROCESSING
], total_packets
);
663 pmd_info_clear_stats(struct ds
*reply OVS_UNUSED
,
664 struct dp_netdev_pmd_thread
*pmd
,
665 unsigned long long stats
[DP_N_STATS
],
666 uint64_t cycles
[PMD_N_CYCLES
])
670 /* We cannot write 'stats' and 'cycles' (because they're written by other
671 * threads) and we shouldn't change 'stats' (because they're used to count
672 * datapath stats, which must not be cleared here). Instead, we save the
673 * current values and subtract them from the values to be displayed in the
675 for (i
= 0; i
< DP_N_STATS
; i
++) {
676 pmd
->stats_zero
[i
] = stats
[i
];
678 for (i
= 0; i
< PMD_N_CYCLES
; i
++) {
679 pmd
->cycles_zero
[i
] = cycles
[i
];
684 pmd_info_show_rxq(struct ds
*reply
, struct dp_netdev_pmd_thread
*pmd
)
686 if (pmd
->core_id
!= NON_PMD_CORE_ID
) {
687 struct rxq_poll
*poll
;
688 const char *prev_name
= NULL
;
690 ds_put_format(reply
, "pmd thread numa_id %d core_id %u:\n",
691 pmd
->numa_id
, pmd
->core_id
);
693 ovs_mutex_lock(&pmd
->poll_mutex
);
694 LIST_FOR_EACH (poll
, node
, &pmd
->poll_list
) {
695 const char *name
= netdev_get_name(poll
->port
->netdev
);
697 if (!prev_name
|| strcmp(name
, prev_name
)) {
699 ds_put_cstr(reply
, "\n");
701 ds_put_format(reply
, "\tport: %s\tqueue-id:",
702 netdev_get_name(poll
->port
->netdev
));
704 ds_put_format(reply
, " %d", netdev_rxq_get_queue_id(poll
->rx
));
707 ovs_mutex_unlock(&pmd
->poll_mutex
);
708 ds_put_cstr(reply
, "\n");
713 dpif_netdev_pmd_info(struct unixctl_conn
*conn
, int argc
, const char *argv
[],
716 struct ds reply
= DS_EMPTY_INITIALIZER
;
717 struct dp_netdev_pmd_thread
*pmd
;
718 struct dp_netdev
*dp
= NULL
;
719 enum pmd_info_type type
= *(enum pmd_info_type
*) aux
;
721 ovs_mutex_lock(&dp_netdev_mutex
);
724 dp
= shash_find_data(&dp_netdevs
, argv
[1]);
725 } else if (shash_count(&dp_netdevs
) == 1) {
726 /* There's only one datapath */
727 dp
= shash_first(&dp_netdevs
)->data
;
731 ovs_mutex_unlock(&dp_netdev_mutex
);
732 unixctl_command_reply_error(conn
,
733 "please specify an existing datapath");
737 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
738 if (type
== PMD_INFO_SHOW_RXQ
) {
739 pmd_info_show_rxq(&reply
, pmd
);
741 unsigned long long stats
[DP_N_STATS
];
742 uint64_t cycles
[PMD_N_CYCLES
];
745 /* Read current stats and cycle counters */
746 for (i
= 0; i
< ARRAY_SIZE(stats
); i
++) {
747 atomic_read_relaxed(&pmd
->stats
.n
[i
], &stats
[i
]);
749 for (i
= 0; i
< ARRAY_SIZE(cycles
); i
++) {
750 atomic_read_relaxed(&pmd
->cycles
.n
[i
], &cycles
[i
]);
753 if (type
== PMD_INFO_CLEAR_STATS
) {
754 pmd_info_clear_stats(&reply
, pmd
, stats
, cycles
);
755 } else if (type
== PMD_INFO_SHOW_STATS
) {
756 pmd_info_show_stats(&reply
, pmd
, stats
, cycles
);
761 ovs_mutex_unlock(&dp_netdev_mutex
);
763 unixctl_command_reply(conn
, ds_cstr(&reply
));
768 dpif_netdev_init(void)
770 static enum pmd_info_type show_aux
= PMD_INFO_SHOW_STATS
,
771 clear_aux
= PMD_INFO_CLEAR_STATS
,
772 poll_aux
= PMD_INFO_SHOW_RXQ
;
774 unixctl_command_register("dpif-netdev/pmd-stats-show", "[dp]",
775 0, 1, dpif_netdev_pmd_info
,
777 unixctl_command_register("dpif-netdev/pmd-stats-clear", "[dp]",
778 0, 1, dpif_netdev_pmd_info
,
780 unixctl_command_register("dpif-netdev/pmd-rxq-show", "[dp]",
781 0, 1, dpif_netdev_pmd_info
,
787 dpif_netdev_enumerate(struct sset
*all_dps
,
788 const struct dpif_class
*dpif_class
)
790 struct shash_node
*node
;
792 ovs_mutex_lock(&dp_netdev_mutex
);
793 SHASH_FOR_EACH(node
, &dp_netdevs
) {
794 struct dp_netdev
*dp
= node
->data
;
795 if (dpif_class
!= dp
->class) {
796 /* 'dp_netdevs' contains both "netdev" and "dummy" dpifs.
797 * If the class doesn't match, skip this dpif. */
800 sset_add(all_dps
, node
->name
);
802 ovs_mutex_unlock(&dp_netdev_mutex
);
808 dpif_netdev_class_is_dummy(const struct dpif_class
*class)
810 return class != &dpif_netdev_class
;
814 dpif_netdev_port_open_type(const struct dpif_class
*class, const char *type
)
816 return strcmp(type
, "internal") ? type
817 : dpif_netdev_class_is_dummy(class) ? "dummy"
822 create_dpif_netdev(struct dp_netdev
*dp
)
824 uint16_t netflow_id
= hash_string(dp
->name
, 0);
825 struct dpif_netdev
*dpif
;
827 ovs_refcount_ref(&dp
->ref_cnt
);
829 dpif
= xmalloc(sizeof *dpif
);
830 dpif_init(&dpif
->dpif
, dp
->class, dp
->name
, netflow_id
>> 8, netflow_id
);
832 dpif
->last_port_seq
= seq_read(dp
->port_seq
);
837 /* Choose an unused, non-zero port number and return it on success.
838 * Return ODPP_NONE on failure. */
840 choose_port(struct dp_netdev
*dp
, const char *name
)
841 OVS_REQUIRES(dp
->port_mutex
)
845 if (dp
->class != &dpif_netdev_class
) {
849 /* If the port name begins with "br", start the number search at
850 * 100 to make writing tests easier. */
851 if (!strncmp(name
, "br", 2)) {
855 /* If the port name contains a number, try to assign that port number.
856 * This can make writing unit tests easier because port numbers are
858 for (p
= name
; *p
!= '\0'; p
++) {
859 if (isdigit((unsigned char) *p
)) {
860 port_no
= start_no
+ strtol(p
, NULL
, 10);
861 if (port_no
> 0 && port_no
!= odp_to_u32(ODPP_NONE
)
862 && !dp_netdev_lookup_port(dp
, u32_to_odp(port_no
))) {
863 return u32_to_odp(port_no
);
870 for (port_no
= 1; port_no
<= UINT16_MAX
; port_no
++) {
871 if (!dp_netdev_lookup_port(dp
, u32_to_odp(port_no
))) {
872 return u32_to_odp(port_no
);
880 create_dp_netdev(const char *name
, const struct dpif_class
*class,
881 struct dp_netdev
**dpp
)
882 OVS_REQUIRES(dp_netdev_mutex
)
884 struct dp_netdev
*dp
;
887 dp
= xzalloc(sizeof *dp
);
888 shash_add(&dp_netdevs
, name
, dp
);
890 *CONST_CAST(const struct dpif_class
**, &dp
->class) = class;
891 *CONST_CAST(const char **, &dp
->name
) = xstrdup(name
);
892 ovs_refcount_init(&dp
->ref_cnt
);
893 atomic_flag_clear(&dp
->destroyed
);
895 ovs_mutex_init(&dp
->port_mutex
);
896 cmap_init(&dp
->ports
);
897 dp
->port_seq
= seq_create();
898 fat_rwlock_init(&dp
->upcall_rwlock
);
900 /* Disable upcalls by default. */
901 dp_netdev_disable_upcall(dp
);
902 dp
->upcall_aux
= NULL
;
903 dp
->upcall_cb
= NULL
;
905 cmap_init(&dp
->poll_threads
);
906 ovs_mutex_init_recursive(&dp
->non_pmd_mutex
);
907 ovsthread_key_create(&dp
->per_pmd_key
, NULL
);
909 dp_netdev_set_nonpmd(dp
);
911 ovs_mutex_lock(&dp
->port_mutex
);
912 error
= do_add_port(dp
, name
, "internal", ODPP_LOCAL
);
913 ovs_mutex_unlock(&dp
->port_mutex
);
919 dp
->last_tnl_conf_seq
= seq_read(tnl_conf_seq
);
925 dpif_netdev_open(const struct dpif_class
*class, const char *name
,
926 bool create
, struct dpif
**dpifp
)
928 struct dp_netdev
*dp
;
931 ovs_mutex_lock(&dp_netdev_mutex
);
932 dp
= shash_find_data(&dp_netdevs
, name
);
934 error
= create
? create_dp_netdev(name
, class, &dp
) : ENODEV
;
936 error
= (dp
->class != class ? EINVAL
941 *dpifp
= create_dpif_netdev(dp
);
944 ovs_mutex_unlock(&dp_netdev_mutex
);
950 dp_netdev_destroy_upcall_lock(struct dp_netdev
*dp
)
951 OVS_NO_THREAD_SAFETY_ANALYSIS
953 /* Check that upcalls are disabled, i.e. that the rwlock is taken */
954 ovs_assert(fat_rwlock_tryrdlock(&dp
->upcall_rwlock
));
956 /* Before freeing a lock we should release it */
957 fat_rwlock_unlock(&dp
->upcall_rwlock
);
958 fat_rwlock_destroy(&dp
->upcall_rwlock
);
961 /* Requires dp_netdev_mutex so that we can't get a new reference to 'dp'
962 * through the 'dp_netdevs' shash while freeing 'dp'. */
964 dp_netdev_free(struct dp_netdev
*dp
)
965 OVS_REQUIRES(dp_netdev_mutex
)
967 struct dp_netdev_port
*port
;
969 shash_find_and_delete(&dp_netdevs
, dp
->name
);
971 dp_netdev_destroy_all_pmds(dp
);
972 ovs_mutex_destroy(&dp
->non_pmd_mutex
);
973 ovsthread_key_delete(dp
->per_pmd_key
);
975 ovs_mutex_lock(&dp
->port_mutex
);
976 CMAP_FOR_EACH (port
, node
, &dp
->ports
) {
977 /* PMD threads are destroyed here. do_del_port() cannot quiesce */
978 do_del_port(dp
, port
);
980 ovs_mutex_unlock(&dp
->port_mutex
);
981 cmap_destroy(&dp
->poll_threads
);
983 seq_destroy(dp
->port_seq
);
984 cmap_destroy(&dp
->ports
);
985 ovs_mutex_destroy(&dp
->port_mutex
);
987 /* Upcalls must be disabled at this point */
988 dp_netdev_destroy_upcall_lock(dp
);
991 free(CONST_CAST(char *, dp
->name
));
996 dp_netdev_unref(struct dp_netdev
*dp
)
999 /* Take dp_netdev_mutex so that, if dp->ref_cnt falls to zero, we can't
1000 * get a new reference to 'dp' through the 'dp_netdevs' shash. */
1001 ovs_mutex_lock(&dp_netdev_mutex
);
1002 if (ovs_refcount_unref_relaxed(&dp
->ref_cnt
) == 1) {
1005 ovs_mutex_unlock(&dp_netdev_mutex
);
1010 dpif_netdev_close(struct dpif
*dpif
)
1012 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1014 dp_netdev_unref(dp
);
1019 dpif_netdev_destroy(struct dpif
*dpif
)
1021 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1023 if (!atomic_flag_test_and_set(&dp
->destroyed
)) {
1024 if (ovs_refcount_unref_relaxed(&dp
->ref_cnt
) == 1) {
1025 /* Can't happen: 'dpif' still owns a reference to 'dp'. */
1033 /* Add 'n' to the atomic variable 'var' non-atomically and using relaxed
1034 * load/store semantics. While the increment is not atomic, the load and
1035 * store operations are, making it impossible to read inconsistent values.
1037 * This is used to update thread local stats counters. */
1039 non_atomic_ullong_add(atomic_ullong
*var
, unsigned long long n
)
1041 unsigned long long tmp
;
1043 atomic_read_relaxed(var
, &tmp
);
1045 atomic_store_relaxed(var
, tmp
);
1049 dpif_netdev_get_stats(const struct dpif
*dpif
, struct dpif_dp_stats
*stats
)
1051 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1052 struct dp_netdev_pmd_thread
*pmd
;
1054 stats
->n_flows
= stats
->n_hit
= stats
->n_missed
= stats
->n_lost
= 0;
1055 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
1056 unsigned long long n
;
1057 stats
->n_flows
+= cmap_count(&pmd
->flow_table
);
1059 atomic_read_relaxed(&pmd
->stats
.n
[DP_STAT_MASKED_HIT
], &n
);
1061 atomic_read_relaxed(&pmd
->stats
.n
[DP_STAT_EXACT_HIT
], &n
);
1063 atomic_read_relaxed(&pmd
->stats
.n
[DP_STAT_MISS
], &n
);
1064 stats
->n_missed
+= n
;
1065 atomic_read_relaxed(&pmd
->stats
.n
[DP_STAT_LOST
], &n
);
1068 stats
->n_masks
= UINT32_MAX
;
1069 stats
->n_mask_hit
= UINT64_MAX
;
1075 dp_netdev_reload_pmd__(struct dp_netdev_pmd_thread
*pmd
)
1079 if (pmd
->core_id
== NON_PMD_CORE_ID
) {
1083 ovs_mutex_lock(&pmd
->cond_mutex
);
1084 atomic_add_relaxed(&pmd
->change_seq
, 1, &old_seq
);
1085 ovs_mutex_cond_wait(&pmd
->cond
, &pmd
->cond_mutex
);
1086 ovs_mutex_unlock(&pmd
->cond_mutex
);
1090 hash_port_no(odp_port_t port_no
)
1092 return hash_int(odp_to_u32(port_no
), 0);
1096 port_create(const char *devname
, const char *open_type
, const char *type
,
1097 odp_port_t port_no
, struct dp_netdev_port
**portp
)
1099 struct netdev_saved_flags
*sf
;
1100 struct dp_netdev_port
*port
;
1101 enum netdev_flags flags
;
1102 struct netdev
*netdev
;
1103 int n_open_rxqs
= 0;
1108 /* Open and validate network device. */
1109 error
= netdev_open(devname
, open_type
, &netdev
);
1113 /* XXX reject non-Ethernet devices */
1115 netdev_get_flags(netdev
, &flags
);
1116 if (flags
& NETDEV_LOOPBACK
) {
1117 VLOG_ERR("%s: cannot add a loopback device", devname
);
1122 if (netdev_is_pmd(netdev
)) {
1123 int n_cores
= ovs_numa_get_n_cores();
1125 if (n_cores
== OVS_CORE_UNSPEC
) {
1126 VLOG_ERR("%s, cannot get cpu core info", devname
);
1130 /* There can only be ovs_numa_get_n_cores() pmd threads,
1131 * so creates a txq for each, and one extra for the non
1133 error
= netdev_set_multiq(netdev
, n_cores
+ 1,
1134 netdev_requested_n_rxq(netdev
));
1135 if (error
&& (error
!= EOPNOTSUPP
)) {
1136 VLOG_ERR("%s, cannot set multiq", devname
);
1140 port
= xzalloc(sizeof *port
);
1141 port
->port_no
= port_no
;
1142 port
->netdev
= netdev
;
1143 port
->n_rxq
= netdev_n_rxq(netdev
);
1144 port
->rxq
= xcalloc(port
->n_rxq
, sizeof *port
->rxq
);
1145 port
->type
= xstrdup(type
);
1146 port
->latest_requested_n_rxq
= netdev_requested_n_rxq(netdev
);
1148 for (i
= 0; i
< port
->n_rxq
; i
++) {
1149 error
= netdev_rxq_open(netdev
, &port
->rxq
[i
], i
);
1151 VLOG_ERR("%s: cannot receive packets on this network device (%s)",
1152 devname
, ovs_strerror(errno
));
1158 error
= netdev_turn_flags_on(netdev
, NETDEV_PROMISC
, &sf
);
1169 for (i
= 0; i
< n_open_rxqs
; i
++) {
1170 netdev_rxq_close(port
->rxq
[i
]);
1177 netdev_close(netdev
);
1182 do_add_port(struct dp_netdev
*dp
, const char *devname
, const char *type
,
1184 OVS_REQUIRES(dp
->port_mutex
)
1186 struct dp_netdev_port
*port
;
1189 /* Reject devices already in 'dp'. */
1190 if (!get_port_by_name(dp
, devname
, &port
)) {
1194 error
= port_create(devname
, dpif_netdev_port_open_type(dp
->class, type
),
1195 type
, port_no
, &port
);
1200 cmap_insert(&dp
->ports
, &port
->node
, hash_port_no(port_no
));
1202 if (netdev_is_pmd(port
->netdev
)) {
1203 dp_netdev_add_port_to_pmds(dp
, port
);
1205 seq_change(dp
->port_seq
);
1211 dpif_netdev_port_add(struct dpif
*dpif
, struct netdev
*netdev
,
1212 odp_port_t
*port_nop
)
1214 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1215 char namebuf
[NETDEV_VPORT_NAME_BUFSIZE
];
1216 const char *dpif_port
;
1220 ovs_mutex_lock(&dp
->port_mutex
);
1221 dpif_port
= netdev_vport_get_dpif_port(netdev
, namebuf
, sizeof namebuf
);
1222 if (*port_nop
!= ODPP_NONE
) {
1223 port_no
= *port_nop
;
1224 error
= dp_netdev_lookup_port(dp
, *port_nop
) ? EBUSY
: 0;
1226 port_no
= choose_port(dp
, dpif_port
);
1227 error
= port_no
== ODPP_NONE
? EFBIG
: 0;
1230 *port_nop
= port_no
;
1231 error
= do_add_port(dp
, dpif_port
, netdev_get_type(netdev
), port_no
);
1233 ovs_mutex_unlock(&dp
->port_mutex
);
1239 dpif_netdev_port_del(struct dpif
*dpif
, odp_port_t port_no
)
1241 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1244 ovs_mutex_lock(&dp
->port_mutex
);
1245 if (port_no
== ODPP_LOCAL
) {
1248 struct dp_netdev_port
*port
;
1250 error
= get_port_by_number(dp
, port_no
, &port
);
1252 do_del_port(dp
, port
);
1255 ovs_mutex_unlock(&dp
->port_mutex
);
1261 is_valid_port_number(odp_port_t port_no
)
1263 return port_no
!= ODPP_NONE
;
1266 static struct dp_netdev_port
*
1267 dp_netdev_lookup_port(const struct dp_netdev
*dp
, odp_port_t port_no
)
1269 struct dp_netdev_port
*port
;
1271 CMAP_FOR_EACH_WITH_HASH (port
, node
, hash_port_no(port_no
), &dp
->ports
) {
1272 if (port
->port_no
== port_no
) {
1280 get_port_by_number(struct dp_netdev
*dp
,
1281 odp_port_t port_no
, struct dp_netdev_port
**portp
)
1283 if (!is_valid_port_number(port_no
)) {
1287 *portp
= dp_netdev_lookup_port(dp
, port_no
);
1288 return *portp
? 0 : ENOENT
;
1293 port_destroy(struct dp_netdev_port
*port
)
1299 netdev_close(port
->netdev
);
1300 netdev_restore_flags(port
->sf
);
1302 for (unsigned i
= 0; i
< port
->n_rxq
; i
++) {
1303 netdev_rxq_close(port
->rxq
[i
]);
1312 get_port_by_name(struct dp_netdev
*dp
,
1313 const char *devname
, struct dp_netdev_port
**portp
)
1314 OVS_REQUIRES(dp
->port_mutex
)
1316 struct dp_netdev_port
*port
;
1318 CMAP_FOR_EACH (port
, node
, &dp
->ports
) {
1319 if (!strcmp(netdev_get_name(port
->netdev
), devname
)) {
1328 get_n_pmd_threads(struct dp_netdev
*dp
)
1330 /* There is one non pmd thread in dp->poll_threads */
1331 return cmap_count(&dp
->poll_threads
) - 1;
1335 get_n_pmd_threads_on_numa(struct dp_netdev
*dp
, int numa_id
)
1337 struct dp_netdev_pmd_thread
*pmd
;
1340 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
1341 if (pmd
->numa_id
== numa_id
) {
1349 /* Returns 'true' if there is a port with pmd netdev and the netdev
1350 * is on numa node 'numa_id'. */
1352 has_pmd_port_for_numa(struct dp_netdev
*dp
, int numa_id
)
1354 struct dp_netdev_port
*port
;
1356 CMAP_FOR_EACH (port
, node
, &dp
->ports
) {
1357 if (netdev_is_pmd(port
->netdev
)
1358 && netdev_get_numa_id(port
->netdev
) == numa_id
) {
1368 do_del_port(struct dp_netdev
*dp
, struct dp_netdev_port
*port
)
1369 OVS_REQUIRES(dp
->port_mutex
)
1371 cmap_remove(&dp
->ports
, &port
->node
, hash_odp_port(port
->port_no
));
1372 seq_change(dp
->port_seq
);
1373 if (netdev_is_pmd(port
->netdev
)) {
1374 int numa_id
= netdev_get_numa_id(port
->netdev
);
1376 /* PMD threads can not be on invalid numa node. */
1377 ovs_assert(ovs_numa_numa_id_is_valid(numa_id
));
1378 /* If there is no netdev on the numa node, deletes the pmd threads
1379 * for that numa. Else, deletes the queues from polling lists. */
1380 if (!has_pmd_port_for_numa(dp
, numa_id
)) {
1381 dp_netdev_del_pmds_on_numa(dp
, numa_id
);
1383 dp_netdev_del_port_from_all_pmds(dp
, port
);
1391 answer_port_query(const struct dp_netdev_port
*port
,
1392 struct dpif_port
*dpif_port
)
1394 dpif_port
->name
= xstrdup(netdev_get_name(port
->netdev
));
1395 dpif_port
->type
= xstrdup(port
->type
);
1396 dpif_port
->port_no
= port
->port_no
;
1400 dpif_netdev_port_query_by_number(const struct dpif
*dpif
, odp_port_t port_no
,
1401 struct dpif_port
*dpif_port
)
1403 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1404 struct dp_netdev_port
*port
;
1407 error
= get_port_by_number(dp
, port_no
, &port
);
1408 if (!error
&& dpif_port
) {
1409 answer_port_query(port
, dpif_port
);
1416 dpif_netdev_port_query_by_name(const struct dpif
*dpif
, const char *devname
,
1417 struct dpif_port
*dpif_port
)
1419 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1420 struct dp_netdev_port
*port
;
1423 ovs_mutex_lock(&dp
->port_mutex
);
1424 error
= get_port_by_name(dp
, devname
, &port
);
1425 if (!error
&& dpif_port
) {
1426 answer_port_query(port
, dpif_port
);
1428 ovs_mutex_unlock(&dp
->port_mutex
);
1434 dp_netdev_flow_free(struct dp_netdev_flow
*flow
)
1436 dp_netdev_actions_free(dp_netdev_flow_get_actions(flow
));
1440 static void dp_netdev_flow_unref(struct dp_netdev_flow
*flow
)
1442 if (ovs_refcount_unref_relaxed(&flow
->ref_cnt
) == 1) {
1443 ovsrcu_postpone(dp_netdev_flow_free
, flow
);
1448 dp_netdev_flow_hash(const ovs_u128
*ufid
)
1450 return ufid
->u32
[0];
1454 dp_netdev_pmd_remove_flow(struct dp_netdev_pmd_thread
*pmd
,
1455 struct dp_netdev_flow
*flow
)
1456 OVS_REQUIRES(pmd
->flow_mutex
)
1458 struct cmap_node
*node
= CONST_CAST(struct cmap_node
*, &flow
->node
);
1460 dpcls_remove(&pmd
->cls
, &flow
->cr
);
1461 cmap_remove(&pmd
->flow_table
, node
, dp_netdev_flow_hash(&flow
->ufid
));
1464 dp_netdev_flow_unref(flow
);
1468 dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread
*pmd
)
1470 struct dp_netdev_flow
*netdev_flow
;
1472 ovs_mutex_lock(&pmd
->flow_mutex
);
1473 CMAP_FOR_EACH (netdev_flow
, node
, &pmd
->flow_table
) {
1474 dp_netdev_pmd_remove_flow(pmd
, netdev_flow
);
1476 ovs_mutex_unlock(&pmd
->flow_mutex
);
1480 dpif_netdev_flow_flush(struct dpif
*dpif
)
1482 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1483 struct dp_netdev_pmd_thread
*pmd
;
1485 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
1486 dp_netdev_pmd_flow_flush(pmd
);
1492 struct dp_netdev_port_state
{
1493 struct cmap_position position
;
1498 dpif_netdev_port_dump_start(const struct dpif
*dpif OVS_UNUSED
, void **statep
)
1500 *statep
= xzalloc(sizeof(struct dp_netdev_port_state
));
1505 dpif_netdev_port_dump_next(const struct dpif
*dpif
, void *state_
,
1506 struct dpif_port
*dpif_port
)
1508 struct dp_netdev_port_state
*state
= state_
;
1509 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1510 struct cmap_node
*node
;
1513 node
= cmap_next_position(&dp
->ports
, &state
->position
);
1515 struct dp_netdev_port
*port
;
1517 port
= CONTAINER_OF(node
, struct dp_netdev_port
, node
);
1520 state
->name
= xstrdup(netdev_get_name(port
->netdev
));
1521 dpif_port
->name
= state
->name
;
1522 dpif_port
->type
= port
->type
;
1523 dpif_port
->port_no
= port
->port_no
;
1534 dpif_netdev_port_dump_done(const struct dpif
*dpif OVS_UNUSED
, void *state_
)
1536 struct dp_netdev_port_state
*state
= state_
;
1543 dpif_netdev_port_poll(const struct dpif
*dpif_
, char **devnamep OVS_UNUSED
)
1545 struct dpif_netdev
*dpif
= dpif_netdev_cast(dpif_
);
1546 uint64_t new_port_seq
;
1549 new_port_seq
= seq_read(dpif
->dp
->port_seq
);
1550 if (dpif
->last_port_seq
!= new_port_seq
) {
1551 dpif
->last_port_seq
= new_port_seq
;
1561 dpif_netdev_port_poll_wait(const struct dpif
*dpif_
)
1563 struct dpif_netdev
*dpif
= dpif_netdev_cast(dpif_
);
1565 seq_wait(dpif
->dp
->port_seq
, dpif
->last_port_seq
);
1568 static struct dp_netdev_flow
*
1569 dp_netdev_flow_cast(const struct dpcls_rule
*cr
)
1571 return cr
? CONTAINER_OF(cr
, struct dp_netdev_flow
, cr
) : NULL
;
1574 static bool dp_netdev_flow_ref(struct dp_netdev_flow
*flow
)
1576 return ovs_refcount_try_ref_rcu(&flow
->ref_cnt
);
1579 /* netdev_flow_key utilities.
1581 * netdev_flow_key is basically a miniflow. We use these functions
1582 * (netdev_flow_key_clone, netdev_flow_key_equal, ...) instead of the miniflow
1583 * functions (miniflow_clone_inline, miniflow_equal, ...), because:
1585 * - Since we are dealing exclusively with miniflows created by
1586 * miniflow_extract(), if the map is different the miniflow is different.
1587 * Therefore we can be faster by comparing the map and the miniflow in a
1589 * - These functions can be inlined by the compiler. */
1591 /* Given the number of bits set in miniflow's maps, returns the size of the
1592 * 'netdev_flow_key.mf' */
1593 static inline size_t
1594 netdev_flow_key_size(size_t flow_u64s
)
1596 return sizeof(struct miniflow
) + MINIFLOW_VALUES_SIZE(flow_u64s
);
1600 netdev_flow_key_equal(const struct netdev_flow_key
*a
,
1601 const struct netdev_flow_key
*b
)
1603 /* 'b->len' may be not set yet. */
1604 return a
->hash
== b
->hash
&& !memcmp(&a
->mf
, &b
->mf
, a
->len
);
1607 /* Used to compare 'netdev_flow_key' in the exact match cache to a miniflow.
1608 * The maps are compared bitwise, so both 'key->mf' and 'mf' must have been
1609 * generated by miniflow_extract. */
1611 netdev_flow_key_equal_mf(const struct netdev_flow_key
*key
,
1612 const struct miniflow
*mf
)
1614 return !memcmp(&key
->mf
, mf
, key
->len
);
1618 netdev_flow_key_clone(struct netdev_flow_key
*dst
,
1619 const struct netdev_flow_key
*src
)
1622 offsetof(struct netdev_flow_key
, mf
) + src
->len
);
1627 netdev_flow_key_from_flow(struct netdev_flow_key
*dst
,
1628 const struct flow
*src
)
1630 struct dp_packet packet
;
1631 uint64_t buf_stub
[512 / 8];
1633 dp_packet_use_stub(&packet
, buf_stub
, sizeof buf_stub
);
1634 pkt_metadata_from_flow(&packet
.md
, src
);
1635 flow_compose(&packet
, src
);
1636 miniflow_extract(&packet
, &dst
->mf
);
1637 dp_packet_uninit(&packet
);
1639 dst
->len
= netdev_flow_key_size(miniflow_n_values(&dst
->mf
));
1640 dst
->hash
= 0; /* Not computed yet. */
1643 /* Initialize a netdev_flow_key 'mask' from 'match'. */
1645 netdev_flow_mask_init(struct netdev_flow_key
*mask
,
1646 const struct match
*match
)
1648 uint64_t *dst
= miniflow_values(&mask
->mf
);
1649 struct flowmap fmap
;
1653 /* Only check masks that make sense for the flow. */
1654 flow_wc_map(&match
->flow
, &fmap
);
1655 flowmap_init(&mask
->mf
.map
);
1657 FLOWMAP_FOR_EACH_INDEX(idx
, fmap
) {
1658 uint64_t mask_u64
= flow_u64_value(&match
->wc
.masks
, idx
);
1661 flowmap_set(&mask
->mf
.map
, idx
, 1);
1663 hash
= hash_add64(hash
, mask_u64
);
1669 FLOWMAP_FOR_EACH_MAP (map
, mask
->mf
.map
) {
1670 hash
= hash_add64(hash
, map
);
1673 size_t n
= dst
- miniflow_get_values(&mask
->mf
);
1675 mask
->hash
= hash_finish(hash
, n
* 8);
1676 mask
->len
= netdev_flow_key_size(n
);
1679 /* Initializes 'dst' as a copy of 'flow' masked with 'mask'. */
1681 netdev_flow_key_init_masked(struct netdev_flow_key
*dst
,
1682 const struct flow
*flow
,
1683 const struct netdev_flow_key
*mask
)
1685 uint64_t *dst_u64
= miniflow_values(&dst
->mf
);
1686 const uint64_t *mask_u64
= miniflow_get_values(&mask
->mf
);
1690 dst
->len
= mask
->len
;
1691 dst
->mf
= mask
->mf
; /* Copy maps. */
1693 FLOW_FOR_EACH_IN_MAPS(value
, flow
, mask
->mf
.map
) {
1694 *dst_u64
= value
& *mask_u64
++;
1695 hash
= hash_add64(hash
, *dst_u64
++);
1697 dst
->hash
= hash_finish(hash
,
1698 (dst_u64
- miniflow_get_values(&dst
->mf
)) * 8);
1701 /* Iterate through netdev_flow_key TNL u64 values specified by 'FLOWMAP'. */
1702 #define NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(VALUE, KEY, FLOWMAP) \
1703 MINIFLOW_FOR_EACH_IN_FLOWMAP(VALUE, &(KEY)->mf, FLOWMAP)
1705 /* Returns a hash value for the bits of 'key' where there are 1-bits in
1707 static inline uint32_t
1708 netdev_flow_key_hash_in_mask(const struct netdev_flow_key
*key
,
1709 const struct netdev_flow_key
*mask
)
1711 const uint64_t *p
= miniflow_get_values(&mask
->mf
);
1715 NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value
, key
, mask
->mf
.map
) {
1716 hash
= hash_add64(hash
, value
& *p
++);
1719 return hash_finish(hash
, (p
- miniflow_get_values(&mask
->mf
)) * 8);
1723 emc_entry_alive(struct emc_entry
*ce
)
1725 return ce
->flow
&& !ce
->flow
->dead
;
1729 emc_clear_entry(struct emc_entry
*ce
)
1732 dp_netdev_flow_unref(ce
->flow
);
1738 emc_change_entry(struct emc_entry
*ce
, struct dp_netdev_flow
*flow
,
1739 const struct netdev_flow_key
*key
)
1741 if (ce
->flow
!= flow
) {
1743 dp_netdev_flow_unref(ce
->flow
);
1746 if (dp_netdev_flow_ref(flow
)) {
1753 netdev_flow_key_clone(&ce
->key
, key
);
1758 emc_insert(struct emc_cache
*cache
, const struct netdev_flow_key
*key
,
1759 struct dp_netdev_flow
*flow
)
1761 struct emc_entry
*to_be_replaced
= NULL
;
1762 struct emc_entry
*current_entry
;
1764 EMC_FOR_EACH_POS_WITH_HASH(cache
, current_entry
, key
->hash
) {
1765 if (netdev_flow_key_equal(¤t_entry
->key
, key
)) {
1766 /* We found the entry with the 'mf' miniflow */
1767 emc_change_entry(current_entry
, flow
, NULL
);
1771 /* Replacement policy: put the flow in an empty (not alive) entry, or
1772 * in the first entry where it can be */
1774 || (emc_entry_alive(to_be_replaced
)
1775 && !emc_entry_alive(current_entry
))
1776 || current_entry
->key
.hash
< to_be_replaced
->key
.hash
) {
1777 to_be_replaced
= current_entry
;
1780 /* We didn't find the miniflow in the cache.
1781 * The 'to_be_replaced' entry is where the new flow will be stored */
1783 emc_change_entry(to_be_replaced
, flow
, key
);
1786 static inline struct dp_netdev_flow
*
1787 emc_lookup(struct emc_cache
*cache
, const struct netdev_flow_key
*key
)
1789 struct emc_entry
*current_entry
;
1791 EMC_FOR_EACH_POS_WITH_HASH(cache
, current_entry
, key
->hash
) {
1792 if (current_entry
->key
.hash
== key
->hash
1793 && emc_entry_alive(current_entry
)
1794 && netdev_flow_key_equal_mf(¤t_entry
->key
, &key
->mf
)) {
1796 /* We found the entry with the 'key->mf' miniflow */
1797 return current_entry
->flow
;
1804 static struct dp_netdev_flow
*
1805 dp_netdev_pmd_lookup_flow(const struct dp_netdev_pmd_thread
*pmd
,
1806 const struct netdev_flow_key
*key
)
1808 struct dp_netdev_flow
*netdev_flow
;
1809 struct dpcls_rule
*rule
;
1811 dpcls_lookup(&pmd
->cls
, key
, &rule
, 1);
1812 netdev_flow
= dp_netdev_flow_cast(rule
);
1817 static struct dp_netdev_flow
*
1818 dp_netdev_pmd_find_flow(const struct dp_netdev_pmd_thread
*pmd
,
1819 const ovs_u128
*ufidp
, const struct nlattr
*key
,
1822 struct dp_netdev_flow
*netdev_flow
;
1826 /* If a UFID is not provided, determine one based on the key. */
1827 if (!ufidp
&& key
&& key_len
1828 && !dpif_netdev_flow_from_nlattrs(key
, key_len
, &flow
)) {
1829 dpif_flow_hash(pmd
->dp
->dpif
, &flow
, sizeof flow
, &ufid
);
1834 CMAP_FOR_EACH_WITH_HASH (netdev_flow
, node
, dp_netdev_flow_hash(ufidp
),
1836 if (ovs_u128_equals(netdev_flow
->ufid
, *ufidp
)) {
1846 get_dpif_flow_stats(const struct dp_netdev_flow
*netdev_flow_
,
1847 struct dpif_flow_stats
*stats
)
1849 struct dp_netdev_flow
*netdev_flow
;
1850 unsigned long long n
;
1854 netdev_flow
= CONST_CAST(struct dp_netdev_flow
*, netdev_flow_
);
1856 atomic_read_relaxed(&netdev_flow
->stats
.packet_count
, &n
);
1857 stats
->n_packets
= n
;
1858 atomic_read_relaxed(&netdev_flow
->stats
.byte_count
, &n
);
1860 atomic_read_relaxed(&netdev_flow
->stats
.used
, &used
);
1862 atomic_read_relaxed(&netdev_flow
->stats
.tcp_flags
, &flags
);
1863 stats
->tcp_flags
= flags
;
1866 /* Converts to the dpif_flow format, using 'key_buf' and 'mask_buf' for
1867 * storing the netlink-formatted key/mask. 'key_buf' may be the same as
1868 * 'mask_buf'. Actions will be returned without copying, by relying on RCU to
1871 dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow
*netdev_flow
,
1872 struct ofpbuf
*key_buf
, struct ofpbuf
*mask_buf
,
1873 struct dpif_flow
*flow
, bool terse
)
1876 memset(flow
, 0, sizeof *flow
);
1878 struct flow_wildcards wc
;
1879 struct dp_netdev_actions
*actions
;
1881 struct odp_flow_key_parms odp_parms
= {
1882 .flow
= &netdev_flow
->flow
,
1884 .support
= dp_netdev_support
,
1887 miniflow_expand(&netdev_flow
->cr
.mask
->mf
, &wc
.masks
);
1890 offset
= key_buf
->size
;
1891 flow
->key
= ofpbuf_tail(key_buf
);
1892 odp_parms
.odp_in_port
= netdev_flow
->flow
.in_port
.odp_port
;
1893 odp_flow_key_from_flow(&odp_parms
, key_buf
);
1894 flow
->key_len
= key_buf
->size
- offset
;
1897 offset
= mask_buf
->size
;
1898 flow
->mask
= ofpbuf_tail(mask_buf
);
1899 odp_parms
.odp_in_port
= wc
.masks
.in_port
.odp_port
;
1900 odp_parms
.key_buf
= key_buf
;
1901 odp_flow_key_from_mask(&odp_parms
, mask_buf
);
1902 flow
->mask_len
= mask_buf
->size
- offset
;
1905 actions
= dp_netdev_flow_get_actions(netdev_flow
);
1906 flow
->actions
= actions
->actions
;
1907 flow
->actions_len
= actions
->size
;
1910 flow
->ufid
= netdev_flow
->ufid
;
1911 flow
->ufid_present
= true;
1912 flow
->pmd_id
= netdev_flow
->pmd_id
;
1913 get_dpif_flow_stats(netdev_flow
, &flow
->stats
);
1917 dpif_netdev_mask_from_nlattrs(const struct nlattr
*key
, uint32_t key_len
,
1918 const struct nlattr
*mask_key
,
1919 uint32_t mask_key_len
, const struct flow
*flow
,
1920 struct flow_wildcards
*wc
)
1922 enum odp_key_fitness fitness
;
1924 fitness
= odp_flow_key_to_mask_udpif(mask_key
, mask_key_len
, key
,
1927 /* This should not happen: it indicates that
1928 * odp_flow_key_from_mask() and odp_flow_key_to_mask()
1929 * disagree on the acceptable form of a mask. Log the problem
1930 * as an error, with enough details to enable debugging. */
1931 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
1933 if (!VLOG_DROP_ERR(&rl
)) {
1937 odp_flow_format(key
, key_len
, mask_key
, mask_key_len
, NULL
, &s
,
1939 VLOG_ERR("internal error parsing flow mask %s (%s)",
1940 ds_cstr(&s
), odp_key_fitness_to_string(fitness
));
1951 dpif_netdev_flow_from_nlattrs(const struct nlattr
*key
, uint32_t key_len
,
1956 if (odp_flow_key_to_flow_udpif(key
, key_len
, flow
)) {
1957 /* This should not happen: it indicates that odp_flow_key_from_flow()
1958 * and odp_flow_key_to_flow() disagree on the acceptable form of a
1959 * flow. Log the problem as an error, with enough details to enable
1961 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
1963 if (!VLOG_DROP_ERR(&rl
)) {
1967 odp_flow_format(key
, key_len
, NULL
, 0, NULL
, &s
, true);
1968 VLOG_ERR("internal error parsing flow key %s", ds_cstr(&s
));
1975 in_port
= flow
->in_port
.odp_port
;
1976 if (!is_valid_port_number(in_port
) && in_port
!= ODPP_NONE
) {
1980 /* Userspace datapath doesn't support conntrack. */
1981 if (flow
->ct_state
|| flow
->ct_zone
|| flow
->ct_mark
1982 || !ovs_u128_is_zero(flow
->ct_label
)) {
1990 dpif_netdev_flow_get(const struct dpif
*dpif
, const struct dpif_flow_get
*get
)
1992 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
1993 struct dp_netdev_flow
*netdev_flow
;
1994 struct dp_netdev_pmd_thread
*pmd
;
1995 unsigned pmd_id
= get
->pmd_id
== PMD_ID_NULL
1996 ? NON_PMD_CORE_ID
: get
->pmd_id
;
1999 pmd
= dp_netdev_get_pmd(dp
, pmd_id
);
2004 netdev_flow
= dp_netdev_pmd_find_flow(pmd
, get
->ufid
, get
->key
,
2007 dp_netdev_flow_to_dpif_flow(netdev_flow
, get
->buffer
, get
->buffer
,
2012 dp_netdev_pmd_unref(pmd
);
2018 static struct dp_netdev_flow
*
2019 dp_netdev_flow_add(struct dp_netdev_pmd_thread
*pmd
,
2020 struct match
*match
, const ovs_u128
*ufid
,
2021 const struct nlattr
*actions
, size_t actions_len
)
2022 OVS_REQUIRES(pmd
->flow_mutex
)
2024 struct dp_netdev_flow
*flow
;
2025 struct netdev_flow_key mask
;
2027 netdev_flow_mask_init(&mask
, match
);
2028 /* Make sure wc does not have metadata. */
2029 ovs_assert(!FLOWMAP_HAS_FIELD(&mask
.mf
.map
, metadata
)
2030 && !FLOWMAP_HAS_FIELD(&mask
.mf
.map
, regs
));
2032 /* Do not allocate extra space. */
2033 flow
= xmalloc(sizeof *flow
- sizeof flow
->cr
.flow
.mf
+ mask
.len
);
2034 memset(&flow
->stats
, 0, sizeof flow
->stats
);
2037 *CONST_CAST(unsigned *, &flow
->pmd_id
) = pmd
->core_id
;
2038 *CONST_CAST(struct flow
*, &flow
->flow
) = match
->flow
;
2039 *CONST_CAST(ovs_u128
*, &flow
->ufid
) = *ufid
;
2040 ovs_refcount_init(&flow
->ref_cnt
);
2041 ovsrcu_set(&flow
->actions
, dp_netdev_actions_create(actions
, actions_len
));
2043 netdev_flow_key_init_masked(&flow
->cr
.flow
, &match
->flow
, &mask
);
2044 dpcls_insert(&pmd
->cls
, &flow
->cr
, &mask
);
2046 cmap_insert(&pmd
->flow_table
, CONST_CAST(struct cmap_node
*, &flow
->node
),
2047 dp_netdev_flow_hash(&flow
->ufid
));
2049 if (OVS_UNLIKELY(VLOG_IS_DBG_ENABLED())) {
2051 struct ds ds
= DS_EMPTY_INITIALIZER
;
2053 match
.tun_md
.valid
= false;
2054 match
.flow
= flow
->flow
;
2055 miniflow_expand(&flow
->cr
.mask
->mf
, &match
.wc
.masks
);
2057 ds_put_cstr(&ds
, "flow_add: ");
2058 odp_format_ufid(ufid
, &ds
);
2059 ds_put_cstr(&ds
, " ");
2060 match_format(&match
, &ds
, OFP_DEFAULT_PRIORITY
);
2061 ds_put_cstr(&ds
, ", actions:");
2062 format_odp_actions(&ds
, actions
, actions_len
);
2064 VLOG_DBG_RL(&upcall_rl
, "%s", ds_cstr(&ds
));
2073 dpif_netdev_flow_put(struct dpif
*dpif
, const struct dpif_flow_put
*put
)
2075 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
2076 struct dp_netdev_flow
*netdev_flow
;
2077 struct netdev_flow_key key
;
2078 struct dp_netdev_pmd_thread
*pmd
;
2081 unsigned pmd_id
= put
->pmd_id
== PMD_ID_NULL
2082 ? NON_PMD_CORE_ID
: put
->pmd_id
;
2085 error
= dpif_netdev_flow_from_nlattrs(put
->key
, put
->key_len
, &match
.flow
);
2089 error
= dpif_netdev_mask_from_nlattrs(put
->key
, put
->key_len
,
2090 put
->mask
, put
->mask_len
,
2091 &match
.flow
, &match
.wc
);
2096 pmd
= dp_netdev_get_pmd(dp
, pmd_id
);
2101 /* Must produce a netdev_flow_key for lookup.
2102 * This interface is no longer performance critical, since it is not used
2103 * for upcall processing any more. */
2104 netdev_flow_key_from_flow(&key
, &match
.flow
);
2109 dpif_flow_hash(dpif
, &match
.flow
, sizeof match
.flow
, &ufid
);
2112 ovs_mutex_lock(&pmd
->flow_mutex
);
2113 netdev_flow
= dp_netdev_pmd_lookup_flow(pmd
, &key
);
2115 if (put
->flags
& DPIF_FP_CREATE
) {
2116 if (cmap_count(&pmd
->flow_table
) < MAX_FLOWS
) {
2118 memset(put
->stats
, 0, sizeof *put
->stats
);
2120 dp_netdev_flow_add(pmd
, &match
, &ufid
, put
->actions
,
2130 if (put
->flags
& DPIF_FP_MODIFY
2131 && flow_equal(&match
.flow
, &netdev_flow
->flow
)) {
2132 struct dp_netdev_actions
*new_actions
;
2133 struct dp_netdev_actions
*old_actions
;
2135 new_actions
= dp_netdev_actions_create(put
->actions
,
2138 old_actions
= dp_netdev_flow_get_actions(netdev_flow
);
2139 ovsrcu_set(&netdev_flow
->actions
, new_actions
);
2142 get_dpif_flow_stats(netdev_flow
, put
->stats
);
2144 if (put
->flags
& DPIF_FP_ZERO_STATS
) {
2145 /* XXX: The userspace datapath uses thread local statistics
2146 * (for flows), which should be updated only by the owning
2147 * thread. Since we cannot write on stats memory here,
2148 * we choose not to support this flag. Please note:
2149 * - This feature is currently used only by dpctl commands with
2151 * - Should the need arise, this operation can be implemented
2152 * by keeping a base value (to be update here) for each
2153 * counter, and subtracting it before outputting the stats */
2157 ovsrcu_postpone(dp_netdev_actions_free
, old_actions
);
2158 } else if (put
->flags
& DPIF_FP_CREATE
) {
2161 /* Overlapping flow. */
2165 ovs_mutex_unlock(&pmd
->flow_mutex
);
2166 dp_netdev_pmd_unref(pmd
);
static int
dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_flow *netdev_flow;
    struct dp_netdev_pmd_thread *pmd;
    unsigned pmd_id = del->pmd_id == PMD_ID_NULL
                      ? NON_PMD_CORE_ID : del->pmd_id;
    int error = 0;

    pmd = dp_netdev_get_pmd(dp, pmd_id);
    if (!pmd) {
        return EINVAL;
    }

    ovs_mutex_lock(&pmd->flow_mutex);
    netdev_flow = dp_netdev_pmd_find_flow(pmd, del->ufid, del->key,
                                          del->key_len);
    if (netdev_flow) {
        if (del->stats) {
            get_dpif_flow_stats(netdev_flow, del->stats);
        }
        dp_netdev_pmd_remove_flow(pmd, netdev_flow);
    } else {
        error = ENOENT;
    }
    ovs_mutex_unlock(&pmd->flow_mutex);
    dp_netdev_pmd_unref(pmd);

    return error;
}

struct dpif_netdev_flow_dump {
    struct dpif_flow_dump up;
    struct cmap_position poll_thread_pos;
    struct cmap_position flow_pos;
    struct dp_netdev_pmd_thread *cur_pmd;
    int status;
    struct ovs_mutex mutex;
};

static struct dpif_netdev_flow_dump *
dpif_netdev_flow_dump_cast(struct dpif_flow_dump *dump)
{
    return CONTAINER_OF(dump, struct dpif_netdev_flow_dump, up);
}

static struct dpif_flow_dump *
dpif_netdev_flow_dump_create(const struct dpif *dpif_, bool terse)
{
    struct dpif_netdev_flow_dump *dump;

    dump = xzalloc(sizeof *dump);
    dpif_flow_dump_init(&dump->up, dpif_);
    dump->up.terse = terse;
    ovs_mutex_init(&dump->mutex);

    return &dump->up;
}

static int
dpif_netdev_flow_dump_destroy(struct dpif_flow_dump *dump_)
{
    struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_);

    ovs_mutex_destroy(&dump->mutex);
    free(dump);
    return 0;
}

struct dpif_netdev_flow_dump_thread {
    struct dpif_flow_dump_thread up;
    struct dpif_netdev_flow_dump *dump;
    struct odputil_keybuf keybuf[FLOW_DUMP_MAX_BATCH];
    struct odputil_keybuf maskbuf[FLOW_DUMP_MAX_BATCH];
};

static struct dpif_netdev_flow_dump_thread *
dpif_netdev_flow_dump_thread_cast(struct dpif_flow_dump_thread *thread)
{
    return CONTAINER_OF(thread, struct dpif_netdev_flow_dump_thread, up);
}

static struct dpif_flow_dump_thread *
dpif_netdev_flow_dump_thread_create(struct dpif_flow_dump *dump_)
{
    struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_);
    struct dpif_netdev_flow_dump_thread *thread;

    thread = xmalloc(sizeof *thread);
    dpif_flow_dump_thread_init(&thread->up, &dump->up);
    thread->dump = dump;
    return &thread->up;
}

static void
dpif_netdev_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_)
{
    struct dpif_netdev_flow_dump_thread *thread
        = dpif_netdev_flow_dump_thread_cast(thread_);

    free(thread);
}

static int
dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
                           struct dpif_flow *flows, int max_flows)
{
    struct dpif_netdev_flow_dump_thread *thread
        = dpif_netdev_flow_dump_thread_cast(thread_);
    struct dpif_netdev_flow_dump *dump = thread->dump;
    struct dp_netdev_flow *netdev_flows[FLOW_DUMP_MAX_BATCH];
    int n_flows = 0;
    int i;

    ovs_mutex_lock(&dump->mutex);
    if (!dump->status) {
        struct dpif_netdev *dpif = dpif_netdev_cast(thread->up.dpif);
        struct dp_netdev *dp = get_dp_netdev(&dpif->dpif);
        struct dp_netdev_pmd_thread *pmd = dump->cur_pmd;
        int flow_limit = MIN(max_flows, FLOW_DUMP_MAX_BATCH);

        /* First call to dump_next(), extracts the first pmd thread.
         * If there is no pmd thread, returns immediately. */
        if (!pmd) {
            pmd = dp_netdev_pmd_get_next(dp, &dump->poll_thread_pos);
            if (!pmd) {
                ovs_mutex_unlock(&dump->mutex);
                return n_flows;
            }
        }

        do {
            for (n_flows = 0; n_flows < flow_limit; n_flows++) {
                struct cmap_node *node;

                node = cmap_next_position(&pmd->flow_table, &dump->flow_pos);
                if (!node) {
                    break;
                }
                netdev_flows[n_flows] = CONTAINER_OF(node,
                                                     struct dp_netdev_flow,
                                                     node);
            }
            /* When finishing dumping the current pmd thread, moves to
             * the next. */
            if (n_flows < flow_limit) {
                memset(&dump->flow_pos, 0, sizeof dump->flow_pos);
                dp_netdev_pmd_unref(pmd);
                pmd = dp_netdev_pmd_get_next(dp, &dump->poll_thread_pos);
                if (!pmd) {
                    dump->status = EOF;
                    break;
                }
            }
            /* Keeps the reference for the next caller. */
            dump->cur_pmd = pmd;

            /* If the current dump is empty, do not exit the loop, since the
             * remaining pmds could have flows to be dumped.  Just dumps again
             * on the new 'pmd'. */
        } while (!n_flows);
    }
    ovs_mutex_unlock(&dump->mutex);

    for (i = 0; i < n_flows; i++) {
        struct odputil_keybuf *maskbuf = &thread->maskbuf[i];
        struct odputil_keybuf *keybuf = &thread->keybuf[i];
        struct dp_netdev_flow *netdev_flow = netdev_flows[i];
        struct dpif_flow *f = &flows[i];
        struct ofpbuf key, mask;

        ofpbuf_use_stack(&key, keybuf, sizeof *keybuf);
        ofpbuf_use_stack(&mask, maskbuf, sizeof *maskbuf);
        dp_netdev_flow_to_dpif_flow(netdev_flow, &key, &mask, f,
                                    dump->up.terse);
    }

    return n_flows;
}
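
/* Usage sketch (hypothetical caller, for illustration only): dumping flows
 * in FLOW_DUMP_MAX_BATCH-sized chunks until the datapath reports no more.
 *
 *     struct dpif_flow flows[FLOW_DUMP_MAX_BATCH];
 *     int n;
 *
 *     while ((n = dpif_netdev_flow_dump_next(thread_, flows,
 *                                            FLOW_DUMP_MAX_BATCH))) {
 *         for (int j = 0; j < n; j++) {
 *             ...consume flows[j]...
 *         }
 *     }
 *
 * Note that the per-thread 'keybuf'/'maskbuf' stacks mean the returned
 * key/mask pointers stay valid only until the next call on the same dump
 * thread. */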
static int
dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_pmd_thread *pmd;
    struct dp_packet_batch pp;

    if (dp_packet_size(execute->packet) < ETH_HEADER_LEN ||
        dp_packet_size(execute->packet) > UINT16_MAX) {
        return EINVAL;
    }

    /* Tries finding the 'pmd'.  If NULL is returned, that means
     * the current thread is a non-pmd thread and should use
     * dp_netdev_get_pmd(dp, NON_PMD_CORE_ID). */
    pmd = ovsthread_getspecific(dp->per_pmd_key);
    if (!pmd) {
        pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID);
    }

    /* If the current thread is a non-pmd thread, acquires
     * the 'non_pmd_mutex'. */
    if (pmd->core_id == NON_PMD_CORE_ID) {
        ovs_mutex_lock(&dp->non_pmd_mutex);
        ovs_mutex_lock(&dp->port_mutex);
    }

    /* The action processing expects the RSS hash to be valid, because
     * it's always initialized at the beginning of datapath processing.
     * In this case, though, 'execute->packet' may not have gone through
     * the datapath at all, it may have been generated by the upper layer
     * (OpenFlow packet-out, BFD frame, ...). */
    if (!dp_packet_rss_valid(execute->packet)) {
        dp_packet_set_rss_hash(execute->packet,
                               flow_hash_5tuple(execute->flow, 0));
    }

    packet_batch_init_packet(&pp, execute->packet);
    dp_netdev_execute_actions(pmd, &pp, false, execute->actions,
                              execute->actions_len);

    if (pmd->core_id == NON_PMD_CORE_ID) {
        dp_netdev_pmd_unref(pmd);
        ovs_mutex_unlock(&dp->port_mutex);
        ovs_mutex_unlock(&dp->non_pmd_mutex);
    }

    return 0;
}
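
/* Note on the lock ordering above: a non-pmd caller takes 'non_pmd_mutex'
 * before 'port_mutex' and releases them in the reverse order.  Any other
 * code path that needs both mutexes must follow the same order, otherwise
 * two concurrent non-pmd executes could deadlock. */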
static void
dpif_netdev_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops)
{
    size_t i;

    for (i = 0; i < n_ops; i++) {
        struct dpif_op *op = ops[i];

        switch (op->type) {
        case DPIF_OP_FLOW_PUT:
            op->error = dpif_netdev_flow_put(dpif, &op->u.flow_put);
            break;

        case DPIF_OP_FLOW_DEL:
            op->error = dpif_netdev_flow_del(dpif, &op->u.flow_del);
            break;

        case DPIF_OP_EXECUTE:
            op->error = dpif_netdev_execute(dpif, &op->u.execute);
            break;

        case DPIF_OP_FLOW_GET:
            op->error = dpif_netdev_flow_get(dpif, &op->u.flow_get);
            break;
        }
    }
}

/* Returns true if the configuration for rx queues or cpu mask
 * is changed. */
static bool
pmd_config_changed(const struct dp_netdev *dp, const char *cmask)
{
    struct dp_netdev_port *port;

    CMAP_FOR_EACH (port, node, &dp->ports) {
        struct netdev *netdev = port->netdev;
        int requested_n_rxq = netdev_requested_n_rxq(netdev);
        if (netdev_is_pmd(netdev)
            && port->latest_requested_n_rxq != requested_n_rxq) {
            return true;
        }
    }

    if (dp->pmd_cmask != NULL && cmask != NULL) {
        return strcmp(dp->pmd_cmask, cmask);
    } else {
        return (dp->pmd_cmask != NULL || cmask != NULL);
    }
}
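
/* The cmask comparison above reduces to this truth table:
 *
 *   dp->pmd_cmask   cmask    changed?
 *   -------------   ------   ------------------------------
 *   NULL            NULL     false (nothing was configured)
 *   NULL            set      true
 *   set             NULL     true
 *   set             set      strcmp() != 0
 */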
/* Resets pmd threads if the configuration for 'rxq's or cpu mask changes. */
static int
dpif_netdev_pmd_set(struct dpif *dpif, const char *cmask)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    if (pmd_config_changed(dp, cmask)) {
        struct dp_netdev_port *port;

        dp_netdev_destroy_all_pmds(dp);

        CMAP_FOR_EACH (port, node, &dp->ports) {
            struct netdev *netdev = port->netdev;
            int requested_n_rxq = netdev_requested_n_rxq(netdev);
            if (netdev_is_pmd(port->netdev)
                && port->latest_requested_n_rxq != requested_n_rxq) {
                int i, err;

                /* Closes the existing 'rxq's. */
                for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
                    netdev_rxq_close(port->rxq[i]);
                    port->rxq[i] = NULL;
                }

                /* Sets the new rx queue config. */
                err = netdev_set_multiq(port->netdev,
                                        ovs_numa_get_n_cores() + 1,
                                        requested_n_rxq);
                if (err && (err != EOPNOTSUPP)) {
                    VLOG_ERR("Failed to set dpdk interface %s rx_queue to:"
                             " %u", netdev_get_name(port->netdev),
                             requested_n_rxq);
                    return err;
                }
                port->latest_requested_n_rxq = requested_n_rxq;
                /* If the set_multiq() above succeeds, reopens the 'rxq's. */
                port->n_rxq = netdev_n_rxq(port->netdev);
                port->rxq = xrealloc(port->rxq, sizeof *port->rxq * port->n_rxq);
                for (i = 0; i < port->n_rxq; i++) {
                    netdev_rxq_open(port->netdev, &port->rxq[i], i);
                }
            }
        }
        /* Reconfigures the cpu mask. */
        ovs_numa_set_cpu_mask(cmask);
        free(dp->pmd_cmask);
        dp->pmd_cmask = cmask ? xstrdup(cmask) : NULL;

        /* Restores the non-pmd. */
        dp_netdev_set_nonpmd(dp);
        /* Restores all pmd threads. */
        dp_netdev_reset_pmd_threads(dp);
    }

    return 0;
}

static int
dpif_netdev_queue_to_priority(const struct dpif *dpif OVS_UNUSED,
                              uint32_t queue_id, uint32_t *priority)
{
    *priority = queue_id;
    return 0;
}

/* Creates and returns a new 'struct dp_netdev_actions', whose actions are
 * a copy of the 'size' bytes of 'actions'. */
struct dp_netdev_actions *
dp_netdev_actions_create(const struct nlattr *actions, size_t size)
{
    struct dp_netdev_actions *netdev_actions;

    netdev_actions = xmalloc(sizeof *netdev_actions + size);
    memcpy(netdev_actions->actions, actions, size);
    netdev_actions->size = size;

    return netdev_actions;
}
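
/* 'dp_netdev_actions' uses the common C flexible-array idiom: one xmalloc()
 * covers both the header and the action bytes, so a single free() (via
 * dp_netdev_actions_free()) releases everything.  A minimal sketch of the
 * same pattern, with hypothetical names:
 *
 *     struct blob {
 *         size_t size;
 *         uint8_t data[];              // flexible array member
 *     };
 *
 *     struct blob *b = xmalloc(sizeof *b + n);
 *     memcpy(b->data, src, n);
 *     b->size = n;
 *     ...
 *     free(b);                         // one allocation, one free
 */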
struct dp_netdev_actions *
dp_netdev_flow_get_actions(const struct dp_netdev_flow *flow)
{
    return ovsrcu_get(struct dp_netdev_actions *, &flow->actions);
}

static void
dp_netdev_actions_free(struct dp_netdev_actions *actions)
{
    free(actions);
}

static inline unsigned long long
cycles_counter(void)
{
#ifdef DPDK_NETDEV
    return rte_get_tsc_cycles();
#else
    return 0;
#endif
}

/* Fake mutex to make sure that the calls to cycles_count_* are balanced. */
extern struct ovs_mutex cycles_counter_fake_mutex;

/* Start counting cycles.  Must be followed by 'cycles_count_end()'. */
static inline void
cycles_count_start(struct dp_netdev_pmd_thread *pmd)
    OVS_ACQUIRES(&cycles_counter_fake_mutex)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    pmd->last_cycles = cycles_counter();
}

/* Stop counting cycles and add them to the counter 'type'. */
static inline void
cycles_count_end(struct dp_netdev_pmd_thread *pmd,
                 enum pmd_cycles_counter_type type)
    OVS_RELEASES(&cycles_counter_fake_mutex)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    unsigned long long interval = cycles_counter() - pmd->last_cycles;

    non_atomic_ullong_add(&pmd->cycles.n[type], interval);
}
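
/* The OVS_ACQUIRES/OVS_RELEASES annotations on 'cycles_counter_fake_mutex'
 * do not guard any data: they borrow Clang's thread-safety analysis to check
 * statically that every cycles_count_start() is paired with a
 * cycles_count_end() on all paths.  Forgetting the end call makes the build
 * warn exactly as an unreleased mutex would. */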
static void
dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
                           struct dp_netdev_port *port,
                           struct netdev_rxq *rxq)
{
    struct dp_packet_batch batch;
    int error;

    dp_packet_batch_init(&batch);
    cycles_count_start(pmd);
    error = netdev_rxq_recv(rxq, &batch);
    cycles_count_end(pmd, PMD_CYCLES_POLLING);
    if (!error) {
        *recirc_depth_get() = 0;

        cycles_count_start(pmd);
        dp_netdev_input(pmd, &batch, port->port_no);
        cycles_count_end(pmd, PMD_CYCLES_PROCESSING);
    } else if (error != EAGAIN && error != EOPNOTSUPP) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
                    netdev_get_name(port->netdev), ovs_strerror(error));
    }
}

/* Returns true if the datapath flows need to be revalidated. */
static bool
dpif_netdev_run(struct dpif *dpif)
{
    struct dp_netdev_port *port;
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_pmd_thread *non_pmd = dp_netdev_get_pmd(dp,
                                                             NON_PMD_CORE_ID);
    uint64_t new_tnl_seq;

    ovs_mutex_lock(&dp->non_pmd_mutex);
    CMAP_FOR_EACH (port, node, &dp->ports) {
        if (!netdev_is_pmd(port->netdev)) {
            int i;

            for (i = 0; i < port->n_rxq; i++) {
                dp_netdev_process_rxq_port(non_pmd, port, port->rxq[i]);
            }
        }
    }
    ovs_mutex_unlock(&dp->non_pmd_mutex);
    dp_netdev_pmd_unref(non_pmd);

    tnl_neigh_cache_run();
    tnl_port_map_run();
    new_tnl_seq = seq_read(tnl_conf_seq);

    if (dp->last_tnl_conf_seq != new_tnl_seq) {
        dp->last_tnl_conf_seq = new_tnl_seq;
        return true;
    }
    return false;
}

static void
dpif_netdev_wait(struct dpif *dpif)
{
    struct dp_netdev_port *port;
    struct dp_netdev *dp = get_dp_netdev(dpif);

    ovs_mutex_lock(&dp_netdev_mutex);
    CMAP_FOR_EACH (port, node, &dp->ports) {
        if (!netdev_is_pmd(port->netdev)) {
            int i;

            for (i = 0; i < port->n_rxq; i++) {
                netdev_rxq_wait(port->rxq[i]);
            }
        }
    }
    ovs_mutex_unlock(&dp_netdev_mutex);
    seq_wait(tnl_conf_seq, dp->last_tnl_conf_seq);
}

static int
pmd_load_queues(struct dp_netdev_pmd_thread *pmd, struct rxq_poll **ppoll_list)
    OVS_REQUIRES(pmd->poll_mutex)
{
    struct rxq_poll *poll_list = *ppoll_list;
    struct rxq_poll *poll;
    int i = 0;

    poll_list = xrealloc(poll_list, pmd->poll_cnt * sizeof *poll_list);

    LIST_FOR_EACH (poll, node, &pmd->poll_list) {
        poll_list[i++] = *poll;
    }

    *ppoll_list = poll_list;
    return pmd->poll_cnt;
}

static void *
pmd_thread_main(void *f_)
{
    struct dp_netdev_pmd_thread *pmd = f_;
    unsigned int lc = 0;
    struct rxq_poll *poll_list;
    unsigned int port_seq = PMD_INITIAL_SEQ;
    int poll_cnt;
    int i;

    poll_cnt = 0;
    poll_list = NULL;

    /* Stores the pmd thread's 'pmd' to 'per_pmd_key'. */
    ovsthread_setspecific(pmd->dp->per_pmd_key, pmd);
    pmd_thread_setaffinity_cpu(pmd->core_id);
reload:
    emc_cache_init(&pmd->flow_cache);

    ovs_mutex_lock(&pmd->poll_mutex);
    poll_cnt = pmd_load_queues(pmd, &poll_list);
    ovs_mutex_unlock(&pmd->poll_mutex);

    /* List port/core affinity. */
    for (i = 0; i < poll_cnt; i++) {
        VLOG_DBG("Core %d processing port \'%s\' with queue-id %d\n",
                 pmd->core_id, netdev_get_name(poll_list[i].port->netdev),
                 netdev_rxq_get_queue_id(poll_list[i].rx));
    }

    /* Signal here to make sure the pmd finishes
     * reloading the updated configuration. */
    dp_netdev_pmd_reload_done(pmd);

    for (;;) {
        for (i = 0; i < poll_cnt; i++) {
            dp_netdev_process_rxq_port(pmd, poll_list[i].port, poll_list[i].rx);
        }

        if (lc++ > 1024) {
            unsigned int seq;

            lc = 0;

            emc_cache_slow_sweep(&pmd->flow_cache);
            coverage_try_clear();

            atomic_read_relaxed(&pmd->change_seq, &seq);
            if (seq != port_seq) {
                port_seq = seq;
                break;
            }
        }
    }

    emc_cache_uninit(&pmd->flow_cache);

    if (!latch_is_set(&pmd->exit_latch)) {
        goto reload;
    }

    dp_netdev_pmd_reload_done(pmd);

    free(poll_list);
    return NULL;
}
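
/* Lifecycle of a pmd thread, as implemented above:
 *
 *   start -> reload: init the EMC, snapshot poll_list, signal reload_done
 *         -> poll loop: receive and process each queue; every 1024
 *            iterations sweep the EMC and re-check 'change_seq'
 *         -> on seq change: uninit the EMC; if 'exit_latch' is not set,
 *            goto reload with the new configuration, otherwise signal
 *            reload_done once more and exit.
 *
 * The main thread triggers a reload by bumping 'change_seq' (and optionally
 * setting 'exit_latch') and then waiting on the condition variable signalled
 * by dp_netdev_pmd_reload_done(). */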
static void
dp_netdev_disable_upcall(struct dp_netdev *dp)
    OVS_ACQUIRES(dp->upcall_rwlock)
{
    fat_rwlock_wrlock(&dp->upcall_rwlock);
}

static void
dpif_netdev_disable_upcall(struct dpif *dpif)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp_netdev_disable_upcall(dp);
}

static void
dp_netdev_enable_upcall(struct dp_netdev *dp)
    OVS_RELEASES(dp->upcall_rwlock)
{
    fat_rwlock_unlock(&dp->upcall_rwlock);
}

static void
dpif_netdev_enable_upcall(struct dpif *dpif)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp_netdev_enable_upcall(dp);
}

static void
dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd)
{
    ovs_mutex_lock(&pmd->cond_mutex);
    xpthread_cond_signal(&pmd->cond);
    ovs_mutex_unlock(&pmd->cond_mutex);
}

/* Finds and refs the dp_netdev_pmd_thread on core 'core_id'.  Returns
 * the pointer if it succeeds, otherwise NULL.
 *
 * Caller must unref the returned reference. */
static struct dp_netdev_pmd_thread *
dp_netdev_get_pmd(struct dp_netdev *dp, unsigned core_id)
{
    struct dp_netdev_pmd_thread *pmd;
    const struct cmap_node *pnode;

    pnode = cmap_find(&dp->poll_threads, hash_int(core_id, 0));
    if (!pnode) {
        return NULL;
    }
    pmd = CONTAINER_OF(pnode, struct dp_netdev_pmd_thread, node);

    return dp_netdev_pmd_try_ref(pmd) ? pmd : NULL;
}

/* Sets the 'struct dp_netdev_pmd_thread' for non-pmd threads. */
static void
dp_netdev_set_nonpmd(struct dp_netdev *dp)
{
    struct dp_netdev_pmd_thread *non_pmd;

    non_pmd = xzalloc(sizeof *non_pmd);
    dp_netdev_configure_pmd(non_pmd, dp, NON_PMD_CORE_ID, OVS_NUMA_UNSPEC);
}

/* Caller must have a valid pointer to 'pmd'. */
static bool
dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd)
{
    return ovs_refcount_try_ref_rcu(&pmd->ref_cnt);
}

static void
dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd)
{
    if (pmd && ovs_refcount_unref(&pmd->ref_cnt) == 1) {
        ovsrcu_postpone(dp_netdev_destroy_pmd, pmd);
    }
}

/* Given cmap position 'pos', tries to ref the next node.  If try_ref()
 * fails, keeps checking for the next node until reaching the end of cmap.
 *
 * Caller must unref the returned reference. */
static struct dp_netdev_pmd_thread *
dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos)
{
    struct dp_netdev_pmd_thread *next;

    do {
        struct cmap_node *node;

        node = cmap_next_position(&dp->poll_threads, pos);
        next = node ? CONTAINER_OF(node, struct dp_netdev_pmd_thread, node)
            : NULL;
    } while (next && !dp_netdev_pmd_try_ref(next));

    return next;
}
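
/* Iteration sketch (illustrative only): walking every pmd thread while
 * holding a reference to at most one at a time, as the flow dump code above
 * does.  A zeroed 'struct cmap_position' starts the walk at the beginning.
 *
 *     struct cmap_position pos;
 *     struct dp_netdev_pmd_thread *pmd;
 *
 *     memset(&pos, 0, sizeof pos);
 *     while ((pmd = dp_netdev_pmd_get_next(dp, &pos))) {
 *         ...use 'pmd'...
 *         dp_netdev_pmd_unref(pmd);
 *     }
 *
 * 'pos' survives the unref, so the walk resumes where it left off even if
 * the pmd just visited is destroyed in the meantime. */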
/* Configures the 'pmd' based on the input argument. */
static void
dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
                        unsigned core_id, int numa_id)
{
    pmd->dp = dp;
    pmd->core_id = core_id;
    pmd->numa_id = numa_id;
    pmd->poll_cnt = 0;

    atomic_init(&pmd->tx_qid,
                (core_id == NON_PMD_CORE_ID)
                ? ovs_numa_get_n_cores()
                : get_n_pmd_threads(dp));

    ovs_refcount_init(&pmd->ref_cnt);
    latch_init(&pmd->exit_latch);
    atomic_init(&pmd->change_seq, PMD_INITIAL_SEQ);
    xpthread_cond_init(&pmd->cond, NULL);
    ovs_mutex_init(&pmd->cond_mutex);
    ovs_mutex_init(&pmd->flow_mutex);
    ovs_mutex_init(&pmd->poll_mutex);
    dpcls_init(&pmd->cls);
    cmap_init(&pmd->flow_table);
    ovs_list_init(&pmd->poll_list);
    /* init the 'flow_cache' since there is no
     * actual thread created for NON_PMD_CORE_ID. */
    if (core_id == NON_PMD_CORE_ID) {
        emc_cache_init(&pmd->flow_cache);
    }
    cmap_insert(&dp->poll_threads, CONST_CAST(struct cmap_node *, &pmd->node),
                hash_int(core_id, 0));
}

static void
dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd)
{
    dp_netdev_pmd_flow_flush(pmd);
    dpcls_destroy(&pmd->cls);
    cmap_destroy(&pmd->flow_table);
    ovs_mutex_destroy(&pmd->flow_mutex);
    latch_destroy(&pmd->exit_latch);
    xpthread_cond_destroy(&pmd->cond);
    ovs_mutex_destroy(&pmd->cond_mutex);
    ovs_mutex_destroy(&pmd->poll_mutex);
    free(pmd);
}

/* Stops the pmd thread, removes it from the 'dp->poll_threads',
 * and unrefs the struct. */
static void
dp_netdev_del_pmd(struct dp_netdev *dp, struct dp_netdev_pmd_thread *pmd)
{
    /* Uninit the 'flow_cache' since there is
     * no actual thread to uninit it for NON_PMD_CORE_ID. */
    if (pmd->core_id == NON_PMD_CORE_ID) {
        emc_cache_uninit(&pmd->flow_cache);
    } else {
        latch_set(&pmd->exit_latch);
        dp_netdev_reload_pmd__(pmd);
        ovs_numa_unpin_core(pmd->core_id);
        xpthread_join(pmd->thread, NULL);
    }

    /* Unref all ports and free poll_list. */
    dp_netdev_pmd_clear_poll_list(pmd);

    /* Purges the 'pmd''s flows after stopping the thread, but before
     * destroying the flows, so that the flow stats can be collected. */
    if (dp->dp_purge_cb) {
        dp->dp_purge_cb(dp->dp_purge_aux, pmd->core_id);
    }
    cmap_remove(&pmd->dp->poll_threads, &pmd->node, hash_int(pmd->core_id, 0));
    dp_netdev_pmd_unref(pmd);
}

/* Destroys all pmd threads. */
static void
dp_netdev_destroy_all_pmds(struct dp_netdev *dp)
{
    struct dp_netdev_pmd_thread *pmd;
    struct dp_netdev_pmd_thread **pmd_list;
    size_t k = 0, n_pmds;

    n_pmds = cmap_count(&dp->poll_threads);
    pmd_list = xcalloc(n_pmds, sizeof *pmd_list);

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        /* We cannot call dp_netdev_del_pmd(), since it alters
         * 'dp->poll_threads' (while we're iterating it) and it
         * might quiesce. */
        ovs_assert(k < n_pmds);
        pmd_list[k++] = pmd;
    }

    for (size_t i = 0; i < k; i++) {
        dp_netdev_del_pmd(dp, pmd_list[i]);
    }
    free(pmd_list);
}

/* Deletes all pmd threads on numa node 'numa_id' and
 * fixes tx_qids of other threads to keep them sequential. */
static void
dp_netdev_del_pmds_on_numa(struct dp_netdev *dp, int numa_id)
{
    struct dp_netdev_pmd_thread *pmd;
    int n_pmds_on_numa, n_pmds;
    int *free_idx, k = 0;
    struct dp_netdev_pmd_thread **pmd_list;

    n_pmds_on_numa = get_n_pmd_threads_on_numa(dp, numa_id);
    free_idx = xcalloc(n_pmds_on_numa, sizeof *free_idx);
    pmd_list = xcalloc(n_pmds_on_numa, sizeof *pmd_list);

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        /* We cannot call dp_netdev_del_pmd(), since it alters
         * 'dp->poll_threads' (while we're iterating it) and it
         * might quiesce. */
        if (pmd->numa_id == numa_id) {
            atomic_read_relaxed(&pmd->tx_qid, &free_idx[k]);
            pmd_list[k] = pmd;
            ovs_assert(k < n_pmds_on_numa);
            k++;
        }
    }

    for (int i = 0; i < k; i++) {
        dp_netdev_del_pmd(dp, pmd_list[i]);
    }

    n_pmds = get_n_pmd_threads(dp);
    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        int old_tx_qid;

        atomic_read_relaxed(&pmd->tx_qid, &old_tx_qid);

        if (old_tx_qid >= n_pmds) {
            int new_tx_qid = free_idx[--k];

            atomic_store_relaxed(&pmd->tx_qid, new_tx_qid);
        }
    }

    free(pmd_list);
    free(free_idx);
}
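
/* Worked example of the tx_qid compaction above (values hypothetical):
 * with 5 pmds carrying tx_qids {0,1,2,3,4}, deleting the numa node that
 * owns qids {1,3} leaves n_pmds == 3 and free_idx == {1,3}.  Survivors
 * with tx_qid >= 3 (here only qid 4) pop an index from free_idx, ending
 * with {0,1,2} -- a dense range, so every remaining qid still maps to a
 * real tx queue. */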
/* Deletes all rx queues from pmd->poll_list. */
static void
dp_netdev_pmd_clear_poll_list(struct dp_netdev_pmd_thread *pmd)
{
    struct rxq_poll *poll;

    ovs_mutex_lock(&pmd->poll_mutex);
    LIST_FOR_EACH_POP (poll, node, &pmd->poll_list) {
        free(poll);
    }
    pmd->poll_cnt = 0;
    ovs_mutex_unlock(&pmd->poll_mutex);
}

/* Deletes all rx queues of 'port' from the poll_list of the pmd thread.
 * Returns true if 'port' was found in 'pmd' (therefore a restart is
 * required). */
static bool
dp_netdev_del_port_from_pmd__(struct dp_netdev_port *port,
                              struct dp_netdev_pmd_thread *pmd)
{
    struct rxq_poll *poll, *next;
    bool found = false;

    ovs_mutex_lock(&pmd->poll_mutex);
    LIST_FOR_EACH_SAFE (poll, next, node, &pmd->poll_list) {
        if (poll->port == port) {
            found = true;
            ovs_list_remove(&poll->node);
            pmd->poll_cnt--;
            free(poll);
        }
    }
    ovs_mutex_unlock(&pmd->poll_mutex);

    return found;
}

/* Deletes all rx queues of 'port' from all pmd threads.  The pmd threads that
 * need to be restarted are inserted in 'to_reload'. */
static void
dp_netdev_del_port_from_all_pmds__(struct dp_netdev *dp,
                                   struct dp_netdev_port *port,
                                   struct hmapx *to_reload)
{
    int numa_id = netdev_get_numa_id(port->netdev);
    struct dp_netdev_pmd_thread *pmd;

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        if (pmd->numa_id == numa_id) {
            bool found;

            found = dp_netdev_del_port_from_pmd__(port, pmd);
            if (found) {
                hmapx_add(to_reload, pmd);
            }
        }
    }
}

/* Deletes all rx queues of 'port' from all pmd threads of dp and
 * reloads them if needed. */
static void
dp_netdev_del_port_from_all_pmds(struct dp_netdev *dp,
                                 struct dp_netdev_port *port)
{
    struct dp_netdev_pmd_thread *pmd;
    struct hmapx to_reload = HMAPX_INITIALIZER(&to_reload);
    struct hmapx_node *node;

    dp_netdev_del_port_from_all_pmds__(dp, port, &to_reload);

    HMAPX_FOR_EACH (node, &to_reload) {
        pmd = (struct dp_netdev_pmd_thread *) node->data;
        dp_netdev_reload_pmd__(pmd);
    }

    hmapx_destroy(&to_reload);
}

/* Returns the PMD thread on this numa node with the fewest rx queues to
 * poll.  Returns NULL if there are no PMD threads on this numa node.
 * Can be called safely only by the main thread. */
static struct dp_netdev_pmd_thread *
dp_netdev_less_loaded_pmd_on_numa(struct dp_netdev *dp, int numa_id)
{
    int min_cnt = -1;
    struct dp_netdev_pmd_thread *pmd, *res = NULL;

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        if (pmd->numa_id == numa_id
            && (min_cnt > pmd->poll_cnt || res == NULL)) {
            min_cnt = pmd->poll_cnt;
            res = pmd;
        }
    }

    return res;
}

/* Adds rx queue to poll_list of PMD thread. */
static void
dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd,
                         struct dp_netdev_port *port, struct netdev_rxq *rx)
    OVS_REQUIRES(pmd->poll_mutex)
{
    struct rxq_poll *poll = xmalloc(sizeof *poll);

    poll->port = port;
    poll->rx = rx;

    ovs_list_push_back(&pmd->poll_list, &poll->node);
    pmd->poll_cnt++;
}

/* Distributes all rx queues of 'port' between all PMD threads in 'dp'.  The
 * pmd threads that need to be restarted are inserted in 'to_reload'. */
static void
dp_netdev_add_port_to_pmds__(struct dp_netdev *dp, struct dp_netdev_port *port,
                             struct hmapx *to_reload)
{
    int numa_id = netdev_get_numa_id(port->netdev);
    struct dp_netdev_pmd_thread *pmd;
    int i;

    /* Cannot create pmd threads for invalid numa node. */
    ovs_assert(ovs_numa_numa_id_is_valid(numa_id));

    for (i = 0; i < port->n_rxq; i++) {
        pmd = dp_netdev_less_loaded_pmd_on_numa(dp, numa_id);
        if (!pmd) {
            /* There are no pmd threads on this numa node. */
            dp_netdev_set_pmds_on_numa(dp, numa_id);
            /* Assigning of rx queues done. */
            return;
        }

        ovs_mutex_lock(&pmd->poll_mutex);
        dp_netdev_add_rxq_to_pmd(pmd, port, port->rxq[i]);
        ovs_mutex_unlock(&pmd->poll_mutex);

        hmapx_add(to_reload, pmd);
    }
}

/* Distributes all rx queues of 'port' between all PMD threads in 'dp' and
 * reloads them, if needed. */
static void
dp_netdev_add_port_to_pmds(struct dp_netdev *dp, struct dp_netdev_port *port)
{
    struct dp_netdev_pmd_thread *pmd;
    struct hmapx to_reload = HMAPX_INITIALIZER(&to_reload);
    struct hmapx_node *node;

    dp_netdev_add_port_to_pmds__(dp, port, &to_reload);

    HMAPX_FOR_EACH (node, &to_reload) {
        pmd = (struct dp_netdev_pmd_thread *) node->data;
        dp_netdev_reload_pmd__(pmd);
    }

    hmapx_destroy(&to_reload);
}

/* Checks the numa node id of 'netdev' and starts pmd threads for
 * the numa node. */
static void
dp_netdev_set_pmds_on_numa(struct dp_netdev *dp, int numa_id)
{
    int n_pmds;

    if (!ovs_numa_numa_id_is_valid(numa_id)) {
        VLOG_ERR("Cannot create pmd threads: invalid numa id (%d)", numa_id);
        return;
    }

    n_pmds = get_n_pmd_threads_on_numa(dp, numa_id);

    /* If there are already pmd threads created for the numa node
     * on which 'netdev' is, do nothing.  Else, creates the
     * pmd threads for the numa node. */
    if (!n_pmds) {
        int can_have, n_unpinned, i, index = 0;
        struct dp_netdev_pmd_thread **pmds;
        struct dp_netdev_port *port;

        n_unpinned = ovs_numa_get_n_unpinned_cores_on_numa(numa_id);
        if (!n_unpinned) {
            VLOG_ERR("Cannot create pmd threads due to out of unpinned "
                     "cores on numa node %d", numa_id);
            return;
        }

        /* If cpu mask is specified, uses all unpinned cores, otherwise
         * tries creating NR_PMD_THREADS pmd threads. */
        can_have = dp->pmd_cmask ? n_unpinned : MIN(n_unpinned, NR_PMD_THREADS);
        pmds = xzalloc(can_have * sizeof *pmds);
        for (i = 0; i < can_have; i++) {
            unsigned core_id = ovs_numa_get_unpinned_core_on_numa(numa_id);
            pmds[i] = xzalloc(sizeof **pmds);
            dp_netdev_configure_pmd(pmds[i], dp, core_id, numa_id);
        }

        /* Distributes rx queues of this numa node between new pmd threads. */
        CMAP_FOR_EACH (port, node, &dp->ports) {
            if (netdev_is_pmd(port->netdev)
                && netdev_get_numa_id(port->netdev) == numa_id) {
                for (i = 0; i < port->n_rxq; i++) {
                    /* Make thread-safety analyser happy. */
                    ovs_mutex_lock(&pmds[index]->poll_mutex);
                    dp_netdev_add_rxq_to_pmd(pmds[index], port, port->rxq[i]);
                    ovs_mutex_unlock(&pmds[index]->poll_mutex);
                    index = (index + 1) % can_have;
                }
            }
        }

        /* Actual start of pmd threads. */
        for (i = 0; i < can_have; i++) {
            pmds[i]->thread = ovs_thread_create("pmd", pmd_thread_main, pmds[i]);
        }
        free(pmds);
        VLOG_INFO("Created %d pmd threads on numa node %d", can_have, numa_id);
    }
}
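
/* Distribution example for the round-robin loop above (hypothetical sizes):
 * with can_have == 2 new pmds and a port carrying 3 rx queues, 'index'
 * cycles 0 -> 1 -> 0, so pmds[0] polls rxq[0] and rxq[2] while pmds[1]
 * polls rxq[1].  Queues of further pmd ports on the same numa node keep
 * cycling from wherever 'index' left off, so the load stays roughly even. */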
/* Called after a pmd threads config change.  Restarts pmd threads with
 * the new configuration. */
static void
dp_netdev_reset_pmd_threads(struct dp_netdev *dp)
{
    struct dp_netdev_port *port;

    CMAP_FOR_EACH (port, node, &dp->ports) {
        if (netdev_is_pmd(port->netdev)) {
            int numa_id = netdev_get_numa_id(port->netdev);

            dp_netdev_set_pmds_on_numa(dp, numa_id);
        }
    }
}

static char *
dpif_netdev_get_datapath_version(void)
{
    return xstrdup("<built-in>");
}

static void
dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow, int cnt, int size,
                    uint16_t tcp_flags, long long now)
{
    uint16_t flags;

    atomic_store_relaxed(&netdev_flow->stats.used, now);
    non_atomic_ullong_add(&netdev_flow->stats.packet_count, cnt);
    non_atomic_ullong_add(&netdev_flow->stats.byte_count, size);
    atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags);
    flags |= tcp_flags;
    atomic_store_relaxed(&netdev_flow->stats.tcp_flags, flags);
}

static void
dp_netdev_count_packet(struct dp_netdev_pmd_thread *pmd,
                       enum dp_stat_type type, int cnt)
{
    non_atomic_ullong_add(&pmd->stats.n[type], cnt);
}

static int
dp_netdev_upcall(struct dp_netdev_pmd_thread *pmd, struct dp_packet *packet_,
                 struct flow *flow, struct flow_wildcards *wc, ovs_u128 *ufid,
                 enum dpif_upcall_type type, const struct nlattr *userdata,
                 struct ofpbuf *actions, struct ofpbuf *put_actions)
{
    struct dp_netdev *dp = pmd->dp;
    struct flow_tnl orig_tunnel;
    int err;

    if (OVS_UNLIKELY(!dp->upcall_cb)) {
        return ENODEV;
    }

    /* Upcall processing expects the Geneve options to be in the translated
     * format but we need to retain the raw format for datapath use. */
    orig_tunnel.flags = flow->tunnel.flags;
    if (flow->tunnel.flags & FLOW_TNL_F_UDPIF) {
        orig_tunnel.metadata.present.len = flow->tunnel.metadata.present.len;
        memcpy(orig_tunnel.metadata.opts.gnv, flow->tunnel.metadata.opts.gnv,
               flow->tunnel.metadata.present.len);
        err = tun_metadata_from_geneve_udpif(&orig_tunnel, &orig_tunnel,
                                             &flow->tunnel);
        if (err) {
            return err;
        }
    }

    if (OVS_UNLIKELY(!VLOG_DROP_DBG(&upcall_rl))) {
        struct ds ds = DS_EMPTY_INITIALIZER;
        char *packet_str;
        struct ofpbuf key;
        struct odp_flow_key_parms odp_parms = {
            .flow = flow,
            .mask = wc ? &wc->masks : NULL,
            .odp_in_port = flow->in_port.odp_port,
            .support = dp_netdev_support,
        };

        ofpbuf_init(&key, 0);
        odp_flow_key_from_flow(&odp_parms, &key);
        packet_str = ofp_packet_to_string(dp_packet_data(packet_),
                                          dp_packet_size(packet_));

        odp_flow_key_format(key.data, key.size, &ds);

        VLOG_DBG("%s: %s upcall:\n%s\n%s", dp->name,
                 dpif_upcall_type_to_string(type), ds_cstr(&ds), packet_str);

        ofpbuf_uninit(&key);
        free(packet_str);

        ds_destroy(&ds);
    }

    err = dp->upcall_cb(packet_, flow, ufid, pmd->core_id, type, userdata,
                        actions, wc, put_actions, dp->upcall_aux);
    if (err && err != ENOSPC) {
        return err;
    }

    /* Translate tunnel metadata masks to datapath format. */
    if (wc) {
        if (wc->masks.tunnel.metadata.present.map) {
            struct geneve_opt opts[TLV_TOT_OPT_SIZE /
                                   sizeof(struct geneve_opt)];

            if (orig_tunnel.flags & FLOW_TNL_F_UDPIF) {
                tun_metadata_to_geneve_udpif_mask(&flow->tunnel,
                                                  &wc->masks.tunnel,
                                                  orig_tunnel.metadata.opts.gnv,
                                                  orig_tunnel.metadata.present.len,
                                                  opts);
            } else {
                orig_tunnel.metadata.present.len = 0;
            }

            memset(&wc->masks.tunnel.metadata, 0,
                   sizeof wc->masks.tunnel.metadata);
            memcpy(&wc->masks.tunnel.metadata.opts.gnv, opts,
                   orig_tunnel.metadata.present.len);
        }
        wc->masks.tunnel.metadata.present.len = 0xff;
    }

    /* Restore tunnel metadata.  We need to use the saved options to ensure
     * that any unknown options are not lost.  The generated mask will have
     * the same structure, matching on types and lengths but wildcarding
     * option data we don't care about. */
    if (orig_tunnel.flags & FLOW_TNL_F_UDPIF) {
        memcpy(&flow->tunnel.metadata.opts.gnv, orig_tunnel.metadata.opts.gnv,
               orig_tunnel.metadata.present.len);
        flow->tunnel.metadata.present.len = orig_tunnel.metadata.present.len;
        flow->tunnel.flags |= FLOW_TNL_F_UDPIF;
    }

    return err;
}

static inline uint32_t
dpif_netdev_packet_get_rss_hash(struct dp_packet *packet,
                                const struct miniflow *mf)
{
    uint32_t hash, recirc_depth;

    if (OVS_LIKELY(dp_packet_rss_valid(packet))) {
        hash = dp_packet_get_rss_hash(packet);
    } else {
        hash = miniflow_hash_5tuple(mf, 0);
        dp_packet_set_rss_hash(packet, hash);
    }

    /* The RSS hash must account for the recirculation depth to avoid
     * collisions in the exact match cache. */
    recirc_depth = *recirc_depth_get_unsafe();
    if (OVS_UNLIKELY(recirc_depth)) {
        hash = hash_finish(hash, recirc_depth);
        dp_packet_set_rss_hash(packet, hash);
    }
    return hash;
}
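
/* A minimal sketch of the depth mixing above, using hash_finish() as the
 * code itself does: the same 5-tuple hashed at different recirculation
 * depths yields distinct probe points in the exact match cache.
 *
 *     uint32_t h0 = 0x1234abcd;           // RSS hash at depth 0 (example)
 *     uint32_t h1 = hash_finish(h0, 1);   // after first recirculation
 *     uint32_t h2 = hash_finish(h0, 2);   // after second
 *
 * h0, h1 and h2 almost certainly differ, so the pre- and post-recirculation
 * versions of one packet do not fight over the same cache entries. */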
struct packet_batch_per_flow {
    unsigned int byte_count;
    uint16_t tcp_flags;
    struct dp_netdev_flow *flow;

    struct dp_packet_batch array;
};

static inline void
packet_batch_per_flow_update(struct packet_batch_per_flow *batch,
                             struct dp_packet *packet,
                             const struct miniflow *mf)
{
    batch->byte_count += dp_packet_size(packet);
    batch->tcp_flags |= miniflow_get_tcp_flags(mf);
    batch->array.packets[batch->array.count++] = packet;
}

static inline void
packet_batch_per_flow_init(struct packet_batch_per_flow *batch,
                           struct dp_netdev_flow *flow)
{
    flow->batch = batch;

    batch->flow = flow;
    dp_packet_batch_init(&batch->array);
    batch->byte_count = 0;
    batch->tcp_flags = 0;
}

static inline void
packet_batch_per_flow_execute(struct packet_batch_per_flow *batch,
                              struct dp_netdev_pmd_thread *pmd,
                              long long now)
{
    struct dp_netdev_actions *actions;
    struct dp_netdev_flow *flow = batch->flow;

    dp_netdev_flow_used(flow, batch->array.count, batch->byte_count,
                        batch->tcp_flags, now);

    actions = dp_netdev_flow_get_actions(flow);

    dp_netdev_execute_actions(pmd, &batch->array, true,
                              actions->actions, actions->size);
}

static inline void
dp_netdev_queue_batches(struct dp_packet *pkt,
                        struct dp_netdev_flow *flow, const struct miniflow *mf,
                        struct packet_batch_per_flow *batches, size_t *n_batches)
{
    struct packet_batch_per_flow *batch = flow->batch;

    if (OVS_UNLIKELY(!batch)) {
        batch = &batches[(*n_batches)++];
        packet_batch_per_flow_init(batch, flow);
    }

    packet_batch_per_flow_update(batch, pkt, mf);
}
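
/* Why batch per flow: all packets in a 'packet_batch_per_flow' share one
 * action list, so flow stats are charged once per batch and
 * dp_netdev_execute_actions() runs once over the whole array instead of
 * once per packet.  'flow->batch' acts as a one-entry index, so a second
 * packet of the same flow within this input batch appends in O(1). */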
/* Try to process all ('cnt') the 'packets' using only the exact match cache
 * 'pmd->flow_cache'.  If a flow is not found for a packet 'packets[i]', the
 * miniflow is copied into 'keys' and the packet pointer is moved to the
 * beginning of the 'packets' array.
 *
 * The function returns the number of packets that need to be processed in
 * the 'packets' array (they have been moved to the beginning of the vector).
 *
 * If 'md_is_valid' is false, the metadata in 'packets' is not valid and must
 * be initialized by this function using 'port_no'. */
static inline size_t
emc_processing(struct dp_netdev_pmd_thread *pmd, struct dp_packet_batch *packets_,
               struct netdev_flow_key *keys,
               struct packet_batch_per_flow batches[], size_t *n_batches,
               bool md_is_valid, odp_port_t port_no)
{
    struct emc_cache *flow_cache = &pmd->flow_cache;
    struct netdev_flow_key *key = &keys[0];
    size_t i, n_missed = 0, n_dropped = 0;
    struct dp_packet **packets = packets_->packets;
    int cnt = packets_->count;

    for (i = 0; i < cnt; i++) {
        struct dp_netdev_flow *flow;
        struct dp_packet *packet = packets[i];

        if (OVS_UNLIKELY(dp_packet_size(packet) < ETH_HEADER_LEN)) {
            dp_packet_delete(packet);
            n_dropped++;
            continue;
        }

        if (i != cnt - 1) {
            /* Prefetch next packet data and metadata. */
            OVS_PREFETCH(dp_packet_data(packets[i+1]));
            pkt_metadata_prefetch_init(&packets[i+1]->md);
        }

        if (!md_is_valid) {
            pkt_metadata_init(&packet->md, port_no);
        }
        miniflow_extract(packet, &key->mf);
        key->len = 0; /* Not computed yet. */
        key->hash = dpif_netdev_packet_get_rss_hash(packet, &key->mf);

        flow = emc_lookup(flow_cache, key);
        if (OVS_LIKELY(flow)) {
            dp_netdev_queue_batches(packet, flow, &key->mf, batches,
                                    n_batches);
        } else {
            /* Exact match cache missed.  Group missed packets together at
             * the beginning of the 'packets' array. */
            packets[n_missed] = packet;
            /* 'key[n_missed]' contains the key of the current packet and it
             * must be returned to the caller.  The next key should be
             * extracted to 'keys[n_missed + 1]'. */
            key = &keys[++n_missed];
        }
    }

    dp_netdev_count_packet(pmd, DP_STAT_EXACT_HIT, cnt - n_dropped - n_missed);

    return n_missed;
}

static inline void
handle_packet_upcall(struct dp_netdev_pmd_thread *pmd, struct dp_packet *packet,
                     const struct netdev_flow_key *key,
                     struct ofpbuf *actions, struct ofpbuf *put_actions,
                     int *lost_cnt)
{
    struct ofpbuf *add_actions;
    struct dp_packet_batch b;
    struct match match;
    ovs_u128 ufid;
    int error;

    match.tun_md.valid = false;
    miniflow_expand(&key->mf, &match.flow);

    ofpbuf_clear(actions);
    ofpbuf_clear(put_actions);

    dpif_flow_hash(pmd->dp->dpif, &match.flow, sizeof match.flow, &ufid);
    error = dp_netdev_upcall(pmd, packet, &match.flow, &match.wc,
                             &ufid, DPIF_UC_MISS, NULL, actions,
                             put_actions);
    if (OVS_UNLIKELY(error && error != ENOSPC)) {
        dp_packet_delete(packet);
        (*lost_cnt)++;
        return;
    }

    /* The Netlink encoding of datapath flow keys cannot express
     * wildcarding the presence of a VLAN tag.  Instead, a missing VLAN
     * tag is interpreted as exact match on the fact that there is no
     * VLAN.  Unless we refactor a lot of code that translates between
     * Netlink and struct flow representations, we have to do the same
     * here. */
    if (!match.wc.masks.vlan_tci) {
        match.wc.masks.vlan_tci = htons(0xffff);
    }

    /* We can't allow the packet batching in the next loop to execute
     * the actions.  Otherwise, if there are any slow path actions,
     * we'll send the packet up twice. */
    packet_batch_init_packet(&b, packet);
    dp_netdev_execute_actions(pmd, &b, true,
                              actions->data, actions->size);

    add_actions = put_actions->size ? put_actions : actions;
    if (OVS_LIKELY(error != ENOSPC)) {
        struct dp_netdev_flow *netdev_flow;

        /* XXX: There's a race window where a flow covering this packet
         * could have already been installed since we last did the flow
         * lookup before upcall.  This could be solved by moving the
         * mutex lock outside the loop, but that's an awful long time
         * to be locking everyone out of making flow installs.  If we
         * move to a per-core classifier, it would be reasonable. */
        ovs_mutex_lock(&pmd->flow_mutex);
        netdev_flow = dp_netdev_pmd_lookup_flow(pmd, key);
        if (OVS_LIKELY(!netdev_flow)) {
            netdev_flow = dp_netdev_flow_add(pmd, &match, &ufid,
                                             add_actions->data,
                                             add_actions->size);
        }
        ovs_mutex_unlock(&pmd->flow_mutex);

        emc_insert(&pmd->flow_cache, key, netdev_flow);
    }
}

static inline void
fast_path_processing(struct dp_netdev_pmd_thread *pmd,
                     struct dp_packet_batch *packets_,
                     struct netdev_flow_key *keys,
                     struct packet_batch_per_flow batches[], size_t *n_batches)
{
    int cnt = packets_->count;
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = cnt;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct dp_packet **packets = packets_->packets;
    struct dpcls_rule *rules[PKT_ARRAY_SIZE];
    struct dp_netdev *dp = pmd->dp;
    struct emc_cache *flow_cache = &pmd->flow_cache;
    int miss_cnt = 0, lost_cnt = 0;
    bool any_miss;
    size_t i;

    for (i = 0; i < cnt; i++) {
        /* Key length is needed in all the cases, hash computed on demand. */
        keys[i].len = netdev_flow_key_size(miniflow_n_values(&keys[i].mf));
    }
    any_miss = !dpcls_lookup(&pmd->cls, keys, rules, cnt);
    if (OVS_UNLIKELY(any_miss) && !fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
        uint64_t actions_stub[512 / 8], slow_stub[512 / 8];
        struct ofpbuf actions, put_actions;

        ofpbuf_use_stub(&actions, actions_stub, sizeof actions_stub);
        ofpbuf_use_stub(&put_actions, slow_stub, sizeof slow_stub);

        for (i = 0; i < cnt; i++) {
            struct dp_netdev_flow *netdev_flow;

            if (OVS_LIKELY(rules[i])) {
                continue;
            }

            /* It's possible that an earlier slow path execution installed
             * a rule covering this flow.  In this case, it's a lot cheaper
             * to catch it here than execute a miss. */
            netdev_flow = dp_netdev_pmd_lookup_flow(pmd, &keys[i]);
            if (netdev_flow) {
                rules[i] = &netdev_flow->cr;
                continue;
            }

            miss_cnt++;
            handle_packet_upcall(pmd, packets[i], &keys[i], &actions,
                                 &put_actions, &lost_cnt);
        }

        ofpbuf_uninit(&actions);
        ofpbuf_uninit(&put_actions);
        fat_rwlock_unlock(&dp->upcall_rwlock);
        dp_netdev_count_packet(pmd, DP_STAT_LOST, lost_cnt);
    } else if (OVS_UNLIKELY(any_miss)) {
        for (i = 0; i < cnt; i++) {
            if (OVS_UNLIKELY(!rules[i])) {
                dp_packet_delete(packets[i]);
                lost_cnt++;
                miss_cnt++;
            }
        }
    }

    for (i = 0; i < cnt; i++) {
        struct dp_packet *packet = packets[i];
        struct dp_netdev_flow *flow;

        if (OVS_UNLIKELY(!rules[i])) {
            continue;
        }

        flow = dp_netdev_flow_cast(rules[i]);

        emc_insert(flow_cache, &keys[i], flow);
        dp_netdev_queue_batches(packet, flow, &keys[i].mf, batches, n_batches);
    }

    dp_netdev_count_packet(pmd, DP_STAT_MASKED_HIT, cnt - miss_cnt);
    dp_netdev_count_packet(pmd, DP_STAT_MISS, miss_cnt);
    dp_netdev_count_packet(pmd, DP_STAT_LOST, lost_cnt);
}
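
/* Per-packet lookup order implemented by the functions above, cheapest
 * first:
 *
 *   1. emc_processing():       exact-match cache; hash plus miniflow
 *                              comparison per probed entry.
 *   2. dpcls_lookup():         per-pmd megaflow classifier; one masked
 *                              lookup per subtable.
 *   3. handle_packet_upcall(): miss path; builds a match, calls the
 *                              registered upcall callback, then installs
 *                              the resulting flow and seeds the EMC so the
 *                              next packet of this flow hits tier 1. */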
/* Packets enter the datapath from a port (or from recirculation) here.
 *
 * For performance reasons a caller may choose not to initialize the metadata
 * in 'packets': in this case 'md_is_valid' is false and this function needs
 * to initialize it using 'port_no'.  If the metadata in 'packets' is already
 * valid, 'md_is_valid' must be true and 'port_no' will be ignored. */
static void
dp_netdev_input__(struct dp_netdev_pmd_thread *pmd,
                  struct dp_packet_batch *packets,
                  bool md_is_valid, odp_port_t port_no)
{
    int cnt = packets->count;
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = cnt;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct netdev_flow_key keys[PKT_ARRAY_SIZE];
    struct packet_batch_per_flow batches[PKT_ARRAY_SIZE];
    long long now = time_msec();
    size_t newcnt, n_batches, i;

    n_batches = 0;
    newcnt = emc_processing(pmd, packets, keys, batches, &n_batches,
                            md_is_valid, port_no);
    if (OVS_UNLIKELY(newcnt)) {
        packets->count = newcnt;
        fast_path_processing(pmd, packets, keys, batches, &n_batches);
    }

    for (i = 0; i < n_batches; i++) {
        batches[i].flow->batch = NULL;
    }

    for (i = 0; i < n_batches; i++) {
        packet_batch_per_flow_execute(&batches[i], pmd, now);
    }
}

static void
dp_netdev_input(struct dp_netdev_pmd_thread *pmd,
                struct dp_packet_batch *packets,
                odp_port_t port_no)
{
    dp_netdev_input__(pmd, packets, false, port_no);
}

static void
dp_netdev_recirculate(struct dp_netdev_pmd_thread *pmd,
                      struct dp_packet_batch *packets)
{
    dp_netdev_input__(pmd, packets, true, 0);
}

struct dp_netdev_execute_aux {
    struct dp_netdev_pmd_thread *pmd;
};

static void
dpif_netdev_register_dp_purge_cb(struct dpif *dpif, dp_purge_callback *cb,
                                 void *aux)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp->dp_purge_aux = aux;
    dp->dp_purge_cb = cb;
}

static void
dpif_netdev_register_upcall_cb(struct dpif *dpif, upcall_callback *cb,
                               void *aux)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp->upcall_aux = aux;
    dp->upcall_cb = cb;
}

static int
push_tnl_action(const struct dp_netdev *dp,
                const struct nlattr *attr,
                struct dp_packet_batch *batch)
{
    struct dp_netdev_port *tun_port;
    const struct ovs_action_push_tnl *data;
    int err;

    data = nl_attr_get(attr);

    tun_port = dp_netdev_lookup_port(dp, u32_to_odp(data->tnl_port));
    if (!tun_port) {
        err = -EINVAL;
        goto error;
    }
    err = netdev_push_header(tun_port->netdev, batch, data);
    if (!err) {
        return 0;
    }
error:
    dp_packet_delete_batch(batch, true);
    return err;
}

static void
dp_execute_userspace_action(struct dp_netdev_pmd_thread *pmd,
                            struct dp_packet *packet, bool may_steal,
                            struct flow *flow, ovs_u128 *ufid,
                            struct ofpbuf *actions,
                            const struct nlattr *userdata)
{
    struct dp_packet_batch b;
    int error;

    ofpbuf_clear(actions);

    error = dp_netdev_upcall(pmd, packet, flow, NULL, ufid,
                             DPIF_UC_ACTION, userdata, actions,
                             NULL);
    if (!error || error == ENOSPC) {
        packet_batch_init_packet(&b, packet);
        dp_netdev_execute_actions(pmd, &b, may_steal,
                                  actions->data, actions->size);
    } else if (may_steal) {
        dp_packet_delete(packet);
    }
}

static void
dp_execute_cb(void *aux_, struct dp_packet_batch *packets_,
              const struct nlattr *a, bool may_steal)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct dp_netdev_execute_aux *aux = aux_;
    uint32_t *depth = recirc_depth_get();
    struct dp_netdev_pmd_thread *pmd = aux->pmd;
    struct dp_netdev *dp = pmd->dp;
    int type = nl_attr_type(a);
    struct dp_netdev_port *p;
    int i;

    switch ((enum ovs_action_attr)type) {
    case OVS_ACTION_ATTR_OUTPUT:
        p = dp_netdev_lookup_port(dp, u32_to_odp(nl_attr_get_u32(a)));
        if (OVS_LIKELY(p)) {
            int tx_qid;

            atomic_read_relaxed(&pmd->tx_qid, &tx_qid);

            netdev_send(p->netdev, tx_qid, packets_, may_steal);
            return;
        }
        break;

    case OVS_ACTION_ATTR_TUNNEL_PUSH:
        if (*depth < MAX_RECIRC_DEPTH) {
            struct dp_packet_batch tnl_pkt;
            int err;

            if (!may_steal) {
                dp_packet_batch_clone(&tnl_pkt, packets_);
                packets_ = &tnl_pkt;
            }

            err = push_tnl_action(dp, a, packets_);
            if (!err) {
                (*depth)++;
                dp_netdev_recirculate(pmd, packets_);
                (*depth)--;
            }
            return;
        }
        break;

    case OVS_ACTION_ATTR_TUNNEL_POP:
        if (*depth < MAX_RECIRC_DEPTH) {
            odp_port_t portno = u32_to_odp(nl_attr_get_u32(a));

            p = dp_netdev_lookup_port(dp, portno);
            if (p) {
                struct dp_packet_batch tnl_pkt;

                if (!may_steal) {
                    dp_packet_batch_clone(&tnl_pkt, packets_);
                    packets_ = &tnl_pkt;
                }

                netdev_pop_header(p->netdev, packets_);
                if (!packets_->count) {
                    return;
                }

                for (i = 0; i < packets_->count; i++) {
                    packets_->packets[i]->md.in_port.odp_port = portno;
                }

                (*depth)++;
                dp_netdev_recirculate(pmd, packets_);
                (*depth)--;
                return;
            }
        }
        break;

    case OVS_ACTION_ATTR_USERSPACE:
        if (!fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
            struct dp_packet **packets = packets_->packets;
            const struct nlattr *userdata;
            struct ofpbuf actions;
            struct flow flow;
            ovs_u128 ufid;

            userdata = nl_attr_find_nested(a, OVS_USERSPACE_ATTR_USERDATA);
            ofpbuf_init(&actions, 0);

            for (i = 0; i < packets_->count; i++) {
                flow_extract(packets[i], &flow);
                dpif_flow_hash(dp->dpif, &flow, sizeof flow, &ufid);
                dp_execute_userspace_action(pmd, packets[i], may_steal, &flow,
                                            &ufid, &actions, userdata);
            }
            ofpbuf_uninit(&actions);
            fat_rwlock_unlock(&dp->upcall_rwlock);

            return;
        }
        break;

    case OVS_ACTION_ATTR_RECIRC:
        if (*depth < MAX_RECIRC_DEPTH) {
            struct dp_packet_batch recirc_pkts;

            if (!may_steal) {
                dp_packet_batch_clone(&recirc_pkts, packets_);
                packets_ = &recirc_pkts;
            }

            for (i = 0; i < packets_->count; i++) {
                packets_->packets[i]->md.recirc_id = nl_attr_get_u32(a);
            }

            (*depth)++;
            dp_netdev_recirculate(pmd, packets_);
            (*depth)--;

            return;
        }

        VLOG_WARN("Packet dropped. Max recirculation depth exceeded.");
        break;

    case OVS_ACTION_ATTR_CT:
        /* If a flow with this action is slow-pathed, datapath assistance is
         * required to implement it.  However, we don't support this action
         * in the userspace datapath. */
        VLOG_WARN("Cannot execute conntrack action in userspace.");
        break;

    case OVS_ACTION_ATTR_PUSH_VLAN:
    case OVS_ACTION_ATTR_POP_VLAN:
    case OVS_ACTION_ATTR_PUSH_MPLS:
    case OVS_ACTION_ATTR_POP_MPLS:
    case OVS_ACTION_ATTR_SET:
    case OVS_ACTION_ATTR_SET_MASKED:
    case OVS_ACTION_ATTR_SAMPLE:
    case OVS_ACTION_ATTR_HASH:
    case OVS_ACTION_ATTR_UNSPEC:
    case __OVS_ACTION_ATTR_MAX:
        OVS_NOT_REACHED();
    }

    dp_packet_delete_batch(packets_, may_steal);
}
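
/* 'may_steal' contract in dp_execute_cb(): when true, the callee owns the
 * packets and every branch must either transmit, recirculate, or delete
 * them; when false, branches that would consume the batch (tunnel push/pop,
 * recirc) first copy it with dp_packet_batch_clone() and work on the copy,
 * leaving the caller's packets untouched.  The dp_packet_delete_batch() at
 * the end is the common drop path for actions that could not be executed. */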
3951 dp_netdev_execute_actions(struct dp_netdev_pmd_thread
*pmd
,
3952 struct dp_packet_batch
*packets
,
3954 const struct nlattr
*actions
, size_t actions_len
)
3956 struct dp_netdev_execute_aux aux
= { pmd
};
3958 odp_execute_actions(&aux
, packets
, may_steal
, actions
,
3959 actions_len
, dp_execute_cb
);
3962 const struct dpif_class dpif_netdev_class
= {
3965 dpif_netdev_enumerate
,
3966 dpif_netdev_port_open_type
,
3969 dpif_netdev_destroy
,
3972 dpif_netdev_get_stats
,
3973 dpif_netdev_port_add
,
3974 dpif_netdev_port_del
,
3975 dpif_netdev_port_query_by_number
,
3976 dpif_netdev_port_query_by_name
,
3977 NULL
, /* port_get_pid */
3978 dpif_netdev_port_dump_start
,
3979 dpif_netdev_port_dump_next
,
3980 dpif_netdev_port_dump_done
,
3981 dpif_netdev_port_poll
,
3982 dpif_netdev_port_poll_wait
,
3983 dpif_netdev_flow_flush
,
3984 dpif_netdev_flow_dump_create
,
3985 dpif_netdev_flow_dump_destroy
,
3986 dpif_netdev_flow_dump_thread_create
,
3987 dpif_netdev_flow_dump_thread_destroy
,
3988 dpif_netdev_flow_dump_next
,
3989 dpif_netdev_operate
,
3990 NULL
, /* recv_set */
3991 NULL
, /* handlers_set */
3992 dpif_netdev_pmd_set
,
3993 dpif_netdev_queue_to_priority
,
3995 NULL
, /* recv_wait */
3996 NULL
, /* recv_purge */
3997 dpif_netdev_register_dp_purge_cb
,
3998 dpif_netdev_register_upcall_cb
,
3999 dpif_netdev_enable_upcall
,
4000 dpif_netdev_disable_upcall
,
4001 dpif_netdev_get_datapath_version
,
4002 NULL
, /* ct_dump_start */
4003 NULL
, /* ct_dump_next */
4004 NULL
, /* ct_dump_done */
4005 NULL
, /* ct_flush */
4009 dpif_dummy_change_port_number(struct unixctl_conn
*conn
, int argc OVS_UNUSED
,
4010 const char *argv
[], void *aux OVS_UNUSED
)
4012 struct dp_netdev_port
*old_port
;
4013 struct dp_netdev_port
*new_port
;
4014 struct dp_netdev
*dp
;
4017 ovs_mutex_lock(&dp_netdev_mutex
);
4018 dp
= shash_find_data(&dp_netdevs
, argv
[1]);
4019 if (!dp
|| !dpif_netdev_class_is_dummy(dp
->class)) {
4020 ovs_mutex_unlock(&dp_netdev_mutex
);
4021 unixctl_command_reply_error(conn
, "unknown datapath or not a dummy");
4024 ovs_refcount_ref(&dp
->ref_cnt
);
4025 ovs_mutex_unlock(&dp_netdev_mutex
);
4027 ovs_mutex_lock(&dp
->port_mutex
);
4028 if (get_port_by_name(dp
, argv
[2], &old_port
)) {
4029 unixctl_command_reply_error(conn
, "unknown port");
4033 port_no
= u32_to_odp(atoi(argv
[3]));
4034 if (!port_no
|| port_no
== ODPP_NONE
) {
4035 unixctl_command_reply_error(conn
, "bad port number");
4038 if (dp_netdev_lookup_port(dp
, port_no
)) {
4039 unixctl_command_reply_error(conn
, "port number already in use");
4043 /* Remove old port. */
4044 cmap_remove(&dp
->ports
, &old_port
->node
, hash_port_no(old_port
->port_no
));
4045 ovsrcu_postpone(free
, old_port
);
4047 /* Insert new port (cmap semantics mean we cannot re-insert 'old_port'). */
4048 new_port
= xmemdup(old_port
, sizeof *old_port
);
4049 new_port
->port_no
= port_no
;
4050 cmap_insert(&dp
->ports
, &new_port
->node
, hash_port_no(port_no
));
4052 seq_change(dp
->port_seq
);
4053 unixctl_command_reply(conn
, NULL
);
4056 ovs_mutex_unlock(&dp
->port_mutex
);
4057 dp_netdev_unref(dp
);
4061 dpif_dummy_register__(const char *type
)
4063 struct dpif_class
*class;
4065 class = xmalloc(sizeof *class);
4066 *class = dpif_netdev_class
;
4067 class->type
= xstrdup(type
);
4068 dp_register_provider(class);
4072 dpif_dummy_override(const char *type
)
4077 * Ignore EAFNOSUPPORT to allow --enable-dummy=system with
4078 * a userland-only build. It's useful for testsuite.
4080 error
= dp_unregister_provider(type
);
4081 if (error
== 0 || error
== EAFNOSUPPORT
) {
4082 dpif_dummy_register__(type
);
4087 dpif_dummy_register(enum dummy_level level
)
4089 if (level
== DUMMY_OVERRIDE_ALL
) {
4094 dp_enumerate_types(&types
);
4095 SSET_FOR_EACH (type
, &types
) {
4096 dpif_dummy_override(type
);
4098 sset_destroy(&types
);
4099 } else if (level
== DUMMY_OVERRIDE_SYSTEM
) {
4100 dpif_dummy_override("system");
4103 dpif_dummy_register__("dummy");
4105 unixctl_command_register("dpif-dummy/change-port-number",
4106 "dp port new-number",
4107 3, 3, dpif_dummy_change_port_number
, NULL
);
4110 /* Datapath Classifier. */
4112 /* A set of rules that all have the same fields wildcarded. */
4113 struct dpcls_subtable
{
4114 /* The fields are only used by writers. */
4115 struct cmap_node cmap_node OVS_GUARDED
; /* Within dpcls 'subtables_map'. */
4117 /* These fields are accessed by readers. */
4118 struct cmap rules
; /* Contains "struct dpcls_rule"s. */
4119 struct netdev_flow_key mask
; /* Wildcards for fields (const). */
4120 /* 'mask' must be the last field, additional space is allocated here. */
4123 /* Initializes 'cls' as a classifier that initially contains no classification
4126 dpcls_init(struct dpcls
*cls
)
4128 cmap_init(&cls
->subtables_map
);
4129 pvector_init(&cls
->subtables
);
4133 dpcls_destroy_subtable(struct dpcls
*cls
, struct dpcls_subtable
*subtable
)
4135 pvector_remove(&cls
->subtables
, subtable
);
4136 cmap_remove(&cls
->subtables_map
, &subtable
->cmap_node
,
4137 subtable
->mask
.hash
);
4138 cmap_destroy(&subtable
->rules
);
4139 ovsrcu_postpone(free
, subtable
);
4142 /* Destroys 'cls'. Rules within 'cls', if any, are not freed; this is the
4143 * caller's responsibility.
4144 * May only be called after all the readers have been terminated. */
4146 dpcls_destroy(struct dpcls
*cls
)
4149 struct dpcls_subtable
*subtable
;
4151 CMAP_FOR_EACH (subtable
, cmap_node
, &cls
->subtables_map
) {
4152 ovs_assert(cmap_count(&subtable
->rules
) == 0);
4153 dpcls_destroy_subtable(cls
, subtable
);
4155 cmap_destroy(&cls
->subtables_map
);
4156 pvector_destroy(&cls
->subtables
);
4160 static struct dpcls_subtable
*
4161 dpcls_create_subtable(struct dpcls
*cls
, const struct netdev_flow_key
*mask
)
4163 struct dpcls_subtable
*subtable
;
4165 /* Need to add one. */
4166 subtable
= xmalloc(sizeof *subtable
4167 - sizeof subtable
->mask
.mf
+ mask
->len
);
4168 cmap_init(&subtable
->rules
);
4169 netdev_flow_key_clone(&subtable
->mask
, mask
);
4170 cmap_insert(&cls
->subtables_map
, &subtable
->cmap_node
, mask
->hash
);
4171 pvector_insert(&cls
->subtables
, subtable
, 0);
4172 pvector_publish(&cls
->subtables
);
4177 static inline struct dpcls_subtable
*
4178 dpcls_find_subtable(struct dpcls
*cls
, const struct netdev_flow_key
*mask
)
4180 struct dpcls_subtable
*subtable
;
4182 CMAP_FOR_EACH_WITH_HASH (subtable
, cmap_node
, mask
->hash
,
4183 &cls
->subtables_map
) {
4184 if (netdev_flow_key_equal(&subtable
->mask
, mask
)) {
4188 return dpcls_create_subtable(cls
, mask
);
4191 /* Insert 'rule' into 'cls'. */
4193 dpcls_insert(struct dpcls
*cls
, struct dpcls_rule
*rule
,
4194 const struct netdev_flow_key
*mask
)
4196 struct dpcls_subtable
*subtable
= dpcls_find_subtable(cls
, mask
);
4198 rule
->mask
= &subtable
->mask
;
4199 cmap_insert(&subtable
->rules
, &rule
->cmap_node
, rule
->flow
.hash
);
4202 /* Removes 'rule' from 'cls', also destructing the 'rule'. */
4204 dpcls_remove(struct dpcls
*cls
, struct dpcls_rule
*rule
)
4206 struct dpcls_subtable
*subtable
;
4208 ovs_assert(rule
->mask
);
4210 INIT_CONTAINER(subtable
, rule
->mask
, mask
);
4212 if (cmap_remove(&subtable
->rules
, &rule
->cmap_node
, rule
->flow
.hash
)
4214 dpcls_destroy_subtable(cls
, subtable
);
4215 pvector_publish(&cls
->subtables
);
4219 /* Returns true if 'target' satisfies 'key' in 'mask', that is, if each 1-bit
4220 * in 'mask' the values in 'key' and 'target' are the same. */
4222 dpcls_rule_matches_key(const struct dpcls_rule
*rule
,
4223 const struct netdev_flow_key
*target
)
4225 const uint64_t *keyp
= miniflow_get_values(&rule
->flow
.mf
);
4226 const uint64_t *maskp
= miniflow_get_values(&rule
->mask
->mf
);
4229 NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value
, target
, rule
->flow
.mf
.map
) {
4230 if (OVS_UNLIKELY((value
& *maskp
++) != *keyp
++)) {
4237 /* For each miniflow in 'flows' performs a classifier lookup writing the result
4238 * into the corresponding slot in 'rules'. If a particular entry in 'flows' is
4239 * NULL it is skipped.
4241 * This function is optimized for use in the userspace datapath and therefore
4242 * does not implement a lot of features available in the standard
4243 * classifier_lookup() function. Specifically, it does not implement
4244 * priorities, instead returning any rule which matches the flow.
4246 * Returns true if all flows found a corresponding rule. */
4248 dpcls_lookup(const struct dpcls
*cls
, const struct netdev_flow_key keys
[],
4249 struct dpcls_rule
**rules
, const size_t cnt
)
4251 /* The batch size 16 was experimentally found faster than 8 or 32. */
4252 typedef uint16_t map_type
;
4253 #define MAP_BITS (sizeof(map_type) * CHAR_BIT)
4255 #if !defined(__CHECKER__) && !defined(_WIN32)
4256 const int N_MAPS
= DIV_ROUND_UP(cnt
, MAP_BITS
);
4258 enum { N_MAPS
= DIV_ROUND_UP(NETDEV_MAX_BURST
, MAP_BITS
) };
4260 map_type maps
[N_MAPS
];
4261 struct dpcls_subtable
*subtable
;
4263 memset(maps
, 0xff, sizeof maps
);
4264 if (cnt
% MAP_BITS
) {
4265 maps
[N_MAPS
- 1] >>= MAP_BITS
- cnt
% MAP_BITS
; /* Clear extra bits. */
4267 memset(rules
, 0, cnt
* sizeof *rules
);
4269 PVECTOR_FOR_EACH (subtable
, &cls
->subtables
) {
4270 const struct netdev_flow_key
*mkeys
= keys
;
4271 struct dpcls_rule
**mrules
= rules
;
4272 map_type remains
= 0;
4275 BUILD_ASSERT_DECL(sizeof remains
== sizeof *maps
);
4277 for (m
= 0; m
< N_MAPS
; m
++, mkeys
+= MAP_BITS
, mrules
+= MAP_BITS
) {
4278 uint32_t hashes
[MAP_BITS
];
4279 const struct cmap_node
*nodes
[MAP_BITS
];
4280 unsigned long map
= maps
[m
];
4284 continue; /* Skip empty maps. */
4287 /* Compute hashes for the remaining keys. */
4288 ULLONG_FOR_EACH_1(i
, map
) {
4289 hashes
[i
] = netdev_flow_key_hash_in_mask(&mkeys
[i
],
4293 map
= cmap_find_batch(&subtable
->rules
, map
, hashes
, nodes
);
4294 /* Check results. */
4295 ULLONG_FOR_EACH_1(i
, map
) {
4296 struct dpcls_rule
*rule
;
4298 CMAP_NODE_FOR_EACH (rule
, cmap_node
, nodes
[i
]) {
4299 if (OVS_LIKELY(dpcls_rule_matches_key(rule
, &mkeys
[i
]))) {
4304 ULLONG_SET0(map
, i
); /* Did not match. */
4306 ; /* Keep Sparse happy. */
4308 maps
[m
] &= ~map
; /* Clear the found rules. */
4312 return true; /* All found. */
4315 return false; /* Some misses. */