/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dpif-netdev.h"

#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/ioctl.h>

#include "classifier.h"
#include "dpif-provider.h"
#include "dynamic-string.h"
#include "fat-rwlock.h"
#include "meta-flow.h"
#include "netdev-dpdk.h"
#include "netdev-vport.h"
#include "odp-execute.h"
#include "ofp-print.h"
#include "packet-dpif.h"
#include "poll-loop.h"

VLOG_DEFINE_THIS_MODULE(dpif_netdev);

/* By default, choose a priority in the middle. */
#define NETDEV_RULE_PRIORITY 0x8000

#define FLOW_DUMP_MAX_BATCH 50
/* Use per thread recirc_depth to prevent recirculation loop. */
#define MAX_RECIRC_DEPTH 5
DEFINE_STATIC_PER_THREAD_DATA(uint32_t, recirc_depth, 0)

/* Configuration parameters. */
enum { MAX_FLOWS = 65536 };     /* Maximum number of flows in flow table. */

/* Protects against changes to 'dp_netdevs'. */
static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dp_netdev's. */
static struct shash dp_netdevs OVS_GUARDED_BY(dp_netdev_mutex)
    = SHASH_INITIALIZER(&dp_netdevs);

static struct vlog_rate_limit upcall_rl = VLOG_RATE_LIMIT_INIT(600, 600);

/* Stores a miniflow with inline values. */

/* There are fields in the flow structure that we never use. Therefore we can
 * save a few words of memory. */
#define NETDEV_KEY_BUF_SIZE_U32 (FLOW_U32S                 \
                                 - FLOW_U32_SIZE(regs)     \
                                 - FLOW_U32_SIZE(metadata) \
                                )
struct netdev_flow_key {
    struct miniflow flow;
    uint32_t buf[NETDEV_KEY_BUF_SIZE_U32];
};

/* Exact match cache for frequently used flows
 *
 * The cache uses a 32-bit hash of the packet (which can be the RSS hash) to
 * search its entries for a miniflow that matches exactly the miniflow of the
 * packet. It stores the 'cls_rule' (rule) that matches the miniflow.
 *
 * A cache entry holds a reference to its 'dp_netdev_flow'.
 *
 * A miniflow with a given hash can be in one of EM_FLOW_HASH_SEGS different
 * entries. The 32-bit hash is split into EM_FLOW_HASH_SEGS values (each of
 * them is EM_FLOW_HASH_SHIFT bits wide and the remainder is thrown away). Each
 * value is the index of a cache entry where the miniflow could be.
 *
 * Each pmd_thread has its own private exact match cache.
 * If dp_netdev_input is not called from a pmd thread, a mutex is used.
 */

#define EM_FLOW_HASH_SHIFT 10
#define EM_FLOW_HASH_ENTRIES (1u << EM_FLOW_HASH_SHIFT)
#define EM_FLOW_HASH_MASK (EM_FLOW_HASH_ENTRIES - 1)
#define EM_FLOW_HASH_SEGS 2
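
/* Illustrative example (not part of the original source): with the defaults
 * above, EM_FLOW_HASH_SHIFT = 10 gives 1024 entries and a 10-bit index mask
 * of 0x3ff.  A 32-bit hash of, say, 0xabcd1234 yields EM_FLOW_HASH_SEGS = 2
 * candidate positions:
 *
 *     seg0 = 0xabcd1234         & 0x3ff == 0x234
 *     seg1 = (0xabcd1234 >> 10) & 0x3ff == 0x344
 *
 * so the flow may live at entries[0x234] or entries[0x344]; the remaining
 * 12 high bits of the hash are discarded. */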
struct emc_entry {
    uint32_t hash;
    uint32_t mf_len;
    struct netdev_flow_key mf;
    struct dp_netdev_flow *flow;
};

struct emc_cache {
    struct emc_entry entries[EM_FLOW_HASH_ENTRIES];
};

/* Iterate in the exact match cache through every entry that might contain a
 * miniflow with hash 'HASH'. */
#define EMC_FOR_EACH_POS_WITH_HASH(EMC, CURRENT_ENTRY, HASH)                 \
    for (uint32_t i__ = 0, srch_hash__ = (HASH);                             \
         (CURRENT_ENTRY) = &(EMC)->entries[srch_hash__ & EM_FLOW_HASH_MASK], \
             i__ < EM_FLOW_HASH_SEGS;                                        \
         i__++, srch_hash__ >>= EM_FLOW_HASH_SHIFT)
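
/* Sketch of how the macro above is used (illustrative only): with
 * EM_FLOW_HASH_SEGS == 2, a loop such as
 *
 *     struct emc_entry *e;
 *     EMC_FOR_EACH_POS_WITH_HASH(cache, e, hash) {
 *         ... 'e' points at one candidate entry per iteration ...
 *     }
 *
 * visits entries[hash & EM_FLOW_HASH_MASK] on the first iteration and
 * entries[(hash >> EM_FLOW_HASH_SHIFT) & EM_FLOW_HASH_MASK] on the second,
 * mirroring the lookups done by emc_lookup() and emc_insert() below. */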
/* Datapath based on the network device interface from netdev.h.
 *
 * Some members, marked 'const', are immutable. Accessing other members
 * requires synchronization, as noted in more detail below.
 *
 * Acquisition order is, from outermost to innermost:
 *
 *    dp_netdev_mutex (global)
 */
struct dp_netdev {
    const struct dpif_class *const class;
    const char *const name;
    struct ovs_refcount ref_cnt;
    atomic_flag destroyed;

    /* Flows.
     *
     * Writers of 'flow_table' must take the 'flow_mutex'. Corresponding
     * changes to 'cls' must be made while still holding the 'flow_mutex'. */
    struct ovs_mutex flow_mutex;
    struct classifier cls;
    struct cmap flow_table OVS_GUARDED; /* Flow table. */

    /* Statistics.
     *
     * ovsthread_stats is internally synchronized. */
    struct ovsthread_stats stats; /* Contains 'struct dp_netdev_stats *'. */

    /* Ports.
     *
     * Protected by RCU. Take the mutex to add or remove ports. */
    struct ovs_mutex port_mutex;
    struct cmap ports;
    struct seq *port_seq;       /* Incremented whenever a port changes. */

    /* Protects access to ofproto-dpif-upcall interface during revalidator
     * thread synchronization. */
    struct fat_rwlock upcall_rwlock;
    upcall_callback *upcall_cb; /* Callback function for executing upcalls. */
    void *upcall_aux;

    /* Stores all 'struct dp_netdev_pmd_thread's. */
    struct cmap poll_threads;

    /* Protects the access of the 'struct dp_netdev_pmd_thread'
     * instance for non-pmd thread. */
    struct ovs_mutex non_pmd_mutex;

    /* Each pmd thread will store its pointer to
     * 'struct dp_netdev_pmd_thread' in 'per_pmd_key'. */
    ovsthread_key_t per_pmd_key;

    /* Number of rx queues for each dpdk interface and the cpu mask
     * for pinning pmd threads. */
    size_t n_dpdk_rxqs;
    char *pmd_cmask;
};

static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp,
                                                    odp_port_t port_no);

enum dp_stat_type {
    DP_STAT_HIT,                /* Packets that matched in the flow table. */
    DP_STAT_MISS,               /* Packets that did not match. */
    DP_STAT_LOST,               /* Packets not passed up to the client. */
    DP_N_STATS
};

/* Contained by struct dp_netdev's 'stats' member. */
struct dp_netdev_stats {
    struct ovs_mutex mutex;     /* Protects 'n'. */

    /* Indexed by DP_STAT_*, protected by 'mutex'. */
    unsigned long long int n[DP_N_STATS] OVS_GUARDED;
};

/* A port in a netdev-based datapath. */
struct dp_netdev_port {
    struct cmap_node node;      /* Node in dp_netdev's 'ports'. */
    odp_port_t port_no;
    struct netdev *netdev;
    struct netdev_saved_flags *sf;
    struct netdev_rxq **rxq;
    struct ovs_refcount ref_cnt;
    char *type;                 /* Port type as requested by user. */
};

/* A flow in dp_netdev's 'flow_table'.
 *
 * Except near the beginning or ending of its lifespan, rule 'rule' belongs to
 * its dp_netdev's classifier. The text below calls this classifier 'cls'.
 *
 * The thread safety rules described here for "struct dp_netdev_flow" are
 * motivated by two goals:
 *
 *    - Prevent threads that read members of "struct dp_netdev_flow" from
 *      reading bad data due to changes by some thread concurrently modifying
 *      those members.
 *
 *    - Prevent two threads making changes to members of a given "struct
 *      dp_netdev_flow" from interfering with each other.
 *
 * A flow 'flow' may be accessed without a risk of being freed during an RCU
 * grace period. Code that needs to hold onto a flow for a while
 * should try incrementing 'flow->ref_cnt' with dp_netdev_flow_ref().
 *
 * 'flow->ref_cnt' protects 'flow' from being freed. It doesn't protect the
 * flow from being deleted from 'cls' and it doesn't protect members of 'flow'
 * from modification.
 *
 * Some members, marked 'const', are immutable. Accessing other members
 * requires synchronization, as noted in more detail below.
 */
struct dp_netdev_flow {
    bool dead;

    /* Packet classification. */
    const struct cls_rule cr;   /* In owning dp_netdev's 'cls'. */

    /* Hash table index by unmasked flow. */
    const struct cmap_node node; /* In owning dp_netdev's 'flow_table'. */
    const struct flow flow;      /* The flow that created this entry. */

    /* Number of references.
     * The classifier owns one reference.
     * Any thread trying to keep a rule from being freed should hold its own
     * reference. */
    struct ovs_refcount ref_cnt;

    /* Statistics.
     *
     * Reading or writing these members requires 'mutex'. */
    struct ovsthread_stats stats; /* Contains "struct dp_netdev_flow_stats". */

    /* Actions. */
    OVSRCU_TYPE(struct dp_netdev_actions *) actions;
};

static void dp_netdev_flow_unref(struct dp_netdev_flow *);
static bool dp_netdev_flow_ref(struct dp_netdev_flow *);

/* Contained by struct dp_netdev_flow's 'stats' member. */
struct dp_netdev_flow_stats {
    struct ovs_mutex mutex;         /* Guards all the other members. */

    long long int used OVS_GUARDED; /* Last used time, in monotonic msecs. */
    long long int packet_count OVS_GUARDED; /* Number of packets matched. */
    long long int byte_count OVS_GUARDED;   /* Number of bytes matched. */
    uint16_t tcp_flags OVS_GUARDED; /* Bitwise-OR of seen tcp_flags values. */
};

/* A set of datapath actions within a "struct dp_netdev_flow".
 *
 * A struct dp_netdev_actions 'actions' is protected with RCU. */
struct dp_netdev_actions {
    /* These members are immutable: they do not change during the struct's
     * lifetime. */
    struct nlattr *actions;     /* Sequence of OVS_ACTION_ATTR_* attributes. */
    unsigned int size;          /* Size of 'actions', in bytes. */
};

struct dp_netdev_actions *dp_netdev_actions_create(const struct nlattr *,
                                                   size_t);
struct dp_netdev_actions *dp_netdev_flow_get_actions(
    const struct dp_netdev_flow *);
static void dp_netdev_actions_free(struct dp_netdev_actions *);

/* PMD: Poll mode drivers. A PMD accesses devices via polling to eliminate
 * the performance overhead of interrupt processing. Therefore netdev cannot
 * implement rx-wait for these devices. dpif-netdev needs to poll these
 * devices to check for received buffers. A pmd-thread does the polling for
 * devices assigned to it.
 *
 * DPDK uses PMD for accessing NICs.
 *
 * Note, the instance with cpu core id NON_PMD_CORE_ID will be reserved for
 * I/O of all non-pmd threads. There will be no actual thread created
 * for that instance. */
struct dp_netdev_pmd_thread {
    struct dp_netdev *dp;
    struct cmap_node node;      /* In 'dp->poll_threads'. */
    /* Per thread exact-match cache. Note, the instance for cpu core
     * NON_PMD_CORE_ID can be accessed by multiple threads, and thus
     * needs to be protected (e.g. by 'dp_netdev_mutex'). All other
     * instances will only be accessed by their own pmd thread. */
    struct emc_cache flow_cache;
    struct latch exit_latch;    /* For terminating the pmd thread. */
    atomic_uint change_seq;     /* For reloading pmd ports. */
    pthread_t thread;
    int index;                  /* Idx of this pmd thread among pmd threads
                                 * on the same numa node. */
    int core_id;                /* CPU core id of this pmd thread. */
    int numa_id;                /* numa node id of this pmd thread. */
};

#define PMD_INITIAL_SEQ 1

/* Interface to netdev-based datapath. */
struct dpif_netdev {
    struct dpif dpif;
    struct dp_netdev *dp;
    uint64_t last_port_seq;
};

static int get_port_by_number(struct dp_netdev *dp, odp_port_t port_no,
                              struct dp_netdev_port **portp);
static int get_port_by_name(struct dp_netdev *dp, const char *devname,
                            struct dp_netdev_port **portp);
static void dp_netdev_free(struct dp_netdev *)
    OVS_REQUIRES(dp_netdev_mutex);
static void dp_netdev_flow_flush(struct dp_netdev *);
static int do_add_port(struct dp_netdev *dp, const char *devname,
                       const char *type, odp_port_t port_no)
    OVS_REQUIRES(dp->port_mutex);
static void do_del_port(struct dp_netdev *dp, struct dp_netdev_port *)
    OVS_REQUIRES(dp->port_mutex);
static int dpif_netdev_open(const struct dpif_class *, const char *name,
                            bool create, struct dpif **);
static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
                                      struct dpif_packet **, int c,
                                      bool may_steal, struct pkt_metadata *,
                                      const struct nlattr *actions,
                                      size_t actions_len);
static void dp_netdev_input(struct dp_netdev_pmd_thread *,
                            struct dpif_packet **, int cnt,
                            struct pkt_metadata *);
static void dp_netdev_disable_upcall(struct dp_netdev *);
static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd,
                                    struct dp_netdev *dp, int index,
                                    int core_id, int numa_id);
static void dp_netdev_set_nonpmd(struct dp_netdev *dp);
static struct dp_netdev_pmd_thread *dp_netdev_get_nonpmd(struct dp_netdev *dp);
static void dp_netdev_destroy_all_pmds(struct dp_netdev *dp);
static void dp_netdev_del_pmds_on_numa(struct dp_netdev *dp, int numa_id);
static void dp_netdev_set_pmds_on_numa(struct dp_netdev *dp, int numa_id);
static void dp_netdev_reset_pmd_threads(struct dp_netdev *dp);

static void emc_clear_entry(struct emc_entry *ce);

static void
emc_cache_init(struct emc_cache *flow_cache)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
        flow_cache->entries[i].flow = NULL;
        flow_cache->entries[i].hash = 0;
        flow_cache->entries[i].mf_len = 0;
        miniflow_initialize(&flow_cache->entries[i].mf.flow,
                            flow_cache->entries[i].mf.buf);
    }
}

static void
emc_cache_uninit(struct emc_cache *flow_cache)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
        emc_clear_entry(&flow_cache->entries[i]);
    }
}

static struct dpif_netdev *
dpif_netdev_cast(const struct dpif *dpif)
{
    ovs_assert(dpif->dpif_class->open == dpif_netdev_open);
    return CONTAINER_OF(dpif, struct dpif_netdev, dpif);
}

static struct dp_netdev *
get_dp_netdev(const struct dpif *dpif)
{
    return dpif_netdev_cast(dpif)->dp;
}

static int
dpif_netdev_enumerate(struct sset *all_dps,
                      const struct dpif_class *dpif_class)
{
    struct shash_node *node;

    ovs_mutex_lock(&dp_netdev_mutex);
    SHASH_FOR_EACH(node, &dp_netdevs) {
        struct dp_netdev *dp = node->data;
        if (dpif_class != dp->class) {
            /* 'dp_netdevs' contains both "netdev" and "dummy" dpifs.
             * If the class doesn't match, skip this dpif. */
            continue;
        }
        sset_add(all_dps, node->name);
    }
    ovs_mutex_unlock(&dp_netdev_mutex);

    return 0;
}

static bool
dpif_netdev_class_is_dummy(const struct dpif_class *class)
{
    return class != &dpif_netdev_class;
}

static const char *
dpif_netdev_port_open_type(const struct dpif_class *class, const char *type)
{
    return strcmp(type, "internal") ? type
                  : dpif_netdev_class_is_dummy(class) ? "dummy"
                  : "tap";
}
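
/* Summary of the mapping above, for illustration (the "tap" fallback follows
 * the upstream Open vSwitch source; the line was lost in this copy):
 *
 *     requested type    "netdev" class    dummy class
 *     --------------    --------------    -----------
 *     "internal"        "tap"             "dummy"
 *     anything else     passed through    passed through
 */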
static struct dpif *
create_dpif_netdev(struct dp_netdev *dp)
{
    uint16_t netflow_id = hash_string(dp->name, 0);
    struct dpif_netdev *dpif;

    ovs_refcount_ref(&dp->ref_cnt);

    dpif = xmalloc(sizeof *dpif);
    dpif_init(&dpif->dpif, dp->class, dp->name, netflow_id >> 8, netflow_id);
    dpif->dp = dp;
    dpif->last_port_seq = seq_read(dp->port_seq);

    return &dpif->dpif;
}

/* Choose an unused, non-zero port number and return it on success.
 * Return ODPP_NONE on failure. */
static odp_port_t
choose_port(struct dp_netdev *dp, const char *name)
    OVS_REQUIRES(dp->port_mutex)
{
    uint32_t port_no;

    if (dp->class != &dpif_netdev_class) {
        const char *p;
        int start_no = 0;

        /* If the port name begins with "br", start the number search at
         * 100 to make writing tests easier. */
        if (!strncmp(name, "br", 2)) {
            start_no = 100;
        }

        /* If the port name contains a number, try to assign that port number.
         * This can make writing unit tests easier because port numbers are
         * predictable. */
        for (p = name; *p != '\0'; p++) {
            if (isdigit((unsigned char) *p)) {
                port_no = start_no + strtol(p, NULL, 10);
                if (port_no > 0 && port_no != odp_to_u32(ODPP_NONE)
                    && !dp_netdev_lookup_port(dp, u32_to_odp(port_no))) {
                    return u32_to_odp(port_no);
                }
                break;
            }
        }
    }

    for (port_no = 1; port_no <= UINT16_MAX; port_no++) {
        if (!dp_netdev_lookup_port(dp, u32_to_odp(port_no))) {
            return u32_to_odp(port_no);
        }
    }

    return ODPP_NONE;
}
static int
create_dp_netdev(const char *name, const struct dpif_class *class,
                 struct dp_netdev **dpp)
    OVS_REQUIRES(dp_netdev_mutex)
{
    struct dp_netdev *dp;
    int error;

    dp = xzalloc(sizeof *dp);
    shash_add(&dp_netdevs, name, dp);

    *CONST_CAST(const struct dpif_class **, &dp->class) = class;
    *CONST_CAST(const char **, &dp->name) = xstrdup(name);
    ovs_refcount_init(&dp->ref_cnt);
    atomic_flag_clear(&dp->destroyed);

    ovs_mutex_init(&dp->flow_mutex);
    classifier_init(&dp->cls, NULL);
    cmap_init(&dp->flow_table);

    ovsthread_stats_init(&dp->stats);

    ovs_mutex_init(&dp->port_mutex);
    cmap_init(&dp->ports);
    dp->port_seq = seq_create();
    fat_rwlock_init(&dp->upcall_rwlock);

    /* Disable upcalls by default. */
    dp_netdev_disable_upcall(dp);
    dp->upcall_aux = NULL;
    dp->upcall_cb = NULL;

    cmap_init(&dp->poll_threads);
    ovs_mutex_init_recursive(&dp->non_pmd_mutex);
    ovsthread_key_create(&dp->per_pmd_key, NULL);

    /* Reserves the core NON_PMD_CORE_ID for all non-pmd threads. */
    ovs_numa_try_pin_core_specific(NON_PMD_CORE_ID);
    dp_netdev_set_nonpmd(dp);
    dp->n_dpdk_rxqs = NR_QUEUE;

    ovs_mutex_lock(&dp->port_mutex);
    error = do_add_port(dp, name, "internal", ODPP_LOCAL);
    ovs_mutex_unlock(&dp->port_mutex);
    if (error) {
        dp_netdev_free(dp);
        return error;
    }

    *dpp = dp;
    return 0;
}

static int
dpif_netdev_open(const struct dpif_class *class, const char *name,
                 bool create, struct dpif **dpifp)
{
    struct dp_netdev *dp;
    int error;

    ovs_mutex_lock(&dp_netdev_mutex);
    dp = shash_find_data(&dp_netdevs, name);
    if (!dp) {
        error = create ? create_dp_netdev(name, class, &dp) : ENODEV;
    } else {
        error = (dp->class != class ? EINVAL
                 : create ? EEXIST
                 : 0);
    }
    if (!error) {
        *dpifp = create_dpif_netdev(dp);
    }
    ovs_mutex_unlock(&dp_netdev_mutex);

    return error;
}

static void
dp_netdev_destroy_upcall_lock(struct dp_netdev *dp)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    /* Check that upcalls are disabled, i.e. that the rwlock is taken */
    ovs_assert(fat_rwlock_tryrdlock(&dp->upcall_rwlock));

    /* Before freeing a lock we should release it */
    fat_rwlock_unlock(&dp->upcall_rwlock);
    fat_rwlock_destroy(&dp->upcall_rwlock);
}

/* Requires dp_netdev_mutex so that we can't get a new reference to 'dp'
 * through the 'dp_netdevs' shash while freeing 'dp'. */
static void
dp_netdev_free(struct dp_netdev *dp)
    OVS_REQUIRES(dp_netdev_mutex)
{
    struct dp_netdev_port *port;
    struct dp_netdev_stats *bucket;
    int i;

    shash_find_and_delete(&dp_netdevs, dp->name);

    dp_netdev_destroy_all_pmds(dp);
    cmap_destroy(&dp->poll_threads);
    ovs_mutex_destroy(&dp->non_pmd_mutex);
    ovsthread_key_delete(dp->per_pmd_key);

    dp_netdev_flow_flush(dp);
    ovs_mutex_lock(&dp->port_mutex);
    CMAP_FOR_EACH (port, node, &dp->ports) {
        do_del_port(dp, port);
    }
    ovs_mutex_unlock(&dp->port_mutex);

    OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &dp->stats) {
        ovs_mutex_destroy(&bucket->mutex);
        free_cacheline(bucket);
    }
    ovsthread_stats_destroy(&dp->stats);

    classifier_destroy(&dp->cls);
    cmap_destroy(&dp->flow_table);
    ovs_mutex_destroy(&dp->flow_mutex);
    seq_destroy(dp->port_seq);
    cmap_destroy(&dp->ports);

    /* Upcalls must be disabled at this point */
    dp_netdev_destroy_upcall_lock(dp);

    free(CONST_CAST(char *, dp->name));
    free(dp);
}

static void
dp_netdev_unref(struct dp_netdev *dp)
{
    if (dp) {
        /* Take dp_netdev_mutex so that, if dp->ref_cnt falls to zero, we can't
         * get a new reference to 'dp' through the 'dp_netdevs' shash. */
        ovs_mutex_lock(&dp_netdev_mutex);
        if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) {
            dp_netdev_free(dp);
        }
        ovs_mutex_unlock(&dp_netdev_mutex);
    }
}

static void
dpif_netdev_close(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    dp_netdev_unref(dp);
    free(dpif);
}

static int
dpif_netdev_destroy(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    if (!atomic_flag_test_and_set(&dp->destroyed)) {
        if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) {
            /* Can't happen: 'dpif' still owns a reference to 'dp'. */
            OVS_NOT_REACHED();
        }
    }

    return 0;
}

static int
dpif_netdev_get_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_stats *bucket;
    size_t i;

    stats->n_flows = cmap_count(&dp->flow_table);

    stats->n_hit = stats->n_missed = stats->n_lost = 0;
    OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &dp->stats) {
        ovs_mutex_lock(&bucket->mutex);
        stats->n_hit += bucket->n[DP_STAT_HIT];
        stats->n_missed += bucket->n[DP_STAT_MISS];
        stats->n_lost += bucket->n[DP_STAT_LOST];
        ovs_mutex_unlock(&bucket->mutex);
    }
    stats->n_masks = UINT32_MAX;
    stats->n_mask_hit = UINT64_MAX;

    return 0;
}

static void
dp_netdev_reload_pmd__(struct dp_netdev_pmd_thread *pmd)
{
    int old_seq;

    atomic_add_relaxed(&pmd->change_seq, 1, &old_seq);
}

/* Causes all pmd threads to reload their tx/rx devices.
 * Must be called after adding/removing ports. */
static void
dp_netdev_reload_pmds(struct dp_netdev *dp)
{
    struct dp_netdev_pmd_thread *pmd;

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        dp_netdev_reload_pmd__(pmd);
    }
}

static uint32_t
hash_port_no(odp_port_t port_no)
{
    return hash_int(odp_to_u32(port_no), 0);
}

static int
do_add_port(struct dp_netdev *dp, const char *devname, const char *type,
            odp_port_t port_no)
    OVS_REQUIRES(dp->port_mutex)
{
    struct netdev_saved_flags *sf;
    struct dp_netdev_port *port;
    struct netdev *netdev;
    enum netdev_flags flags;
    const char *open_type;
    int error;
    int i;

    /* XXX reject devices already in some dp_netdev. */

    /* Open and validate network device. */
    open_type = dpif_netdev_port_open_type(dp->class, type);
    error = netdev_open(devname, open_type, &netdev);
    if (error) {
        return error;
    }
    /* XXX reject non-Ethernet devices */

    netdev_get_flags(netdev, &flags);
    if (flags & NETDEV_LOOPBACK) {
        VLOG_ERR("%s: cannot add a loopback device", devname);
        netdev_close(netdev);
        return EINVAL;
    }

    if (netdev_is_pmd(netdev)) {
        int n_cores = ovs_numa_get_n_cores();

        if (n_cores == OVS_CORE_UNSPEC) {
            VLOG_ERR("%s, cannot get cpu core info", devname);
            return ENOENT;
        }
        /* There can only be ovs_numa_get_n_cores() pmd threads,
         * so creates a txq for each. */
        error = netdev_set_multiq(netdev, n_cores, dp->n_dpdk_rxqs);
        if (error) {
            VLOG_ERR("%s, cannot set multiq", devname);
            return errno;
        }
    }
    port = xzalloc(sizeof *port);
    port->port_no = port_no;
    port->netdev = netdev;
    port->rxq = xmalloc(sizeof *port->rxq * netdev_n_rxq(netdev));
    port->type = xstrdup(type);
    for (i = 0; i < netdev_n_rxq(netdev); i++) {
        error = netdev_rxq_open(netdev, &port->rxq[i], i);
        if (error
            && !(error == EOPNOTSUPP && dpif_netdev_class_is_dummy(dp->class))) {
            VLOG_ERR("%s: cannot receive packets on this network device (%s)",
                     devname, ovs_strerror(errno));
            netdev_close(netdev);
            return error;
        }
    }

    error = netdev_turn_flags_on(netdev, NETDEV_PROMISC, &sf);
    if (error) {
        for (i = 0; i < netdev_n_rxq(netdev); i++) {
            netdev_rxq_close(port->rxq[i]);
        }
        netdev_close(netdev);
        free(port->rxq);
        free(port);
        return error;
    }
    port->sf = sf;

    if (netdev_is_pmd(netdev)) {
        dp_netdev_set_pmds_on_numa(dp, netdev_get_numa_id(netdev));
        dp_netdev_reload_pmds(dp);
    }
    ovs_refcount_init(&port->ref_cnt);

    cmap_insert(&dp->ports, &port->node, hash_port_no(port_no));
    seq_change(dp->port_seq);

    return 0;
}

static int
dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev,
                     odp_port_t *port_nop)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
    const char *dpif_port;
    odp_port_t port_no;
    int error;

    ovs_mutex_lock(&dp->port_mutex);
    dpif_port = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
    if (*port_nop != ODPP_NONE) {
        port_no = *port_nop;
        error = dp_netdev_lookup_port(dp, *port_nop) ? EBUSY : 0;
    } else {
        port_no = choose_port(dp, dpif_port);
        error = port_no == ODPP_NONE ? EFBIG : 0;
    }
    if (!error) {
        *port_nop = port_no;
        error = do_add_port(dp, dpif_port, netdev_get_type(netdev), port_no);
    }
    ovs_mutex_unlock(&dp->port_mutex);

    return error;
}

static int
dpif_netdev_port_del(struct dpif *dpif, odp_port_t port_no)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    int error;

    ovs_mutex_lock(&dp->port_mutex);
    if (port_no == ODPP_LOCAL) {
        error = EINVAL;
    } else {
        struct dp_netdev_port *port;

        error = get_port_by_number(dp, port_no, &port);
        if (!error) {
            do_del_port(dp, port);
        }
    }
    ovs_mutex_unlock(&dp->port_mutex);

    return error;
}

static bool
is_valid_port_number(odp_port_t port_no)
{
    return port_no != ODPP_NONE;
}

static struct dp_netdev_port *
dp_netdev_lookup_port(const struct dp_netdev *dp, odp_port_t port_no)
{
    struct dp_netdev_port *port;

    CMAP_FOR_EACH_WITH_HASH (port, node, hash_port_no(port_no), &dp->ports) {
        if (port->port_no == port_no) {
            return port;
        }
    }
    return NULL;
}

static int
get_port_by_number(struct dp_netdev *dp,
                   odp_port_t port_no, struct dp_netdev_port **portp)
{
    if (!is_valid_port_number(port_no)) {
        *portp = NULL;
        return EINVAL;
    } else {
        *portp = dp_netdev_lookup_port(dp, port_no);
        return *portp ? 0 : ENOENT;
    }
}

static void
port_ref(struct dp_netdev_port *port)
{
    if (port) {
        ovs_refcount_ref(&port->ref_cnt);
    }
}

static bool
port_try_ref(struct dp_netdev_port *port)
{
    if (port) {
        return ovs_refcount_try_ref_rcu(&port->ref_cnt);
    }

    return false;
}

static void
port_destroy__(struct dp_netdev_port *port)
{
    int n_rxq = netdev_n_rxq(port->netdev);
    int i;

    netdev_close(port->netdev);
    netdev_restore_flags(port->sf);

    for (i = 0; i < n_rxq; i++) {
        netdev_rxq_close(port->rxq[i]);
    }
    free(port->rxq);
    free(port->type);
    free(port);
}

static void
port_unref(struct dp_netdev_port *port)
{
    if (port && ovs_refcount_unref_relaxed(&port->ref_cnt) == 1) {
        ovsrcu_postpone(port_destroy__, port);
    }
}

static int
get_port_by_name(struct dp_netdev *dp,
                 const char *devname, struct dp_netdev_port **portp)
    OVS_REQUIRES(dp->port_mutex)
{
    struct dp_netdev_port *port;

    CMAP_FOR_EACH (port, node, &dp->ports) {
        if (!strcmp(netdev_get_name(port->netdev), devname)) {
            *portp = port;
            return 0;
        }
    }
    return ENOENT;
}

static int
get_n_pmd_threads_on_numa(struct dp_netdev *dp, int numa_id)
{
    struct dp_netdev_pmd_thread *pmd;
    int n_pmds = 0;

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        if (pmd->numa_id == numa_id) {
            n_pmds++;
        }
    }

    return n_pmds;
}

/* Returns 'true' if there is a port with pmd netdev and the netdev
 * is on numa node 'numa_id'. */
static bool
has_pmd_port_for_numa(struct dp_netdev *dp, int numa_id)
{
    struct dp_netdev_port *port;

    CMAP_FOR_EACH (port, node, &dp->ports) {
        if (netdev_is_pmd(port->netdev)
            && netdev_get_numa_id(port->netdev) == numa_id) {
            return true;
        }
    }

    return false;
}

static void
do_del_port(struct dp_netdev *dp, struct dp_netdev_port *port)
    OVS_REQUIRES(dp->port_mutex)
{
    cmap_remove(&dp->ports, &port->node, hash_odp_port(port->port_no));
    seq_change(dp->port_seq);
    if (netdev_is_pmd(port->netdev)) {
        int numa_id = netdev_get_numa_id(port->netdev);

        /* If there is no netdev on the numa node, deletes the pmd threads
         * for that numa. Else, just reloads the queues. */
        if (!has_pmd_port_for_numa(dp, numa_id)) {
            dp_netdev_del_pmds_on_numa(dp, numa_id);
        }
        dp_netdev_reload_pmds(dp);
    }

    port_unref(port);
}

static void
answer_port_query(const struct dp_netdev_port *port,
                  struct dpif_port *dpif_port)
{
    dpif_port->name = xstrdup(netdev_get_name(port->netdev));
    dpif_port->type = xstrdup(port->type);
    dpif_port->port_no = port->port_no;
}

static int
dpif_netdev_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
                                 struct dpif_port *dpif_port)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_port *port;
    int error;

    error = get_port_by_number(dp, port_no, &port);
    if (!error && dpif_port) {
        answer_port_query(port, dpif_port);
    }

    return error;
}

static int
dpif_netdev_port_query_by_name(const struct dpif *dpif, const char *devname,
                               struct dpif_port *dpif_port)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_port *port;
    int error;

    ovs_mutex_lock(&dp->port_mutex);
    error = get_port_by_name(dp, devname, &port);
    if (!error && dpif_port) {
        answer_port_query(port, dpif_port);
    }
    ovs_mutex_unlock(&dp->port_mutex);

    return error;
}

static void
dp_netdev_flow_free(struct dp_netdev_flow *flow)
{
    struct dp_netdev_flow_stats *bucket;
    size_t i;

    OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &flow->stats) {
        ovs_mutex_destroy(&bucket->mutex);
        free_cacheline(bucket);
    }
    ovsthread_stats_destroy(&flow->stats);

    cls_rule_destroy(CONST_CAST(struct cls_rule *, &flow->cr));
    dp_netdev_actions_free(dp_netdev_flow_get_actions(flow));
    free(flow);
}

static void dp_netdev_flow_unref(struct dp_netdev_flow *flow)
{
    if (ovs_refcount_unref_relaxed(&flow->ref_cnt) == 1) {
        ovsrcu_postpone(dp_netdev_flow_free, flow);
    }
}

static void
dp_netdev_remove_flow(struct dp_netdev *dp, struct dp_netdev_flow *flow)
    OVS_REQUIRES(dp->flow_mutex)
{
    struct cls_rule *cr = CONST_CAST(struct cls_rule *, &flow->cr);
    struct cmap_node *node = CONST_CAST(struct cmap_node *, &flow->node);

    classifier_remove(&dp->cls, cr);
    cmap_remove(&dp->flow_table, node, flow_hash(&flow->flow, 0));
    flow->dead = true;

    dp_netdev_flow_unref(flow);
}

static void
dp_netdev_flow_flush(struct dp_netdev *dp)
{
    struct dp_netdev_flow *netdev_flow;

    ovs_mutex_lock(&dp->flow_mutex);
    CMAP_FOR_EACH (netdev_flow, node, &dp->flow_table) {
        dp_netdev_remove_flow(dp, netdev_flow);
    }
    ovs_mutex_unlock(&dp->flow_mutex);
}

static int
dpif_netdev_flow_flush(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    dp_netdev_flow_flush(dp);
    return 0;
}

struct dp_netdev_port_state {
    struct cmap_position position;
    char *name;
};

static int
dpif_netdev_port_dump_start(const struct dpif *dpif OVS_UNUSED, void **statep)
{
    *statep = xzalloc(sizeof(struct dp_netdev_port_state));
    return 0;
}

static int
dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,
                           struct dpif_port *dpif_port)
{
    struct dp_netdev_port_state *state = state_;
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct cmap_node *node;
    int retval;

    node = cmap_next_position(&dp->ports, &state->position);
    if (node) {
        struct dp_netdev_port *port;

        port = CONTAINER_OF(node, struct dp_netdev_port, node);

        free(state->name);
        state->name = xstrdup(netdev_get_name(port->netdev));
        dpif_port->name = state->name;
        dpif_port->type = port->type;
        dpif_port->port_no = port->port_no;

        retval = 0;
    } else {
        retval = EOF;
    }

    return retval;
}

static int
dpif_netdev_port_dump_done(const struct dpif *dpif OVS_UNUSED, void *state_)
{
    struct dp_netdev_port_state *state = state_;

    free(state->name);
    free(state);
    return 0;
}

static int
dpif_netdev_port_poll(const struct dpif *dpif_, char **devnamep OVS_UNUSED)
{
    struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);
    uint64_t new_port_seq;
    int error;

    new_port_seq = seq_read(dpif->dp->port_seq);
    if (dpif->last_port_seq != new_port_seq) {
        dpif->last_port_seq = new_port_seq;
        error = ENOBUFS;
    } else {
        error = EAGAIN;
    }

    return error;
}

static void
dpif_netdev_port_poll_wait(const struct dpif *dpif_)
{
    struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);

    seq_wait(dpif->dp->port_seq, dpif->last_port_seq);
}

static struct dp_netdev_flow *
dp_netdev_flow_cast(const struct cls_rule *cr)
{
    return cr ? CONTAINER_OF(cr, struct dp_netdev_flow, cr) : NULL;
}

static bool dp_netdev_flow_ref(struct dp_netdev_flow *flow)
{
    return ovs_refcount_try_ref_rcu(&flow->ref_cnt);
}

/* netdev_flow_key utilities.
 *
 * netdev_flow_key is basically a miniflow. We use these functions
 * (netdev_flow_key_clone, netdev_flow_key_equal, ...) instead of the miniflow
 * functions (miniflow_clone_inline, miniflow_equal, ...), because:
 *
 * - Since we are dealing exclusively with miniflows created by
 *   miniflow_extract(), if the map is different the miniflow is different.
 *   Therefore we can be faster by comparing the map and the miniflow in a
 *   single memcmp().
 * - netdev_flow_key's miniflow always has inline values.
 * - These functions can be inlined by the compiler.
 *
 * The following assertions make sure that what we're doing with miniflow is
 * safe. */
BUILD_ASSERT_DECL(offsetof(struct miniflow, inline_values)
                  == sizeof(uint64_t));
BUILD_ASSERT_DECL(offsetof(struct netdev_flow_key, flow) == 0);
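
/* Consequence of the assertions above, noted here for clarity: because the
 * miniflow sits at offset 0 of a netdev_flow_key and its inline values
 * immediately follow the 8-byte map, a key's map plus values form one
 * contiguous region that can be compared with a single memcmp() over
 * netdev_flow_key_size() bytes, and a miniflow pointer can be cast directly
 * to a netdev_flow_key pointer as done below. */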
static inline struct netdev_flow_key *
miniflow_to_netdev_flow_key(const struct miniflow *mf)
{
    return (struct netdev_flow_key *) CONST_CAST(struct miniflow *, mf);
}

/* Given the number of bits set in the miniflow map, returns the size of the
 * netdev_flow_key. */
static inline uint32_t
netdev_flow_key_size(uint32_t flow_u32s)
{
    return MINIFLOW_VALUES_SIZE(flow_u32s)
           + offsetof(struct miniflow, inline_values);
}
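
/* Example (illustrative, assuming MINIFLOW_VALUES_SIZE() is 4 bytes per set
 * map bit): a miniflow with 5 one-bits in its map stores five 32-bit values,
 * so this returns 5 * 4 + 8 = 28 bytes, the 8 coming from the map offset
 * asserted above.  That is the span memcmp()'d and memcpy()'d below. */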
/* Used to compare 'netdev_flow_key's (miniflows) in the exact match cache. */
static inline bool
netdev_flow_key_equal(const struct netdev_flow_key *a,
                      const struct netdev_flow_key *b,
                      uint32_t size)
{
    return !memcmp(a, b, size);
}

static inline void
netdev_flow_key_clone(struct netdev_flow_key *dst,
                      const struct netdev_flow_key *src,
                      uint32_t size)
{
    memcpy(dst, src, size);
}

static inline bool
emc_entry_alive(struct emc_entry *ce)
{
    return ce->flow && !ce->flow->dead;
}

static void
emc_clear_entry(struct emc_entry *ce)
{
    if (ce->flow) {
        dp_netdev_flow_unref(ce->flow);
        ce->flow = NULL;
    }
}

static inline void
emc_change_entry(struct emc_entry *ce, struct dp_netdev_flow *flow,
                 const struct netdev_flow_key *mf, uint32_t hash)
{
    if (ce->flow != flow) {
        if (ce->flow) {
            dp_netdev_flow_unref(ce->flow);
        }

        if (dp_netdev_flow_ref(flow)) {
            ce->flow = flow;
        } else {
            ce->flow = NULL;
        }
    }
    if (mf) {
        uint32_t mf_len = netdev_flow_key_size(count_1bits(mf->flow.map));

        netdev_flow_key_clone(&ce->mf, mf, mf_len);
        ce->hash = hash;
        ce->mf_len = mf_len;
    }
}

static inline void
emc_insert(struct emc_cache *cache, const struct miniflow *mf, uint32_t hash,
           struct dp_netdev_flow *flow)
{
    struct emc_entry *to_be_replaced = NULL;
    struct emc_entry *current_entry;

    EMC_FOR_EACH_POS_WITH_HASH(cache, current_entry, hash) {
        if (current_entry->hash == hash
            && netdev_flow_key_equal(&current_entry->mf,
                                     miniflow_to_netdev_flow_key(mf),
                                     current_entry->mf_len)) {

            /* We found the entry with the 'mf' miniflow */
            emc_change_entry(current_entry, flow, NULL, 0);
            return;
        }

        /* Replacement policy: put the flow in an empty (not alive) entry, or
         * in the first entry where it can be */
        if (!to_be_replaced
            || (emc_entry_alive(to_be_replaced)
                && !emc_entry_alive(current_entry))
            || current_entry->hash < to_be_replaced->hash) {
            to_be_replaced = current_entry;
        }
    }
    /* We didn't find the miniflow in the cache.
     * The 'to_be_replaced' entry is where the new flow will be stored */

    emc_change_entry(to_be_replaced, flow, miniflow_to_netdev_flow_key(mf),
                     hash);
}
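
/* Design note (not part of the original comments): the loop above prefers,
 * in order, (1) an entry already holding this miniflow, (2) a dead or empty
 * entry, and (3) the candidate whose stored hash is numerically smaller.
 * The smaller-hash tie-break is a cheap, deterministic way to choose between
 * the EM_FLOW_HASH_SEGS candidate slots without keeping any LRU state. */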
static inline struct dp_netdev_flow *
emc_lookup(struct emc_cache *cache, const struct miniflow *mf, uint32_t hash)
{
    struct emc_entry *current_entry;

    EMC_FOR_EACH_POS_WITH_HASH(cache, current_entry, hash) {
        if (current_entry->hash == hash && emc_entry_alive(current_entry)
            && netdev_flow_key_equal(&current_entry->mf,
                                     miniflow_to_netdev_flow_key(mf),
                                     current_entry->mf_len)) {

            /* We found the entry with the 'mf' miniflow */
            return current_entry->flow;
        }
    }

    return NULL;
}

static struct dp_netdev_flow *
dp_netdev_lookup_flow(const struct dp_netdev *dp, const struct miniflow *key)
{
    struct dp_netdev_flow *netdev_flow;
    struct cls_rule *rule;

    classifier_lookup_miniflow_batch(&dp->cls, &key, &rule, 1);
    netdev_flow = dp_netdev_flow_cast(rule);

    return netdev_flow;
}

static struct dp_netdev_flow *
dp_netdev_find_flow(const struct dp_netdev *dp, const struct flow *flow)
{
    struct dp_netdev_flow *netdev_flow;

    CMAP_FOR_EACH_WITH_HASH (netdev_flow, node, flow_hash(flow, 0),
                             &dp->flow_table) {
        if (flow_equal(&netdev_flow->flow, flow)) {
            return netdev_flow;
        }
    }

    return NULL;
}

static void
get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow,
                    struct dpif_flow_stats *stats)
{
    struct dp_netdev_flow_stats *bucket;
    size_t i;

    memset(stats, 0, sizeof *stats);
    OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &netdev_flow->stats) {
        ovs_mutex_lock(&bucket->mutex);
        stats->n_packets += bucket->packet_count;
        stats->n_bytes += bucket->byte_count;
        stats->used = MAX(stats->used, bucket->used);
        stats->tcp_flags |= bucket->tcp_flags;
        ovs_mutex_unlock(&bucket->mutex);
    }
}

static void
dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow *netdev_flow,
                            struct ofpbuf *buffer, struct dpif_flow *flow)
{
    struct flow_wildcards wc;
    struct dp_netdev_actions *actions;

    minimask_expand(&netdev_flow->cr.match.mask, &wc);
    odp_flow_key_from_mask(buffer, &wc.masks, &netdev_flow->flow,
                           odp_to_u32(wc.masks.in_port.odp_port),
                           SIZE_MAX, true);
    flow->mask = ofpbuf_data(buffer);
    flow->mask_len = ofpbuf_size(buffer);

    actions = dp_netdev_flow_get_actions(netdev_flow);
    flow->actions = actions->actions;
    flow->actions_len = actions->size;

    get_dpif_flow_stats(netdev_flow, &flow->stats);
}

static int
dpif_netdev_mask_from_nlattrs(const struct nlattr *key, uint32_t key_len,
                              const struct nlattr *mask_key,
                              uint32_t mask_key_len, const struct flow *flow,
                              struct flow *mask)
{
    if (mask_key_len) {
        enum odp_key_fitness fitness;

        fitness = odp_flow_key_to_mask(mask_key, mask_key_len, mask, flow);
        if (fitness) {
            /* This should not happen: it indicates that
             * odp_flow_key_from_mask() and odp_flow_key_to_mask()
             * disagree on the acceptable form of a mask. Log the problem
             * as an error, with enough details to enable debugging. */
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

            if (!VLOG_DROP_ERR(&rl)) {
                struct ds s;

                ds_init(&s);
                odp_flow_format(key, key_len, mask_key, mask_key_len, NULL, &s,
                                true);
                VLOG_ERR("internal error parsing flow mask %s (%s)",
                         ds_cstr(&s), odp_key_fitness_to_string(fitness));
                ds_destroy(&s);
            }

            return EINVAL;
        }
    } else {
        enum mf_field_id id;
        /* No mask key, unwildcard everything except fields whose
         * prerequisites are not met. */
        memset(mask, 0x0, sizeof *mask);

        for (id = 0; id < MFF_N_IDS; ++id) {
            /* Skip registers and metadata. */
            if (!(id >= MFF_REG0 && id < MFF_REG0 + FLOW_N_REGS)
                && id != MFF_METADATA) {
                const struct mf_field *mf = mf_from_id(id);
                if (mf_are_prereqs_ok(mf, flow)) {
                    mf_mask_field(mf, mask);
                }
            }
        }
    }

    /* Force unwildcard the in_port.
     *
     * We need to do this even in the case where we unwildcard "everything"
     * above because "everything" only includes the 16-bit OpenFlow port number
     * mask->in_port.ofp_port, which only covers half of the 32-bit datapath
     * port number mask->in_port.odp_port. */
    mask->in_port.odp_port = u32_to_odp(UINT32_MAX);

    return 0;
}

static int
dpif_netdev_flow_from_nlattrs(const struct nlattr *key, uint32_t key_len,
                              struct flow *flow)
{
    odp_port_t in_port;

    if (odp_flow_key_to_flow(key, key_len, flow)) {
        /* This should not happen: it indicates that odp_flow_key_from_flow()
         * and odp_flow_key_to_flow() disagree on the acceptable form of a
         * flow. Log the problem as an error, with enough details to enable
         * debugging. */
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        if (!VLOG_DROP_ERR(&rl)) {
            struct ds s;

            ds_init(&s);
            odp_flow_format(key, key_len, NULL, 0, NULL, &s, true);
            VLOG_ERR("internal error parsing flow key %s", ds_cstr(&s));
            ds_destroy(&s);
        }

        return EINVAL;
    }

    in_port = flow->in_port.odp_port;
    if (!is_valid_port_number(in_port) && in_port != ODPP_NONE) {
        return EINVAL;
    }

    return 0;
}

static int
dpif_netdev_flow_get(const struct dpif *dpif, const struct dpif_flow_get *get)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_flow *netdev_flow;
    struct flow key;
    int error;

    error = dpif_netdev_flow_from_nlattrs(get->key, get->key_len, &key);
    if (error) {
        return error;
    }

    netdev_flow = dp_netdev_find_flow(dp, &key);
    if (netdev_flow) {
        dp_netdev_flow_to_dpif_flow(netdev_flow, get->buffer, get->flow);
    } else {
        error = ENOENT;
    }

    return error;
}

static int
dp_netdev_flow_add(struct dp_netdev *dp, struct match *match,
                   const struct nlattr *actions, size_t actions_len)
    OVS_REQUIRES(dp->flow_mutex)
{
    struct dp_netdev_flow *netdev_flow;

    netdev_flow = xzalloc(sizeof *netdev_flow);
    *CONST_CAST(struct flow *, &netdev_flow->flow) = match->flow;

    ovs_refcount_init(&netdev_flow->ref_cnt);

    ovsthread_stats_init(&netdev_flow->stats);

    ovsrcu_set(&netdev_flow->actions,
               dp_netdev_actions_create(actions, actions_len));

    cls_rule_init(CONST_CAST(struct cls_rule *, &netdev_flow->cr),
                  match, NETDEV_RULE_PRIORITY);
    cmap_insert(&dp->flow_table,
                CONST_CAST(struct cmap_node *, &netdev_flow->node),
                flow_hash(&match->flow, 0));
    classifier_insert(&dp->cls,
                      CONST_CAST(struct cls_rule *, &netdev_flow->cr));

    if (OVS_UNLIKELY(VLOG_IS_DBG_ENABLED())) {
        struct ds ds = DS_EMPTY_INITIALIZER;

        ds_put_cstr(&ds, "flow_add: ");
        match_format(match, &ds, OFP_DEFAULT_PRIORITY);
        ds_put_cstr(&ds, ", actions:");
        format_odp_actions(&ds, actions, actions_len);

        VLOG_DBG_RL(&upcall_rl, "%s", ds_cstr(&ds));

        ds_destroy(&ds);
    }

    return 0;
}

static void
clear_stats(struct dp_netdev_flow *netdev_flow)
{
    struct dp_netdev_flow_stats *bucket;
    size_t i;

    OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &netdev_flow->stats) {
        ovs_mutex_lock(&bucket->mutex);
        bucket->used = 0;
        bucket->packet_count = 0;
        bucket->byte_count = 0;
        bucket->tcp_flags = 0;
        ovs_mutex_unlock(&bucket->mutex);
    }
}

static int
dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_flow *netdev_flow;
    struct miniflow miniflow;
    struct match match;
    int error;

    error = dpif_netdev_flow_from_nlattrs(put->key, put->key_len, &match.flow);
    if (error) {
        return error;
    }
    error = dpif_netdev_mask_from_nlattrs(put->key, put->key_len,
                                          put->mask, put->mask_len,
                                          &match.flow, &match.wc.masks);
    if (error) {
        return error;
    }
    miniflow_init(&miniflow, &match.flow);

    ovs_mutex_lock(&dp->flow_mutex);
    netdev_flow = dp_netdev_lookup_flow(dp, &miniflow);
    if (!netdev_flow) {
        if (put->flags & DPIF_FP_CREATE) {
            if (cmap_count(&dp->flow_table) < MAX_FLOWS) {
                if (put->stats) {
                    memset(put->stats, 0, sizeof *put->stats);
                }
                error = dp_netdev_flow_add(dp, &match, put->actions,
                                           put->actions_len);
            } else {
                error = EFBIG;
            }
        } else {
            error = ENOENT;
        }
    } else {
        if (put->flags & DPIF_FP_MODIFY
            && flow_equal(&match.flow, &netdev_flow->flow)) {
            struct dp_netdev_actions *new_actions;
            struct dp_netdev_actions *old_actions;

            new_actions = dp_netdev_actions_create(put->actions,
                                                   put->actions_len);

            old_actions = dp_netdev_flow_get_actions(netdev_flow);
            ovsrcu_set(&netdev_flow->actions, new_actions);

            if (put->stats) {
                get_dpif_flow_stats(netdev_flow, put->stats);
            }
            if (put->flags & DPIF_FP_ZERO_STATS) {
                clear_stats(netdev_flow);
            }

            ovsrcu_postpone(dp_netdev_actions_free, old_actions);
        } else if (put->flags & DPIF_FP_CREATE) {
            error = EEXIST;
        } else {
            /* Overlapping flow. */
            error = EINVAL;
        }
    }
    ovs_mutex_unlock(&dp->flow_mutex);
    miniflow_destroy(&miniflow);

    return error;
}

static int
dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_flow *netdev_flow;
    struct flow key;
    int error;

    error = dpif_netdev_flow_from_nlattrs(del->key, del->key_len, &key);
    if (error) {
        return error;
    }

    ovs_mutex_lock(&dp->flow_mutex);
    netdev_flow = dp_netdev_find_flow(dp, &key);
    if (netdev_flow) {
        if (del->stats) {
            get_dpif_flow_stats(netdev_flow, del->stats);
        }
        dp_netdev_remove_flow(dp, netdev_flow);
    } else {
        error = ENOENT;
    }
    ovs_mutex_unlock(&dp->flow_mutex);

    return error;
}

struct dpif_netdev_flow_dump {
    struct dpif_flow_dump up;
    struct cmap_position pos;
    int status;
    struct ovs_mutex mutex;
};

static struct dpif_netdev_flow_dump *
dpif_netdev_flow_dump_cast(struct dpif_flow_dump *dump)
{
    return CONTAINER_OF(dump, struct dpif_netdev_flow_dump, up);
}

static struct dpif_flow_dump *
dpif_netdev_flow_dump_create(const struct dpif *dpif_)
{
    struct dpif_netdev_flow_dump *dump;

    dump = xmalloc(sizeof *dump);
    dpif_flow_dump_init(&dump->up, dpif_);
    memset(&dump->pos, 0, sizeof dump->pos);
    dump->status = 0;
    ovs_mutex_init(&dump->mutex);

    return &dump->up;
}

static int
dpif_netdev_flow_dump_destroy(struct dpif_flow_dump *dump_)
{
    struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_);

    ovs_mutex_destroy(&dump->mutex);
    free(dump);
    return 0;
}

struct dpif_netdev_flow_dump_thread {
    struct dpif_flow_dump_thread up;
    struct dpif_netdev_flow_dump *dump;
    struct odputil_keybuf keybuf[FLOW_DUMP_MAX_BATCH];
    struct odputil_keybuf maskbuf[FLOW_DUMP_MAX_BATCH];
};

static struct dpif_netdev_flow_dump_thread *
dpif_netdev_flow_dump_thread_cast(struct dpif_flow_dump_thread *thread)
{
    return CONTAINER_OF(thread, struct dpif_netdev_flow_dump_thread, up);
}

static struct dpif_flow_dump_thread *
dpif_netdev_flow_dump_thread_create(struct dpif_flow_dump *dump_)
{
    struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_);
    struct dpif_netdev_flow_dump_thread *thread;

    thread = xmalloc(sizeof *thread);
    dpif_flow_dump_thread_init(&thread->up, &dump->up);
    thread->dump = dump;
    return &thread->up;
}

static void
dpif_netdev_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_)
{
    struct dpif_netdev_flow_dump_thread *thread
        = dpif_netdev_flow_dump_thread_cast(thread_);

    free(thread);
}

static int
dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
                           struct dpif_flow *flows, int max_flows)
{
    struct dpif_netdev_flow_dump_thread *thread
        = dpif_netdev_flow_dump_thread_cast(thread_);
    struct dpif_netdev_flow_dump *dump = thread->dump;
    struct dpif_netdev *dpif = dpif_netdev_cast(thread->up.dpif);
    struct dp_netdev_flow *netdev_flows[FLOW_DUMP_MAX_BATCH];
    struct dp_netdev *dp = get_dp_netdev(&dpif->dpif);
    int n_flows = 0;
    int i;

    ovs_mutex_lock(&dump->mutex);
    if (!dump->status) {
        for (n_flows = 0; n_flows < MIN(max_flows, FLOW_DUMP_MAX_BATCH);
             n_flows++) {
            struct cmap_node *node;

            node = cmap_next_position(&dp->flow_table, &dump->pos);
            if (!node) {
                dump->status = EOF;
                break;
            }
            netdev_flows[n_flows] = CONTAINER_OF(node, struct dp_netdev_flow,
                                                 node);
        }
    }
    ovs_mutex_unlock(&dump->mutex);

    for (i = 0; i < n_flows; i++) {
        struct odputil_keybuf *maskbuf = &thread->maskbuf[i];
        struct odputil_keybuf *keybuf = &thread->keybuf[i];
        struct dp_netdev_flow *netdev_flow = netdev_flows[i];
        struct dpif_flow *f = &flows[i];
        struct dp_netdev_actions *dp_actions;
        struct flow_wildcards wc;
        struct ofpbuf buf;

        minimask_expand(&netdev_flow->cr.match.mask, &wc);

        ofpbuf_use_stack(&buf, keybuf, sizeof *keybuf);
        odp_flow_key_from_flow(&buf, &netdev_flow->flow, &wc.masks,
                               netdev_flow->flow.in_port.odp_port, true);
        f->key = ofpbuf_data(&buf);
        f->key_len = ofpbuf_size(&buf);

        ofpbuf_use_stack(&buf, maskbuf, sizeof *maskbuf);
        odp_flow_key_from_mask(&buf, &wc.masks, &netdev_flow->flow,
                               odp_to_u32(wc.masks.in_port.odp_port),
                               SIZE_MAX, true);
        f->mask = ofpbuf_data(&buf);
        f->mask_len = ofpbuf_size(&buf);

        dp_actions = dp_netdev_flow_get_actions(netdev_flow);
        f->actions = dp_actions->actions;
        f->actions_len = dp_actions->size;

        get_dpif_flow_stats(netdev_flow, &f->stats);
    }

    return n_flows;
}

static int
dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_pmd_thread *pmd;
    struct dpif_packet packet, *pp;
    struct pkt_metadata *md = &execute->md;

    if (ofpbuf_size(execute->packet) < ETH_HEADER_LEN ||
        ofpbuf_size(execute->packet) > UINT16_MAX) {
        return EINVAL;
    }

    packet.ofpbuf = *execute->packet;
    pp = &packet;

    /* Tries finding the 'pmd'. If NULL is returned, that means
     * the current thread is a non-pmd thread and should use
     * dp_netdev_get_nonpmd(). */
    pmd = ovsthread_getspecific(dp->per_pmd_key);
    if (!pmd) {
        pmd = dp_netdev_get_nonpmd(dp);
    }

    /* If the current thread is non-pmd thread, acquires
     * the 'non_pmd_mutex'. */
    if (pmd->core_id == NON_PMD_CORE_ID) {
        ovs_mutex_lock(&dp->non_pmd_mutex);
    }
    dp_netdev_execute_actions(pmd, &pp, 1, false, md, execute->actions,
                              execute->actions_len);
    if (pmd->core_id == NON_PMD_CORE_ID) {
        ovs_mutex_unlock(&dp->non_pmd_mutex);
    }

    /* Even though may_steal is set to false, some actions could modify or
     * reallocate the ofpbuf memory. We need to pass those changes to the
     * caller. */
    *execute->packet = packet.ofpbuf;

    return 0;
}

static void
dpif_netdev_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops)
{
    size_t i;

    for (i = 0; i < n_ops; i++) {
        struct dpif_op *op = ops[i];

        switch (op->type) {
        case DPIF_OP_FLOW_PUT:
            op->error = dpif_netdev_flow_put(dpif, &op->u.flow_put);
            break;

        case DPIF_OP_FLOW_DEL:
            op->error = dpif_netdev_flow_del(dpif, &op->u.flow_del);
            break;

        case DPIF_OP_EXECUTE:
            op->error = dpif_netdev_execute(dpif, &op->u.execute);
            break;

        case DPIF_OP_FLOW_GET:
            op->error = dpif_netdev_flow_get(dpif, &op->u.flow_get);
            break;
        }
    }
}

/* Returns true if the configuration for rx queues or cpu mask
 * is changed. */
static bool
pmd_config_changed(const struct dp_netdev *dp, size_t rxqs, const char *cmask)
{
    if (dp->n_dpdk_rxqs != rxqs) {
        return true;
    } else {
        if (dp->pmd_cmask != NULL && cmask != NULL) {
            return strcmp(dp->pmd_cmask, cmask);
        } else {
            return (dp->pmd_cmask != NULL || cmask != NULL);
        }
    }
}
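
/* Summary of the predicate above (illustrative):
 *
 *     dp->pmd_cmask   cmask      result
 *     -------------   --------   ---------------------------------
 *     non-NULL        non-NULL   strcmp() != 0 (mask text changed)
 *     non-NULL        NULL       true (mask removed)
 *     NULL            non-NULL   true (mask added)
 *     NULL            NULL       false (still unset)
 *
 * plus an unconditional 'true' whenever the rx queue count differs. */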
/* Resets pmd threads if the configuration for 'rxq's or cpu mask changes. */
static int
dpif_netdev_pmd_set(struct dpif *dpif, unsigned int n_rxqs, const char *cmask)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    if (pmd_config_changed(dp, n_rxqs, cmask)) {
        struct dp_netdev_port *port;

        dp_netdev_destroy_all_pmds(dp);

        CMAP_FOR_EACH (port, node, &dp->ports) {
            if (netdev_is_pmd(port->netdev)) {
                int i, err;

                /* Closes the existing 'rxq's. */
                for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
                    netdev_rxq_close(port->rxq[i]);
                    port->rxq[i] = NULL;
                }

                /* Sets the new rx queue config. */
                err = netdev_set_multiq(port->netdev, ovs_numa_get_n_cores(),
                                        n_rxqs);
                if (err && (err != EOPNOTSUPP)) {
                    VLOG_ERR("Failed to set dpdk interface %s rx_queue to:"
                             " %u", netdev_get_name(port->netdev),
                             n_rxqs);
                    return err;
                }

                /* If the set_multiq() above succeeds, reopens the 'rxq's. */
                port->rxq = xrealloc(port->rxq, sizeof *port->rxq
                                     * netdev_n_rxq(port->netdev));
                for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
                    netdev_rxq_open(port->netdev, &port->rxq[i], i);
                }
            }
        }
        dp->n_dpdk_rxqs = n_rxqs;

        /* Reconfigures the cpu mask. */
        ovs_numa_set_cpu_mask(cmask);
        free(dp->pmd_cmask);
        dp->pmd_cmask = cmask ? xstrdup(cmask) : NULL;

        /* Restores the non-pmd. */
        dp_netdev_set_nonpmd(dp);
        /* Restores all pmd threads. */
        dp_netdev_reset_pmd_threads(dp);
    }

    return 0;
}

static int
dpif_netdev_queue_to_priority(const struct dpif *dpif OVS_UNUSED,
                              uint32_t queue_id, uint32_t *priority)
{
    *priority = queue_id;
    return 0;
}

/* Creates and returns a new 'struct dp_netdev_actions', with a reference count
 * of 1, whose actions are a copy of the 'size' bytes at 'actions'. */
struct dp_netdev_actions *
dp_netdev_actions_create(const struct nlattr *actions, size_t size)
{
    struct dp_netdev_actions *netdev_actions;

    netdev_actions = xmalloc(sizeof *netdev_actions);
    netdev_actions->actions = xmemdup(actions, size);
    netdev_actions->size = size;

    return netdev_actions;
}

struct dp_netdev_actions *
dp_netdev_flow_get_actions(const struct dp_netdev_flow *flow)
{
    return ovsrcu_get(struct dp_netdev_actions *, &flow->actions);
}

static void
dp_netdev_actions_free(struct dp_netdev_actions *actions)
{
    free(actions->actions);
    free(actions);
}

static void
dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
                           struct dp_netdev_port *port,
                           struct netdev_rxq *rxq)
{
    struct dpif_packet *packets[NETDEV_MAX_RX_BATCH];
    int error, cnt;

    error = netdev_rxq_recv(rxq, packets, &cnt);
    if (!error) {
        struct pkt_metadata md = PKT_METADATA_INITIALIZER(port->port_no);

        *recirc_depth_get() = 0;
        dp_netdev_input(pmd, packets, cnt, &md);
    } else if (error != EAGAIN && error != EOPNOTSUPP) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
                    netdev_get_name(port->netdev), ovs_strerror(error));
    }
}

static void
dpif_netdev_run(struct dpif *dpif)
{
    struct dp_netdev_port *port;
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_pmd_thread *non_pmd = dp_netdev_get_nonpmd(dp);

    ovs_mutex_lock(&dp->non_pmd_mutex);
    CMAP_FOR_EACH (port, node, &dp->ports) {
        if (!netdev_is_pmd(port->netdev)) {
            int i;

            for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
                dp_netdev_process_rxq_port(non_pmd, port, port->rxq[i]);
            }
        }
    }
    ovs_mutex_unlock(&dp->non_pmd_mutex);
}

static void
dpif_netdev_wait(struct dpif *dpif)
{
    struct dp_netdev_port *port;
    struct dp_netdev *dp = get_dp_netdev(dpif);

    ovs_mutex_lock(&dp_netdev_mutex);
    CMAP_FOR_EACH (port, node, &dp->ports) {
        if (!netdev_is_pmd(port->netdev)) {
            int i;

            for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
                netdev_rxq_wait(port->rxq[i]);
            }
        }
    }
    ovs_mutex_unlock(&dp_netdev_mutex);
}

struct rxq_poll {
    struct dp_netdev_port *port;
    struct netdev_rxq *rx;
};

static int
pmd_load_queues(struct dp_netdev_pmd_thread *pmd,
                struct rxq_poll **ppoll_list, int poll_cnt)
{
    struct rxq_poll *poll_list = *ppoll_list;
    struct dp_netdev_port *port;
    int n_pmds_on_numa, index, i;

    /* Simple scheduler for netdev rx polling. */
    for (i = 0; i < poll_cnt; i++) {
        port_unref(poll_list[i].port);
    }

    poll_cnt = 0;
    n_pmds_on_numa = get_n_pmd_threads_on_numa(pmd->dp, pmd->numa_id);
    index = 0;

    CMAP_FOR_EACH (port, node, &pmd->dp->ports) {
        /* Calls port_try_ref() to prevent the main thread
         * from deleting the port. */
        if (port_try_ref(port)) {
            if (netdev_is_pmd(port->netdev)
                && netdev_get_numa_id(port->netdev) == pmd->numa_id) {

                for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
                    if ((index % n_pmds_on_numa) == pmd->index) {
                        poll_list = xrealloc(poll_list,
                                        sizeof *poll_list * (poll_cnt + 1));

                        port_ref(port);
                        poll_list[poll_cnt].port = port;
                        poll_list[poll_cnt].rx = port->rxq[i];
                        poll_cnt++;
                    }
                    index++;
                }
            }
            /* Unrefs the port_try_ref(). */
            port_unref(port);
        }
    }

    *ppoll_list = poll_list;
    return poll_cnt;
}
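
/* Illustrative example of the scheduler above: with n_pmds_on_numa == 2 and
 * one pmd-eligible port exposing 4 rx queues, 'index' takes values 0..3, so
 * the pmd thread with pmd->index == 0 polls queues 0 and 2 while the thread
 * with pmd->index == 1 polls queues 1 and 3.  'index' keeps counting across
 * ports, interleaving every queue on the numa node among its pmd threads. */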
2151 pmd_thread_main(void *f_
)
2153 struct dp_netdev_pmd_thread
*pmd
= f_
;
2154 unsigned int lc
= 0;
2155 struct rxq_poll
*poll_list
;
2156 unsigned int port_seq
= PMD_INITIAL_SEQ
;
2163 /* Stores the pmd thread's 'pmd' to 'per_pmd_key'. */
2164 ovsthread_setspecific(pmd
->dp
->per_pmd_key
, pmd
);
2165 pmd_thread_setaffinity_cpu(pmd
->core_id
);
2167 emc_cache_init(&pmd
->flow_cache
);
2168 poll_cnt
= pmd_load_queues(pmd
, &poll_list
, poll_cnt
);
2173 for (i
= 0; i
< poll_cnt
; i
++) {
2174 dp_netdev_process_rxq_port(pmd
, poll_list
[i
].port
, poll_list
[i
].rx
);
2184 atomic_read_relaxed(&pmd
->change_seq
, &seq
);
2185 if (seq
!= port_seq
) {
2192 emc_cache_uninit(&pmd
->flow_cache
);
2194 if (!latch_is_set(&pmd
->exit_latch
)){
2198 for (i
= 0; i
< poll_cnt
; i
++) {
2199 port_unref(poll_list
[i
].port
);
2207 dp_netdev_disable_upcall(struct dp_netdev
*dp
)
2208 OVS_ACQUIRES(dp
->upcall_rwlock
)
2210 fat_rwlock_wrlock(&dp
->upcall_rwlock
);
2214 dpif_netdev_disable_upcall(struct dpif
*dpif
)
2215 OVS_NO_THREAD_SAFETY_ANALYSIS
2217 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
2218 dp_netdev_disable_upcall(dp
);
2222 dp_netdev_enable_upcall(struct dp_netdev
*dp
)
2223 OVS_RELEASES(dp
->upcall_rwlock
)
2225 fat_rwlock_unlock(&dp
->upcall_rwlock
);
2229 dpif_netdev_enable_upcall(struct dpif
*dpif
)
2230 OVS_NO_THREAD_SAFETY_ANALYSIS
2232 struct dp_netdev
*dp
= get_dp_netdev(dpif
);
2233 dp_netdev_enable_upcall(dp
);
2236 /* Returns the pointer to the dp_netdev_pmd_thread for non-pmd threads. */
2237 static struct dp_netdev_pmd_thread
*
2238 dp_netdev_get_nonpmd(struct dp_netdev
*dp
)
2240 struct dp_netdev_pmd_thread
*pmd
;
2241 struct cmap_node
*pnode
;
2243 pnode
= cmap_find(&dp
->poll_threads
, hash_int(NON_PMD_CORE_ID
, 0));
2245 pmd
= CONTAINER_OF(pnode
, struct dp_netdev_pmd_thread
, node
);
2250 /* Sets the 'struct dp_netdev_pmd_thread' for non-pmd threads. */
2252 dp_netdev_set_nonpmd(struct dp_netdev
*dp
)
2254 struct dp_netdev_pmd_thread
*non_pmd
;
2256 non_pmd
= xzalloc(sizeof *non_pmd
);
2257 dp_netdev_configure_pmd(non_pmd
, dp
, 0, NON_PMD_CORE_ID
,
2261 /* Configures the 'pmd' based on the input argument. */
2263 dp_netdev_configure_pmd(struct dp_netdev_pmd_thread
*pmd
, struct dp_netdev
*dp
,
2264 int index
, int core_id
, int numa_id
)
2268 pmd
->core_id
= core_id
;
2269 pmd
->numa_id
= numa_id
;
2270 latch_init(&pmd
->exit_latch
);
2271 atomic_init(&pmd
->change_seq
, PMD_INITIAL_SEQ
);
2272 /* init the 'flow_cache' since there is no
2273 * actual thread created for NON_PMD_CORE_ID. */
2274 if (core_id
== NON_PMD_CORE_ID
) {
2275 emc_cache_init(&pmd
->flow_cache
);
2277 cmap_insert(&dp
->poll_threads
, CONST_CAST(struct cmap_node
*, &pmd
->node
),
2278 hash_int(core_id
, 0));
2281 /* Stops the pmd thread, removes it from the 'dp->poll_threads'
2282 * and destroys the struct. */
2284 dp_netdev_del_pmd(struct dp_netdev_pmd_thread
*pmd
)
2286 /* Uninit the 'flow_cache' since there is
2287 * no actual thread uninit it. */
2288 if (pmd
->core_id
== NON_PMD_CORE_ID
) {
2289 emc_cache_uninit(&pmd
->flow_cache
);
2291 latch_set(&pmd
->exit_latch
);
2292 dp_netdev_reload_pmd__(pmd
);
2293 ovs_numa_unpin_core(pmd
->core_id
);
2294 xpthread_join(pmd
->thread
, NULL
);
2296 cmap_remove(&pmd
->dp
->poll_threads
, &pmd
->node
, hash_int(pmd
->core_id
, 0));
2297 latch_destroy(&pmd
->exit_latch
);
2301 /* Destroys all pmd threads. */
2303 dp_netdev_destroy_all_pmds(struct dp_netdev
*dp
)
2305 struct dp_netdev_pmd_thread
*pmd
;
2307 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
2308 dp_netdev_del_pmd(pmd
);
2312 /* Deletes all pmd threads on numa node 'numa_id'. */
2314 dp_netdev_del_pmds_on_numa(struct dp_netdev
*dp
, int numa_id
)
2316 struct dp_netdev_pmd_thread
*pmd
;
2318 CMAP_FOR_EACH (pmd
, node
, &dp
->poll_threads
) {
2319 if (pmd
->numa_id
== numa_id
) {
2320 dp_netdev_del_pmd(pmd
);
/* Checks the numa node id of 'netdev' and starts pmd threads for
 * the numa node. */
static void
dp_netdev_set_pmds_on_numa(struct dp_netdev *dp, int numa_id)
{
    int n_pmds;

    if (!ovs_numa_numa_id_is_valid(numa_id)) {
        VLOG_ERR("Cannot create pmd threads due to invalid numa id (%d)",
                 numa_id);
        return;
    }

    n_pmds = get_n_pmd_threads_on_numa(dp, numa_id);

    /* If there are already pmd threads created for the numa node
     * on which 'netdev' is, do nothing.  Otherwise, create the
     * pmd threads for the numa node. */
    if (!n_pmds) {
        int can_have, n_unpinned, i;

        n_unpinned = ovs_numa_get_n_unpinned_cores_on_numa(numa_id);
        if (!n_unpinned) {
            VLOG_ERR("Cannot create pmd threads due to lack of unpinned "
                     "cores on numa node");
            return;
        }

        /* If a cpu mask is specified, uses all unpinned cores, otherwise
         * tries creating NR_PMD_THREADS pmd threads. */
        can_have = dp->pmd_cmask ? n_unpinned
                                 : MIN(n_unpinned, NR_PMD_THREADS);
        for (i = 0; i < can_have; i++) {
            struct dp_netdev_pmd_thread *pmd = xzalloc(sizeof *pmd);
            int core_id = ovs_numa_get_unpinned_core_on_numa(numa_id);

            dp_netdev_configure_pmd(pmd, dp, i, core_id, numa_id);
            /* Each thread will distribute all devices' rx-queues among
             * themselves. */
            pmd->thread = ovs_thread_create("pmd", pmd_thread_main, pmd);
        }
        VLOG_INFO("Created %d pmd threads on numa node %d", can_have, numa_id);
    }
}
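
/* Worked example (added for clarity; the numbers are hypothetical): on a
 * numa node with 6 unpinned cores and no pmd threads yet, an unset
 * 'dp->pmd_cmask' yields can_have == MIN(6, NR_PMD_THREADS), while a
 * configured cpu mask yields can_have == 6, i.e. one pmd thread per
 * unpinned core. */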
static void *
dp_netdev_flow_stats_new_cb(void)
{
    struct dp_netdev_flow_stats *bucket = xzalloc_cacheline(sizeof *bucket);

    ovs_mutex_init(&bucket->mutex);
    return bucket;
}
/* Called after the pmd threads' configuration changes.  Restarts the pmd
 * threads with the new configuration. */
static void
dp_netdev_reset_pmd_threads(struct dp_netdev *dp)
{
    struct dp_netdev_port *port;

    CMAP_FOR_EACH (port, node, &dp->ports) {
        if (netdev_is_pmd(port->netdev)) {
            int numa_id = netdev_get_numa_id(port->netdev);

            dp_netdev_set_pmds_on_numa(dp, numa_id);
        }
    }
}
static void
dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow, int cnt, int size,
                    uint16_t tcp_flags)
{
    long long int now = time_msec();
    struct dp_netdev_flow_stats *bucket;

    bucket = ovsthread_stats_bucket_get(&netdev_flow->stats,
                                        dp_netdev_flow_stats_new_cb);

    ovs_mutex_lock(&bucket->mutex);
    bucket->used = MAX(now, bucket->used);
    bucket->packet_count += cnt;
    bucket->byte_count += size;
    bucket->tcp_flags |= tcp_flags;
    ovs_mutex_unlock(&bucket->mutex);
}
static void *
dp_netdev_stats_new_cb(void)
{
    struct dp_netdev_stats *bucket = xzalloc_cacheline(sizeof *bucket);

    ovs_mutex_init(&bucket->mutex);
    return bucket;
}
static void
dp_netdev_count_packet(struct dp_netdev *dp, enum dp_stat_type type, int cnt)
{
    struct dp_netdev_stats *bucket;

    bucket = ovsthread_stats_bucket_get(&dp->stats, dp_netdev_stats_new_cb);
    ovs_mutex_lock(&bucket->mutex);
    bucket->n[type] += cnt;
    ovs_mutex_unlock(&bucket->mutex);
}
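
/* Note (added for clarity): ovsthread_stats_bucket_get() hands each thread
 * its own stats bucket, allocating one through the callback on first use, so
 * the per-bucket mutex above is almost never contended; readers aggregate
 * across all buckets when reporting statistics. */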
static int
dp_netdev_upcall(struct dp_netdev *dp, struct dpif_packet *packet_,
                 struct flow *flow, struct flow_wildcards *wc,
                 enum dpif_upcall_type type, const struct nlattr *userdata,
                 struct ofpbuf *actions, struct ofpbuf *put_actions)
{
    struct ofpbuf *packet = &packet_->ofpbuf;

    if (type == DPIF_UC_MISS) {
        dp_netdev_count_packet(dp, DP_STAT_MISS, 1);
    }

    if (OVS_UNLIKELY(!dp->upcall_cb)) {
        return ENODEV;
    }

    if (OVS_UNLIKELY(!VLOG_DROP_DBG(&upcall_rl))) {
        struct ds ds = DS_EMPTY_INITIALIZER;
        struct ofpbuf key;
        char *packet_str;

        ofpbuf_init(&key, 0);
        odp_flow_key_from_flow(&key, flow, &wc->masks, flow->in_port.odp_port,
                               true);

        packet_str = ofp_packet_to_string(ofpbuf_data(packet),
                                          ofpbuf_size(packet));

        odp_flow_key_format(ofpbuf_data(&key), ofpbuf_size(&key), &ds);

        VLOG_DBG("%s: %s upcall:\n%s\n%s", dp->name,
                 dpif_upcall_type_to_string(type), ds_cstr(&ds), packet_str);

        ds_destroy(&ds);
        free(packet_str);
        ofpbuf_uninit(&key);
    }

    return dp->upcall_cb(packet, flow, type, userdata, actions, wc,
                         put_actions, dp->upcall_aux);
}
static inline uint32_t
dpif_netdev_packet_get_dp_hash(struct dpif_packet *packet,
                               const struct miniflow *mf)
{
    uint32_t hash;

    hash = dpif_packet_get_dp_hash(packet);
    if (OVS_UNLIKELY(!hash)) {
        hash = miniflow_hash_5tuple(mf, 0);
        dpif_packet_set_dp_hash(packet, hash);
    }
    return hash;
}
struct packet_batch {
    unsigned int packet_count;
    unsigned int byte_count;
    uint16_t tcp_flags;

    struct dp_netdev_flow *flow;

    struct dpif_packet *packets[NETDEV_MAX_RX_BATCH];
    struct pkt_metadata md;
};
static inline void
packet_batch_update(struct packet_batch *batch, struct dpif_packet *packet,
                    const struct miniflow *mf)
{
    batch->tcp_flags |= miniflow_get_tcp_flags(mf);
    batch->packets[batch->packet_count++] = packet;
    batch->byte_count += ofpbuf_size(&packet->ofpbuf);
}
static inline void
packet_batch_init(struct packet_batch *batch, struct dp_netdev_flow *flow,
                  struct pkt_metadata *md)
{
    batch->flow = flow;
    batch->md = *md;

    batch->packet_count = 0;
    batch->byte_count = 0;
    batch->tcp_flags = 0;
}
static inline void
packet_batch_execute(struct packet_batch *batch,
                     struct dp_netdev_pmd_thread *pmd)
{
    struct dp_netdev_actions *actions;
    struct dp_netdev_flow *flow = batch->flow;

    dp_netdev_flow_used(batch->flow, batch->packet_count, batch->byte_count,
                        batch->tcp_flags);

    actions = dp_netdev_flow_get_actions(flow);

    dp_netdev_execute_actions(pmd, batch->packets, batch->packet_count, true,
                              &batch->md, actions->actions, actions->size);

    dp_netdev_count_packet(pmd->dp, DP_STAT_HIT, batch->packet_count);
}
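
/* Illustrative sketch (added; not part of the datapath proper): the intended
 * lifecycle of a 'struct packet_batch' is init once per flow, update once
 * per packet, execute once per batch.  All parameters here are hypothetical
 * stand-ins supplied by the caller. */
static inline void
packet_batch_example(struct dp_netdev_pmd_thread *pmd,
                     struct dp_netdev_flow *flow, struct pkt_metadata *md,
                     struct dpif_packet **pkts, const struct miniflow **mfs,
                     size_t n)
{
    struct packet_batch batch;
    size_t i;

    packet_batch_init(&batch, flow, md);              /* Bind batch to flow. */
    for (i = 0; i < n && i < NETDEV_MAX_RX_BATCH; i++) {
        packet_batch_update(&batch, pkts[i], mfs[i]); /* Accumulate packets. */
    }
    packet_batch_execute(&batch, pmd);                /* Run actions once. */
}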
static inline bool
dp_netdev_queue_batches(struct dpif_packet *pkt, struct pkt_metadata *md,
                        struct dp_netdev_flow *flow, const struct miniflow *mf,
                        struct packet_batch *batches, size_t *n_batches,
                        size_t max_batches)
{
    struct packet_batch *batch = NULL;
    int j;

    if (OVS_UNLIKELY(!flow)) {
        return false;
    }
    /* XXX: This O(n^2) algorithm makes sense if we're operating under the
     * assumption that the number of distinct flows (and therefore the
     * number of distinct batches) is quite small.  If this turns out not
     * to be the case, it may make sense to pre-sort based on the
     * netdev_flow pointer.  That done, we can get the appropriate batching
     * in O(n * log(n)) instead. */
    for (j = *n_batches - 1; j >= 0; j--) {
        if (batches[j].flow == flow) {
            batch = &batches[j];
            packet_batch_update(batch, pkt, mf);
            return true;
        }
    }
    if (OVS_UNLIKELY(*n_batches >= max_batches)) {
        return false;
    }

    batch = &batches[(*n_batches)++];
    packet_batch_init(batch, flow, md);
    packet_batch_update(batch, pkt, mf);
    return true;
}
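
/* Note (added for clarity): with 'n' packets spread over 'b' distinct flows,
 * the backward scan above costs O(n * b) pointer comparisons overall.  Since
 * callers cap 'max_batches' at a small constant (the ARRAY_SIZE of a stack
 * array), 'b' stays tiny and the quadratic bound is harmless in practice. */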
static inline void
dpif_packet_swap(struct dpif_packet **a, struct dpif_packet **b)
{
    struct dpif_packet *tmp = *a;

    *a = *b;
    *b = tmp;
}
/* Tries to process all ('cnt') of the 'packets' using only the exact match
 * cache 'flow_cache'.  If a flow is not found for a packet 'packets[i]', or
 * if there is no matching batch for a packet's flow, the miniflow is copied
 * into 'keys' and the packet pointer is moved to the beginning of the
 * 'packets' array.
 *
 * Returns the number of packets that still need to be processed in the
 * 'packets' array (they have been moved to the beginning of the vector). */
static inline size_t
emc_processing(struct dp_netdev_pmd_thread *pmd, struct dpif_packet **packets,
               size_t cnt, struct pkt_metadata *md,
               struct netdev_flow_key *keys)
{
    struct netdev_flow_key key;
    struct packet_batch batches[4];
    struct emc_cache *flow_cache = &pmd->flow_cache;
    size_t n_batches = 0, i;
    size_t notfound_cnt = 0;

    miniflow_initialize(&key.flow, key.buf);
    for (i = 0; i < cnt; i++) {
        struct dp_netdev_flow *flow;
        uint32_t hash;

        if (OVS_UNLIKELY(ofpbuf_size(&packets[i]->ofpbuf) < ETH_HEADER_LEN)) {
            dpif_packet_delete(packets[i]);
            continue;
        }

        miniflow_extract(&packets[i]->ofpbuf, md, &key.flow);

        hash = dpif_netdev_packet_get_dp_hash(packets[i], &key.flow);

        flow = emc_lookup(flow_cache, &key.flow, hash);
        if (OVS_UNLIKELY(!dp_netdev_queue_batches(packets[i], md,
                                                  flow, &key.flow,
                                                  batches, &n_batches,
                                                  ARRAY_SIZE(batches)))) {
            if (i != notfound_cnt) {
                dpif_packet_swap(&packets[i], &packets[notfound_cnt]);
            }

            keys[notfound_cnt++] = key;
        }
    }

    for (i = 0; i < n_batches; i++) {
        packet_batch_execute(&batches[i], pmd);
    }

    return notfound_cnt;
}
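
/* Worked example (added for clarity, hypothetical packets): suppose cnt == 4
 * and the EMC hits for packets[0] and packets[2] only.  The two hits are
 * queued into per-flow batches and executed; packets[1] and packets[3] are
 * swapped to the front, their miniflows land in keys[0] and keys[1], and the
 * function returns 2, so fast_path_processing() only looks at the first two
 * slots of 'packets' and 'keys'. */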
static inline void
fast_path_processing(struct dp_netdev_pmd_thread *pmd,
                     struct dpif_packet **packets, size_t cnt,
                     struct pkt_metadata *md, struct netdev_flow_key *keys)
{
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = cnt;
#else
    /* Sparse or MSVC doesn't like variable length arrays. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_RX_BATCH };
#endif
    struct packet_batch batches[PKT_ARRAY_SIZE];
    const struct miniflow *mfs[PKT_ARRAY_SIZE]; /* NULL at bad packets. */
    struct cls_rule *rules[PKT_ARRAY_SIZE];
    struct dp_netdev *dp = pmd->dp;
    struct emc_cache *flow_cache = &pmd->flow_cache;
    size_t n_batches, i;
    bool any_miss;
    for (i = 0; i < cnt; i++) {
        mfs[i] = &keys[i].flow;
    }

    any_miss = !classifier_lookup_miniflow_batch(&dp->cls, mfs, rules, cnt);
    if (OVS_UNLIKELY(any_miss) && !fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
        uint64_t actions_stub[512 / 8], slow_stub[512 / 8];
        struct ofpbuf actions, put_actions;

        ofpbuf_use_stub(&actions, actions_stub, sizeof actions_stub);
        ofpbuf_use_stub(&put_actions, slow_stub, sizeof slow_stub);

        for (i = 0; i < cnt; i++) {
            const struct dp_netdev_flow *netdev_flow;
            struct ofpbuf *add_actions;
            struct match match;
            int error;

            if (OVS_LIKELY(rules[i] || !mfs[i])) {
                continue;
            }

            /* It's possible that an earlier slow path execution installed
             * the rule this flow needs.  In this case, it's a lot cheaper
             * to catch it here than execute a miss. */
            netdev_flow = dp_netdev_lookup_flow(dp, mfs[i]);
            if (netdev_flow) {
                rules[i] = CONST_CAST(struct cls_rule *, &netdev_flow->cr);
                continue;
            }

            miniflow_expand(mfs[i], &match.flow);

            ofpbuf_clear(&actions);
            ofpbuf_clear(&put_actions);

            error = dp_netdev_upcall(dp, packets[i], &match.flow, &match.wc,
                                     DPIF_UC_MISS, NULL, &actions,
                                     &put_actions);
            if (OVS_UNLIKELY(error && error != ENOSPC)) {
                continue;
            }

            /* We can't allow the packet batching in the next loop to execute
             * the actions.  Otherwise, if there are any slow path actions,
             * we'll send the packet up twice. */
            dp_netdev_execute_actions(pmd, &packets[i], 1, true, md,
                                      ofpbuf_data(&actions),
                                      ofpbuf_size(&actions));

            add_actions = ofpbuf_size(&put_actions)
                ? &put_actions
                : &actions;

            ovs_mutex_lock(&dp->flow_mutex);
            /* XXX: There's a brief race where this flow could have already
             * been installed since we last did the flow lookup.  This could
             * be solved by moving the mutex lock outside the loop, but that's
             * an awful long time to be locking everyone out of making flow
             * installs.  If we move to a per-core classifier, it would be
             * reasonable. */
            if (OVS_LIKELY(error != ENOSPC)
                && !dp_netdev_lookup_flow(dp, mfs[i])) {
                dp_netdev_flow_add(dp, &match, ofpbuf_data(add_actions),
                                   ofpbuf_size(add_actions));
            }
            ovs_mutex_unlock(&dp->flow_mutex);
        }

        ofpbuf_uninit(&actions);
        ofpbuf_uninit(&put_actions);
        fat_rwlock_unlock(&dp->upcall_rwlock);
    } else if (OVS_UNLIKELY(any_miss)) {
        int dropped_cnt = 0;

        for (i = 0; i < cnt; i++) {
            if (OVS_UNLIKELY(!rules[i] && mfs[i])) {
                dpif_packet_delete(packets[i]);
                dropped_cnt++;
            }
        }

        dp_netdev_count_packet(dp, DP_STAT_LOST, dropped_cnt);
    }
    n_batches = 0;
    for (i = 0; i < cnt; i++) {
        struct dpif_packet *packet = packets[i];
        struct dp_netdev_flow *flow;

        if (OVS_UNLIKELY(!rules[i] || !mfs[i])) {
            continue;
        }

        flow = dp_netdev_flow_cast(rules[i]);
        emc_insert(flow_cache, mfs[i], dpif_packet_get_dp_hash(packet),
                   flow);
        dp_netdev_queue_batches(packet, md, flow, mfs[i], batches, &n_batches,
                                ARRAY_SIZE(batches));
    }

    for (i = 0; i < n_batches; i++) {
        packet_batch_execute(&batches[i], pmd);
    }
}
static void
dp_netdev_input(struct dp_netdev_pmd_thread *pmd,
                struct dpif_packet **packets, int cnt, struct pkt_metadata *md)
{
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = cnt;
#else
    /* Sparse or MSVC doesn't like variable length arrays. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_RX_BATCH };
#endif
    struct netdev_flow_key keys[PKT_ARRAY_SIZE];
    size_t newcnt;

    newcnt = emc_processing(pmd, packets, cnt, md, keys);
    if (OVS_UNLIKELY(newcnt)) {
        fast_path_processing(pmd, packets, newcnt, md, keys);
    }
}
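
/* Note (added for clarity): dp_netdev_input() is the whole receive pipeline
 * in two tiers.  emc_processing() resolves as many packets as possible
 * against the per-thread exact match cache and executes their batches; only
 * the leftovers, compacted at the front of 'packets' with their miniflows in
 * 'keys', fall through to fast_path_processing(), which consults the
 * classifier and, on a miss, the upcall path. */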
struct dp_netdev_execute_aux {
    struct dp_netdev_pmd_thread *pmd;
};

static void
dpif_netdev_register_upcall_cb(struct dpif *dpif, upcall_callback *cb,
                               void *aux)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    dp->upcall_aux = aux;
    dp->upcall_cb = cb;
}
static void
dp_netdev_drop_packets(struct dpif_packet **packets, int cnt, bool may_steal)
{
    if (may_steal) {
        int i;

        for (i = 0; i < cnt; i++) {
            dpif_packet_delete(packets[i]);
        }
    }
}
static void
dp_execute_cb(void *aux_, struct dpif_packet **packets, int cnt,
              struct pkt_metadata *md,
              const struct nlattr *a, bool may_steal)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct dp_netdev_execute_aux *aux = aux_;
    uint32_t *depth = recirc_depth_get();
    struct dp_netdev_pmd_thread *pmd = aux->pmd;
    struct dp_netdev *dp = pmd->dp;
    int type = nl_attr_type(a);
    struct dp_netdev_port *p;
    int i;

    switch ((enum ovs_action_attr)type) {
    case OVS_ACTION_ATTR_OUTPUT:
        p = dp_netdev_lookup_port(dp, u32_to_odp(nl_attr_get_u32(a)));
        if (OVS_LIKELY(p)) {
            netdev_send(p->netdev, pmd->core_id, packets, cnt, may_steal);
            return;
        }
        break;
    case OVS_ACTION_ATTR_USERSPACE:
        if (!fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
            const struct nlattr *userdata;
            struct ofpbuf actions;
            struct flow flow;

            userdata = nl_attr_find_nested(a, OVS_USERSPACE_ATTR_USERDATA);
            ofpbuf_init(&actions, 0);

            for (i = 0; i < cnt; i++) {
                int error;

                ofpbuf_clear(&actions);

                flow_extract(&packets[i]->ofpbuf, md, &flow);
                error = dp_netdev_upcall(dp, packets[i], &flow, NULL,
                                         DPIF_UC_ACTION, userdata, &actions,
                                         NULL);
                if (!error || error == ENOSPC) {
                    dp_netdev_execute_actions(pmd, &packets[i], 1, may_steal,
                                              md, ofpbuf_data(&actions),
                                              ofpbuf_size(&actions));
                } else if (may_steal) {
                    dpif_packet_delete(packets[i]);
                }
            }
            ofpbuf_uninit(&actions);
            fat_rwlock_unlock(&dp->upcall_rwlock);

            return;
        }
        break;
    case OVS_ACTION_ATTR_HASH: {
        const struct ovs_action_hash *hash_act;
        uint32_t hash;

        hash_act = nl_attr_get(a);

        for (i = 0; i < cnt; i++) {
            if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
                /* Hash need not be symmetric, nor does it need to include
                 * L2 fields. */
                hash = hash_2words(dpif_packet_get_dp_hash(packets[i]),
                                   hash_act->hash_basis);
            } else {
                VLOG_WARN("Unknown hash algorithm specified "
                          "for the hash action.");
                hash = 2;
            }

            if (!hash) {
                hash = 1; /* 0 is not valid */
            }

            dpif_packet_set_dp_hash(packets[i], hash);
        }
        return;
    }
    case OVS_ACTION_ATTR_RECIRC:
        if (*depth < MAX_RECIRC_DEPTH) {
            (*depth)++;
            for (i = 0; i < cnt; i++) {
                struct dpif_packet *recirc_pkt;
                struct pkt_metadata recirc_md = *md;

                recirc_pkt = (may_steal) ? packets[i]
                    : dpif_packet_clone(packets[i]);

                recirc_md.recirc_id = nl_attr_get_u32(a);

                /* Hash is private to each packet. */
                recirc_md.dp_hash = dpif_packet_get_dp_hash(packets[i]);

                dp_netdev_input(pmd, &recirc_pkt, 1, &recirc_md);
            }
            (*depth)--;

            return;
        }

        VLOG_WARN("Packet dropped.  Max recirculation depth exceeded.");
        break;
    case OVS_ACTION_ATTR_PUSH_VLAN:
    case OVS_ACTION_ATTR_POP_VLAN:
    case OVS_ACTION_ATTR_PUSH_MPLS:
    case OVS_ACTION_ATTR_POP_MPLS:
    case OVS_ACTION_ATTR_SET:
    case OVS_ACTION_ATTR_SET_MASKED:
    case OVS_ACTION_ATTR_SAMPLE:
    case OVS_ACTION_ATTR_UNSPEC:
    case __OVS_ACTION_ATTR_MAX:
        OVS_NOT_REACHED();
    }

    dp_netdev_drop_packets(packets, cnt, may_steal);
}
static void
dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
                          struct dpif_packet **packets, int cnt,
                          bool may_steal, struct pkt_metadata *md,
                          const struct nlattr *actions, size_t actions_len)
{
    struct dp_netdev_execute_aux aux = {pmd};

    odp_execute_actions(&aux, packets, cnt, may_steal, md, actions,
                        actions_len, dp_execute_cb);
}
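
/* Illustrative sketch (added; the helper and 'out_port' are hypothetical):
 * shows how a caller could hand a single packet to
 * dp_netdev_execute_actions() with one OVS_ACTION_ATTR_OUTPUT action built
 * on the stack. */
static inline void
execute_single_output_example(struct dp_netdev_pmd_thread *pmd,
                              struct dpif_packet *packet,
                              struct pkt_metadata *md, uint32_t out_port)
{
    uint64_t stub[64 / 8];
    struct ofpbuf actions;

    ofpbuf_use_stub(&actions, stub, sizeof stub);
    nl_msg_put_u32(&actions, OVS_ACTION_ATTR_OUTPUT, out_port);

    /* 'may_steal' is false, so the caller keeps ownership of 'packet'. */
    dp_netdev_execute_actions(pmd, &packet, 1, false, md,
                              ofpbuf_data(&actions), ofpbuf_size(&actions));
    ofpbuf_uninit(&actions);
}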
const struct dpif_class dpif_netdev_class = {
    "netdev",
    dpif_netdev_enumerate,
    dpif_netdev_port_open_type,
    dpif_netdev_open,
    dpif_netdev_close,
    dpif_netdev_destroy,
    dpif_netdev_run,
    dpif_netdev_wait,
    dpif_netdev_get_stats,
    dpif_netdev_port_add,
    dpif_netdev_port_del,
    dpif_netdev_port_query_by_number,
    dpif_netdev_port_query_by_name,
    NULL,                       /* port_get_pid */
    dpif_netdev_port_dump_start,
    dpif_netdev_port_dump_next,
    dpif_netdev_port_dump_done,
    dpif_netdev_port_poll,
    dpif_netdev_port_poll_wait,
    dpif_netdev_flow_flush,
    dpif_netdev_flow_dump_create,
    dpif_netdev_flow_dump_destroy,
    dpif_netdev_flow_dump_thread_create,
    dpif_netdev_flow_dump_thread_destroy,
    dpif_netdev_flow_dump_next,
    dpif_netdev_operate,
    NULL,                       /* recv_set */
    NULL,                       /* handlers_set */
    dpif_netdev_pmd_set,
    dpif_netdev_queue_to_priority,
    NULL,                       /* recv */
    NULL,                       /* recv_wait */
    NULL,                       /* recv_purge */
    dpif_netdev_register_upcall_cb,
    dpif_netdev_enable_upcall,
    dpif_netdev_disable_upcall,
};
static void
dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED,
                              const char *argv[], void *aux OVS_UNUSED)
{
    struct dp_netdev_port *old_port;
    struct dp_netdev_port *new_port;
    struct dp_netdev *dp;
    odp_port_t port_no;

    ovs_mutex_lock(&dp_netdev_mutex);
    dp = shash_find_data(&dp_netdevs, argv[1]);
    if (!dp || !dpif_netdev_class_is_dummy(dp->class)) {
        ovs_mutex_unlock(&dp_netdev_mutex);
        unixctl_command_reply_error(conn, "unknown datapath or not a dummy");
        return;
    }
    ovs_refcount_ref(&dp->ref_cnt);
    ovs_mutex_unlock(&dp_netdev_mutex);

    ovs_mutex_lock(&dp->port_mutex);
    if (get_port_by_name(dp, argv[2], &old_port)) {
        unixctl_command_reply_error(conn, "unknown port");
        goto exit;
    }

    port_no = u32_to_odp(atoi(argv[3]));
    if (!port_no || port_no == ODPP_NONE) {
        unixctl_command_reply_error(conn, "bad port number");
        goto exit;
    }
    if (dp_netdev_lookup_port(dp, port_no)) {
        unixctl_command_reply_error(conn, "port number already in use");
        goto exit;
    }

    /* Remove the old port. */
    cmap_remove(&dp->ports, &old_port->node, hash_port_no(old_port->port_no));
    ovsrcu_postpone(free, old_port);

    /* Insert the new port (cmap semantics mean we cannot re-insert
     * 'old_port'). */
    new_port = xmemdup(old_port, sizeof *old_port);
    new_port->port_no = port_no;
    cmap_insert(&dp->ports, &new_port->node, hash_port_no(port_no));

    seq_change(dp->port_seq);
    unixctl_command_reply(conn, NULL);

exit:
    ovs_mutex_unlock(&dp->port_mutex);
    dp_netdev_unref(dp);
}
static void
dpif_dummy_delete_port(struct unixctl_conn *conn, int argc OVS_UNUSED,
                       const char *argv[], void *aux OVS_UNUSED)
{
    struct dp_netdev_port *port;
    struct dp_netdev *dp;

    ovs_mutex_lock(&dp_netdev_mutex);
    dp = shash_find_data(&dp_netdevs, argv[1]);
    if (!dp || !dpif_netdev_class_is_dummy(dp->class)) {
        ovs_mutex_unlock(&dp_netdev_mutex);
        unixctl_command_reply_error(conn, "unknown datapath or not a dummy");
        return;
    }
    ovs_refcount_ref(&dp->ref_cnt);
    ovs_mutex_unlock(&dp_netdev_mutex);

    ovs_mutex_lock(&dp->port_mutex);
    if (get_port_by_name(dp, argv[2], &port)) {
        unixctl_command_reply_error(conn, "unknown port");
    } else if (port->port_no == ODPP_LOCAL) {
        unixctl_command_reply_error(conn, "can't delete local port");
    } else {
        do_del_port(dp, port);
        unixctl_command_reply(conn, NULL);
    }
    ovs_mutex_unlock(&dp->port_mutex);

    dp_netdev_unref(dp);
}
static void
dpif_dummy_register__(const char *type)
{
    struct dpif_class *class;

    class = xmalloc(sizeof *class);
    *class = dpif_netdev_class;
    class->type = xstrdup(type);
    dp_register_provider(class);
}
void
dpif_dummy_register(bool override)
{
    if (override) {
        struct sset types;
        const char *type;

        sset_init(&types);
        dp_enumerate_types(&types);
        SSET_FOR_EACH (type, &types) {
            if (!dp_unregister_provider(type)) {
                dpif_dummy_register__(type);
            }
        }
        sset_destroy(&types);
    }

    dpif_dummy_register__("dummy");

    unixctl_command_register("dpif-dummy/change-port-number",
                             "dp port new-number",
                             3, 3, dpif_dummy_change_port_number, NULL);
    unixctl_command_register("dpif-dummy/delete-port", "dp port",
                             2, 2, dpif_dummy_delete_port, NULL);
}