]>
Commit | Line | Data |
---|---|---|
72865317 | 1 | /* |
d262ac2c | 2 | * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2016 Nicira, Inc. |
72865317 BP |
3 | * |
4 | * Licensed under the Apache License, Version 2.0 (the "License"); | |
5 | * you may not use this file except in compliance with the License. | |
6 | * You may obtain a copy of the License at: | |
7 | * | |
8 | * http://www.apache.org/licenses/LICENSE-2.0 | |
9 | * | |
10 | * Unless required by applicable law or agreed to in writing, software | |
11 | * distributed under the License is distributed on an "AS IS" BASIS, | |
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
13 | * See the License for the specific language governing permissions and | |
14 | * limitations under the License. | |
15 | */ | |
16 | ||
17 | #include <config.h> | |
db73f716 | 18 | #include "dpif-netdev.h" |
72865317 | 19 | |
72865317 BP |
20 | #include <ctype.h> |
21 | #include <errno.h> | |
22 | #include <fcntl.h> | |
23 | #include <inttypes.h> | |
7f3adc00 | 24 | #include <net/if.h> |
7daedce4 | 25 | #include <netinet/in.h> |
cdee00fd | 26 | #include <stdint.h> |
72865317 BP |
27 | #include <stdlib.h> |
28 | #include <string.h> | |
29 | #include <sys/ioctl.h> | |
7daedce4 | 30 | #include <sys/socket.h> |
72865317 | 31 | #include <sys/stat.h> |
72865317 BP |
32 | #include <unistd.h> |
33 | ||
01961bbd DDP |
34 | #ifdef DPDK_NETDEV |
35 | #include <rte_cycles.h> | |
36 | #endif | |
37 | ||
9f861c91 | 38 | #include "bitmap.h" |
59e6d833 | 39 | #include "cmap.h" |
5cf3edb3 | 40 | #include "conntrack.h" |
7daedce4 | 41 | #include "coverage.h" |
4d4e68ed | 42 | #include "ct-dpif.h" |
72865317 | 43 | #include "csum.h" |
e14deea0 | 44 | #include "dp-packet.h" |
614c4892 | 45 | #include "dpif.h" |
72865317 | 46 | #include "dpif-provider.h" |
614c4892 | 47 | #include "dummy.h" |
afae68b1 | 48 | #include "fat-rwlock.h" |
72865317 | 49 | #include "flow.h" |
762d146a | 50 | #include "hmapx.h" |
6c3eee82 | 51 | #include "latch.h" |
72865317 | 52 | #include "netdev.h" |
de281153 | 53 | #include "netdev-vport.h" |
cdee00fd | 54 | #include "netlink.h" |
f094af7b | 55 | #include "odp-execute.h" |
72865317 | 56 | #include "odp-util.h" |
25d436fb BW |
57 | #include "openvswitch/dynamic-string.h" |
58 | #include "openvswitch/list.h" | |
59 | #include "openvswitch/match.h" | |
60 | #include "openvswitch/ofp-print.h" | |
3eb67853 | 61 | #include "openvswitch/ofp-util.h" |
64c96779 | 62 | #include "openvswitch/ofpbuf.h" |
3eb67853 | 63 | #include "openvswitch/shash.h" |
25d436fb | 64 | #include "openvswitch/vlog.h" |
5a034064 | 65 | #include "ovs-numa.h" |
61e7deb1 | 66 | #include "ovs-rcu.h" |
72865317 BP |
67 | #include "packets.h" |
68 | #include "poll-loop.h" | |
0de8783a | 69 | #include "pvector.h" |
26c6b6cd | 70 | #include "random.h" |
d33ed218 | 71 | #include "seq.h" |
3eb67853 | 72 | #include "smap.h" |
0cbfe35d | 73 | #include "sset.h" |
72865317 | 74 | #include "timeval.h" |
53902038 | 75 | #include "tnl-neigh-cache.h" |
7f9b8504 | 76 | #include "tnl-ports.h" |
74cc3969 | 77 | #include "unixctl.h" |
72865317 | 78 | #include "util.h" |
7daedce4 | 79 | |
d98e6007 | 80 | VLOG_DEFINE_THIS_MODULE(dpif_netdev); |
72865317 | 81 | |
8bb113da | 82 | #define FLOW_DUMP_MAX_BATCH 50 |
adcf00ba AZ |
83 | /* Use per thread recirc_depth to prevent recirculation loop. */ |
84 | #define MAX_RECIRC_DEPTH 5 | |
85 | DEFINE_STATIC_PER_THREAD_DATA(uint32_t, recirc_depth, 0) | |
e4cfed38 | 86 | |
72865317 | 87 | /* Configuration parameters. */ |
72865317 BP |
88 | enum { MAX_FLOWS = 65536 }; /* Maximum number of flows in flow table. */ |
89 | ||
8a4e3a85 BP |
90 | /* Protects against changes to 'dp_netdevs'. */ |
91 | static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER; | |
92 | ||
93 | /* Contains all 'struct dp_netdev's. */ | |
94 | static struct shash dp_netdevs OVS_GUARDED_BY(dp_netdev_mutex) | |
95 | = SHASH_INITIALIZER(&dp_netdevs); | |
96 | ||
623540e4 | 97 | static struct vlog_rate_limit upcall_rl = VLOG_RATE_LIMIT_INIT(600, 600); |
6b31e073 | 98 | |
5cf3edb3 DDP |
99 | #define DP_NETDEV_CS_SUPPORTED_MASK (CS_NEW | CS_ESTABLISHED | CS_RELATED \ |
100 | | CS_INVALID | CS_REPLY_DIR | CS_TRACKED) | |
101 | #define DP_NETDEV_CS_UNSUPPORTED_MASK (~(uint32_t)DP_NETDEV_CS_SUPPORTED_MASK) | |
102 | ||
2494ccd7 JS |
103 | static struct odp_support dp_netdev_support = { |
104 | .max_mpls_depth = SIZE_MAX, | |
105 | .recirc = true, | |
5cf3edb3 DDP |
106 | .ct_state = true, |
107 | .ct_zone = true, | |
108 | .ct_mark = true, | |
109 | .ct_label = true, | |
2494ccd7 JS |
110 | }; |
111 | ||
79df317f | 112 | /* Stores a miniflow with inline values */ |
9bbf1c3d | 113 | |
9bbf1c3d | 114 | struct netdev_flow_key { |
caeb4906 JR |
115 | uint32_t hash; /* Hash function differs for different users. */ |
116 | uint32_t len; /* Length of the following miniflow (incl. map). */ | |
0de8783a | 117 | struct miniflow mf; |
8fd47924 | 118 | uint64_t buf[FLOW_MAX_PACKET_U64S]; |
9bbf1c3d DDP |
119 | }; |
120 | ||
121 | /* Exact match cache for frequently used flows | |
122 | * | |
123 | * The cache uses a 32-bit hash of the packet (which can be the RSS hash) to | |
124 | * search its entries for a miniflow that matches exactly the miniflow of the | |
0de8783a | 125 | * packet. It stores the 'dpcls_rule' (rule) that matches the miniflow. |
9bbf1c3d DDP |
126 | * |
127 | * A cache entry holds a reference to its 'dp_netdev_flow'. | |
128 | * | |
129 | * A miniflow with a given hash can be in one of EM_FLOW_HASH_SEGS different | |
130 | * entries. The 32-bit hash is split into EM_FLOW_HASH_SEGS values (each of | |
131 | * them is EM_FLOW_HASH_SHIFT bits wide and the remainder is thrown away). Each | |
132 | * value is the index of a cache entry where the miniflow could be. | |
133 | * | |
134 | * | |
135 | * Thread-safety | |
136 | * ============= | |
137 | * | |
138 | * Each pmd_thread has its own private exact match cache. | |
139 | * If dp_netdev_input is not called from a pmd thread, a mutex is used. | |
140 | */ | |
141 | ||
fc82e877 | 142 | #define EM_FLOW_HASH_SHIFT 13 |
9bbf1c3d DDP |
143 | #define EM_FLOW_HASH_ENTRIES (1u << EM_FLOW_HASH_SHIFT) |
144 | #define EM_FLOW_HASH_MASK (EM_FLOW_HASH_ENTRIES - 1) | |
145 | #define EM_FLOW_HASH_SEGS 2 | |
146 | ||
147 | struct emc_entry { | |
9bbf1c3d | 148 | struct dp_netdev_flow *flow; |
0de8783a | 149 | struct netdev_flow_key key; /* key.hash used for emc hash value. */ |
9bbf1c3d DDP |
150 | }; |
151 | ||
152 | struct emc_cache { | |
153 | struct emc_entry entries[EM_FLOW_HASH_ENTRIES]; | |
67ad54cb | 154 | int sweep_idx; /* For emc_cache_slow_sweep(). */ |
9bbf1c3d DDP |
155 | }; |
156 | ||
157 | /* Iterate in the exact match cache through every entry that might contain a | |
158 | * miniflow with hash 'HASH'. */ | |
159 | #define EMC_FOR_EACH_POS_WITH_HASH(EMC, CURRENT_ENTRY, HASH) \ | |
160 | for (uint32_t i__ = 0, srch_hash__ = (HASH); \ | |
161 | (CURRENT_ENTRY) = &(EMC)->entries[srch_hash__ & EM_FLOW_HASH_MASK], \ | |
162 | i__ < EM_FLOW_HASH_SEGS; \ | |
163 | i__++, srch_hash__ >>= EM_FLOW_HASH_SHIFT) | |
0de8783a JR |
164 | \f |
165 | /* Simple non-wildcarding single-priority classifier. */ | |
166 | ||
3453b4d6 JS |
167 | /* Time in ms between successive optimizations of the dpcls subtable vector */ |
168 | #define DPCLS_OPTIMIZATION_INTERVAL 1000 | |
169 | ||
0de8783a | 170 | struct dpcls { |
3453b4d6 JS |
171 | struct cmap_node node; /* Within dp_netdev_pmd_thread.classifiers */ |
172 | odp_port_t in_port; | |
0de8783a | 173 | struct cmap subtables_map; |
da9cfca6 | 174 | struct pvector subtables; |
0de8783a | 175 | }; |
9bbf1c3d | 176 | |
0de8783a JR |
177 | /* A rule to be inserted to the classifier. */ |
178 | struct dpcls_rule { | |
179 | struct cmap_node cmap_node; /* Within struct dpcls_subtable 'rules'. */ | |
180 | struct netdev_flow_key *mask; /* Subtable's mask. */ | |
181 | struct netdev_flow_key flow; /* Matching key. */ | |
182 | /* 'flow' must be the last field, additional space is allocated here. */ | |
183 | }; | |
184 | ||
185 | static void dpcls_init(struct dpcls *); | |
186 | static void dpcls_destroy(struct dpcls *); | |
3453b4d6 | 187 | static void dpcls_sort_subtable_vector(struct dpcls *); |
0de8783a JR |
188 | static void dpcls_insert(struct dpcls *, struct dpcls_rule *, |
189 | const struct netdev_flow_key *mask); | |
190 | static void dpcls_remove(struct dpcls *, struct dpcls_rule *); | |
3453b4d6 | 191 | static bool dpcls_lookup(struct dpcls *cls, |
0de8783a | 192 | const struct netdev_flow_key keys[], |
3453b4d6 JS |
193 | struct dpcls_rule **rules, size_t cnt, |
194 | int *num_lookups_p); | |
0de8783a | 195 | \f |
8a4e3a85 BP |
196 | /* Datapath based on the network device interface from netdev.h. |
197 | * | |
198 | * | |
199 | * Thread-safety | |
200 | * ============= | |
201 | * | |
202 | * Some members, marked 'const', are immutable. Accessing other members | |
203 | * requires synchronization, as noted in more detail below. | |
204 | * | |
205 | * Acquisition order is, from outermost to innermost: | |
206 | * | |
207 | * dp_netdev_mutex (global) | |
59e6d833 | 208 | * port_mutex |
d0cca6c3 | 209 | * non_pmd_mutex |
8a4e3a85 | 210 | */ |
72865317 | 211 | struct dp_netdev { |
8a4e3a85 BP |
212 | const struct dpif_class *const class; |
213 | const char *const name; | |
6b31e073 | 214 | struct dpif *dpif; |
6a8267c5 BP |
215 | struct ovs_refcount ref_cnt; |
216 | atomic_flag destroyed; | |
72865317 | 217 | |
8a4e3a85 BP |
218 | /* Ports. |
219 | * | |
e9985d6a DDP |
220 | * Any lookup into 'ports' or any access to the dp_netdev_ports found |
221 | * through 'ports' requires taking 'port_mutex'. */ | |
59e6d833 | 222 | struct ovs_mutex port_mutex; |
e9985d6a | 223 | struct hmap ports; |
d33ed218 | 224 | struct seq *port_seq; /* Incremented whenever a port changes. */ |
6c3eee82 | 225 | |
6b31e073 RW |
226 | /* Protects access to ofproto-dpif-upcall interface during revalidator |
227 | * thread synchronization. */ | |
228 | struct fat_rwlock upcall_rwlock; | |
623540e4 EJ |
229 | upcall_callback *upcall_cb; /* Callback function for executing upcalls. */ |
230 | void *upcall_aux; | |
6b31e073 | 231 | |
e4e74c3a AW |
232 | /* Callback function for notifying the purging of dp flows (during |
233 | * reseting pmd deletion). */ | |
234 | dp_purge_callback *dp_purge_cb; | |
235 | void *dp_purge_aux; | |
236 | ||
65f13b50 AW |
237 | /* Stores all 'struct dp_netdev_pmd_thread's. */ |
238 | struct cmap poll_threads; | |
239 | ||
240 | /* Protects the access of the 'struct dp_netdev_pmd_thread' | |
241 | * instance for non-pmd thread. */ | |
242 | struct ovs_mutex non_pmd_mutex; | |
243 | ||
244 | /* Each pmd thread will store its pointer to | |
245 | * 'struct dp_netdev_pmd_thread' in 'per_pmd_key'. */ | |
246 | ovsthread_key_t per_pmd_key; | |
f2eee189 | 247 | |
a6a426d6 IM |
248 | struct seq *reconfigure_seq; |
249 | uint64_t last_reconfigure_seq; | |
250 | ||
a14b8947 | 251 | /* Cpu mask for pin of pmd threads. */ |
f2eee189 | 252 | char *pmd_cmask; |
6e3c6fa4 | 253 | |
a36de779 | 254 | uint64_t last_tnl_conf_seq; |
5cf3edb3 DDP |
255 | |
256 | struct conntrack conntrack; | |
72865317 BP |
257 | }; |
258 | ||
8a4e3a85 | 259 | static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp, |
e9985d6a DDP |
260 | odp_port_t) |
261 | OVS_REQUIRES(dp->port_mutex); | |
ff073a71 | 262 | |
51852a57 | 263 | enum dp_stat_type { |
abcf3ef4 DDP |
264 | DP_STAT_EXACT_HIT, /* Packets that had an exact match (emc). */ |
265 | DP_STAT_MASKED_HIT, /* Packets that matched in the flow table. */ | |
51852a57 BP |
266 | DP_STAT_MISS, /* Packets that did not match. */ |
267 | DP_STAT_LOST, /* Packets not passed up to the client. */ | |
3453b4d6 JS |
268 | DP_STAT_LOOKUP_HIT, /* Number of subtable lookups for flow table |
269 | hits */ | |
51852a57 BP |
270 | DP_N_STATS |
271 | }; | |
272 | ||
55e3ca97 DDP |
273 | enum pmd_cycles_counter_type { |
274 | PMD_CYCLES_POLLING, /* Cycles spent polling NICs. */ | |
275 | PMD_CYCLES_PROCESSING, /* Cycles spent processing packets */ | |
276 | PMD_N_CYCLES | |
277 | }; | |
278 | ||
324c8374 IM |
279 | #define XPS_TIMEOUT_MS 500LL |
280 | ||
3eb67853 IM |
281 | /* Contained by struct dp_netdev_port's 'rxqs' member. */ |
282 | struct dp_netdev_rxq { | |
947dc567 DDP |
283 | struct dp_netdev_port *port; |
284 | struct netdev_rxq *rx; | |
285 | unsigned core_id; /* Core to which this queue should be | |
286 | pinned. OVS_CORE_UNSPEC if the | |
287 | queue doesn't need to be pinned to a | |
288 | particular core. */ | |
e32971b8 | 289 | struct dp_netdev_pmd_thread *pmd; /* pmd thread that will poll this queue. */ |
3eb67853 IM |
290 | }; |
291 | ||
72865317 BP |
292 | /* A port in a netdev-based datapath. */ |
293 | struct dp_netdev_port { | |
35303d71 | 294 | odp_port_t port_no; |
72865317 | 295 | struct netdev *netdev; |
e9985d6a | 296 | struct hmap_node node; /* Node in dp_netdev's 'ports'. */ |
4b609110 | 297 | struct netdev_saved_flags *sf; |
3eb67853 | 298 | struct dp_netdev_rxq *rxqs; |
2e4450aa | 299 | unsigned n_rxq; /* Number of elements in 'rxq' */ |
324c8374 IM |
300 | bool dynamic_txqs; /* If true XPS will be used. */ |
301 | unsigned *txq_used; /* Number of threads that uses each tx queue. */ | |
302 | struct ovs_mutex txq_used_mutex; | |
0cbfe35d | 303 | char *type; /* Port type as requested by user. */ |
3eb67853 | 304 | char *rxq_affinity_list; /* Requested affinity of rx queues. */ |
e32971b8 | 305 | bool need_reconfigure; /* True if we should reconfigure netdev. */ |
72865317 BP |
306 | }; |
307 | ||
1c1e46ed AW |
308 | /* Contained by struct dp_netdev_flow's 'stats' member. */ |
309 | struct dp_netdev_flow_stats { | |
eb94da30 DDP |
310 | atomic_llong used; /* Last used time, in monotonic msecs. */ |
311 | atomic_ullong packet_count; /* Number of packets matched. */ | |
312 | atomic_ullong byte_count; /* Number of bytes matched. */ | |
313 | atomic_uint16_t tcp_flags; /* Bitwise-OR of seen tcp_flags values. */ | |
1c1e46ed AW |
314 | }; |
315 | ||
316 | /* A flow in 'dp_netdev_pmd_thread's 'flow_table'. | |
8a4e3a85 BP |
317 | * |
318 | * | |
319 | * Thread-safety | |
320 | * ============= | |
321 | * | |
322 | * Except near the beginning or ending of its lifespan, rule 'rule' belongs to | |
1c1e46ed | 323 | * its pmd thread's classifier. The text below calls this classifier 'cls'. |
8a4e3a85 BP |
324 | * |
325 | * Motivation | |
326 | * ---------- | |
327 | * | |
328 | * The thread safety rules described here for "struct dp_netdev_flow" are | |
329 | * motivated by two goals: | |
330 | * | |
331 | * - Prevent threads that read members of "struct dp_netdev_flow" from | |
332 | * reading bad data due to changes by some thread concurrently modifying | |
333 | * those members. | |
334 | * | |
335 | * - Prevent two threads making changes to members of a given "struct | |
336 | * dp_netdev_flow" from interfering with each other. | |
337 | * | |
338 | * | |
339 | * Rules | |
340 | * ----- | |
341 | * | |
ed79f89a DDP |
342 | * A flow 'flow' may be accessed without a risk of being freed during an RCU |
343 | * grace period. Code that needs to hold onto a flow for a while | |
344 | * should try incrementing 'flow->ref_cnt' with dp_netdev_flow_ref(). | |
8a4e3a85 BP |
345 | * |
346 | * 'flow->ref_cnt' protects 'flow' from being freed. It doesn't protect the | |
ed79f89a DDP |
347 | * flow from being deleted from 'cls' and it doesn't protect members of 'flow' |
348 | * from modification. | |
8a4e3a85 BP |
349 | * |
350 | * Some members, marked 'const', are immutable. Accessing other members | |
351 | * requires synchronization, as noted in more detail below. | |
352 | */ | |
72865317 | 353 | struct dp_netdev_flow { |
11e5cf1f | 354 | const struct flow flow; /* Unmasked flow that created this entry. */ |
8a4e3a85 | 355 | /* Hash table index by unmasked flow. */ |
1c1e46ed AW |
356 | const struct cmap_node node; /* In owning dp_netdev_pmd_thread's */ |
357 | /* 'flow_table'. */ | |
70e5ed6f | 358 | const ovs_u128 ufid; /* Unique flow identifier. */ |
bd5131ba | 359 | const unsigned pmd_id; /* The 'core_id' of pmd thread owning this */ |
1c1e46ed | 360 | /* flow. */ |
72865317 | 361 | |
ed79f89a DDP |
362 | /* Number of references. |
363 | * The classifier owns one reference. | |
364 | * Any thread trying to keep a rule from being freed should hold its own | |
365 | * reference. */ | |
366 | struct ovs_refcount ref_cnt; | |
367 | ||
11e5cf1f DDP |
368 | bool dead; |
369 | ||
1c1e46ed AW |
370 | /* Statistics. */ |
371 | struct dp_netdev_flow_stats stats; | |
8a4e3a85 | 372 | |
45c626a3 | 373 | /* Actions. */ |
61e7deb1 | 374 | OVSRCU_TYPE(struct dp_netdev_actions *) actions; |
0de8783a | 375 | |
11e5cf1f DDP |
376 | /* While processing a group of input packets, the datapath uses the next |
377 | * member to store a pointer to the output batch for the flow. It is | |
378 | * reset after the batch has been sent out (See dp_netdev_queue_batches(), | |
f7ce4811 PS |
379 | * packet_batch_per_flow_init() and packet_batch_per_flow_execute()). */ |
380 | struct packet_batch_per_flow *batch; | |
11e5cf1f | 381 | |
0de8783a JR |
382 | /* Packet classification. */ |
383 | struct dpcls_rule cr; /* In owning dp_netdev's 'cls'. */ | |
384 | /* 'cr' must be the last member. */ | |
72865317 BP |
385 | }; |
386 | ||
ed79f89a | 387 | static void dp_netdev_flow_unref(struct dp_netdev_flow *); |
9bbf1c3d | 388 | static bool dp_netdev_flow_ref(struct dp_netdev_flow *); |
70e5ed6f JS |
389 | static int dpif_netdev_flow_from_nlattrs(const struct nlattr *, uint32_t, |
390 | struct flow *); | |
8a4e3a85 | 391 | |
a84cb64a BP |
392 | /* A set of datapath actions within a "struct dp_netdev_flow". |
393 | * | |
394 | * | |
395 | * Thread-safety | |
396 | * ============= | |
397 | * | |
45c626a3 | 398 | * A struct dp_netdev_actions 'actions' is protected with RCU. */ |
a84cb64a | 399 | struct dp_netdev_actions { |
a84cb64a BP |
400 | /* These members are immutable: they do not change during the struct's |
401 | * lifetime. */ | |
a84cb64a | 402 | unsigned int size; /* Size of 'actions', in bytes. */ |
9ff55ae2 | 403 | struct nlattr actions[]; /* Sequence of OVS_ACTION_ATTR_* attributes. */ |
a84cb64a BP |
404 | }; |
405 | ||
406 | struct dp_netdev_actions *dp_netdev_actions_create(const struct nlattr *, | |
407 | size_t); | |
61e7deb1 BP |
408 | struct dp_netdev_actions *dp_netdev_flow_get_actions( |
409 | const struct dp_netdev_flow *); | |
410 | static void dp_netdev_actions_free(struct dp_netdev_actions *); | |
a84cb64a | 411 | |
1c1e46ed AW |
412 | /* Contained by struct dp_netdev_pmd_thread's 'stats' member. */ |
413 | struct dp_netdev_pmd_stats { | |
414 | /* Indexed by DP_STAT_*. */ | |
eb94da30 | 415 | atomic_ullong n[DP_N_STATS]; |
1c1e46ed AW |
416 | }; |
417 | ||
55e3ca97 DDP |
418 | /* Contained by struct dp_netdev_pmd_thread's 'cycle' member. */ |
419 | struct dp_netdev_pmd_cycles { | |
420 | /* Indexed by PMD_CYCLES_*. */ | |
421 | atomic_ullong n[PMD_N_CYCLES]; | |
422 | }; | |
423 | ||
947dc567 DDP |
424 | struct polled_queue { |
425 | struct netdev_rxq *rx; | |
426 | odp_port_t port_no; | |
427 | }; | |
428 | ||
ae7ad0a1 IM |
429 | /* Contained by struct dp_netdev_pmd_thread's 'poll_list' member. */ |
430 | struct rxq_poll { | |
947dc567 DDP |
431 | struct dp_netdev_rxq *rxq; |
432 | struct hmap_node node; | |
ae7ad0a1 IM |
433 | }; |
434 | ||
57eebbb4 DDP |
435 | /* Contained by struct dp_netdev_pmd_thread's 'send_port_cache', |
436 | * 'tnl_port_cache' or 'tx_ports'. */ | |
d0cca6c3 | 437 | struct tx_port { |
324c8374 IM |
438 | struct dp_netdev_port *port; |
439 | int qid; | |
440 | long long last_used; | |
d0cca6c3 DDP |
441 | struct hmap_node node; |
442 | }; | |
443 | ||
e4cfed38 PS |
444 | /* PMD: Poll modes drivers. PMD accesses devices via polling to eliminate |
445 | * the performance overhead of interrupt processing. Therefore netdev can | |
446 | * not implement rx-wait for these devices. dpif-netdev needs to poll | |
447 | * these device to check for recv buffer. pmd-thread does polling for | |
1c1e46ed | 448 | * devices assigned to itself. |
e4cfed38 PS |
449 | * |
450 | * DPDK used PMD for accessing NIC. | |
451 | * | |
65f13b50 AW |
452 | * Note, instance with cpu core id NON_PMD_CORE_ID will be reserved for |
453 | * I/O of all non-pmd threads. There will be no actual thread created | |
454 | * for the instance. | |
1c1e46ed AW |
455 | * |
456 | * Each struct has its own flow table and classifier. Packets received | |
457 | * from managed ports are looked up in the corresponding pmd thread's | |
458 | * flow table, and are executed with the found actions. | |
459 | * */ | |
65f13b50 | 460 | struct dp_netdev_pmd_thread { |
6c3eee82 | 461 | struct dp_netdev *dp; |
1c1e46ed | 462 | struct ovs_refcount ref_cnt; /* Every reference must be refcount'ed. */ |
65f13b50 | 463 | struct cmap_node node; /* In 'dp->poll_threads'. */ |
accf8626 AW |
464 | |
465 | pthread_cond_t cond; /* For synchronizing pmd thread reload. */ | |
466 | struct ovs_mutex cond_mutex; /* Mutex for condition variable. */ | |
467 | ||
65f13b50 AW |
468 | /* Per thread exact-match cache. Note, the instance for cpu core |
469 | * NON_PMD_CORE_ID can be accessed by multiple threads, and thusly | |
d0cca6c3 DDP |
470 | * need to be protected by 'non_pmd_mutex'. Every other instance |
471 | * will only be accessed by its own pmd thread. */ | |
9bbf1c3d | 472 | struct emc_cache flow_cache; |
1c1e46ed | 473 | |
3453b4d6 | 474 | /* Flow-Table and classifiers |
1c1e46ed AW |
475 | * |
476 | * Writers of 'flow_table' must take the 'flow_mutex'. Corresponding | |
3453b4d6 JS |
477 | * changes to 'classifiers' must be made while still holding the |
478 | * 'flow_mutex'. | |
1c1e46ed AW |
479 | */ |
480 | struct ovs_mutex flow_mutex; | |
1c1e46ed AW |
481 | struct cmap flow_table OVS_GUARDED; /* Flow table. */ |
482 | ||
3453b4d6 JS |
483 | /* One classifier per in_port polled by the pmd */ |
484 | struct cmap classifiers; | |
485 | /* Periodically sort subtable vectors according to hit frequencies */ | |
486 | long long int next_optimization; | |
487 | ||
1c1e46ed AW |
488 | /* Statistics. */ |
489 | struct dp_netdev_pmd_stats stats; | |
490 | ||
55e3ca97 DDP |
491 | /* Cycles counters */ |
492 | struct dp_netdev_pmd_cycles cycles; | |
493 | ||
494 | /* Used to count cicles. See 'cycles_counter_end()' */ | |
495 | unsigned long long last_cycles; | |
496 | ||
65f13b50 | 497 | struct latch exit_latch; /* For terminating the pmd thread. */ |
2788a1b1 DDP |
498 | struct seq *reload_seq; |
499 | uint64_t last_reload_seq; | |
14e3e12a | 500 | atomic_bool reload; /* Do we need to reload ports? */ |
6c3eee82 | 501 | pthread_t thread; |
bd5131ba | 502 | unsigned core_id; /* CPU core id of this pmd thread. */ |
65f13b50 | 503 | int numa_id; /* numa node id of this pmd thread. */ |
3eb67853 | 504 | bool isolated; |
81acebda | 505 | |
324c8374 IM |
506 | /* Queue id used by this pmd thread to send packets on all netdevs if |
507 | * XPS disabled for this netdev. All static_tx_qid's are unique and less | |
e32971b8 | 508 | * than 'cmap_count(dp->poll_threads)'. */ |
82d765f6 | 509 | const int static_tx_qid; |
6553d06b | 510 | |
d0cca6c3 | 511 | struct ovs_mutex port_mutex; /* Mutex for 'poll_list' and 'tx_ports'. */ |
ae7ad0a1 | 512 | /* List of rx queues to poll. */ |
947dc567 | 513 | struct hmap poll_list OVS_GUARDED; |
d0cca6c3 DDP |
514 | /* Map of 'tx_port's used for transmission. Written by the main thread, |
515 | * read by the pmd thread. */ | |
516 | struct hmap tx_ports OVS_GUARDED; | |
517 | ||
57eebbb4 DDP |
518 | /* These are thread-local copies of 'tx_ports'. One contains only tunnel |
519 | * ports (that support push_tunnel/pop_tunnel), the other contains ports | |
520 | * with at least one txq (that support send). A port can be in both. | |
521 | * | |
522 | * There are two separate maps to make sure that we don't try to execute | |
523 | * OUTPUT on a device which has 0 txqs or PUSH/POP on a non-tunnel device. | |
524 | * | |
525 | * The instances for cpu core NON_PMD_CORE_ID can be accessed by multiple | |
526 | * threads, and thusly need to be protected by 'non_pmd_mutex'. Every | |
527 | * other instance will only be accessed by its own pmd thread. */ | |
528 | struct hmap tnl_port_cache; | |
529 | struct hmap send_port_cache; | |
ae7ad0a1 | 530 | |
6553d06b DDP |
531 | /* Only a pmd thread can write on its own 'cycles' and 'stats'. |
532 | * The main thread keeps 'stats_zero' and 'cycles_zero' as base | |
533 | * values and subtracts them from 'stats' and 'cycles' before | |
534 | * reporting to the user */ | |
535 | unsigned long long stats_zero[DP_N_STATS]; | |
536 | uint64_t cycles_zero[PMD_N_CYCLES]; | |
e32971b8 DDP |
537 | |
538 | /* Set to true if the pmd thread needs to be reloaded. */ | |
539 | bool need_reload; | |
6c3eee82 BP |
540 | }; |
541 | ||
72865317 BP |
542 | /* Interface to netdev-based datapath. */ |
543 | struct dpif_netdev { | |
544 | struct dpif dpif; | |
545 | struct dp_netdev *dp; | |
d33ed218 | 546 | uint64_t last_port_seq; |
72865317 BP |
547 | }; |
548 | ||
8a4e3a85 | 549 | static int get_port_by_number(struct dp_netdev *dp, odp_port_t port_no, |
e9985d6a DDP |
550 | struct dp_netdev_port **portp) |
551 | OVS_REQUIRES(dp->port_mutex); | |
8a4e3a85 | 552 | static int get_port_by_name(struct dp_netdev *dp, const char *devname, |
e9985d6a DDP |
553 | struct dp_netdev_port **portp) |
554 | OVS_REQUIRES(dp->port_mutex); | |
8a4e3a85 BP |
555 | static void dp_netdev_free(struct dp_netdev *) |
556 | OVS_REQUIRES(dp_netdev_mutex); | |
8a4e3a85 BP |
557 | static int do_add_port(struct dp_netdev *dp, const char *devname, |
558 | const char *type, odp_port_t port_no) | |
59e6d833 | 559 | OVS_REQUIRES(dp->port_mutex); |
c40b890f | 560 | static void do_del_port(struct dp_netdev *dp, struct dp_netdev_port *) |
59e6d833 | 561 | OVS_REQUIRES(dp->port_mutex); |
614c4892 BP |
562 | static int dpif_netdev_open(const struct dpif_class *, const char *name, |
563 | bool create, struct dpif **); | |
65f13b50 | 564 | static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd, |
1895cc8d | 565 | struct dp_packet_batch *, |
66e4ad8a | 566 | bool may_steal, const struct flow *flow, |
4edb9ae9 | 567 | const struct nlattr *actions, |
324c8374 IM |
568 | size_t actions_len, |
569 | long long now); | |
65f13b50 | 570 | static void dp_netdev_input(struct dp_netdev_pmd_thread *, |
1895cc8d | 571 | struct dp_packet_batch *, odp_port_t port_no); |
a90ed026 | 572 | static void dp_netdev_recirculate(struct dp_netdev_pmd_thread *, |
1895cc8d | 573 | struct dp_packet_batch *); |
41ccaa24 | 574 | |
6b31e073 | 575 | static void dp_netdev_disable_upcall(struct dp_netdev *); |
ae7ad0a1 | 576 | static void dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd); |
65f13b50 | 577 | static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, |
00873463 DDP |
578 | struct dp_netdev *dp, unsigned core_id, |
579 | int numa_id); | |
1c1e46ed | 580 | static void dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd); |
e9985d6a DDP |
581 | static void dp_netdev_set_nonpmd(struct dp_netdev *dp) |
582 | OVS_REQUIRES(dp->port_mutex); | |
583 | ||
e32971b8 | 584 | static void *pmd_thread_main(void *); |
b19befae | 585 | static struct dp_netdev_pmd_thread *dp_netdev_get_pmd(struct dp_netdev *dp, |
bd5131ba | 586 | unsigned core_id); |
1c1e46ed AW |
587 | static struct dp_netdev_pmd_thread * |
588 | dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos); | |
e32971b8 | 589 | static void dp_netdev_destroy_all_pmds(struct dp_netdev *dp, bool non_pmd); |
d0cca6c3 | 590 | static void dp_netdev_pmd_clear_ports(struct dp_netdev_pmd_thread *pmd); |
d0cca6c3 | 591 | static void dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread *pmd, |
e32971b8 DDP |
592 | struct dp_netdev_port *port) |
593 | OVS_REQUIRES(pmd->port_mutex); | |
594 | static void dp_netdev_del_port_tx_from_pmd(struct dp_netdev_pmd_thread *pmd, | |
595 | struct tx_port *tx) | |
596 | OVS_REQUIRES(pmd->port_mutex); | |
d0cca6c3 | 597 | static void dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd, |
947dc567 DDP |
598 | struct dp_netdev_rxq *rxq) |
599 | OVS_REQUIRES(pmd->port_mutex); | |
e32971b8 DDP |
600 | static void dp_netdev_del_rxq_from_pmd(struct dp_netdev_pmd_thread *pmd, |
601 | struct rxq_poll *poll) | |
602 | OVS_REQUIRES(pmd->port_mutex); | |
603 | static void reconfigure_datapath(struct dp_netdev *dp) | |
3eb67853 | 604 | OVS_REQUIRES(dp->port_mutex); |
1c1e46ed AW |
605 | static bool dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd); |
606 | static void dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd); | |
607 | static void dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd); | |
d0cca6c3 DDP |
608 | static void pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd) |
609 | OVS_REQUIRES(pmd->port_mutex); | |
3453b4d6 JS |
610 | static inline void |
611 | dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd); | |
72865317 | 612 | |
324c8374 IM |
613 | static void |
614 | dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd, | |
615 | long long now, bool purge); | |
616 | static int dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread *pmd, | |
617 | struct tx_port *tx, long long now); | |
618 | ||
67ad54cb | 619 | static inline bool emc_entry_alive(struct emc_entry *ce); |
9bbf1c3d DDP |
620 | static void emc_clear_entry(struct emc_entry *ce); |
621 | ||
622 | static void | |
623 | emc_cache_init(struct emc_cache *flow_cache) | |
624 | { | |
625 | int i; | |
626 | ||
67ad54cb | 627 | flow_cache->sweep_idx = 0; |
9bbf1c3d DDP |
628 | for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) { |
629 | flow_cache->entries[i].flow = NULL; | |
0de8783a | 630 | flow_cache->entries[i].key.hash = 0; |
09b0fa9c | 631 | flow_cache->entries[i].key.len = sizeof(struct miniflow); |
5fcff47b | 632 | flowmap_init(&flow_cache->entries[i].key.mf.map); |
9bbf1c3d DDP |
633 | } |
634 | } | |
635 | ||
636 | static void | |
637 | emc_cache_uninit(struct emc_cache *flow_cache) | |
638 | { | |
639 | int i; | |
640 | ||
641 | for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) { | |
642 | emc_clear_entry(&flow_cache->entries[i]); | |
643 | } | |
644 | } | |
645 | ||
67ad54cb AW |
646 | /* Check and clear dead flow references slowly (one entry at each |
647 | * invocation). */ | |
648 | static void | |
649 | emc_cache_slow_sweep(struct emc_cache *flow_cache) | |
650 | { | |
651 | struct emc_entry *entry = &flow_cache->entries[flow_cache->sweep_idx]; | |
652 | ||
653 | if (!emc_entry_alive(entry)) { | |
654 | emc_clear_entry(entry); | |
655 | } | |
656 | flow_cache->sweep_idx = (flow_cache->sweep_idx + 1) & EM_FLOW_HASH_MASK; | |
657 | } | |
658 | ||
c4ea7529 BP |
659 | /* Returns true if 'dpif' is a netdev or dummy dpif, false otherwise. */ |
660 | bool | |
661 | dpif_is_netdev(const struct dpif *dpif) | |
662 | { | |
663 | return dpif->dpif_class->open == dpif_netdev_open; | |
664 | } | |
665 | ||
72865317 BP |
666 | static struct dpif_netdev * |
667 | dpif_netdev_cast(const struct dpif *dpif) | |
668 | { | |
c4ea7529 | 669 | ovs_assert(dpif_is_netdev(dpif)); |
72865317 BP |
670 | return CONTAINER_OF(dpif, struct dpif_netdev, dpif); |
671 | } | |
672 | ||
673 | static struct dp_netdev * | |
674 | get_dp_netdev(const struct dpif *dpif) | |
675 | { | |
676 | return dpif_netdev_cast(dpif)->dp; | |
677 | } | |
6553d06b DDP |
678 | \f |
679 | enum pmd_info_type { | |
ce179f11 IM |
680 | PMD_INFO_SHOW_STATS, /* Show how cpu cycles are spent. */ |
681 | PMD_INFO_CLEAR_STATS, /* Set the cycles count to 0. */ | |
682 | PMD_INFO_SHOW_RXQ /* Show poll-lists of pmd threads. */ | |
6553d06b DDP |
683 | }; |
684 | ||
685 | static void | |
686 | pmd_info_show_stats(struct ds *reply, | |
687 | struct dp_netdev_pmd_thread *pmd, | |
688 | unsigned long long stats[DP_N_STATS], | |
689 | uint64_t cycles[PMD_N_CYCLES]) | |
690 | { | |
691 | unsigned long long total_packets = 0; | |
692 | uint64_t total_cycles = 0; | |
693 | int i; | |
694 | ||
695 | /* These loops subtracts reference values ('*_zero') from the counters. | |
696 | * Since loads and stores are relaxed, it might be possible for a '*_zero' | |
697 | * value to be more recent than the current value we're reading from the | |
698 | * counter. This is not a big problem, since these numbers are not | |
699 | * supposed to be too accurate, but we should at least make sure that | |
700 | * the result is not negative. */ | |
701 | for (i = 0; i < DP_N_STATS; i++) { | |
702 | if (stats[i] > pmd->stats_zero[i]) { | |
703 | stats[i] -= pmd->stats_zero[i]; | |
704 | } else { | |
705 | stats[i] = 0; | |
706 | } | |
707 | ||
708 | if (i != DP_STAT_LOST) { | |
709 | /* Lost packets are already included in DP_STAT_MISS */ | |
710 | total_packets += stats[i]; | |
711 | } | |
712 | } | |
713 | ||
714 | for (i = 0; i < PMD_N_CYCLES; i++) { | |
715 | if (cycles[i] > pmd->cycles_zero[i]) { | |
716 | cycles[i] -= pmd->cycles_zero[i]; | |
717 | } else { | |
718 | cycles[i] = 0; | |
719 | } | |
720 | ||
721 | total_cycles += cycles[i]; | |
722 | } | |
723 | ||
724 | ds_put_cstr(reply, (pmd->core_id == NON_PMD_CORE_ID) | |
725 | ? "main thread" : "pmd thread"); | |
726 | ||
727 | if (pmd->numa_id != OVS_NUMA_UNSPEC) { | |
728 | ds_put_format(reply, " numa_id %d", pmd->numa_id); | |
729 | } | |
d5c199ea | 730 | if (pmd->core_id != OVS_CORE_UNSPEC && pmd->core_id != NON_PMD_CORE_ID) { |
bd5131ba | 731 | ds_put_format(reply, " core_id %u", pmd->core_id); |
6553d06b DDP |
732 | } |
733 | ds_put_cstr(reply, ":\n"); | |
734 | ||
735 | ds_put_format(reply, | |
736 | "\temc hits:%llu\n\tmegaflow hits:%llu\n" | |
3453b4d6 | 737 | "\tavg. subtable lookups per hit:%.2f\n" |
6553d06b DDP |
738 | "\tmiss:%llu\n\tlost:%llu\n", |
739 | stats[DP_STAT_EXACT_HIT], stats[DP_STAT_MASKED_HIT], | |
3453b4d6 JS |
740 | stats[DP_STAT_MASKED_HIT] > 0 |
741 | ? (1.0*stats[DP_STAT_LOOKUP_HIT])/stats[DP_STAT_MASKED_HIT] | |
742 | : 0, | |
6553d06b DDP |
743 | stats[DP_STAT_MISS], stats[DP_STAT_LOST]); |
744 | ||
745 | if (total_cycles == 0) { | |
746 | return; | |
747 | } | |
748 | ||
749 | ds_put_format(reply, | |
750 | "\tpolling cycles:%"PRIu64" (%.02f%%)\n" | |
751 | "\tprocessing cycles:%"PRIu64" (%.02f%%)\n", | |
752 | cycles[PMD_CYCLES_POLLING], | |
753 | cycles[PMD_CYCLES_POLLING] / (double)total_cycles * 100, | |
754 | cycles[PMD_CYCLES_PROCESSING], | |
755 | cycles[PMD_CYCLES_PROCESSING] / (double)total_cycles * 100); | |
756 | ||
757 | if (total_packets == 0) { | |
758 | return; | |
759 | } | |
760 | ||
761 | ds_put_format(reply, | |
762 | "\tavg cycles per packet: %.02f (%"PRIu64"/%llu)\n", | |
763 | total_cycles / (double)total_packets, | |
764 | total_cycles, total_packets); | |
765 | ||
766 | ds_put_format(reply, | |
767 | "\tavg processing cycles per packet: " | |
768 | "%.02f (%"PRIu64"/%llu)\n", | |
769 | cycles[PMD_CYCLES_PROCESSING] / (double)total_packets, | |
770 | cycles[PMD_CYCLES_PROCESSING], total_packets); | |
771 | } | |
772 | ||
773 | static void | |
774 | pmd_info_clear_stats(struct ds *reply OVS_UNUSED, | |
775 | struct dp_netdev_pmd_thread *pmd, | |
776 | unsigned long long stats[DP_N_STATS], | |
777 | uint64_t cycles[PMD_N_CYCLES]) | |
778 | { | |
779 | int i; | |
780 | ||
781 | /* We cannot write 'stats' and 'cycles' (because they're written by other | |
782 | * threads) and we shouldn't change 'stats' (because they're used to count | |
783 | * datapath stats, which must not be cleared here). Instead, we save the | |
784 | * current values and subtract them from the values to be displayed in the | |
785 | * future */ | |
786 | for (i = 0; i < DP_N_STATS; i++) { | |
787 | pmd->stats_zero[i] = stats[i]; | |
788 | } | |
789 | for (i = 0; i < PMD_N_CYCLES; i++) { | |
790 | pmd->cycles_zero[i] = cycles[i]; | |
791 | } | |
792 | } | |
793 | ||
947dc567 DDP |
794 | static int |
795 | compare_poll_list(const void *a_, const void *b_) | |
796 | { | |
797 | const struct rxq_poll *a = a_; | |
798 | const struct rxq_poll *b = b_; | |
799 | ||
800 | const char *namea = netdev_rxq_get_name(a->rxq->rx); | |
801 | const char *nameb = netdev_rxq_get_name(b->rxq->rx); | |
802 | ||
803 | int cmp = strcmp(namea, nameb); | |
804 | if (!cmp) { | |
805 | return netdev_rxq_get_queue_id(a->rxq->rx) | |
806 | - netdev_rxq_get_queue_id(b->rxq->rx); | |
807 | } else { | |
808 | return cmp; | |
809 | } | |
810 | } | |
811 | ||
812 | static void | |
813 | sorted_poll_list(struct dp_netdev_pmd_thread *pmd, struct rxq_poll **list, | |
814 | size_t *n) | |
815 | { | |
816 | struct rxq_poll *ret, *poll; | |
817 | size_t i; | |
818 | ||
819 | *n = hmap_count(&pmd->poll_list); | |
820 | if (!*n) { | |
821 | ret = NULL; | |
822 | } else { | |
823 | ret = xcalloc(*n, sizeof *ret); | |
824 | i = 0; | |
825 | HMAP_FOR_EACH (poll, node, &pmd->poll_list) { | |
826 | ret[i] = *poll; | |
827 | i++; | |
828 | } | |
829 | ovs_assert(i == *n); | |
830 | } | |
831 | ||
832 | qsort(ret, *n, sizeof *ret, compare_poll_list); | |
833 | ||
834 | *list = ret; | |
835 | } | |
836 | ||
ce179f11 IM |
837 | static void |
838 | pmd_info_show_rxq(struct ds *reply, struct dp_netdev_pmd_thread *pmd) | |
839 | { | |
840 | if (pmd->core_id != NON_PMD_CORE_ID) { | |
ce179f11 | 841 | const char *prev_name = NULL; |
947dc567 DDP |
842 | struct rxq_poll *list; |
843 | size_t i, n; | |
ce179f11 | 844 | |
3eb67853 IM |
845 | ds_put_format(reply, |
846 | "pmd thread numa_id %d core_id %u:\n\tisolated : %s\n", | |
847 | pmd->numa_id, pmd->core_id, (pmd->isolated) | |
848 | ? "true" : "false"); | |
ce179f11 | 849 | |
d0cca6c3 | 850 | ovs_mutex_lock(&pmd->port_mutex); |
947dc567 DDP |
851 | sorted_poll_list(pmd, &list, &n); |
852 | for (i = 0; i < n; i++) { | |
853 | const char *name = netdev_rxq_get_name(list[i].rxq->rx); | |
ce179f11 IM |
854 | |
855 | if (!prev_name || strcmp(name, prev_name)) { | |
856 | if (prev_name) { | |
857 | ds_put_cstr(reply, "\n"); | |
858 | } | |
947dc567 | 859 | ds_put_format(reply, "\tport: %s\tqueue-id:", name); |
ce179f11 | 860 | } |
947dc567 DDP |
861 | ds_put_format(reply, " %d", |
862 | netdev_rxq_get_queue_id(list[i].rxq->rx)); | |
ce179f11 IM |
863 | prev_name = name; |
864 | } | |
d0cca6c3 | 865 | ovs_mutex_unlock(&pmd->port_mutex); |
ce179f11 | 866 | ds_put_cstr(reply, "\n"); |
947dc567 | 867 | free(list); |
ce179f11 IM |
868 | } |
869 | } | |
870 | ||
6553d06b DDP |
871 | static void |
872 | dpif_netdev_pmd_info(struct unixctl_conn *conn, int argc, const char *argv[], | |
873 | void *aux) | |
874 | { | |
875 | struct ds reply = DS_EMPTY_INITIALIZER; | |
876 | struct dp_netdev_pmd_thread *pmd; | |
877 | struct dp_netdev *dp = NULL; | |
878 | enum pmd_info_type type = *(enum pmd_info_type *) aux; | |
879 | ||
880 | ovs_mutex_lock(&dp_netdev_mutex); | |
881 | ||
882 | if (argc == 2) { | |
883 | dp = shash_find_data(&dp_netdevs, argv[1]); | |
884 | } else if (shash_count(&dp_netdevs) == 1) { | |
885 | /* There's only one datapath */ | |
886 | dp = shash_first(&dp_netdevs)->data; | |
887 | } | |
888 | ||
889 | if (!dp) { | |
890 | ovs_mutex_unlock(&dp_netdev_mutex); | |
891 | unixctl_command_reply_error(conn, | |
892 | "please specify an existing datapath"); | |
893 | return; | |
894 | } | |
895 | ||
896 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
ce179f11 IM |
897 | if (type == PMD_INFO_SHOW_RXQ) { |
898 | pmd_info_show_rxq(&reply, pmd); | |
899 | } else { | |
900 | unsigned long long stats[DP_N_STATS]; | |
901 | uint64_t cycles[PMD_N_CYCLES]; | |
902 | int i; | |
6553d06b | 903 | |
ce179f11 IM |
904 | /* Read current stats and cycle counters */ |
905 | for (i = 0; i < ARRAY_SIZE(stats); i++) { | |
906 | atomic_read_relaxed(&pmd->stats.n[i], &stats[i]); | |
907 | } | |
908 | for (i = 0; i < ARRAY_SIZE(cycles); i++) { | |
909 | atomic_read_relaxed(&pmd->cycles.n[i], &cycles[i]); | |
910 | } | |
6553d06b | 911 | |
ce179f11 IM |
912 | if (type == PMD_INFO_CLEAR_STATS) { |
913 | pmd_info_clear_stats(&reply, pmd, stats, cycles); | |
914 | } else if (type == PMD_INFO_SHOW_STATS) { | |
915 | pmd_info_show_stats(&reply, pmd, stats, cycles); | |
916 | } | |
6553d06b DDP |
917 | } |
918 | } | |
919 | ||
920 | ovs_mutex_unlock(&dp_netdev_mutex); | |
921 | ||
922 | unixctl_command_reply(conn, ds_cstr(&reply)); | |
923 | ds_destroy(&reply); | |
924 | } | |
925 | \f | |
926 | static int | |
927 | dpif_netdev_init(void) | |
928 | { | |
929 | static enum pmd_info_type show_aux = PMD_INFO_SHOW_STATS, | |
ce179f11 IM |
930 | clear_aux = PMD_INFO_CLEAR_STATS, |
931 | poll_aux = PMD_INFO_SHOW_RXQ; | |
6553d06b DDP |
932 | |
933 | unixctl_command_register("dpif-netdev/pmd-stats-show", "[dp]", | |
934 | 0, 1, dpif_netdev_pmd_info, | |
935 | (void *)&show_aux); | |
936 | unixctl_command_register("dpif-netdev/pmd-stats-clear", "[dp]", | |
937 | 0, 1, dpif_netdev_pmd_info, | |
938 | (void *)&clear_aux); | |
ce179f11 IM |
939 | unixctl_command_register("dpif-netdev/pmd-rxq-show", "[dp]", |
940 | 0, 1, dpif_netdev_pmd_info, | |
941 | (void *)&poll_aux); | |
6553d06b DDP |
942 | return 0; |
943 | } | |
72865317 | 944 | |
2197d7ab | 945 | static int |
2240af25 DDP |
946 | dpif_netdev_enumerate(struct sset *all_dps, |
947 | const struct dpif_class *dpif_class) | |
2197d7ab GL |
948 | { |
949 | struct shash_node *node; | |
950 | ||
97be1538 | 951 | ovs_mutex_lock(&dp_netdev_mutex); |
2197d7ab | 952 | SHASH_FOR_EACH(node, &dp_netdevs) { |
2240af25 DDP |
953 | struct dp_netdev *dp = node->data; |
954 | if (dpif_class != dp->class) { | |
955 | /* 'dp_netdevs' contains both "netdev" and "dummy" dpifs. | |
956 | * If the class doesn't match, skip this dpif. */ | |
957 | continue; | |
958 | } | |
2197d7ab GL |
959 | sset_add(all_dps, node->name); |
960 | } | |
97be1538 | 961 | ovs_mutex_unlock(&dp_netdev_mutex); |
5279f8fd | 962 | |
2197d7ab GL |
963 | return 0; |
964 | } | |
965 | ||
add90f6f EJ |
966 | static bool |
967 | dpif_netdev_class_is_dummy(const struct dpif_class *class) | |
968 | { | |
969 | return class != &dpif_netdev_class; | |
970 | } | |
971 | ||
0aeaabc8 JP |
972 | static const char * |
973 | dpif_netdev_port_open_type(const struct dpif_class *class, const char *type) | |
974 | { | |
975 | return strcmp(type, "internal") ? type | |
e98d0cb3 | 976 | : dpif_netdev_class_is_dummy(class) ? "dummy-internal" |
0aeaabc8 JP |
977 | : "tap"; |
978 | } | |
979 | ||
72865317 BP |
980 | static struct dpif * |
981 | create_dpif_netdev(struct dp_netdev *dp) | |
982 | { | |
462278db | 983 | uint16_t netflow_id = hash_string(dp->name, 0); |
72865317 | 984 | struct dpif_netdev *dpif; |
72865317 | 985 | |
6a8267c5 | 986 | ovs_refcount_ref(&dp->ref_cnt); |
72865317 | 987 | |
72865317 | 988 | dpif = xmalloc(sizeof *dpif); |
614c4892 | 989 | dpif_init(&dpif->dpif, dp->class, dp->name, netflow_id >> 8, netflow_id); |
72865317 | 990 | dpif->dp = dp; |
d33ed218 | 991 | dpif->last_port_seq = seq_read(dp->port_seq); |
72865317 BP |
992 | |
993 | return &dpif->dpif; | |
994 | } | |
995 | ||
4e022ec0 AW |
996 | /* Choose an unused, non-zero port number and return it on success. |
997 | * Return ODPP_NONE on failure. */ | |
998 | static odp_port_t | |
e44768b7 | 999 | choose_port(struct dp_netdev *dp, const char *name) |
59e6d833 | 1000 | OVS_REQUIRES(dp->port_mutex) |
e44768b7 | 1001 | { |
4e022ec0 | 1002 | uint32_t port_no; |
e44768b7 JP |
1003 | |
1004 | if (dp->class != &dpif_netdev_class) { | |
1005 | const char *p; | |
1006 | int start_no = 0; | |
1007 | ||
1008 | /* If the port name begins with "br", start the number search at | |
1009 | * 100 to make writing tests easier. */ | |
1010 | if (!strncmp(name, "br", 2)) { | |
1011 | start_no = 100; | |
1012 | } | |
1013 | ||
1014 | /* If the port name contains a number, try to assign that port number. | |
1015 | * This can make writing unit tests easier because port numbers are | |
1016 | * predictable. */ | |
1017 | for (p = name; *p != '\0'; p++) { | |
1018 | if (isdigit((unsigned char) *p)) { | |
1019 | port_no = start_no + strtol(p, NULL, 10); | |
ff073a71 BP |
1020 | if (port_no > 0 && port_no != odp_to_u32(ODPP_NONE) |
1021 | && !dp_netdev_lookup_port(dp, u32_to_odp(port_no))) { | |
4e022ec0 | 1022 | return u32_to_odp(port_no); |
e44768b7 JP |
1023 | } |
1024 | break; | |
1025 | } | |
1026 | } | |
1027 | } | |
1028 | ||
ff073a71 BP |
1029 | for (port_no = 1; port_no <= UINT16_MAX; port_no++) { |
1030 | if (!dp_netdev_lookup_port(dp, u32_to_odp(port_no))) { | |
4e022ec0 | 1031 | return u32_to_odp(port_no); |
e44768b7 JP |
1032 | } |
1033 | } | |
1034 | ||
4e022ec0 | 1035 | return ODPP_NONE; |
e44768b7 JP |
1036 | } |
1037 | ||
72865317 | 1038 | static int |
614c4892 BP |
1039 | create_dp_netdev(const char *name, const struct dpif_class *class, |
1040 | struct dp_netdev **dpp) | |
8a4e3a85 | 1041 | OVS_REQUIRES(dp_netdev_mutex) |
72865317 BP |
1042 | { |
1043 | struct dp_netdev *dp; | |
1044 | int error; | |
72865317 | 1045 | |
462278db | 1046 | dp = xzalloc(sizeof *dp); |
8a4e3a85 BP |
1047 | shash_add(&dp_netdevs, name, dp); |
1048 | ||
1049 | *CONST_CAST(const struct dpif_class **, &dp->class) = class; | |
1050 | *CONST_CAST(const char **, &dp->name) = xstrdup(name); | |
6a8267c5 | 1051 | ovs_refcount_init(&dp->ref_cnt); |
1a65ba85 | 1052 | atomic_flag_clear(&dp->destroyed); |
8a4e3a85 | 1053 | |
59e6d833 | 1054 | ovs_mutex_init(&dp->port_mutex); |
e9985d6a | 1055 | hmap_init(&dp->ports); |
d33ed218 | 1056 | dp->port_seq = seq_create(); |
6b31e073 RW |
1057 | fat_rwlock_init(&dp->upcall_rwlock); |
1058 | ||
a6a426d6 IM |
1059 | dp->reconfigure_seq = seq_create(); |
1060 | dp->last_reconfigure_seq = seq_read(dp->reconfigure_seq); | |
1061 | ||
6b31e073 RW |
1062 | /* Disable upcalls by default. */ |
1063 | dp_netdev_disable_upcall(dp); | |
623540e4 | 1064 | dp->upcall_aux = NULL; |
6b31e073 | 1065 | dp->upcall_cb = NULL; |
e44768b7 | 1066 | |
5cf3edb3 DDP |
1067 | conntrack_init(&dp->conntrack); |
1068 | ||
65f13b50 AW |
1069 | cmap_init(&dp->poll_threads); |
1070 | ovs_mutex_init_recursive(&dp->non_pmd_mutex); | |
1071 | ovsthread_key_create(&dp->per_pmd_key, NULL); | |
1072 | ||
e9985d6a | 1073 | ovs_mutex_lock(&dp->port_mutex); |
f2eee189 | 1074 | dp_netdev_set_nonpmd(dp); |
65f13b50 | 1075 | |
a3e8437a TLSC |
1076 | error = do_add_port(dp, name, dpif_netdev_port_open_type(dp->class, |
1077 | "internal"), | |
1078 | ODPP_LOCAL); | |
59e6d833 | 1079 | ovs_mutex_unlock(&dp->port_mutex); |
72865317 BP |
1080 | if (error) { |
1081 | dp_netdev_free(dp); | |
462278db | 1082 | return error; |
72865317 BP |
1083 | } |
1084 | ||
a36de779 | 1085 | dp->last_tnl_conf_seq = seq_read(tnl_conf_seq); |
462278db | 1086 | *dpp = dp; |
72865317 BP |
1087 | return 0; |
1088 | } | |
1089 | ||
a6a426d6 IM |
1090 | static void |
1091 | dp_netdev_request_reconfigure(struct dp_netdev *dp) | |
1092 | { | |
1093 | seq_change(dp->reconfigure_seq); | |
1094 | } | |
1095 | ||
1096 | static bool | |
1097 | dp_netdev_is_reconf_required(struct dp_netdev *dp) | |
1098 | { | |
1099 | return seq_read(dp->reconfigure_seq) != dp->last_reconfigure_seq; | |
1100 | } | |
1101 | ||
72865317 | 1102 | static int |
614c4892 | 1103 | dpif_netdev_open(const struct dpif_class *class, const char *name, |
4a387741 | 1104 | bool create, struct dpif **dpifp) |
72865317 | 1105 | { |
462278db | 1106 | struct dp_netdev *dp; |
5279f8fd | 1107 | int error; |
462278db | 1108 | |
97be1538 | 1109 | ovs_mutex_lock(&dp_netdev_mutex); |
462278db BP |
1110 | dp = shash_find_data(&dp_netdevs, name); |
1111 | if (!dp) { | |
5279f8fd | 1112 | error = create ? create_dp_netdev(name, class, &dp) : ENODEV; |
72865317 | 1113 | } else { |
5279f8fd BP |
1114 | error = (dp->class != class ? EINVAL |
1115 | : create ? EEXIST | |
1116 | : 0); | |
1117 | } | |
1118 | if (!error) { | |
1119 | *dpifp = create_dpif_netdev(dp); | |
6b31e073 | 1120 | dp->dpif = *dpifp; |
72865317 | 1121 | } |
97be1538 | 1122 | ovs_mutex_unlock(&dp_netdev_mutex); |
462278db | 1123 | |
5279f8fd | 1124 | return error; |
72865317 BP |
1125 | } |
1126 | ||
88ace79b DDP |
1127 | static void |
1128 | dp_netdev_destroy_upcall_lock(struct dp_netdev *dp) | |
1129 | OVS_NO_THREAD_SAFETY_ANALYSIS | |
1130 | { | |
1131 | /* Check that upcalls are disabled, i.e. that the rwlock is taken */ | |
1132 | ovs_assert(fat_rwlock_tryrdlock(&dp->upcall_rwlock)); | |
1133 | ||
1134 | /* Before freeing a lock we should release it */ | |
1135 | fat_rwlock_unlock(&dp->upcall_rwlock); | |
1136 | fat_rwlock_destroy(&dp->upcall_rwlock); | |
1137 | } | |
1138 | ||
8a4e3a85 BP |
1139 | /* Requires dp_netdev_mutex so that we can't get a new reference to 'dp' |
1140 | * through the 'dp_netdevs' shash while freeing 'dp'. */ | |
1ba530f4 BP |
1141 | static void |
1142 | dp_netdev_free(struct dp_netdev *dp) | |
8a4e3a85 | 1143 | OVS_REQUIRES(dp_netdev_mutex) |
1ba530f4 | 1144 | { |
e9985d6a | 1145 | struct dp_netdev_port *port, *next; |
4ad28026 | 1146 | |
8a4e3a85 BP |
1147 | shash_find_and_delete(&dp_netdevs, dp->name); |
1148 | ||
59e6d833 | 1149 | ovs_mutex_lock(&dp->port_mutex); |
e9985d6a | 1150 | HMAP_FOR_EACH_SAFE (port, next, node, &dp->ports) { |
c40b890f | 1151 | do_del_port(dp, port); |
1ba530f4 | 1152 | } |
59e6d833 | 1153 | ovs_mutex_unlock(&dp->port_mutex); |
e32971b8 | 1154 | dp_netdev_destroy_all_pmds(dp, true); |
d916785c | 1155 | cmap_destroy(&dp->poll_threads); |
51852a57 | 1156 | |
b9584f21 DDP |
1157 | ovs_mutex_destroy(&dp->non_pmd_mutex); |
1158 | ovsthread_key_delete(dp->per_pmd_key); | |
1159 | ||
1160 | conntrack_destroy(&dp->conntrack); | |
1161 | ||
1162 | ||
a6a426d6 IM |
1163 | seq_destroy(dp->reconfigure_seq); |
1164 | ||
d33ed218 | 1165 | seq_destroy(dp->port_seq); |
e9985d6a | 1166 | hmap_destroy(&dp->ports); |
3186ea46 | 1167 | ovs_mutex_destroy(&dp->port_mutex); |
88ace79b DDP |
1168 | |
1169 | /* Upcalls must be disabled at this point */ | |
1170 | dp_netdev_destroy_upcall_lock(dp); | |
9bbf1c3d | 1171 | |
f2eee189 | 1172 | free(dp->pmd_cmask); |
8a4e3a85 | 1173 | free(CONST_CAST(char *, dp->name)); |
72865317 BP |
1174 | free(dp); |
1175 | } | |
1176 | ||
8a4e3a85 BP |
1177 | static void |
1178 | dp_netdev_unref(struct dp_netdev *dp) | |
1179 | { | |
1180 | if (dp) { | |
1181 | /* Take dp_netdev_mutex so that, if dp->ref_cnt falls to zero, we can't | |
1182 | * get a new reference to 'dp' through the 'dp_netdevs' shash. */ | |
1183 | ovs_mutex_lock(&dp_netdev_mutex); | |
24f83812 | 1184 | if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) { |
8a4e3a85 BP |
1185 | dp_netdev_free(dp); |
1186 | } | |
1187 | ovs_mutex_unlock(&dp_netdev_mutex); | |
1188 | } | |
1189 | } | |
1190 | ||
72865317 BP |
1191 | static void |
1192 | dpif_netdev_close(struct dpif *dpif) | |
1193 | { | |
1194 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
5279f8fd | 1195 | |
8a4e3a85 | 1196 | dp_netdev_unref(dp); |
72865317 BP |
1197 | free(dpif); |
1198 | } | |
1199 | ||
1200 | static int | |
7dab847a | 1201 | dpif_netdev_destroy(struct dpif *dpif) |
72865317 BP |
1202 | { |
1203 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
5279f8fd | 1204 | |
6a8267c5 | 1205 | if (!atomic_flag_test_and_set(&dp->destroyed)) { |
24f83812 | 1206 | if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) { |
6a8267c5 BP |
1207 | /* Can't happen: 'dpif' still owns a reference to 'dp'. */ |
1208 | OVS_NOT_REACHED(); | |
1209 | } | |
1210 | } | |
5279f8fd | 1211 | |
72865317 BP |
1212 | return 0; |
1213 | } | |
1214 | ||
eb94da30 DDP |
1215 | /* Add 'n' to the atomic variable 'var' non-atomically and using relaxed |
1216 | * load/store semantics. While the increment is not atomic, the load and | |
1217 | * store operations are, making it impossible to read inconsistent values. | |
1218 | * | |
1219 | * This is used to update thread local stats counters. */ | |
1220 | static void | |
1221 | non_atomic_ullong_add(atomic_ullong *var, unsigned long long n) | |
1222 | { | |
1223 | unsigned long long tmp; | |
1224 | ||
1225 | atomic_read_relaxed(var, &tmp); | |
1226 | tmp += n; | |
1227 | atomic_store_relaxed(var, tmp); | |
1228 | } | |
1229 | ||
72865317 | 1230 | static int |
a8d9304d | 1231 | dpif_netdev_get_stats(const struct dpif *dpif, struct dpif_dp_stats *stats) |
72865317 BP |
1232 | { |
1233 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
1c1e46ed | 1234 | struct dp_netdev_pmd_thread *pmd; |
8a4e3a85 | 1235 | |
1c1e46ed AW |
1236 | stats->n_flows = stats->n_hit = stats->n_missed = stats->n_lost = 0; |
1237 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
eb94da30 | 1238 | unsigned long long n; |
1c1e46ed | 1239 | stats->n_flows += cmap_count(&pmd->flow_table); |
eb94da30 | 1240 | |
abcf3ef4 DDP |
1241 | atomic_read_relaxed(&pmd->stats.n[DP_STAT_MASKED_HIT], &n); |
1242 | stats->n_hit += n; | |
1243 | atomic_read_relaxed(&pmd->stats.n[DP_STAT_EXACT_HIT], &n); | |
eb94da30 DDP |
1244 | stats->n_hit += n; |
1245 | atomic_read_relaxed(&pmd->stats.n[DP_STAT_MISS], &n); | |
1246 | stats->n_missed += n; | |
1247 | atomic_read_relaxed(&pmd->stats.n[DP_STAT_LOST], &n); | |
1248 | stats->n_lost += n; | |
51852a57 | 1249 | } |
1ce3fa06 | 1250 | stats->n_masks = UINT32_MAX; |
847108dc | 1251 | stats->n_mask_hit = UINT64_MAX; |
5279f8fd | 1252 | |
72865317 BP |
1253 | return 0; |
1254 | } | |
1255 | ||
e4cfed38 | 1256 | static void |
65f13b50 | 1257 | dp_netdev_reload_pmd__(struct dp_netdev_pmd_thread *pmd) |
e4cfed38 | 1258 | { |
accf8626 | 1259 | if (pmd->core_id == NON_PMD_CORE_ID) { |
d0cca6c3 DDP |
1260 | ovs_mutex_lock(&pmd->dp->non_pmd_mutex); |
1261 | ovs_mutex_lock(&pmd->port_mutex); | |
1262 | pmd_load_cached_ports(pmd); | |
1263 | ovs_mutex_unlock(&pmd->port_mutex); | |
1264 | ovs_mutex_unlock(&pmd->dp->non_pmd_mutex); | |
accf8626 AW |
1265 | return; |
1266 | } | |
1267 | ||
1268 | ovs_mutex_lock(&pmd->cond_mutex); | |
2788a1b1 | 1269 | seq_change(pmd->reload_seq); |
14e3e12a | 1270 | atomic_store_relaxed(&pmd->reload, true); |
accf8626 AW |
1271 | ovs_mutex_cond_wait(&pmd->cond, &pmd->cond_mutex); |
1272 | ovs_mutex_unlock(&pmd->cond_mutex); | |
65f13b50 | 1273 | } |
e4cfed38 | 1274 | |
59e6d833 BP |
1275 | static uint32_t |
1276 | hash_port_no(odp_port_t port_no) | |
1277 | { | |
1278 | return hash_int(odp_to_u32(port_no), 0); | |
1279 | } | |
1280 | ||
72865317 | 1281 | static int |
a3e8437a | 1282 | port_create(const char *devname, const char *type, |
b8d29252 | 1283 | odp_port_t port_no, struct dp_netdev_port **portp) |
72865317 | 1284 | { |
4b609110 | 1285 | struct netdev_saved_flags *sf; |
72865317 | 1286 | struct dp_netdev_port *port; |
2499a8ce | 1287 | enum netdev_flags flags; |
b8d29252 | 1288 | struct netdev *netdev; |
e32971b8 | 1289 | int error; |
72865317 | 1290 | |
b8d29252 | 1291 | *portp = NULL; |
72865317 BP |
1292 | |
1293 | /* Open and validate network device. */ | |
a3e8437a | 1294 | error = netdev_open(devname, type, &netdev); |
72865317 | 1295 | if (error) { |
b8d29252 | 1296 | return error; |
72865317 | 1297 | } |
72865317 BP |
1298 | /* XXX reject non-Ethernet devices */ |
1299 | ||
2499a8ce AC |
1300 | netdev_get_flags(netdev, &flags); |
1301 | if (flags & NETDEV_LOOPBACK) { | |
1302 | VLOG_ERR("%s: cannot add a loopback device", devname); | |
d17f4f08 | 1303 | error = EINVAL; |
b8d29252 | 1304 | goto out; |
2499a8ce AC |
1305 | } |
1306 | ||
e32971b8 DDP |
1307 | error = netdev_turn_flags_on(netdev, NETDEV_PROMISC, &sf); |
1308 | if (error) { | |
1309 | VLOG_ERR("%s: cannot set promisc flag", devname); | |
1310 | goto out; | |
324c8374 IM |
1311 | } |
1312 | ||
e4cfed38 | 1313 | port = xzalloc(sizeof *port); |
35303d71 | 1314 | port->port_no = port_no; |
e4cfed38 PS |
1315 | port->netdev = netdev; |
1316 | port->type = xstrdup(type); | |
4b609110 | 1317 | port->sf = sf; |
e32971b8 DDP |
1318 | port->need_reconfigure = true; |
1319 | ovs_mutex_init(&port->txq_used_mutex); | |
e4cfed38 | 1320 | |
b8d29252 | 1321 | *portp = port; |
72865317 BP |
1322 | |
1323 | return 0; | |
d17f4f08 | 1324 | |
d17f4f08 | 1325 | out: |
b8d29252 | 1326 | netdev_close(netdev); |
d17f4f08 | 1327 | return error; |
72865317 BP |
1328 | } |
1329 | ||
b8d29252 DDP |
1330 | static int |
1331 | do_add_port(struct dp_netdev *dp, const char *devname, const char *type, | |
1332 | odp_port_t port_no) | |
1333 | OVS_REQUIRES(dp->port_mutex) | |
1334 | { | |
1335 | struct dp_netdev_port *port; | |
1336 | int error; | |
1337 | ||
1338 | /* Reject devices already in 'dp'. */ | |
1339 | if (!get_port_by_name(dp, devname, &port)) { | |
1340 | return EEXIST; | |
1341 | } | |
1342 | ||
a3e8437a | 1343 | error = port_create(devname, type, port_no, &port); |
b8d29252 DDP |
1344 | if (error) { |
1345 | return error; | |
1346 | } | |
1347 | ||
e9985d6a | 1348 | hmap_insert(&dp->ports, &port->node, hash_port_no(port_no)); |
b8d29252 DDP |
1349 | seq_change(dp->port_seq); |
1350 | ||
e32971b8 DDP |
1351 | reconfigure_datapath(dp); |
1352 | ||
b8d29252 DDP |
1353 | return 0; |
1354 | } | |
1355 | ||
247527db BP |
1356 | static int |
1357 | dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev, | |
4e022ec0 | 1358 | odp_port_t *port_nop) |
247527db BP |
1359 | { |
1360 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
3aa30359 BP |
1361 | char namebuf[NETDEV_VPORT_NAME_BUFSIZE]; |
1362 | const char *dpif_port; | |
4e022ec0 | 1363 | odp_port_t port_no; |
5279f8fd | 1364 | int error; |
247527db | 1365 | |
59e6d833 | 1366 | ovs_mutex_lock(&dp->port_mutex); |
3aa30359 | 1367 | dpif_port = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf); |
4e022ec0 | 1368 | if (*port_nop != ODPP_NONE) { |
ff073a71 BP |
1369 | port_no = *port_nop; |
1370 | error = dp_netdev_lookup_port(dp, *port_nop) ? EBUSY : 0; | |
232dfa4a | 1371 | } else { |
3aa30359 | 1372 | port_no = choose_port(dp, dpif_port); |
5279f8fd | 1373 | error = port_no == ODPP_NONE ? EFBIG : 0; |
232dfa4a | 1374 | } |
5279f8fd | 1375 | if (!error) { |
247527db | 1376 | *port_nop = port_no; |
5279f8fd | 1377 | error = do_add_port(dp, dpif_port, netdev_get_type(netdev), port_no); |
247527db | 1378 | } |
59e6d833 | 1379 | ovs_mutex_unlock(&dp->port_mutex); |
5279f8fd BP |
1380 | |
1381 | return error; | |
72865317 BP |
1382 | } |
1383 | ||
1384 | static int | |
4e022ec0 | 1385 | dpif_netdev_port_del(struct dpif *dpif, odp_port_t port_no) |
72865317 BP |
1386 | { |
1387 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
5279f8fd BP |
1388 | int error; |
1389 | ||
59e6d833 | 1390 | ovs_mutex_lock(&dp->port_mutex); |
c40b890f BP |
1391 | if (port_no == ODPP_LOCAL) { |
1392 | error = EINVAL; | |
1393 | } else { | |
1394 | struct dp_netdev_port *port; | |
1395 | ||
1396 | error = get_port_by_number(dp, port_no, &port); | |
1397 | if (!error) { | |
1398 | do_del_port(dp, port); | |
1399 | } | |
1400 | } | |
59e6d833 | 1401 | ovs_mutex_unlock(&dp->port_mutex); |
5279f8fd BP |
1402 | |
1403 | return error; | |
72865317 BP |
1404 | } |
1405 | ||
1406 | static bool | |
4e022ec0 | 1407 | is_valid_port_number(odp_port_t port_no) |
72865317 | 1408 | { |
ff073a71 BP |
1409 | return port_no != ODPP_NONE; |
1410 | } | |
1411 | ||
1412 | static struct dp_netdev_port * | |
1413 | dp_netdev_lookup_port(const struct dp_netdev *dp, odp_port_t port_no) | |
e9985d6a | 1414 | OVS_REQUIRES(dp->port_mutex) |
ff073a71 BP |
1415 | { |
1416 | struct dp_netdev_port *port; | |
1417 | ||
e9985d6a | 1418 | HMAP_FOR_EACH_WITH_HASH (port, node, hash_port_no(port_no), &dp->ports) { |
35303d71 | 1419 | if (port->port_no == port_no) { |
ff073a71 BP |
1420 | return port; |
1421 | } | |
1422 | } | |
1423 | return NULL; | |
72865317 BP |
1424 | } |
1425 | ||
1426 | static int | |
1427 | get_port_by_number(struct dp_netdev *dp, | |
4e022ec0 | 1428 | odp_port_t port_no, struct dp_netdev_port **portp) |
e9985d6a | 1429 | OVS_REQUIRES(dp->port_mutex) |
72865317 BP |
1430 | { |
1431 | if (!is_valid_port_number(port_no)) { | |
1432 | *portp = NULL; | |
1433 | return EINVAL; | |
1434 | } else { | |
ff073a71 | 1435 | *portp = dp_netdev_lookup_port(dp, port_no); |
0f6a066f | 1436 | return *portp ? 0 : ENODEV; |
72865317 BP |
1437 | } |
1438 | } | |
1439 | ||
b284085e | 1440 | static void |
62453dad | 1441 | port_destroy(struct dp_netdev_port *port) |
b284085e | 1442 | { |
62453dad DDP |
1443 | if (!port) { |
1444 | return; | |
b284085e | 1445 | } |
b284085e | 1446 | |
62453dad DDP |
1447 | netdev_close(port->netdev); |
1448 | netdev_restore_flags(port->sf); | |
accf8626 | 1449 | |
62453dad | 1450 | for (unsigned i = 0; i < port->n_rxq; i++) { |
947dc567 | 1451 | netdev_rxq_close(port->rxqs[i].rx); |
b284085e | 1452 | } |
324c8374 | 1453 | ovs_mutex_destroy(&port->txq_used_mutex); |
3eb67853 | 1454 | free(port->rxq_affinity_list); |
324c8374 | 1455 | free(port->txq_used); |
3eb67853 | 1456 | free(port->rxqs); |
62453dad DDP |
1457 | free(port->type); |
1458 | free(port); | |
b284085e PS |
1459 | } |
1460 | ||
72865317 BP |
1461 | static int |
1462 | get_port_by_name(struct dp_netdev *dp, | |
1463 | const char *devname, struct dp_netdev_port **portp) | |
59e6d833 | 1464 | OVS_REQUIRES(dp->port_mutex) |
72865317 BP |
1465 | { |
1466 | struct dp_netdev_port *port; | |
1467 | ||
e9985d6a | 1468 | HMAP_FOR_EACH (port, node, &dp->ports) { |
3efb6063 | 1469 | if (!strcmp(netdev_get_name(port->netdev), devname)) { |
72865317 BP |
1470 | *portp = port; |
1471 | return 0; | |
1472 | } | |
1473 | } | |
0f6a066f DDP |
1474 | |
1475 | /* Callers of dpif_netdev_port_query_by_name() expect ENODEV for a non | |
1476 | * existing port. */ | |
1477 | return ENODEV; | |
72865317 BP |
1478 | } |
1479 | ||
b9584f21 | 1480 | /* Returns 'true' if there is a port with pmd netdev. */ |
65f13b50 | 1481 | static bool |
b9584f21 | 1482 | has_pmd_port(struct dp_netdev *dp) |
e9985d6a | 1483 | OVS_REQUIRES(dp->port_mutex) |
65f13b50 AW |
1484 | { |
1485 | struct dp_netdev_port *port; | |
1486 | ||
e9985d6a | 1487 | HMAP_FOR_EACH (port, node, &dp->ports) { |
5dd57e80 | 1488 | if (netdev_is_pmd(port->netdev)) { |
b9584f21 | 1489 | return true; |
65f13b50 AW |
1490 | } |
1491 | } | |
1492 | ||
1493 | return false; | |
1494 | } | |
1495 | ||
c40b890f BP |
1496 | static void |
1497 | do_del_port(struct dp_netdev *dp, struct dp_netdev_port *port) | |
59e6d833 | 1498 | OVS_REQUIRES(dp->port_mutex) |
72865317 | 1499 | { |
e9985d6a | 1500 | hmap_remove(&dp->ports, &port->node); |
d33ed218 | 1501 | seq_change(dp->port_seq); |
d0cca6c3 | 1502 | |
e32971b8 | 1503 | reconfigure_datapath(dp); |
72865317 | 1504 | |
62453dad | 1505 | port_destroy(port); |
72865317 BP |
1506 | } |
1507 | ||
1508 | static void | |
4c738a8d BP |
1509 | answer_port_query(const struct dp_netdev_port *port, |
1510 | struct dpif_port *dpif_port) | |
72865317 | 1511 | { |
3efb6063 | 1512 | dpif_port->name = xstrdup(netdev_get_name(port->netdev)); |
0cbfe35d | 1513 | dpif_port->type = xstrdup(port->type); |
35303d71 | 1514 | dpif_port->port_no = port->port_no; |
72865317 BP |
1515 | } |
1516 | ||
1517 | static int | |
4e022ec0 | 1518 | dpif_netdev_port_query_by_number(const struct dpif *dpif, odp_port_t port_no, |
4c738a8d | 1519 | struct dpif_port *dpif_port) |
72865317 BP |
1520 | { |
1521 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
1522 | struct dp_netdev_port *port; | |
1523 | int error; | |
1524 | ||
e9985d6a | 1525 | ovs_mutex_lock(&dp->port_mutex); |
72865317 | 1526 | error = get_port_by_number(dp, port_no, &port); |
4afba28d | 1527 | if (!error && dpif_port) { |
4c738a8d | 1528 | answer_port_query(port, dpif_port); |
72865317 | 1529 | } |
e9985d6a | 1530 | ovs_mutex_unlock(&dp->port_mutex); |
5279f8fd | 1531 | |
72865317 BP |
1532 | return error; |
1533 | } | |
1534 | ||
1535 | static int | |
1536 | dpif_netdev_port_query_by_name(const struct dpif *dpif, const char *devname, | |
4c738a8d | 1537 | struct dpif_port *dpif_port) |
72865317 BP |
1538 | { |
1539 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
1540 | struct dp_netdev_port *port; | |
1541 | int error; | |
1542 | ||
59e6d833 | 1543 | ovs_mutex_lock(&dp->port_mutex); |
72865317 | 1544 | error = get_port_by_name(dp, devname, &port); |
4afba28d | 1545 | if (!error && dpif_port) { |
4c738a8d | 1546 | answer_port_query(port, dpif_port); |
72865317 | 1547 | } |
59e6d833 | 1548 | ovs_mutex_unlock(&dp->port_mutex); |
5279f8fd | 1549 | |
72865317 BP |
1550 | return error; |
1551 | } | |
1552 | ||
61e7deb1 BP |
1553 | static void |
1554 | dp_netdev_flow_free(struct dp_netdev_flow *flow) | |
1555 | { | |
61e7deb1 | 1556 | dp_netdev_actions_free(dp_netdev_flow_get_actions(flow)); |
61e7deb1 BP |
1557 | free(flow); |
1558 | } | |
1559 | ||
ed79f89a DDP |
1560 | static void dp_netdev_flow_unref(struct dp_netdev_flow *flow) |
1561 | { | |
1562 | if (ovs_refcount_unref_relaxed(&flow->ref_cnt) == 1) { | |
1563 | ovsrcu_postpone(dp_netdev_flow_free, flow); | |
1564 | } | |
1565 | } | |
1566 | ||
70e5ed6f JS |
1567 | static uint32_t |
1568 | dp_netdev_flow_hash(const ovs_u128 *ufid) | |
1569 | { | |
1570 | return ufid->u32[0]; | |
1571 | } | |
1572 | ||
3453b4d6 JS |
1573 | static inline struct dpcls * |
1574 | dp_netdev_pmd_lookup_dpcls(struct dp_netdev_pmd_thread *pmd, | |
1575 | odp_port_t in_port) | |
1576 | { | |
1577 | struct dpcls *cls; | |
1578 | uint32_t hash = hash_port_no(in_port); | |
1579 | CMAP_FOR_EACH_WITH_HASH (cls, node, hash, &pmd->classifiers) { | |
1580 | if (cls->in_port == in_port) { | |
1581 | /* Port classifier exists already */ | |
1582 | return cls; | |
1583 | } | |
1584 | } | |
1585 | return NULL; | |
1586 | } | |
1587 | ||
1588 | static inline struct dpcls * | |
1589 | dp_netdev_pmd_find_dpcls(struct dp_netdev_pmd_thread *pmd, | |
1590 | odp_port_t in_port) | |
1591 | OVS_REQUIRES(pmd->flow_mutex) | |
1592 | { | |
1593 | struct dpcls *cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port); | |
1594 | uint32_t hash = hash_port_no(in_port); | |
1595 | ||
1596 | if (!cls) { | |
1597 | /* Create new classifier for in_port */ | |
1598 | cls = xmalloc(sizeof(*cls)); | |
1599 | dpcls_init(cls); | |
1600 | cls->in_port = in_port; | |
1601 | cmap_insert(&pmd->classifiers, &cls->node, hash); | |
1602 | VLOG_DBG("Creating dpcls %p for in_port %d", cls, in_port); | |
1603 | } | |
1604 | return cls; | |
1605 | } | |
1606 | ||
72865317 | 1607 | static void |
1c1e46ed AW |
1608 | dp_netdev_pmd_remove_flow(struct dp_netdev_pmd_thread *pmd, |
1609 | struct dp_netdev_flow *flow) | |
1610 | OVS_REQUIRES(pmd->flow_mutex) | |
72865317 | 1611 | { |
9f361d6b | 1612 | struct cmap_node *node = CONST_CAST(struct cmap_node *, &flow->node); |
3453b4d6 JS |
1613 | struct dpcls *cls; |
1614 | odp_port_t in_port = flow->flow.in_port.odp_port; | |
2c0ea78f | 1615 | |
3453b4d6 JS |
1616 | cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port); |
1617 | ovs_assert(cls != NULL); | |
1618 | dpcls_remove(cls, &flow->cr); | |
1c1e46ed | 1619 | cmap_remove(&pmd->flow_table, node, dp_netdev_flow_hash(&flow->ufid)); |
9bbf1c3d | 1620 | flow->dead = true; |
ed79f89a DDP |
1621 | |
1622 | dp_netdev_flow_unref(flow); | |
72865317 BP |
1623 | } |
1624 | ||
1625 | static void | |
1c1e46ed | 1626 | dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd) |
72865317 | 1627 | { |
78c8df12 | 1628 | struct dp_netdev_flow *netdev_flow; |
72865317 | 1629 | |
1c1e46ed AW |
1630 | ovs_mutex_lock(&pmd->flow_mutex); |
1631 | CMAP_FOR_EACH (netdev_flow, node, &pmd->flow_table) { | |
1632 | dp_netdev_pmd_remove_flow(pmd, netdev_flow); | |
72865317 | 1633 | } |
1c1e46ed | 1634 | ovs_mutex_unlock(&pmd->flow_mutex); |
72865317 BP |
1635 | } |
1636 | ||
1637 | static int | |
1638 | dpif_netdev_flow_flush(struct dpif *dpif) | |
1639 | { | |
1640 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
1c1e46ed AW |
1641 | struct dp_netdev_pmd_thread *pmd; |
1642 | ||
1643 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
1644 | dp_netdev_pmd_flow_flush(pmd); | |
1645 | } | |
5279f8fd | 1646 | |
72865317 BP |
1647 | return 0; |
1648 | } | |
1649 | ||
b0ec0f27 | 1650 | struct dp_netdev_port_state { |
e9985d6a | 1651 | struct hmap_position position; |
4c738a8d | 1652 | char *name; |
b0ec0f27 BP |
1653 | }; |
1654 | ||
1655 | static int | |
1656 | dpif_netdev_port_dump_start(const struct dpif *dpif OVS_UNUSED, void **statep) | |
1657 | { | |
1658 | *statep = xzalloc(sizeof(struct dp_netdev_port_state)); | |
1659 | return 0; | |
1660 | } | |
1661 | ||
72865317 | 1662 | static int |
b0ec0f27 | 1663 | dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_, |
4c738a8d | 1664 | struct dpif_port *dpif_port) |
72865317 | 1665 | { |
b0ec0f27 | 1666 | struct dp_netdev_port_state *state = state_; |
72865317 | 1667 | struct dp_netdev *dp = get_dp_netdev(dpif); |
e9985d6a | 1668 | struct hmap_node *node; |
ff073a71 | 1669 | int retval; |
72865317 | 1670 | |
e9985d6a DDP |
1671 | ovs_mutex_lock(&dp->port_mutex); |
1672 | node = hmap_at_position(&dp->ports, &state->position); | |
ff073a71 BP |
1673 | if (node) { |
1674 | struct dp_netdev_port *port; | |
5279f8fd | 1675 | |
ff073a71 BP |
1676 | port = CONTAINER_OF(node, struct dp_netdev_port, node); |
1677 | ||
1678 | free(state->name); | |
1679 | state->name = xstrdup(netdev_get_name(port->netdev)); | |
1680 | dpif_port->name = state->name; | |
1681 | dpif_port->type = port->type; | |
35303d71 | 1682 | dpif_port->port_no = port->port_no; |
ff073a71 BP |
1683 | |
1684 | retval = 0; | |
1685 | } else { | |
1686 | retval = EOF; | |
72865317 | 1687 | } |
e9985d6a | 1688 | ovs_mutex_unlock(&dp->port_mutex); |
5279f8fd | 1689 | |
ff073a71 | 1690 | return retval; |
b0ec0f27 BP |
1691 | } |
1692 | ||
1693 | static int | |
4c738a8d | 1694 | dpif_netdev_port_dump_done(const struct dpif *dpif OVS_UNUSED, void *state_) |
b0ec0f27 | 1695 | { |
4c738a8d BP |
1696 | struct dp_netdev_port_state *state = state_; |
1697 | free(state->name); | |
b0ec0f27 BP |
1698 | free(state); |
1699 | return 0; | |
72865317 BP |
1700 | } |
1701 | ||
1702 | static int | |
67a4917b | 1703 | dpif_netdev_port_poll(const struct dpif *dpif_, char **devnamep OVS_UNUSED) |
72865317 BP |
1704 | { |
1705 | struct dpif_netdev *dpif = dpif_netdev_cast(dpif_); | |
d33ed218 | 1706 | uint64_t new_port_seq; |
5279f8fd BP |
1707 | int error; |
1708 | ||
d33ed218 BP |
1709 | new_port_seq = seq_read(dpif->dp->port_seq); |
1710 | if (dpif->last_port_seq != new_port_seq) { | |
1711 | dpif->last_port_seq = new_port_seq; | |
5279f8fd | 1712 | error = ENOBUFS; |
72865317 | 1713 | } else { |
5279f8fd | 1714 | error = EAGAIN; |
72865317 | 1715 | } |
5279f8fd BP |
1716 | |
1717 | return error; | |
72865317 BP |
1718 | } |
1719 | ||
1720 | static void | |
1721 | dpif_netdev_port_poll_wait(const struct dpif *dpif_) | |
1722 | { | |
1723 | struct dpif_netdev *dpif = dpif_netdev_cast(dpif_); | |
5279f8fd | 1724 | |
d33ed218 | 1725 | seq_wait(dpif->dp->port_seq, dpif->last_port_seq); |
8a4e3a85 BP |
1726 | } |
1727 | ||
1728 | static struct dp_netdev_flow * | |
0de8783a | 1729 | dp_netdev_flow_cast(const struct dpcls_rule *cr) |
8a4e3a85 BP |
1730 | { |
1731 | return cr ? CONTAINER_OF(cr, struct dp_netdev_flow, cr) : NULL; | |
72865317 BP |
1732 | } |
1733 | ||
9bbf1c3d DDP |
1734 | static bool dp_netdev_flow_ref(struct dp_netdev_flow *flow) |
1735 | { | |
1736 | return ovs_refcount_try_ref_rcu(&flow->ref_cnt); | |
1737 | } | |
1738 | ||
79df317f DDP |
1739 | /* netdev_flow_key utilities. |
1740 | * | |
1741 | * netdev_flow_key is basically a miniflow. We use these functions | |
1742 | * (netdev_flow_key_clone, netdev_flow_key_equal, ...) instead of the miniflow | |
1743 | * functions (miniflow_clone_inline, miniflow_equal, ...), because: | |
1744 | * | |
1745 | * - Since we are dealing exclusively with miniflows created by | |
1746 | * miniflow_extract(), if the map is different the miniflow is different. | |
1747 | * Therefore we can be faster by comparing the map and the miniflow in a | |
1748 | * single memcmp(). | |
5fcff47b | 1749 | * - These functions can be inlined by the compiler. */ |
79df317f | 1750 | |
361d808d | 1751 | /* Given the number of bits set in miniflow's maps, returns the size of the |
caeb4906 | 1752 | * 'netdev_flow_key.mf' */ |
361d808d JR |
1753 | static inline size_t |
1754 | netdev_flow_key_size(size_t flow_u64s) | |
79df317f | 1755 | { |
361d808d | 1756 | return sizeof(struct miniflow) + MINIFLOW_VALUES_SIZE(flow_u64s); |
79df317f DDP |
1757 | } |
1758 | ||
79df317f DDP |
1759 | static inline bool |
1760 | netdev_flow_key_equal(const struct netdev_flow_key *a, | |
0de8783a JR |
1761 | const struct netdev_flow_key *b) |
1762 | { | |
caeb4906 JR |
1763 | /* 'b->len' may be not set yet. */ |
1764 | return a->hash == b->hash && !memcmp(&a->mf, &b->mf, a->len); | |
0de8783a JR |
1765 | } |
1766 | ||
1767 | /* Used to compare 'netdev_flow_key' in the exact match cache to a miniflow. | |
d79a39fe | 1768 | * The maps are compared bitwise, so both 'key->mf' and 'mf' must have been |
0de8783a JR |
1769 | * generated by miniflow_extract. */ |
1770 | static inline bool | |
1771 | netdev_flow_key_equal_mf(const struct netdev_flow_key *key, | |
1772 | const struct miniflow *mf) | |
79df317f | 1773 | { |
caeb4906 | 1774 | return !memcmp(&key->mf, mf, key->len); |
79df317f DDP |
1775 | } |
1776 | ||
1777 | static inline void | |
1778 | netdev_flow_key_clone(struct netdev_flow_key *dst, | |
0de8783a JR |
1779 | const struct netdev_flow_key *src) |
1780 | { | |
caeb4906 JR |
1781 | memcpy(dst, src, |
1782 | offsetof(struct netdev_flow_key, mf) + src->len); | |
0de8783a JR |
1783 | } |
1784 | ||
1785 | /* Slow. */ | |
1786 | static void | |
1787 | netdev_flow_key_from_flow(struct netdev_flow_key *dst, | |
1788 | const struct flow *src) | |
1789 | { | |
cf62fa4c | 1790 | struct dp_packet packet; |
0de8783a | 1791 | uint64_t buf_stub[512 / 8]; |
0de8783a | 1792 | |
cf62fa4c PS |
1793 | dp_packet_use_stub(&packet, buf_stub, sizeof buf_stub); |
1794 | pkt_metadata_from_flow(&packet.md, src); | |
0de8783a | 1795 | flow_compose(&packet, src); |
cf62fa4c PS |
1796 | miniflow_extract(&packet, &dst->mf); |
1797 | dp_packet_uninit(&packet); | |
0de8783a | 1798 | |
361d808d | 1799 | dst->len = netdev_flow_key_size(miniflow_n_values(&dst->mf)); |
0de8783a JR |
1800 | dst->hash = 0; /* Not computed yet. */ |
1801 | } | |
1802 | ||
1803 | /* Initialize a netdev_flow_key 'mask' from 'match'. */ | |
1804 | static inline void | |
1805 | netdev_flow_mask_init(struct netdev_flow_key *mask, | |
1806 | const struct match *match) | |
1807 | { | |
09b0fa9c | 1808 | uint64_t *dst = miniflow_values(&mask->mf); |
5fcff47b | 1809 | struct flowmap fmap; |
0de8783a | 1810 | uint32_t hash = 0; |
5fcff47b | 1811 | size_t idx; |
0de8783a JR |
1812 | |
1813 | /* Only check masks that make sense for the flow. */ | |
5fcff47b JR |
1814 | flow_wc_map(&match->flow, &fmap); |
1815 | flowmap_init(&mask->mf.map); | |
0de8783a | 1816 | |
5fcff47b JR |
1817 | FLOWMAP_FOR_EACH_INDEX(idx, fmap) { |
1818 | uint64_t mask_u64 = flow_u64_value(&match->wc.masks, idx); | |
0de8783a | 1819 | |
5fcff47b JR |
1820 | if (mask_u64) { |
1821 | flowmap_set(&mask->mf.map, idx, 1); | |
1822 | *dst++ = mask_u64; | |
1823 | hash = hash_add64(hash, mask_u64); | |
0de8783a | 1824 | } |
0de8783a JR |
1825 | } |
1826 | ||
5fcff47b | 1827 | map_t map; |
0de8783a | 1828 | |
5fcff47b JR |
1829 | FLOWMAP_FOR_EACH_MAP (map, mask->mf.map) { |
1830 | hash = hash_add64(hash, map); | |
1831 | } | |
0de8783a | 1832 | |
5fcff47b | 1833 | size_t n = dst - miniflow_get_values(&mask->mf); |
0de8783a | 1834 | |
d70e8c28 | 1835 | mask->hash = hash_finish(hash, n * 8); |
0de8783a JR |
1836 | mask->len = netdev_flow_key_size(n); |
1837 | } | |
1838 | ||
361d808d | 1839 | /* Initializes 'dst' as a copy of 'flow' masked with 'mask'. */ |
0de8783a JR |
1840 | static inline void |
1841 | netdev_flow_key_init_masked(struct netdev_flow_key *dst, | |
1842 | const struct flow *flow, | |
1843 | const struct netdev_flow_key *mask) | |
79df317f | 1844 | { |
09b0fa9c JR |
1845 | uint64_t *dst_u64 = miniflow_values(&dst->mf); |
1846 | const uint64_t *mask_u64 = miniflow_get_values(&mask->mf); | |
0de8783a | 1847 | uint32_t hash = 0; |
d70e8c28 | 1848 | uint64_t value; |
0de8783a JR |
1849 | |
1850 | dst->len = mask->len; | |
361d808d | 1851 | dst->mf = mask->mf; /* Copy maps. */ |
0de8783a | 1852 | |
5fcff47b | 1853 | FLOW_FOR_EACH_IN_MAPS(value, flow, mask->mf.map) { |
d70e8c28 JR |
1854 | *dst_u64 = value & *mask_u64++; |
1855 | hash = hash_add64(hash, *dst_u64++); | |
0de8783a | 1856 | } |
09b0fa9c JR |
1857 | dst->hash = hash_finish(hash, |
1858 | (dst_u64 - miniflow_get_values(&dst->mf)) * 8); | |
0de8783a JR |
1859 | } |
1860 | ||
5fcff47b JR |
1861 | /* Iterate through netdev_flow_key TNL u64 values specified by 'FLOWMAP'. */ |
1862 | #define NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(VALUE, KEY, FLOWMAP) \ | |
1863 | MINIFLOW_FOR_EACH_IN_FLOWMAP(VALUE, &(KEY)->mf, FLOWMAP) | |
0de8783a JR |
1864 | |
1865 | /* Returns a hash value for the bits of 'key' where there are 1-bits in | |
1866 | * 'mask'. */ | |
1867 | static inline uint32_t | |
1868 | netdev_flow_key_hash_in_mask(const struct netdev_flow_key *key, | |
1869 | const struct netdev_flow_key *mask) | |
1870 | { | |
09b0fa9c | 1871 | const uint64_t *p = miniflow_get_values(&mask->mf); |
0de8783a | 1872 | uint32_t hash = 0; |
5fcff47b | 1873 | uint64_t value; |
0de8783a | 1874 | |
5fcff47b JR |
1875 | NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, key, mask->mf.map) { |
1876 | hash = hash_add64(hash, value & *p++); | |
0de8783a JR |
1877 | } |
1878 | ||
09b0fa9c | 1879 | return hash_finish(hash, (p - miniflow_get_values(&mask->mf)) * 8); |
79df317f DDP |
1880 | } |
1881 | ||
9bbf1c3d DDP |
1882 | static inline bool |
1883 | emc_entry_alive(struct emc_entry *ce) | |
1884 | { | |
1885 | return ce->flow && !ce->flow->dead; | |
1886 | } | |
1887 | ||
1888 | static void | |
1889 | emc_clear_entry(struct emc_entry *ce) | |
1890 | { | |
1891 | if (ce->flow) { | |
1892 | dp_netdev_flow_unref(ce->flow); | |
1893 | ce->flow = NULL; | |
1894 | } | |
1895 | } | |
1896 | ||
1897 | static inline void | |
1898 | emc_change_entry(struct emc_entry *ce, struct dp_netdev_flow *flow, | |
0de8783a | 1899 | const struct netdev_flow_key *key) |
9bbf1c3d DDP |
1900 | { |
1901 | if (ce->flow != flow) { | |
1902 | if (ce->flow) { | |
1903 | dp_netdev_flow_unref(ce->flow); | |
1904 | } | |
1905 | ||
1906 | if (dp_netdev_flow_ref(flow)) { | |
1907 | ce->flow = flow; | |
1908 | } else { | |
1909 | ce->flow = NULL; | |
1910 | } | |
1911 | } | |
0de8783a JR |
1912 | if (key) { |
1913 | netdev_flow_key_clone(&ce->key, key); | |
9bbf1c3d DDP |
1914 | } |
1915 | } | |
1916 | ||
1917 | static inline void | |
0de8783a | 1918 | emc_insert(struct emc_cache *cache, const struct netdev_flow_key *key, |
9bbf1c3d DDP |
1919 | struct dp_netdev_flow *flow) |
1920 | { | |
1921 | struct emc_entry *to_be_replaced = NULL; | |
1922 | struct emc_entry *current_entry; | |
1923 | ||
0de8783a JR |
1924 | EMC_FOR_EACH_POS_WITH_HASH(cache, current_entry, key->hash) { |
1925 | if (netdev_flow_key_equal(¤t_entry->key, key)) { | |
9bbf1c3d | 1926 | /* We found the entry with the 'mf' miniflow */ |
0de8783a | 1927 | emc_change_entry(current_entry, flow, NULL); |
9bbf1c3d DDP |
1928 | return; |
1929 | } | |
1930 | ||
1931 | /* Replacement policy: put the flow in an empty (not alive) entry, or | |
1932 | * in the first entry where it can be */ | |
1933 | if (!to_be_replaced | |
1934 | || (emc_entry_alive(to_be_replaced) | |
1935 | && !emc_entry_alive(current_entry)) | |
0de8783a | 1936 | || current_entry->key.hash < to_be_replaced->key.hash) { |
9bbf1c3d DDP |
1937 | to_be_replaced = current_entry; |
1938 | } | |
1939 | } | |
1940 | /* We didn't find the miniflow in the cache. | |
1941 | * The 'to_be_replaced' entry is where the new flow will be stored */ | |
1942 | ||
0de8783a | 1943 | emc_change_entry(to_be_replaced, flow, key); |
9bbf1c3d DDP |
1944 | } |
1945 | ||
1946 | static inline struct dp_netdev_flow * | |
0de8783a | 1947 | emc_lookup(struct emc_cache *cache, const struct netdev_flow_key *key) |
9bbf1c3d DDP |
1948 | { |
1949 | struct emc_entry *current_entry; | |
1950 | ||
0de8783a JR |
1951 | EMC_FOR_EACH_POS_WITH_HASH(cache, current_entry, key->hash) { |
1952 | if (current_entry->key.hash == key->hash | |
1953 | && emc_entry_alive(current_entry) | |
1954 | && netdev_flow_key_equal_mf(¤t_entry->key, &key->mf)) { | |
9bbf1c3d | 1955 | |
0de8783a | 1956 | /* We found the entry with the 'key->mf' miniflow */ |
9bbf1c3d DDP |
1957 | return current_entry->flow; |
1958 | } | |
1959 | } | |
1960 | ||
1961 | return NULL; | |
1962 | } | |
1963 | ||
72865317 | 1964 | static struct dp_netdev_flow * |
3453b4d6 JS |
1965 | dp_netdev_pmd_lookup_flow(struct dp_netdev_pmd_thread *pmd, |
1966 | const struct netdev_flow_key *key, | |
1967 | int *lookup_num_p) | |
2c0ea78f | 1968 | { |
3453b4d6 | 1969 | struct dpcls *cls; |
0de8783a | 1970 | struct dpcls_rule *rule; |
3453b4d6 JS |
1971 | odp_port_t in_port = u32_to_odp(MINIFLOW_GET_U32(&key->mf, in_port)); |
1972 | struct dp_netdev_flow *netdev_flow = NULL; | |
2c0ea78f | 1973 | |
3453b4d6 JS |
1974 | cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port); |
1975 | if (OVS_LIKELY(cls)) { | |
1976 | dpcls_lookup(cls, key, &rule, 1, lookup_num_p); | |
1977 | netdev_flow = dp_netdev_flow_cast(rule); | |
1978 | } | |
8a4e3a85 | 1979 | return netdev_flow; |
2c0ea78f GS |
1980 | } |
1981 | ||
1982 | static struct dp_netdev_flow * | |
1c1e46ed AW |
1983 | dp_netdev_pmd_find_flow(const struct dp_netdev_pmd_thread *pmd, |
1984 | const ovs_u128 *ufidp, const struct nlattr *key, | |
1985 | size_t key_len) | |
72865317 | 1986 | { |
1763b4b8 | 1987 | struct dp_netdev_flow *netdev_flow; |
70e5ed6f JS |
1988 | struct flow flow; |
1989 | ovs_u128 ufid; | |
1990 | ||
1991 | /* If a UFID is not provided, determine one based on the key. */ | |
1992 | if (!ufidp && key && key_len | |
1993 | && !dpif_netdev_flow_from_nlattrs(key, key_len, &flow)) { | |
1c1e46ed | 1994 | dpif_flow_hash(pmd->dp->dpif, &flow, sizeof flow, &ufid); |
70e5ed6f JS |
1995 | ufidp = &ufid; |
1996 | } | |
72865317 | 1997 | |
70e5ed6f JS |
1998 | if (ufidp) { |
1999 | CMAP_FOR_EACH_WITH_HASH (netdev_flow, node, dp_netdev_flow_hash(ufidp), | |
1c1e46ed | 2000 | &pmd->flow_table) { |
2ff8484b | 2001 | if (ovs_u128_equals(netdev_flow->ufid, *ufidp)) { |
70e5ed6f JS |
2002 | return netdev_flow; |
2003 | } | |
72865317 BP |
2004 | } |
2005 | } | |
8a4e3a85 | 2006 | |
72865317 BP |
2007 | return NULL; |
2008 | } | |
2009 | ||
2010 | static void | |
eb94da30 | 2011 | get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow_, |
1763b4b8 | 2012 | struct dpif_flow_stats *stats) |
feebdea2 | 2013 | { |
eb94da30 DDP |
2014 | struct dp_netdev_flow *netdev_flow; |
2015 | unsigned long long n; | |
2016 | long long used; | |
2017 | uint16_t flags; | |
2018 | ||
2019 | netdev_flow = CONST_CAST(struct dp_netdev_flow *, netdev_flow_); | |
2020 | ||
2021 | atomic_read_relaxed(&netdev_flow->stats.packet_count, &n); | |
2022 | stats->n_packets = n; | |
2023 | atomic_read_relaxed(&netdev_flow->stats.byte_count, &n); | |
2024 | stats->n_bytes = n; | |
2025 | atomic_read_relaxed(&netdev_flow->stats.used, &used); | |
2026 | stats->used = used; | |
2027 | atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags); | |
2028 | stats->tcp_flags = flags; | |
72865317 BP |
2029 | } |
2030 | ||
7af12bd7 JS |
2031 | /* Converts to the dpif_flow format, using 'key_buf' and 'mask_buf' for |
2032 | * storing the netlink-formatted key/mask. 'key_buf' may be the same as | |
2033 | * 'mask_buf'. Actions will be returned without copying, by relying on RCU to | |
2034 | * protect them. */ | |
6fe09f8c | 2035 | static void |
70e5ed6f | 2036 | dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow *netdev_flow, |
7af12bd7 | 2037 | struct ofpbuf *key_buf, struct ofpbuf *mask_buf, |
64bb477f | 2038 | struct dpif_flow *flow, bool terse) |
6fe09f8c | 2039 | { |
64bb477f JS |
2040 | if (terse) { |
2041 | memset(flow, 0, sizeof *flow); | |
2042 | } else { | |
2043 | struct flow_wildcards wc; | |
2044 | struct dp_netdev_actions *actions; | |
2045 | size_t offset; | |
5262eea1 JG |
2046 | struct odp_flow_key_parms odp_parms = { |
2047 | .flow = &netdev_flow->flow, | |
2048 | .mask = &wc.masks, | |
2494ccd7 | 2049 | .support = dp_netdev_support, |
5262eea1 | 2050 | }; |
64bb477f JS |
2051 | |
2052 | miniflow_expand(&netdev_flow->cr.mask->mf, &wc.masks); | |
f4b835bb JR |
2053 | /* in_port is exact matched, but we have left it out from the mask for |
2054 | * optimnization reasons. Add in_port back to the mask. */ | |
2055 | wc.masks.in_port.odp_port = ODPP_NONE; | |
64bb477f JS |
2056 | |
2057 | /* Key */ | |
6fd6ed71 | 2058 | offset = key_buf->size; |
64bb477f | 2059 | flow->key = ofpbuf_tail(key_buf); |
5262eea1 | 2060 | odp_flow_key_from_flow(&odp_parms, key_buf); |
6fd6ed71 | 2061 | flow->key_len = key_buf->size - offset; |
64bb477f JS |
2062 | |
2063 | /* Mask */ | |
6fd6ed71 | 2064 | offset = mask_buf->size; |
64bb477f | 2065 | flow->mask = ofpbuf_tail(mask_buf); |
ec1f6f32 | 2066 | odp_parms.key_buf = key_buf; |
5262eea1 | 2067 | odp_flow_key_from_mask(&odp_parms, mask_buf); |
6fd6ed71 | 2068 | flow->mask_len = mask_buf->size - offset; |
64bb477f JS |
2069 | |
2070 | /* Actions */ | |
2071 | actions = dp_netdev_flow_get_actions(netdev_flow); | |
2072 | flow->actions = actions->actions; | |
2073 | flow->actions_len = actions->size; | |
2074 | } | |
6fe09f8c | 2075 | |
70e5ed6f JS |
2076 | flow->ufid = netdev_flow->ufid; |
2077 | flow->ufid_present = true; | |
1c1e46ed | 2078 | flow->pmd_id = netdev_flow->pmd_id; |
6fe09f8c JS |
2079 | get_dpif_flow_stats(netdev_flow, &flow->stats); |
2080 | } | |
2081 | ||
36956a7d | 2082 | static int |
8c301900 JR |
2083 | dpif_netdev_mask_from_nlattrs(const struct nlattr *key, uint32_t key_len, |
2084 | const struct nlattr *mask_key, | |
2085 | uint32_t mask_key_len, const struct flow *flow, | |
9f861c91 | 2086 | struct flow_wildcards *wc) |
8c301900 | 2087 | { |
ca8d3442 DDP |
2088 | enum odp_key_fitness fitness; |
2089 | ||
8d8ab6c2 | 2090 | fitness = odp_flow_key_to_mask(mask_key, mask_key_len, wc, flow); |
ca8d3442 DDP |
2091 | if (fitness) { |
2092 | /* This should not happen: it indicates that | |
2093 | * odp_flow_key_from_mask() and odp_flow_key_to_mask() | |
2094 | * disagree on the acceptable form of a mask. Log the problem | |
2095 | * as an error, with enough details to enable debugging. */ | |
2096 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); | |
2097 | ||
2098 | if (!VLOG_DROP_ERR(&rl)) { | |
2099 | struct ds s; | |
8c301900 | 2100 | |
ca8d3442 DDP |
2101 | ds_init(&s); |
2102 | odp_flow_format(key, key_len, mask_key, mask_key_len, NULL, &s, | |
2103 | true); | |
2104 | VLOG_ERR("internal error parsing flow mask %s (%s)", | |
2105 | ds_cstr(&s), odp_key_fitness_to_string(fitness)); | |
2106 | ds_destroy(&s); | |
8c301900 | 2107 | } |
ca8d3442 DDP |
2108 | |
2109 | return EINVAL; | |
8c301900 JR |
2110 | } |
2111 | ||
2112 | return 0; | |
2113 | } | |
2114 | ||
2115 | static int | |
2116 | dpif_netdev_flow_from_nlattrs(const struct nlattr *key, uint32_t key_len, | |
2117 | struct flow *flow) | |
36956a7d | 2118 | { |
586ddea5 BP |
2119 | odp_port_t in_port; |
2120 | ||
8d8ab6c2 | 2121 | if (odp_flow_key_to_flow(key, key_len, flow)) { |
36956a7d | 2122 | /* This should not happen: it indicates that odp_flow_key_from_flow() |
8c301900 JR |
2123 | * and odp_flow_key_to_flow() disagree on the acceptable form of a |
2124 | * flow. Log the problem as an error, with enough details to enable | |
2125 | * debugging. */ | |
36956a7d BP |
2126 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); |
2127 | ||
2128 | if (!VLOG_DROP_ERR(&rl)) { | |
2129 | struct ds s; | |
2130 | ||
2131 | ds_init(&s); | |
8c301900 | 2132 | odp_flow_format(key, key_len, NULL, 0, NULL, &s, true); |
36956a7d BP |
2133 | VLOG_ERR("internal error parsing flow key %s", ds_cstr(&s)); |
2134 | ds_destroy(&s); | |
2135 | } | |
2136 | ||
2137 | return EINVAL; | |
2138 | } | |
2139 | ||
586ddea5 BP |
2140 | in_port = flow->in_port.odp_port; |
2141 | if (!is_valid_port_number(in_port) && in_port != ODPP_NONE) { | |
18886b60 BP |
2142 | return EINVAL; |
2143 | } | |
2144 | ||
5cf3edb3 | 2145 | if (flow->ct_state & DP_NETDEV_CS_UNSUPPORTED_MASK) { |
07659514 JS |
2146 | return EINVAL; |
2147 | } | |
2148 | ||
36956a7d BP |
2149 | return 0; |
2150 | } | |
2151 | ||
72865317 | 2152 | static int |
6fe09f8c | 2153 | dpif_netdev_flow_get(const struct dpif *dpif, const struct dpif_flow_get *get) |
72865317 BP |
2154 | { |
2155 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
1763b4b8 | 2156 | struct dp_netdev_flow *netdev_flow; |
1c1e46ed | 2157 | struct dp_netdev_pmd_thread *pmd; |
c673049c IM |
2158 | struct hmapx to_find = HMAPX_INITIALIZER(&to_find); |
2159 | struct hmapx_node *node; | |
2160 | int error = EINVAL; | |
2161 | ||
2162 | if (get->pmd_id == PMD_ID_NULL) { | |
2163 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
2164 | if (dp_netdev_pmd_try_ref(pmd) && !hmapx_add(&to_find, pmd)) { | |
2165 | dp_netdev_pmd_unref(pmd); | |
2166 | } | |
2167 | } | |
2168 | } else { | |
2169 | pmd = dp_netdev_get_pmd(dp, get->pmd_id); | |
2170 | if (!pmd) { | |
2171 | goto out; | |
2172 | } | |
2173 | hmapx_add(&to_find, pmd); | |
1c1e46ed AW |
2174 | } |
2175 | ||
c673049c IM |
2176 | if (!hmapx_count(&to_find)) { |
2177 | goto out; | |
72865317 | 2178 | } |
1c1e46ed | 2179 | |
c673049c IM |
2180 | HMAPX_FOR_EACH (node, &to_find) { |
2181 | pmd = (struct dp_netdev_pmd_thread *) node->data; | |
2182 | netdev_flow = dp_netdev_pmd_find_flow(pmd, get->ufid, get->key, | |
2183 | get->key_len); | |
2184 | if (netdev_flow) { | |
2185 | dp_netdev_flow_to_dpif_flow(netdev_flow, get->buffer, get->buffer, | |
2186 | get->flow, false); | |
2187 | error = 0; | |
2188 | break; | |
2189 | } else { | |
2190 | error = ENOENT; | |
2191 | } | |
2192 | } | |
bc4a05c6 | 2193 | |
c673049c IM |
2194 | HMAPX_FOR_EACH (node, &to_find) { |
2195 | pmd = (struct dp_netdev_pmd_thread *) node->data; | |
2196 | dp_netdev_pmd_unref(pmd); | |
2197 | } | |
2198 | out: | |
2199 | hmapx_destroy(&to_find); | |
5279f8fd | 2200 | return error; |
72865317 BP |
2201 | } |
2202 | ||
0de8783a | 2203 | static struct dp_netdev_flow * |
1c1e46ed AW |
2204 | dp_netdev_flow_add(struct dp_netdev_pmd_thread *pmd, |
2205 | struct match *match, const ovs_u128 *ufid, | |
ae2ceebd | 2206 | const struct nlattr *actions, size_t actions_len) |
1c1e46ed | 2207 | OVS_REQUIRES(pmd->flow_mutex) |
72865317 | 2208 | { |
0de8783a JR |
2209 | struct dp_netdev_flow *flow; |
2210 | struct netdev_flow_key mask; | |
3453b4d6 | 2211 | struct dpcls *cls; |
f4b835bb JR |
2212 | |
2213 | /* Make sure in_port is exact matched before we read it. */ | |
2214 | ovs_assert(match->wc.masks.in_port.odp_port == ODPP_NONE); | |
3453b4d6 | 2215 | odp_port_t in_port = match->flow.in_port.odp_port; |
ed79f89a | 2216 | |
f4b835bb JR |
2217 | /* As we select the dpcls based on the port number, each netdev flow |
2218 | * belonging to the same dpcls will have the same odp_port value. | |
2219 | * For performance reasons we wildcard odp_port here in the mask. In the | |
2220 | * typical case dp_hash is also wildcarded, and the resulting 8-byte | |
2221 | * chunk {dp_hash, in_port} will be ignored by netdev_flow_mask_init() and | |
2222 | * will not be part of the subtable mask. | |
2223 | * This will speed up the hash computation during dpcls_lookup() because | |
2224 | * there is one less call to hash_add64() in this case. */ | |
2225 | match->wc.masks.in_port.odp_port = 0; | |
0de8783a | 2226 | netdev_flow_mask_init(&mask, match); |
f4b835bb JR |
2227 | match->wc.masks.in_port.odp_port = ODPP_NONE; |
2228 | ||
0de8783a | 2229 | /* Make sure wc does not have metadata. */ |
5fcff47b JR |
2230 | ovs_assert(!FLOWMAP_HAS_FIELD(&mask.mf.map, metadata) |
2231 | && !FLOWMAP_HAS_FIELD(&mask.mf.map, regs)); | |
679ba04c | 2232 | |
0de8783a | 2233 | /* Do not allocate extra space. */ |
caeb4906 | 2234 | flow = xmalloc(sizeof *flow - sizeof flow->cr.flow.mf + mask.len); |
1c1e46ed | 2235 | memset(&flow->stats, 0, sizeof flow->stats); |
0de8783a | 2236 | flow->dead = false; |
11e5cf1f | 2237 | flow->batch = NULL; |
bd5131ba | 2238 | *CONST_CAST(unsigned *, &flow->pmd_id) = pmd->core_id; |
0de8783a | 2239 | *CONST_CAST(struct flow *, &flow->flow) = match->flow; |
70e5ed6f | 2240 | *CONST_CAST(ovs_u128 *, &flow->ufid) = *ufid; |
0de8783a | 2241 | ovs_refcount_init(&flow->ref_cnt); |
0de8783a | 2242 | ovsrcu_set(&flow->actions, dp_netdev_actions_create(actions, actions_len)); |
2c0ea78f | 2243 | |
0de8783a | 2244 | netdev_flow_key_init_masked(&flow->cr.flow, &match->flow, &mask); |
3453b4d6 | 2245 | |
f4b835bb | 2246 | /* Select dpcls for in_port. Relies on in_port to be exact match. */ |
3453b4d6 JS |
2247 | cls = dp_netdev_pmd_find_dpcls(pmd, in_port); |
2248 | dpcls_insert(cls, &flow->cr, &mask); | |
72865317 | 2249 | |
4c75aaab EJ |
2250 | cmap_insert(&pmd->flow_table, CONST_CAST(struct cmap_node *, &flow->node), |
2251 | dp_netdev_flow_hash(&flow->ufid)); | |
2252 | ||
623540e4 EJ |
2253 | if (OVS_UNLIKELY(VLOG_IS_DBG_ENABLED())) { |
2254 | struct ds ds = DS_EMPTY_INITIALIZER; | |
9044f2c1 JG |
2255 | struct ofpbuf key_buf, mask_buf; |
2256 | struct odp_flow_key_parms odp_parms = { | |
2257 | .flow = &match->flow, | |
2258 | .mask = &match->wc.masks, | |
2259 | .support = dp_netdev_support, | |
2260 | }; | |
2261 | ||
2262 | ofpbuf_init(&key_buf, 0); | |
2263 | ofpbuf_init(&mask_buf, 0); | |
623540e4 | 2264 | |
9044f2c1 JG |
2265 | odp_flow_key_from_flow(&odp_parms, &key_buf); |
2266 | odp_parms.key_buf = &key_buf; | |
2267 | odp_flow_key_from_mask(&odp_parms, &mask_buf); | |
0de8783a | 2268 | |
623540e4 | 2269 | ds_put_cstr(&ds, "flow_add: "); |
70e5ed6f JS |
2270 | odp_format_ufid(ufid, &ds); |
2271 | ds_put_cstr(&ds, " "); | |
9044f2c1 JG |
2272 | odp_flow_format(key_buf.data, key_buf.size, |
2273 | mask_buf.data, mask_buf.size, | |
2274 | NULL, &ds, false); | |
623540e4 EJ |
2275 | ds_put_cstr(&ds, ", actions:"); |
2276 | format_odp_actions(&ds, actions, actions_len); | |
2277 | ||
2278 | VLOG_DBG_RL(&upcall_rl, "%s", ds_cstr(&ds)); | |
2279 | ||
9044f2c1 JG |
2280 | ofpbuf_uninit(&key_buf); |
2281 | ofpbuf_uninit(&mask_buf); | |
623540e4 EJ |
2282 | ds_destroy(&ds); |
2283 | } | |
2284 | ||
0de8783a | 2285 | return flow; |
72865317 BP |
2286 | } |
2287 | ||
72865317 | 2288 | static int |
f5d317a1 DDP |
2289 | flow_put_on_pmd(struct dp_netdev_pmd_thread *pmd, |
2290 | struct netdev_flow_key *key, | |
2291 | struct match *match, | |
2292 | ovs_u128 *ufid, | |
2293 | const struct dpif_flow_put *put, | |
2294 | struct dpif_flow_stats *stats) | |
72865317 | 2295 | { |
1763b4b8 | 2296 | struct dp_netdev_flow *netdev_flow; |
f5d317a1 | 2297 | int error = 0; |
72865317 | 2298 | |
f5d317a1 DDP |
2299 | if (stats) { |
2300 | memset(stats, 0, sizeof *stats); | |
70e5ed6f JS |
2301 | } |
2302 | ||
1c1e46ed | 2303 | ovs_mutex_lock(&pmd->flow_mutex); |
f5d317a1 | 2304 | netdev_flow = dp_netdev_pmd_lookup_flow(pmd, key, NULL); |
1763b4b8 | 2305 | if (!netdev_flow) { |
89625d1e | 2306 | if (put->flags & DPIF_FP_CREATE) { |
1c1e46ed | 2307 | if (cmap_count(&pmd->flow_table) < MAX_FLOWS) { |
f5d317a1 | 2308 | dp_netdev_flow_add(pmd, match, ufid, put->actions, |
70e5ed6f | 2309 | put->actions_len); |
0de8783a | 2310 | error = 0; |
72865317 | 2311 | } else { |
5279f8fd | 2312 | error = EFBIG; |
72865317 BP |
2313 | } |
2314 | } else { | |
5279f8fd | 2315 | error = ENOENT; |
72865317 BP |
2316 | } |
2317 | } else { | |
2c0ea78f | 2318 | if (put->flags & DPIF_FP_MODIFY |
f5d317a1 | 2319 | && flow_equal(&match->flow, &netdev_flow->flow)) { |
8a4e3a85 BP |
2320 | struct dp_netdev_actions *new_actions; |
2321 | struct dp_netdev_actions *old_actions; | |
2322 | ||
2323 | new_actions = dp_netdev_actions_create(put->actions, | |
2324 | put->actions_len); | |
2325 | ||
61e7deb1 BP |
2326 | old_actions = dp_netdev_flow_get_actions(netdev_flow); |
2327 | ovsrcu_set(&netdev_flow->actions, new_actions); | |
679ba04c | 2328 | |
f5d317a1 DDP |
2329 | if (stats) { |
2330 | get_dpif_flow_stats(netdev_flow, stats); | |
a84cb64a BP |
2331 | } |
2332 | if (put->flags & DPIF_FP_ZERO_STATS) { | |
97447f55 DDP |
2333 | /* XXX: The userspace datapath uses thread local statistics |
2334 | * (for flows), which should be updated only by the owning | |
2335 | * thread. Since we cannot write on stats memory here, | |
2336 | * we choose not to support this flag. Please note: | |
2337 | * - This feature is currently used only by dpctl commands with | |
2338 | * option --clear. | |
2339 | * - Should the need arise, this operation can be implemented | |
2340 | * by keeping a base value (to be update here) for each | |
2341 | * counter, and subtracting it before outputting the stats */ | |
2342 | error = EOPNOTSUPP; | |
72865317 | 2343 | } |
8a4e3a85 | 2344 | |
61e7deb1 | 2345 | ovsrcu_postpone(dp_netdev_actions_free, old_actions); |
2c0ea78f | 2346 | } else if (put->flags & DPIF_FP_CREATE) { |
5279f8fd | 2347 | error = EEXIST; |
2c0ea78f GS |
2348 | } else { |
2349 | /* Overlapping flow. */ | |
2350 | error = EINVAL; | |
72865317 BP |
2351 | } |
2352 | } | |
1c1e46ed | 2353 | ovs_mutex_unlock(&pmd->flow_mutex); |
5279f8fd | 2354 | return error; |
72865317 BP |
2355 | } |
2356 | ||
72865317 | 2357 | static int |
f5d317a1 | 2358 | dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put) |
72865317 BP |
2359 | { |
2360 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
f5d317a1 | 2361 | struct netdev_flow_key key; |
1c1e46ed | 2362 | struct dp_netdev_pmd_thread *pmd; |
f5d317a1 DDP |
2363 | struct match match; |
2364 | ovs_u128 ufid; | |
2365 | int error; | |
72865317 | 2366 | |
f5d317a1 DDP |
2367 | if (put->stats) { |
2368 | memset(put->stats, 0, sizeof *put->stats); | |
2369 | } | |
2370 | error = dpif_netdev_flow_from_nlattrs(put->key, put->key_len, &match.flow); | |
2371 | if (error) { | |
2372 | return error; | |
2373 | } | |
2374 | error = dpif_netdev_mask_from_nlattrs(put->key, put->key_len, | |
2375 | put->mask, put->mask_len, | |
2376 | &match.flow, &match.wc); | |
2377 | if (error) { | |
2378 | return error; | |
1c1e46ed AW |
2379 | } |
2380 | ||
f5d317a1 DDP |
2381 | if (put->ufid) { |
2382 | ufid = *put->ufid; | |
2383 | } else { | |
2384 | dpif_flow_hash(dpif, &match.flow, sizeof match.flow, &ufid); | |
2385 | } | |
2386 | ||
2387 | /* Must produce a netdev_flow_key for lookup. | |
2388 | * This interface is no longer performance critical, since it is not used | |
2389 | * for upcall processing any more. */ | |
2390 | netdev_flow_key_from_flow(&key, &match.flow); | |
2391 | ||
2392 | if (put->pmd_id == PMD_ID_NULL) { | |
2393 | if (cmap_count(&dp->poll_threads) == 0) { | |
2394 | return EINVAL; | |
2395 | } | |
2396 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
2397 | struct dpif_flow_stats pmd_stats; | |
2398 | int pmd_error; | |
2399 | ||
2400 | pmd_error = flow_put_on_pmd(pmd, &key, &match, &ufid, put, | |
2401 | &pmd_stats); | |
2402 | if (pmd_error) { | |
2403 | error = pmd_error; | |
2404 | } else if (put->stats) { | |
2405 | put->stats->n_packets += pmd_stats.n_packets; | |
2406 | put->stats->n_bytes += pmd_stats.n_bytes; | |
2407 | put->stats->used = MAX(put->stats->used, pmd_stats.used); | |
2408 | put->stats->tcp_flags |= pmd_stats.tcp_flags; | |
2409 | } | |
2410 | } | |
2411 | } else { | |
2412 | pmd = dp_netdev_get_pmd(dp, put->pmd_id); | |
2413 | if (!pmd) { | |
2414 | return EINVAL; | |
2415 | } | |
2416 | error = flow_put_on_pmd(pmd, &key, &match, &ufid, put, put->stats); | |
2417 | dp_netdev_pmd_unref(pmd); | |
2418 | } | |
2419 | ||
2420 | return error; | |
2421 | } | |
2422 | ||
2423 | static int | |
2424 | flow_del_on_pmd(struct dp_netdev_pmd_thread *pmd, | |
2425 | struct dpif_flow_stats *stats, | |
2426 | const struct dpif_flow_del *del) | |
2427 | { | |
2428 | struct dp_netdev_flow *netdev_flow; | |
2429 | int error = 0; | |
2430 | ||
1c1e46ed AW |
2431 | ovs_mutex_lock(&pmd->flow_mutex); |
2432 | netdev_flow = dp_netdev_pmd_find_flow(pmd, del->ufid, del->key, | |
2433 | del->key_len); | |
1763b4b8 | 2434 | if (netdev_flow) { |
f5d317a1 DDP |
2435 | if (stats) { |
2436 | get_dpif_flow_stats(netdev_flow, stats); | |
feebdea2 | 2437 | } |
1c1e46ed | 2438 | dp_netdev_pmd_remove_flow(pmd, netdev_flow); |
72865317 | 2439 | } else { |
5279f8fd | 2440 | error = ENOENT; |
72865317 | 2441 | } |
1c1e46ed | 2442 | ovs_mutex_unlock(&pmd->flow_mutex); |
f5d317a1 DDP |
2443 | |
2444 | return error; | |
2445 | } | |
2446 | ||
2447 | static int | |
2448 | dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del) | |
2449 | { | |
2450 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
2451 | struct dp_netdev_pmd_thread *pmd; | |
2452 | int error = 0; | |
2453 | ||
2454 | if (del->stats) { | |
2455 | memset(del->stats, 0, sizeof *del->stats); | |
2456 | } | |
2457 | ||
2458 | if (del->pmd_id == PMD_ID_NULL) { | |
2459 | if (cmap_count(&dp->poll_threads) == 0) { | |
2460 | return EINVAL; | |
2461 | } | |
2462 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
2463 | struct dpif_flow_stats pmd_stats; | |
2464 | int pmd_error; | |
2465 | ||
2466 | pmd_error = flow_del_on_pmd(pmd, &pmd_stats, del); | |
2467 | if (pmd_error) { | |
2468 | error = pmd_error; | |
2469 | } else if (del->stats) { | |
2470 | del->stats->n_packets += pmd_stats.n_packets; | |
2471 | del->stats->n_bytes += pmd_stats.n_bytes; | |
2472 | del->stats->used = MAX(del->stats->used, pmd_stats.used); | |
2473 | del->stats->tcp_flags |= pmd_stats.tcp_flags; | |
2474 | } | |
2475 | } | |
2476 | } else { | |
2477 | pmd = dp_netdev_get_pmd(dp, del->pmd_id); | |
2478 | if (!pmd) { | |
2479 | return EINVAL; | |
2480 | } | |
2481 | error = flow_del_on_pmd(pmd, del->stats, del); | |
2482 | dp_netdev_pmd_unref(pmd); | |
2483 | } | |
2484 | ||
5279f8fd BP |
2485 | |
2486 | return error; | |
72865317 BP |
2487 | } |
2488 | ||
ac64794a BP |
2489 | struct dpif_netdev_flow_dump { |
2490 | struct dpif_flow_dump up; | |
1c1e46ed AW |
2491 | struct cmap_position poll_thread_pos; |
2492 | struct cmap_position flow_pos; | |
2493 | struct dp_netdev_pmd_thread *cur_pmd; | |
d2ad7ef1 JS |
2494 | int status; |
2495 | struct ovs_mutex mutex; | |
e723fd32 JS |
2496 | }; |
2497 | ||
ac64794a BP |
2498 | static struct dpif_netdev_flow_dump * |
2499 | dpif_netdev_flow_dump_cast(struct dpif_flow_dump *dump) | |
72865317 | 2500 | { |
ac64794a | 2501 | return CONTAINER_OF(dump, struct dpif_netdev_flow_dump, up); |
e723fd32 JS |
2502 | } |
2503 | ||
ac64794a | 2504 | static struct dpif_flow_dump * |
64bb477f | 2505 | dpif_netdev_flow_dump_create(const struct dpif *dpif_, bool terse) |
e723fd32 | 2506 | { |
ac64794a | 2507 | struct dpif_netdev_flow_dump *dump; |
e723fd32 | 2508 | |
1c1e46ed | 2509 | dump = xzalloc(sizeof *dump); |
ac64794a | 2510 | dpif_flow_dump_init(&dump->up, dpif_); |
64bb477f | 2511 | dump->up.terse = terse; |
ac64794a BP |
2512 | ovs_mutex_init(&dump->mutex); |
2513 | ||
2514 | return &dump->up; | |
e723fd32 JS |
2515 | } |
2516 | ||
2517 | static int | |
ac64794a | 2518 | dpif_netdev_flow_dump_destroy(struct dpif_flow_dump *dump_) |
e723fd32 | 2519 | { |
ac64794a | 2520 | struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_); |
e723fd32 | 2521 | |
ac64794a BP |
2522 | ovs_mutex_destroy(&dump->mutex); |
2523 | free(dump); | |
704a1e09 BP |
2524 | return 0; |
2525 | } | |
2526 | ||
ac64794a BP |
2527 | struct dpif_netdev_flow_dump_thread { |
2528 | struct dpif_flow_dump_thread up; | |
2529 | struct dpif_netdev_flow_dump *dump; | |
8bb113da RW |
2530 | struct odputil_keybuf keybuf[FLOW_DUMP_MAX_BATCH]; |
2531 | struct odputil_keybuf maskbuf[FLOW_DUMP_MAX_BATCH]; | |
ac64794a BP |
2532 | }; |
2533 | ||
2534 | static struct dpif_netdev_flow_dump_thread * | |
2535 | dpif_netdev_flow_dump_thread_cast(struct dpif_flow_dump_thread *thread) | |
2536 | { | |
2537 | return CONTAINER_OF(thread, struct dpif_netdev_flow_dump_thread, up); | |
2538 | } | |
2539 | ||
2540 | static struct dpif_flow_dump_thread * | |
2541 | dpif_netdev_flow_dump_thread_create(struct dpif_flow_dump *dump_) | |
2542 | { | |
2543 | struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_); | |
2544 | struct dpif_netdev_flow_dump_thread *thread; | |
2545 | ||
2546 | thread = xmalloc(sizeof *thread); | |
2547 | dpif_flow_dump_thread_init(&thread->up, &dump->up); | |
2548 | thread->dump = dump; | |
2549 | return &thread->up; | |
2550 | } | |
2551 | ||
2552 | static void | |
2553 | dpif_netdev_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_) | |
2554 | { | |
2555 | struct dpif_netdev_flow_dump_thread *thread | |
2556 | = dpif_netdev_flow_dump_thread_cast(thread_); | |
2557 | ||
2558 | free(thread); | |
2559 | } | |
2560 | ||
704a1e09 | 2561 | static int |
ac64794a | 2562 | dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_, |
8bb113da | 2563 | struct dpif_flow *flows, int max_flows) |
ac64794a BP |
2564 | { |
2565 | struct dpif_netdev_flow_dump_thread *thread | |
2566 | = dpif_netdev_flow_dump_thread_cast(thread_); | |
2567 | struct dpif_netdev_flow_dump *dump = thread->dump; | |
8bb113da | 2568 | struct dp_netdev_flow *netdev_flows[FLOW_DUMP_MAX_BATCH]; |
8bb113da RW |
2569 | int n_flows = 0; |
2570 | int i; | |
14608a15 | 2571 | |
ac64794a | 2572 | ovs_mutex_lock(&dump->mutex); |
8bb113da | 2573 | if (!dump->status) { |
1c1e46ed AW |
2574 | struct dpif_netdev *dpif = dpif_netdev_cast(thread->up.dpif); |
2575 | struct dp_netdev *dp = get_dp_netdev(&dpif->dpif); | |
2576 | struct dp_netdev_pmd_thread *pmd = dump->cur_pmd; | |
2577 | int flow_limit = MIN(max_flows, FLOW_DUMP_MAX_BATCH); | |
2578 | ||
2579 | /* First call to dump_next(), extracts the first pmd thread. | |
2580 | * If there is no pmd thread, returns immediately. */ | |
2581 | if (!pmd) { | |
2582 | pmd = dp_netdev_pmd_get_next(dp, &dump->poll_thread_pos); | |
2583 | if (!pmd) { | |
2584 | ovs_mutex_unlock(&dump->mutex); | |
2585 | return n_flows; | |
8bb113da | 2586 | |
8bb113da | 2587 | } |
d2ad7ef1 | 2588 | } |
1c1e46ed AW |
2589 | |
2590 | do { | |
2591 | for (n_flows = 0; n_flows < flow_limit; n_flows++) { | |
2592 | struct cmap_node *node; | |
2593 | ||
2594 | node = cmap_next_position(&pmd->flow_table, &dump->flow_pos); | |
2595 | if (!node) { | |
2596 | break; | |
2597 | } | |
2598 | netdev_flows[n_flows] = CONTAINER_OF(node, | |
2599 | struct dp_netdev_flow, | |
2600 | node); | |
2601 | } | |
2602 | /* When finishing dumping the current pmd thread, moves to | |
2603 | * the next. */ | |
2604 | if (n_flows < flow_limit) { | |
2605 | memset(&dump->flow_pos, 0, sizeof dump->flow_pos); | |
2606 | dp_netdev_pmd_unref(pmd); | |
2607 | pmd = dp_netdev_pmd_get_next(dp, &dump->poll_thread_pos); | |
2608 | if (!pmd) { | |
2609 | dump->status = EOF; | |
2610 | break; | |
2611 | } | |
2612 | } | |
2613 | /* Keeps the reference to next caller. */ | |
2614 | dump->cur_pmd = pmd; | |
2615 | ||
2616 | /* If the current dump is empty, do not exit the loop, since the | |
2617 | * remaining pmds could have flows to be dumped. Just dumps again | |
2618 | * on the new 'pmd'. */ | |
2619 | } while (!n_flows); | |
8a4e3a85 | 2620 | } |
ac64794a | 2621 | ovs_mutex_unlock(&dump->mutex); |
ac64794a | 2622 | |
8bb113da RW |
2623 | for (i = 0; i < n_flows; i++) { |
2624 | struct odputil_keybuf *maskbuf = &thread->maskbuf[i]; | |
2625 | struct odputil_keybuf *keybuf = &thread->keybuf[i]; | |
2626 | struct dp_netdev_flow *netdev_flow = netdev_flows[i]; | |
2627 | struct dpif_flow *f = &flows[i]; | |
7af12bd7 | 2628 | struct ofpbuf key, mask; |
8bb113da | 2629 | |
7af12bd7 JS |
2630 | ofpbuf_use_stack(&key, keybuf, sizeof *keybuf); |
2631 | ofpbuf_use_stack(&mask, maskbuf, sizeof *maskbuf); | |
64bb477f JS |
2632 | dp_netdev_flow_to_dpif_flow(netdev_flow, &key, &mask, f, |
2633 | dump->up.terse); | |
8bb113da | 2634 | } |
feebdea2 | 2635 | |
8bb113da | 2636 | return n_flows; |
72865317 BP |
2637 | } |
2638 | ||
2639 | static int | |
758c456d | 2640 | dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute) |
65f13b50 | 2641 | OVS_NO_THREAD_SAFETY_ANALYSIS |
72865317 BP |
2642 | { |
2643 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
65f13b50 | 2644 | struct dp_netdev_pmd_thread *pmd; |
1895cc8d | 2645 | struct dp_packet_batch pp; |
72865317 | 2646 | |
cf62fa4c PS |
2647 | if (dp_packet_size(execute->packet) < ETH_HEADER_LEN || |
2648 | dp_packet_size(execute->packet) > UINT16_MAX) { | |
72865317 BP |
2649 | return EINVAL; |
2650 | } | |
2651 | ||
65f13b50 AW |
2652 | /* Tries finding the 'pmd'. If NULL is returned, that means |
2653 | * the current thread is a non-pmd thread and should use | |
b19befae | 2654 | * dp_netdev_get_pmd(dp, NON_PMD_CORE_ID). */ |
65f13b50 AW |
2655 | pmd = ovsthread_getspecific(dp->per_pmd_key); |
2656 | if (!pmd) { | |
b19befae | 2657 | pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID); |
546e57d4 DDP |
2658 | if (!pmd) { |
2659 | return EBUSY; | |
2660 | } | |
65f13b50 AW |
2661 | } |
2662 | ||
05267613 AZ |
2663 | if (execute->probe) { |
2664 | /* If this is part of a probe, Drop the packet, since executing | |
2665 | * the action may actually cause spurious packets be sent into | |
2666 | * the network. */ | |
2667 | return 0; | |
2668 | } | |
2669 | ||
65f13b50 AW |
2670 | /* If the current thread is non-pmd thread, acquires |
2671 | * the 'non_pmd_mutex'. */ | |
2672 | if (pmd->core_id == NON_PMD_CORE_ID) { | |
2673 | ovs_mutex_lock(&dp->non_pmd_mutex); | |
2674 | } | |
1c1e46ed | 2675 | |
36d8de17 DDP |
2676 | /* The action processing expects the RSS hash to be valid, because |
2677 | * it's always initialized at the beginning of datapath processing. | |
2678 | * In this case, though, 'execute->packet' may not have gone through | |
2679 | * the datapath at all, it may have been generated by the upper layer | |
2680 | * (OpenFlow packet-out, BFD frame, ...). */ | |
2681 | if (!dp_packet_rss_valid(execute->packet)) { | |
2682 | dp_packet_set_rss_hash(execute->packet, | |
2683 | flow_hash_5tuple(execute->flow, 0)); | |
2684 | } | |
2685 | ||
72c84bc2 | 2686 | dp_packet_batch_init_packet(&pp, execute->packet); |
66e4ad8a DDP |
2687 | dp_netdev_execute_actions(pmd, &pp, false, execute->flow, |
2688 | execute->actions, execute->actions_len, | |
2689 | time_msec()); | |
36d8de17 | 2690 | |
65f13b50 AW |
2691 | if (pmd->core_id == NON_PMD_CORE_ID) { |
2692 | ovs_mutex_unlock(&dp->non_pmd_mutex); | |
e9985d6a | 2693 | dp_netdev_pmd_unref(pmd); |
65f13b50 | 2694 | } |
8a4e3a85 | 2695 | |
758c456d | 2696 | return 0; |
72865317 BP |
2697 | } |
2698 | ||
1a0c894a BP |
2699 | static void |
2700 | dpif_netdev_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops) | |
2701 | { | |
2702 | size_t i; | |
2703 | ||
2704 | for (i = 0; i < n_ops; i++) { | |
2705 | struct dpif_op *op = ops[i]; | |
2706 | ||
2707 | switch (op->type) { | |
2708 | case DPIF_OP_FLOW_PUT: | |
2709 | op->error = dpif_netdev_flow_put(dpif, &op->u.flow_put); | |
2710 | break; | |
2711 | ||
2712 | case DPIF_OP_FLOW_DEL: | |
2713 | op->error = dpif_netdev_flow_del(dpif, &op->u.flow_del); | |
2714 | break; | |
2715 | ||
2716 | case DPIF_OP_EXECUTE: | |
2717 | op->error = dpif_netdev_execute(dpif, &op->u.execute); | |
2718 | break; | |
6fe09f8c JS |
2719 | |
2720 | case DPIF_OP_FLOW_GET: | |
2721 | op->error = dpif_netdev_flow_get(dpif, &op->u.flow_get); | |
2722 | break; | |
1a0c894a BP |
2723 | } |
2724 | } | |
2725 | } | |
2726 | ||
d4f6865c DDP |
2727 | /* Applies datapath configuration from the database. Some of the changes are |
2728 | * actually applied in dpif_netdev_run(). */ | |
f2eee189 | 2729 | static int |
d4f6865c | 2730 | dpif_netdev_set_config(struct dpif *dpif, const struct smap *other_config) |
f2eee189 AW |
2731 | { |
2732 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
d4f6865c | 2733 | const char *cmask = smap_get(other_config, "pmd-cpu-mask"); |
f2eee189 | 2734 | |
a6a426d6 IM |
2735 | if (!nullable_string_is_equal(dp->pmd_cmask, cmask)) { |
2736 | free(dp->pmd_cmask); | |
2737 | dp->pmd_cmask = nullable_xstrdup(cmask); | |
2738 | dp_netdev_request_reconfigure(dp); | |
f2eee189 AW |
2739 | } |
2740 | ||
2741 | return 0; | |
2742 | } | |
2743 | ||
3eb67853 IM |
2744 | /* Parses affinity list and returns result in 'core_ids'. */ |
2745 | static int | |
2746 | parse_affinity_list(const char *affinity_list, unsigned *core_ids, int n_rxq) | |
2747 | { | |
2748 | unsigned i; | |
2749 | char *list, *copy, *key, *value; | |
2750 | int error = 0; | |
2751 | ||
2752 | for (i = 0; i < n_rxq; i++) { | |
51c37a56 | 2753 | core_ids[i] = OVS_CORE_UNSPEC; |
3eb67853 IM |
2754 | } |
2755 | ||
2756 | if (!affinity_list) { | |
2757 | return 0; | |
2758 | } | |
2759 | ||
2760 | list = copy = xstrdup(affinity_list); | |
2761 | ||
2762 | while (ofputil_parse_key_value(&list, &key, &value)) { | |
2763 | int rxq_id, core_id; | |
2764 | ||
2765 | if (!str_to_int(key, 0, &rxq_id) || rxq_id < 0 | |
2766 | || !str_to_int(value, 0, &core_id) || core_id < 0) { | |
2767 | error = EINVAL; | |
2768 | break; | |
2769 | } | |
2770 | ||
2771 | if (rxq_id < n_rxq) { | |
2772 | core_ids[rxq_id] = core_id; | |
2773 | } | |
2774 | } | |
2775 | ||
2776 | free(copy); | |
2777 | return error; | |
2778 | } | |
2779 | ||
2780 | /* Parses 'affinity_list' and applies configuration if it is valid. */ | |
2781 | static int | |
2782 | dpif_netdev_port_set_rxq_affinity(struct dp_netdev_port *port, | |
2783 | const char *affinity_list) | |
2784 | { | |
2785 | unsigned *core_ids, i; | |
2786 | int error = 0; | |
2787 | ||
2788 | core_ids = xmalloc(port->n_rxq * sizeof *core_ids); | |
2789 | if (parse_affinity_list(affinity_list, core_ids, port->n_rxq)) { | |
2790 | error = EINVAL; | |
2791 | goto exit; | |
2792 | } | |
2793 | ||
2794 | for (i = 0; i < port->n_rxq; i++) { | |
2795 | port->rxqs[i].core_id = core_ids[i]; | |
2796 | } | |
2797 | ||
2798 | exit: | |
2799 | free(core_ids); | |
2800 | return error; | |
2801 | } | |
2802 | ||
2803 | /* Changes the affinity of port's rx queues. The changes are actually applied | |
2804 | * in dpif_netdev_run(). */ | |
2805 | static int | |
2806 | dpif_netdev_port_set_config(struct dpif *dpif, odp_port_t port_no, | |
2807 | const struct smap *cfg) | |
2808 | { | |
2809 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
2810 | struct dp_netdev_port *port; | |
2811 | int error = 0; | |
2812 | const char *affinity_list = smap_get(cfg, "pmd-rxq-affinity"); | |
2813 | ||
2814 | ovs_mutex_lock(&dp->port_mutex); | |
2815 | error = get_port_by_number(dp, port_no, &port); | |
2816 | if (error || !netdev_is_pmd(port->netdev) | |
2817 | || nullable_string_is_equal(affinity_list, port->rxq_affinity_list)) { | |
2818 | goto unlock; | |
2819 | } | |
2820 | ||
2821 | error = dpif_netdev_port_set_rxq_affinity(port, affinity_list); | |
2822 | if (error) { | |
2823 | goto unlock; | |
2824 | } | |
2825 | free(port->rxq_affinity_list); | |
2826 | port->rxq_affinity_list = nullable_xstrdup(affinity_list); | |
2827 | ||
2828 | dp_netdev_request_reconfigure(dp); | |
2829 | unlock: | |
2830 | ovs_mutex_unlock(&dp->port_mutex); | |
2831 | return error; | |
2832 | } | |
2833 | ||
5bf93d67 EJ |
2834 | static int |
2835 | dpif_netdev_queue_to_priority(const struct dpif *dpif OVS_UNUSED, | |
2836 | uint32_t queue_id, uint32_t *priority) | |
2837 | { | |
2838 | *priority = queue_id; | |
2839 | return 0; | |
2840 | } | |
2841 | ||
72865317 | 2842 | \f |
9ff55ae2 DDP |
2843 | /* Creates and returns a new 'struct dp_netdev_actions', whose actions are |
2844 | * a copy of the 'ofpacts_len' bytes of 'ofpacts'. */ | |
a84cb64a BP |
2845 | struct dp_netdev_actions * |
2846 | dp_netdev_actions_create(const struct nlattr *actions, size_t size) | |
2847 | { | |
2848 | struct dp_netdev_actions *netdev_actions; | |
2849 | ||
9ff55ae2 DDP |
2850 | netdev_actions = xmalloc(sizeof *netdev_actions + size); |
2851 | memcpy(netdev_actions->actions, actions, size); | |
a84cb64a BP |
2852 | netdev_actions->size = size; |
2853 | ||
2854 | return netdev_actions; | |
2855 | } | |
2856 | ||
a84cb64a | 2857 | struct dp_netdev_actions * |
61e7deb1 | 2858 | dp_netdev_flow_get_actions(const struct dp_netdev_flow *flow) |
a84cb64a | 2859 | { |
61e7deb1 | 2860 | return ovsrcu_get(struct dp_netdev_actions *, &flow->actions); |
a84cb64a BP |
2861 | } |
2862 | ||
61e7deb1 BP |
2863 | static void |
2864 | dp_netdev_actions_free(struct dp_netdev_actions *actions) | |
a84cb64a | 2865 | { |
61e7deb1 | 2866 | free(actions); |
a84cb64a BP |
2867 | } |
2868 | \f | |
55e3ca97 DDP |
2869 | static inline unsigned long long |
2870 | cycles_counter(void) | |
2871 | { | |
2872 | #ifdef DPDK_NETDEV | |
2873 | return rte_get_tsc_cycles(); | |
2874 | #else | |
2875 | return 0; | |
2876 | #endif | |
2877 | } | |
2878 | ||
2879 | /* Fake mutex to make sure that the calls to cycles_count_* are balanced */ | |
2880 | extern struct ovs_mutex cycles_counter_fake_mutex; | |
2881 | ||
2882 | /* Start counting cycles. Must be followed by 'cycles_count_end()' */ | |
2883 | static inline void | |
2884 | cycles_count_start(struct dp_netdev_pmd_thread *pmd) | |
2885 | OVS_ACQUIRES(&cycles_counter_fake_mutex) | |
2886 | OVS_NO_THREAD_SAFETY_ANALYSIS | |
2887 | { | |
2888 | pmd->last_cycles = cycles_counter(); | |
2889 | } | |
2890 | ||
2891 | /* Stop counting cycles and add them to the counter 'type' */ | |
2892 | static inline void | |
2893 | cycles_count_end(struct dp_netdev_pmd_thread *pmd, | |
2894 | enum pmd_cycles_counter_type type) | |
2895 | OVS_RELEASES(&cycles_counter_fake_mutex) | |
2896 | OVS_NO_THREAD_SAFETY_ANALYSIS | |
2897 | { | |
2898 | unsigned long long interval = cycles_counter() - pmd->last_cycles; | |
2899 | ||
2900 | non_atomic_ullong_add(&pmd->cycles.n[type], interval); | |
2901 | } | |
e4cfed38 | 2902 | |
5794e276 | 2903 | static void |
65f13b50 | 2904 | dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd, |
947dc567 DDP |
2905 | struct netdev_rxq *rx, |
2906 | odp_port_t port_no) | |
e4cfed38 | 2907 | { |
1895cc8d PS |
2908 | struct dp_packet_batch batch; |
2909 | int error; | |
e4cfed38 | 2910 | |
1895cc8d | 2911 | dp_packet_batch_init(&batch); |
55e3ca97 | 2912 | cycles_count_start(pmd); |
947dc567 | 2913 | error = netdev_rxq_recv(rx, &batch); |
55e3ca97 | 2914 | cycles_count_end(pmd, PMD_CYCLES_POLLING); |
e4cfed38 | 2915 | if (!error) { |
3c33f0ff | 2916 | *recirc_depth_get() = 0; |
41ccaa24 | 2917 | |
55e3ca97 | 2918 | cycles_count_start(pmd); |
947dc567 | 2919 | dp_netdev_input(pmd, &batch, port_no); |
55e3ca97 | 2920 | cycles_count_end(pmd, PMD_CYCLES_PROCESSING); |
e4cfed38 | 2921 | } else if (error != EAGAIN && error != EOPNOTSUPP) { |
3c33f0ff | 2922 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); |
e4cfed38 PS |
2923 | |
2924 | VLOG_ERR_RL(&rl, "error receiving data from %s: %s", | |
947dc567 | 2925 | netdev_rxq_get_name(rx), ovs_strerror(error)); |
e4cfed38 PS |
2926 | } |
2927 | } | |
2928 | ||
e32971b8 DDP |
2929 | static struct tx_port * |
2930 | tx_port_lookup(const struct hmap *hmap, odp_port_t port_no) | |
2931 | { | |
2932 | struct tx_port *tx; | |
2933 | ||
2934 | HMAP_FOR_EACH_IN_BUCKET (tx, node, hash_port_no(port_no), hmap) { | |
2935 | if (tx->port->port_no == port_no) { | |
2936 | return tx; | |
2937 | } | |
2938 | } | |
2939 | ||
2940 | return NULL; | |
2941 | } | |
2942 | ||
dc36593c DDP |
2943 | static int |
2944 | port_reconfigure(struct dp_netdev_port *port) | |
2945 | { | |
2946 | struct netdev *netdev = port->netdev; | |
dc36593c DDP |
2947 | int i, err; |
2948 | ||
e32971b8 | 2949 | port->need_reconfigure = false; |
dc36593c DDP |
2950 | |
2951 | /* Closes the existing 'rxq's. */ | |
2952 | for (i = 0; i < port->n_rxq; i++) { | |
947dc567 DDP |
2953 | netdev_rxq_close(port->rxqs[i].rx); |
2954 | port->rxqs[i].rx = NULL; | |
dc36593c DDP |
2955 | } |
2956 | port->n_rxq = 0; | |
2957 | ||
050c60bf | 2958 | /* Allows 'netdev' to apply the pending configuration changes. */ |
e32971b8 DDP |
2959 | if (netdev_is_reconf_required(netdev)) { |
2960 | err = netdev_reconfigure(netdev); | |
2961 | if (err && (err != EOPNOTSUPP)) { | |
2962 | VLOG_ERR("Failed to set interface %s new configuration", | |
2963 | netdev_get_name(netdev)); | |
2964 | return err; | |
2965 | } | |
dc36593c | 2966 | } |
050c60bf | 2967 | /* If the netdev_reconfigure() above succeeds, reopens the 'rxq's. */ |
3eb67853 IM |
2968 | port->rxqs = xrealloc(port->rxqs, |
2969 | sizeof *port->rxqs * netdev_n_rxq(netdev)); | |
324c8374 IM |
2970 | /* Realloc 'used' counters for tx queues. */ |
2971 | free(port->txq_used); | |
2972 | port->txq_used = xcalloc(netdev_n_txq(netdev), sizeof *port->txq_used); | |
2973 | ||
dc36593c | 2974 | for (i = 0; i < netdev_n_rxq(netdev); i++) { |
947dc567 DDP |
2975 | port->rxqs[i].port = port; |
2976 | err = netdev_rxq_open(netdev, &port->rxqs[i].rx, i); | |
dc36593c DDP |
2977 | if (err) { |
2978 | return err; | |
2979 | } | |
2980 | port->n_rxq++; | |
2981 | } | |
2982 | ||
3eb67853 IM |
2983 | /* Parse affinity list to apply configuration for new queues. */ |
2984 | dpif_netdev_port_set_rxq_affinity(port, port->rxq_affinity_list); | |
2985 | ||
dc36593c DDP |
2986 | return 0; |
2987 | } | |
2988 | ||
e32971b8 DDP |
2989 | struct rr_numa_list { |
2990 | struct hmap numas; /* Contains 'struct rr_numa' */ | |
2991 | }; | |
2992 | ||
2993 | struct rr_numa { | |
2994 | struct hmap_node node; | |
2995 | ||
2996 | int numa_id; | |
2997 | ||
2998 | /* Non isolated pmds on numa node 'numa_id' */ | |
2999 | struct dp_netdev_pmd_thread **pmds; | |
3000 | int n_pmds; | |
3001 | ||
3002 | int cur_index; | |
3003 | }; | |
3004 | ||
3005 | static struct rr_numa * | |
3006 | rr_numa_list_lookup(struct rr_numa_list *rr, int numa_id) | |
3007 | { | |
3008 | struct rr_numa *numa; | |
3009 | ||
3010 | HMAP_FOR_EACH_WITH_HASH (numa, node, hash_int(numa_id, 0), &rr->numas) { | |
3011 | if (numa->numa_id == numa_id) { | |
3012 | return numa; | |
3013 | } | |
3014 | } | |
3015 | ||
3016 | return NULL; | |
3017 | } | |
3018 | ||
3019 | static void | |
3020 | rr_numa_list_populate(struct dp_netdev *dp, struct rr_numa_list *rr) | |
3021 | { | |
3022 | struct dp_netdev_pmd_thread *pmd; | |
3023 | struct rr_numa *numa; | |
3024 | ||
3025 | hmap_init(&rr->numas); | |
3026 | ||
3027 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
3028 | if (pmd->core_id == NON_PMD_CORE_ID || pmd->isolated) { | |
3029 | continue; | |
3030 | } | |
3031 | ||
3032 | numa = rr_numa_list_lookup(rr, pmd->numa_id); | |
3033 | if (!numa) { | |
3034 | numa = xzalloc(sizeof *numa); | |
3035 | numa->numa_id = pmd->numa_id; | |
3036 | hmap_insert(&rr->numas, &numa->node, hash_int(pmd->numa_id, 0)); | |
3037 | } | |
3038 | numa->n_pmds++; | |
3039 | numa->pmds = xrealloc(numa->pmds, numa->n_pmds * sizeof *numa->pmds); | |
3040 | numa->pmds[numa->n_pmds - 1] = pmd; | |
3041 | } | |
3042 | } | |
3043 | ||
3044 | static struct dp_netdev_pmd_thread * | |
3045 | rr_numa_get_pmd(struct rr_numa *numa) | |
3046 | { | |
3047 | return numa->pmds[numa->cur_index++ % numa->n_pmds]; | |
3048 | } | |
3049 | ||
3050 | static void | |
3051 | rr_numa_list_destroy(struct rr_numa_list *rr) | |
3052 | { | |
3053 | struct rr_numa *numa; | |
3054 | ||
3055 | HMAP_FOR_EACH_POP (numa, node, &rr->numas) { | |
3056 | free(numa->pmds); | |
3057 | free(numa); | |
3058 | } | |
3059 | hmap_destroy(&rr->numas); | |
3060 | } | |
3061 | ||
3062 | /* Assign pmds to queues. If 'pinned' is true, assign pmds to pinned | |
3063 | * queues and marks the pmds as isolated. Otherwise, assign non isolated | |
3064 | * pmds to unpinned queues. | |
3065 | * | |
3066 | * The function doesn't touch the pmd threads, it just stores the assignment | |
3067 | * in the 'pmd' member of each rxq. */ | |
3068 | static void | |
3069 | rxq_scheduling(struct dp_netdev *dp, bool pinned) OVS_REQUIRES(dp->port_mutex) | |
3070 | { | |
3071 | struct dp_netdev_port *port; | |
3072 | struct rr_numa_list rr; | |
3073 | ||
3074 | rr_numa_list_populate(dp, &rr); | |
3075 | ||
3076 | HMAP_FOR_EACH (port, node, &dp->ports) { | |
3077 | struct rr_numa *numa; | |
3078 | int numa_id; | |
3079 | ||
3080 | if (!netdev_is_pmd(port->netdev)) { | |
3081 | continue; | |
3082 | } | |
3083 | ||
3084 | numa_id = netdev_get_numa_id(port->netdev); | |
3085 | numa = rr_numa_list_lookup(&rr, numa_id); | |
3086 | ||
3087 | for (int qid = 0; qid < port->n_rxq; qid++) { | |
3088 | struct dp_netdev_rxq *q = &port->rxqs[qid]; | |
3089 | ||
3090 | if (pinned && q->core_id != OVS_CORE_UNSPEC) { | |
3091 | struct dp_netdev_pmd_thread *pmd; | |
3092 | ||
3093 | pmd = dp_netdev_get_pmd(dp, q->core_id); | |
3094 | if (!pmd) { | |
3095 | VLOG_WARN("There is no PMD thread on core %d. Queue " | |
3096 | "%d on port \'%s\' will not be polled.", | |
3097 | q->core_id, qid, netdev_get_name(port->netdev)); | |
3098 | } else { | |
3099 | q->pmd = pmd; | |
3100 | pmd->isolated = true; | |
3101 | dp_netdev_pmd_unref(pmd); | |
3102 | } | |
3103 | } else if (!pinned && q->core_id == OVS_CORE_UNSPEC) { | |
3104 | if (!numa) { | |
3105 | VLOG_WARN("There's no available (non isolated) pmd thread " | |
3106 | "on numa node %d. Queue %d on port \'%s\' will " | |
3107 | "not be polled.", | |
3108 | numa_id, qid, netdev_get_name(port->netdev)); | |
3109 | } else { | |
3110 | q->pmd = rr_numa_get_pmd(numa); | |
3111 | } | |
3112 | } | |
3113 | } | |
3114 | } | |
3115 | ||
3116 | rr_numa_list_destroy(&rr); | |
3117 | } | |
3118 | ||
6e3c6fa4 DDP |
3119 | static void |
3120 | reconfigure_pmd_threads(struct dp_netdev *dp) | |
3121 | OVS_REQUIRES(dp->port_mutex) | |
3122 | { | |
e32971b8 DDP |
3123 | struct dp_netdev_pmd_thread *pmd; |
3124 | struct ovs_numa_dump *pmd_cores; | |
3125 | bool changed = false; | |
3126 | ||
3127 | /* The pmd threads should be started only if there's a pmd port in the | |
3128 | * datapath. If the user didn't provide any "pmd-cpu-mask", we start | |
3129 | * NR_PMD_THREADS per numa node. */ | |
3130 | if (!has_pmd_port(dp)) { | |
3131 | pmd_cores = ovs_numa_dump_n_cores_per_numa(0); | |
3132 | } else if (dp->pmd_cmask && dp->pmd_cmask[0]) { | |
3133 | pmd_cores = ovs_numa_dump_cores_with_cmask(dp->pmd_cmask); | |
3134 | } else { | |
3135 | pmd_cores = ovs_numa_dump_n_cores_per_numa(NR_PMD_THREADS); | |
3136 | } | |
3137 | ||
3138 | /* Check for changed configuration */ | |
3139 | if (ovs_numa_dump_count(pmd_cores) != cmap_count(&dp->poll_threads) - 1) { | |
3140 | changed = true; | |
3141 | } else { | |
3142 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
3143 | if (pmd->core_id != NON_PMD_CORE_ID | |
3144 | && !ovs_numa_dump_contains_core(pmd_cores, | |
3145 | pmd->numa_id, | |
3146 | pmd->core_id)) { | |
3147 | changed = true; | |
3148 | break; | |
3149 | } | |
3150 | } | |
3151 | } | |
3152 | ||
3153 | /* Destroy the old and recreate the new pmd threads. We don't perform an | |
3154 | * incremental update because we would have to adjust 'static_tx_qid'. */ | |
3155 | if (changed) { | |
3156 | struct ovs_numa_info_core *core; | |
3157 | struct ovs_numa_info_numa *numa; | |
3158 | ||
3159 | /* Do not destroy the non pmd thread. */ | |
3160 | dp_netdev_destroy_all_pmds(dp, false); | |
3161 | FOR_EACH_CORE_ON_DUMP (core, pmd_cores) { | |
3162 | struct dp_netdev_pmd_thread *pmd = xzalloc(sizeof *pmd); | |
3163 | ||
3164 | dp_netdev_configure_pmd(pmd, dp, core->core_id, core->numa_id); | |
3165 | ||
3166 | pmd->thread = ovs_thread_create("pmd", pmd_thread_main, pmd); | |
3167 | } | |
3168 | ||
3169 | /* Log the number of pmd threads per numa node. */ | |
3170 | FOR_EACH_NUMA_ON_DUMP (numa, pmd_cores) { | |
3171 | VLOG_INFO("Created %"PRIuSIZE" pmd threads on numa node %d", | |
3172 | numa->n_cores, numa->numa_id); | |
3173 | } | |
3174 | } | |
3175 | ||
3176 | ovs_numa_dump_destroy(pmd_cores); | |
3177 | } | |
3178 | ||
3179 | static void | |
3180 | reload_affected_pmds(struct dp_netdev *dp) | |
3181 | { | |
3182 | struct dp_netdev_pmd_thread *pmd; | |
3183 | ||
3184 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
3185 | if (pmd->need_reload) { | |
3186 | dp_netdev_reload_pmd__(pmd); | |
3187 | pmd->need_reload = false; | |
3188 | } | |
3189 | } | |
3190 | } | |
3191 | ||
3192 | static void | |
3193 | pmd_remove_stale_ports(struct dp_netdev *dp, | |
3194 | struct dp_netdev_pmd_thread *pmd) | |
3195 | OVS_EXCLUDED(pmd->port_mutex) | |
3196 | OVS_REQUIRES(dp->port_mutex) | |
3197 | { | |
3198 | struct rxq_poll *poll, *poll_next; | |
3199 | struct tx_port *tx, *tx_next; | |
3200 | ||
3201 | ovs_mutex_lock(&pmd->port_mutex); | |
3202 | HMAP_FOR_EACH_SAFE (poll, poll_next, node, &pmd->poll_list) { | |
3203 | struct dp_netdev_port *port = poll->rxq->port; | |
3204 | ||
3205 | if (port->need_reconfigure | |
3206 | || !hmap_contains(&dp->ports, &port->node)) { | |
3207 | dp_netdev_del_rxq_from_pmd(pmd, poll); | |
3208 | } | |
3209 | } | |
3210 | HMAP_FOR_EACH_SAFE (tx, tx_next, node, &pmd->tx_ports) { | |
3211 | struct dp_netdev_port *port = tx->port; | |
3212 | ||
3213 | if (port->need_reconfigure | |
3214 | || !hmap_contains(&dp->ports, &port->node)) { | |
3215 | dp_netdev_del_port_tx_from_pmd(pmd, tx); | |
3216 | } | |
3217 | } | |
3218 | ovs_mutex_unlock(&pmd->port_mutex); | |
3219 | } | |
3220 | ||
3221 | /* Must be called each time a port is added/removed or the cmask changes. | |
3222 | * This creates and destroys pmd threads, reconfigures ports, opens their | |
3223 | * rxqs and assigns all rxqs/txqs to pmd threads. */ | |
3224 | static void | |
3225 | reconfigure_datapath(struct dp_netdev *dp) | |
3226 | OVS_REQUIRES(dp->port_mutex) | |
3227 | { | |
3228 | struct dp_netdev_pmd_thread *pmd; | |
3229 | struct dp_netdev_port *port; | |
3230 | int wanted_txqs; | |
6e3c6fa4 | 3231 | |
a6a426d6 IM |
3232 | dp->last_reconfigure_seq = seq_read(dp->reconfigure_seq); |
3233 | ||
e32971b8 DDP |
3234 | /* Step 1: Adjust the pmd threads based on the datapath ports, the cores |
3235 | * on the system and the user configuration. */ | |
3236 | reconfigure_pmd_threads(dp); | |
6e3c6fa4 | 3237 | |
e32971b8 | 3238 | wanted_txqs = cmap_count(&dp->poll_threads); |
324c8374 | 3239 | |
e32971b8 DDP |
3240 | /* The number of pmd threads might have changed, or a port can be new: |
3241 | * adjust the txqs. */ | |
3242 | HMAP_FOR_EACH (port, node, &dp->ports) { | |
3243 | netdev_set_tx_multiq(port->netdev, wanted_txqs); | |
324c8374 IM |
3244 | } |
3245 | ||
e32971b8 DDP |
3246 | /* Step 2: Remove from the pmd threads ports that have been removed or |
3247 | * need reconfiguration. */ | |
3248 | ||
3249 | /* Check for all the ports that need reconfiguration. We cache this in | |
3250 | * 'port->reconfigure', because netdev_is_reconf_required() can change at | |
3251 | * any time. */ | |
3252 | HMAP_FOR_EACH (port, node, &dp->ports) { | |
3253 | if (netdev_is_reconf_required(port->netdev)) { | |
3254 | port->need_reconfigure = true; | |
3255 | } | |
3256 | } | |
3257 | ||
3258 | /* Remove from the pmd threads all the ports that have been deleted or | |
3259 | * need reconfiguration. */ | |
3260 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
3261 | pmd_remove_stale_ports(dp, pmd); | |
3262 | } | |
3263 | ||
3264 | /* Reload affected pmd threads. We must wait for the pmd threads before | |
3265 | * reconfiguring the ports, because a port cannot be reconfigured while | |
3266 | * it's being used. */ | |
3267 | reload_affected_pmds(dp); | |
3268 | ||
3269 | /* Step 3: Reconfigure ports. */ | |
3270 | ||
3271 | /* We only reconfigure the ports that we determined above, because they're | |
3272 | * not being used by any pmd thread at the moment. If a port fails to | |
3273 | * reconfigure we remove it from the datapath. */ | |
3274 | HMAP_FOR_EACH (port, node, &dp->ports) { | |
dc36593c | 3275 | int err; |
6e3c6fa4 | 3276 | |
e32971b8 DDP |
3277 | if (!port->need_reconfigure) { |
3278 | continue; | |
3279 | } | |
3280 | ||
dc36593c DDP |
3281 | err = port_reconfigure(port); |
3282 | if (err) { | |
3283 | hmap_remove(&dp->ports, &port->node); | |
3284 | seq_change(dp->port_seq); | |
3285 | port_destroy(port); | |
324c8374 | 3286 | } else { |
e32971b8 | 3287 | port->dynamic_txqs = netdev_n_txq(port->netdev) < wanted_txqs; |
6e3c6fa4 DDP |
3288 | } |
3289 | } | |
e32971b8 DDP |
3290 | |
3291 | /* Step 4: Compute new rxq scheduling. We don't touch the pmd threads | |
3292 | * for now, we just update the 'pmd' pointer in each rxq to point to the | |
3293 | * wanted thread according to the scheduling policy. */ | |
3294 | ||
3295 | /* Reset all the pmd threads to non isolated. */ | |
3296 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
3297 | pmd->isolated = false; | |
3298 | } | |
3299 | ||
3300 | /* Reset all the queues to unassigned */ | |
3301 | HMAP_FOR_EACH (port, node, &dp->ports) { | |
3302 | for (int i = 0; i < port->n_rxq; i++) { | |
3303 | port->rxqs[i].pmd = NULL; | |
3304 | } | |
3305 | } | |
3306 | ||
3307 | /* Add pinned queues and mark pmd threads isolated. */ | |
3308 | rxq_scheduling(dp, true); | |
3309 | ||
3310 | /* Add non-pinned queues. */ | |
3311 | rxq_scheduling(dp, false); | |
3312 | ||
3313 | /* Step 5: Remove queues not compliant with new scheduling. */ | |
3314 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
3315 | struct rxq_poll *poll, *poll_next; | |
3316 | ||
3317 | ovs_mutex_lock(&pmd->port_mutex); | |
3318 | HMAP_FOR_EACH_SAFE (poll, poll_next, node, &pmd->poll_list) { | |
3319 | if (poll->rxq->pmd != pmd) { | |
3320 | dp_netdev_del_rxq_from_pmd(pmd, poll); | |
3321 | } | |
3322 | } | |
3323 | ovs_mutex_unlock(&pmd->port_mutex); | |
3324 | } | |
3325 | ||
3326 | /* Reload affected pmd threads. We must wait for the pmd threads to remove | |
3327 | * the old queues before readding them, otherwise a queue can be polled by | |
3328 | * two threads at the same time. */ | |
3329 | reload_affected_pmds(dp); | |
3330 | ||
3331 | /* Step 6: Add queues from scheduling, if they're not there already. */ | |
3332 | HMAP_FOR_EACH (port, node, &dp->ports) { | |
3333 | if (!netdev_is_pmd(port->netdev)) { | |
3334 | continue; | |
3335 | } | |
3336 | ||
3337 | for (int qid = 0; qid < port->n_rxq; qid++) { | |
3338 | struct dp_netdev_rxq *q = &port->rxqs[qid]; | |
3339 | ||
3340 | if (q->pmd) { | |
3341 | ovs_mutex_lock(&q->pmd->port_mutex); | |
3342 | dp_netdev_add_rxq_to_pmd(q->pmd, q); | |
3343 | ovs_mutex_unlock(&q->pmd->port_mutex); | |
3344 | } | |
3345 | } | |
3346 | } | |
3347 | ||
3348 | /* Add every port to the tx cache of every pmd thread, if it's not | |
3349 | * there already and if this pmd has at least one rxq to poll. */ | |
3350 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
3351 | ovs_mutex_lock(&pmd->port_mutex); | |
3352 | if (hmap_count(&pmd->poll_list) || pmd->core_id == NON_PMD_CORE_ID) { | |
3353 | HMAP_FOR_EACH (port, node, &dp->ports) { | |
3354 | dp_netdev_add_port_tx_to_pmd(pmd, port); | |
3355 | } | |
3356 | } | |
3357 | ovs_mutex_unlock(&pmd->port_mutex); | |
3358 | } | |
3359 | ||
3360 | /* Reload affected pmd threads. */ | |
3361 | reload_affected_pmds(dp); | |
6e3c6fa4 DDP |
3362 | } |
3363 | ||
050c60bf DDP |
3364 | /* Returns true if one of the netdevs in 'dp' requires a reconfiguration */ |
3365 | static bool | |
3366 | ports_require_restart(const struct dp_netdev *dp) | |
3367 | OVS_REQUIRES(dp->port_mutex) | |
3368 | { | |
3369 | struct dp_netdev_port *port; | |
3370 | ||
3371 | HMAP_FOR_EACH (port, node, &dp->ports) { | |
3372 | if (netdev_is_reconf_required(port->netdev)) { | |
3373 | return true; | |
3374 | } | |
3375 | } | |
3376 | ||
3377 | return false; | |
3378 | } | |
3379 | ||
a36de779 PS |
3380 | /* Return true if needs to revalidate datapath flows. */ |
3381 | static bool | |
e4cfed38 PS |
3382 | dpif_netdev_run(struct dpif *dpif) |
3383 | { | |
3384 | struct dp_netdev_port *port; | |
3385 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
546e57d4 | 3386 | struct dp_netdev_pmd_thread *non_pmd; |
a36de779 | 3387 | uint64_t new_tnl_seq; |
e4cfed38 | 3388 | |
e9985d6a | 3389 | ovs_mutex_lock(&dp->port_mutex); |
546e57d4 DDP |
3390 | non_pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID); |
3391 | if (non_pmd) { | |
3392 | ovs_mutex_lock(&dp->non_pmd_mutex); | |
3393 | HMAP_FOR_EACH (port, node, &dp->ports) { | |
3394 | if (!netdev_is_pmd(port->netdev)) { | |
3395 | int i; | |
55c955bd | 3396 | |
546e57d4 | 3397 | for (i = 0; i < port->n_rxq; i++) { |
947dc567 DDP |
3398 | dp_netdev_process_rxq_port(non_pmd, port->rxqs[i].rx, |
3399 | port->port_no); | |
546e57d4 | 3400 | } |
55c955bd | 3401 | } |
e4cfed38 | 3402 | } |
546e57d4 DDP |
3403 | dpif_netdev_xps_revalidate_pmd(non_pmd, time_msec(), false); |
3404 | ovs_mutex_unlock(&dp->non_pmd_mutex); | |
6e3c6fa4 | 3405 | |
546e57d4 DDP |
3406 | dp_netdev_pmd_unref(non_pmd); |
3407 | } | |
1c1e46ed | 3408 | |
a6a426d6 | 3409 | if (dp_netdev_is_reconf_required(dp) || ports_require_restart(dp)) { |
e32971b8 | 3410 | reconfigure_datapath(dp); |
6e3c6fa4 DDP |
3411 | } |
3412 | ovs_mutex_unlock(&dp->port_mutex); | |
3413 | ||
53902038 | 3414 | tnl_neigh_cache_run(); |
7f9b8504 | 3415 | tnl_port_map_run(); |
a36de779 PS |
3416 | new_tnl_seq = seq_read(tnl_conf_seq); |
3417 | ||
3418 | if (dp->last_tnl_conf_seq != new_tnl_seq) { | |
3419 | dp->last_tnl_conf_seq = new_tnl_seq; | |
3420 | return true; | |
3421 | } | |
3422 | return false; | |
e4cfed38 PS |
3423 | } |
3424 | ||
3425 | static void | |
3426 | dpif_netdev_wait(struct dpif *dpif) | |
3427 | { | |
3428 | struct dp_netdev_port *port; | |
3429 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
3430 | ||
59e6d833 | 3431 | ovs_mutex_lock(&dp_netdev_mutex); |
e9985d6a DDP |
3432 | ovs_mutex_lock(&dp->port_mutex); |
3433 | HMAP_FOR_EACH (port, node, &dp->ports) { | |
050c60bf | 3434 | netdev_wait_reconf_required(port->netdev); |
55c955bd PS |
3435 | if (!netdev_is_pmd(port->netdev)) { |
3436 | int i; | |
3437 | ||
490e82af | 3438 | for (i = 0; i < port->n_rxq; i++) { |
947dc567 | 3439 | netdev_rxq_wait(port->rxqs[i].rx); |
55c955bd | 3440 | } |
e4cfed38 PS |
3441 | } |
3442 | } | |
e9985d6a | 3443 | ovs_mutex_unlock(&dp->port_mutex); |
59e6d833 | 3444 | ovs_mutex_unlock(&dp_netdev_mutex); |
a36de779 | 3445 | seq_wait(tnl_conf_seq, dp->last_tnl_conf_seq); |
e4cfed38 PS |
3446 | } |
3447 | ||
d0cca6c3 DDP |
3448 | static void |
3449 | pmd_free_cached_ports(struct dp_netdev_pmd_thread *pmd) | |
3450 | { | |
3451 | struct tx_port *tx_port_cached; | |
3452 | ||
324c8374 IM |
3453 | /* Free all used tx queue ids. */ |
3454 | dpif_netdev_xps_revalidate_pmd(pmd, 0, true); | |
3455 | ||
57eebbb4 DDP |
3456 | HMAP_FOR_EACH_POP (tx_port_cached, node, &pmd->tnl_port_cache) { |
3457 | free(tx_port_cached); | |
3458 | } | |
3459 | HMAP_FOR_EACH_POP (tx_port_cached, node, &pmd->send_port_cache) { | |
d0cca6c3 DDP |
3460 | free(tx_port_cached); |
3461 | } | |
3462 | } | |
3463 | ||
3464 | /* Copies ports from 'pmd->tx_ports' (shared with the main thread) to | |
3465 | * 'pmd->port_cache' (thread local) */ | |
3466 | static void | |
3467 | pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd) | |
3468 | OVS_REQUIRES(pmd->port_mutex) | |
3469 | { | |
3470 | struct tx_port *tx_port, *tx_port_cached; | |
3471 | ||
3472 | pmd_free_cached_ports(pmd); | |
57eebbb4 DDP |
3473 | hmap_shrink(&pmd->send_port_cache); |
3474 | hmap_shrink(&pmd->tnl_port_cache); | |
d0cca6c3 DDP |
3475 | |
3476 | HMAP_FOR_EACH (tx_port, node, &pmd->tx_ports) { | |
57eebbb4 DDP |
3477 | if (netdev_has_tunnel_push_pop(tx_port->port->netdev)) { |
3478 | tx_port_cached = xmemdup(tx_port, sizeof *tx_port_cached); | |
3479 | hmap_insert(&pmd->tnl_port_cache, &tx_port_cached->node, | |
3480 | hash_port_no(tx_port_cached->port->port_no)); | |
3481 | } | |
3482 | ||
3483 | if (netdev_n_txq(tx_port->port->netdev)) { | |
3484 | tx_port_cached = xmemdup(tx_port, sizeof *tx_port_cached); | |
3485 | hmap_insert(&pmd->send_port_cache, &tx_port_cached->node, | |
3486 | hash_port_no(tx_port_cached->port->port_no)); | |
3487 | } | |
d0cca6c3 DDP |
3488 | } |
3489 | } | |
3490 | ||
e4cfed38 | 3491 | static int |
d0cca6c3 | 3492 | pmd_load_queues_and_ports(struct dp_netdev_pmd_thread *pmd, |
947dc567 | 3493 | struct polled_queue **ppoll_list) |
e4cfed38 | 3494 | { |
947dc567 | 3495 | struct polled_queue *poll_list = *ppoll_list; |
ae7ad0a1 IM |
3496 | struct rxq_poll *poll; |
3497 | int i; | |
e4cfed38 | 3498 | |
d0cca6c3 | 3499 | ovs_mutex_lock(&pmd->port_mutex); |
947dc567 DDP |
3500 | poll_list = xrealloc(poll_list, hmap_count(&pmd->poll_list) |
3501 | * sizeof *poll_list); | |
a1fdee13 | 3502 | |
ae7ad0a1 | 3503 | i = 0; |
947dc567 DDP |
3504 | HMAP_FOR_EACH (poll, node, &pmd->poll_list) { |
3505 | poll_list[i].rx = poll->rxq->rx; | |
3506 | poll_list[i].port_no = poll->rxq->port->port_no; | |
3507 | i++; | |
e4cfed38 | 3508 | } |
d0cca6c3 DDP |
3509 | |
3510 | pmd_load_cached_ports(pmd); | |
3511 | ||
3512 | ovs_mutex_unlock(&pmd->port_mutex); | |
e4cfed38 | 3513 | |
e4cfed38 | 3514 | *ppoll_list = poll_list; |
d42f9307 | 3515 | return i; |
e4cfed38 PS |
3516 | } |
3517 | ||
6c3eee82 | 3518 | static void * |
e4cfed38 | 3519 | pmd_thread_main(void *f_) |
6c3eee82 | 3520 | { |
65f13b50 | 3521 | struct dp_netdev_pmd_thread *pmd = f_; |
e4cfed38 | 3522 | unsigned int lc = 0; |
947dc567 | 3523 | struct polled_queue *poll_list; |
d42f9307 | 3524 | bool exiting; |
e4cfed38 PS |
3525 | int poll_cnt; |
3526 | int i; | |
6c3eee82 | 3527 | |
e4cfed38 PS |
3528 | poll_list = NULL; |
3529 | ||
65f13b50 AW |
3530 | /* Stores the pmd thread's 'pmd' to 'per_pmd_key'. */ |
3531 | ovsthread_setspecific(pmd->dp->per_pmd_key, pmd); | |
6930c7e0 DDP |
3532 | ovs_numa_thread_setaffinity_core(pmd->core_id); |
3533 | dpdk_set_lcore_id(pmd->core_id); | |
d0cca6c3 | 3534 | poll_cnt = pmd_load_queues_and_ports(pmd, &poll_list); |
e4cfed38 | 3535 | reload: |
65f13b50 | 3536 | emc_cache_init(&pmd->flow_cache); |
ae7ad0a1 | 3537 | |
7dd671f0 MK |
3538 | /* List port/core affinity */ |
3539 | for (i = 0; i < poll_cnt; i++) { | |
ce179f11 | 3540 | VLOG_DBG("Core %d processing port \'%s\' with queue-id %d\n", |
947dc567 | 3541 | pmd->core_id, netdev_rxq_get_name(poll_list[i].rx), |
ce179f11 | 3542 | netdev_rxq_get_queue_id(poll_list[i].rx)); |
7dd671f0 MK |
3543 | } |
3544 | ||
2788a1b1 DDP |
3545 | if (!poll_cnt) { |
3546 | while (seq_read(pmd->reload_seq) == pmd->last_reload_seq) { | |
3547 | seq_wait(pmd->reload_seq, pmd->last_reload_seq); | |
3548 | poll_block(); | |
3549 | } | |
3550 | lc = UINT_MAX; | |
3551 | } | |
3552 | ||
e4cfed38 | 3553 | for (;;) { |
e4cfed38 | 3554 | for (i = 0; i < poll_cnt; i++) { |
947dc567 DDP |
3555 | dp_netdev_process_rxq_port(pmd, poll_list[i].rx, |
3556 | poll_list[i].port_no); | |
e4cfed38 PS |
3557 | } |
3558 | ||
3559 | if (lc++ > 1024) { | |
14e3e12a | 3560 | bool reload; |
6c3eee82 | 3561 | |
e4cfed38 | 3562 | lc = 0; |
84067a4c | 3563 | |
fbe0962b | 3564 | coverage_try_clear(); |
3453b4d6 | 3565 | dp_netdev_pmd_try_optimize(pmd); |
9dede5cf FL |
3566 | if (!ovsrcu_try_quiesce()) { |
3567 | emc_cache_slow_sweep(&pmd->flow_cache); | |
3568 | } | |
84067a4c | 3569 | |
14e3e12a DDP |
3570 | atomic_read_relaxed(&pmd->reload, &reload); |
3571 | if (reload) { | |
6c3eee82 BP |
3572 | break; |
3573 | } | |
3574 | } | |
e4cfed38 | 3575 | } |
6c3eee82 | 3576 | |
d0cca6c3 | 3577 | poll_cnt = pmd_load_queues_and_ports(pmd, &poll_list); |
d42f9307 DDP |
3578 | exiting = latch_is_set(&pmd->exit_latch); |
3579 | /* Signal here to make sure the pmd finishes | |
3580 | * reloading the updated configuration. */ | |
3581 | dp_netdev_pmd_reload_done(pmd); | |
3582 | ||
65f13b50 | 3583 | emc_cache_uninit(&pmd->flow_cache); |
9bbf1c3d | 3584 | |
d42f9307 | 3585 | if (!exiting) { |
e4cfed38 PS |
3586 | goto reload; |
3587 | } | |
6c3eee82 | 3588 | |
e4cfed38 | 3589 | free(poll_list); |
d0cca6c3 | 3590 | pmd_free_cached_ports(pmd); |
6c3eee82 BP |
3591 | return NULL; |
3592 | } | |
3593 | ||
6b31e073 RW |
3594 | static void |
3595 | dp_netdev_disable_upcall(struct dp_netdev *dp) | |
3596 | OVS_ACQUIRES(dp->upcall_rwlock) | |
3597 | { | |
3598 | fat_rwlock_wrlock(&dp->upcall_rwlock); | |
3599 | } | |
3600 | ||
3601 | static void | |
3602 | dpif_netdev_disable_upcall(struct dpif *dpif) | |
3603 | OVS_NO_THREAD_SAFETY_ANALYSIS | |
3604 | { | |
3605 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
3606 | dp_netdev_disable_upcall(dp); | |
3607 | } | |
3608 | ||
3609 | static void | |
3610 | dp_netdev_enable_upcall(struct dp_netdev *dp) | |
3611 | OVS_RELEASES(dp->upcall_rwlock) | |
3612 | { | |
3613 | fat_rwlock_unlock(&dp->upcall_rwlock); | |
3614 | } | |
3615 | ||
3616 | static void | |
3617 | dpif_netdev_enable_upcall(struct dpif *dpif) | |
3618 | OVS_NO_THREAD_SAFETY_ANALYSIS | |
3619 | { | |
3620 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
3621 | dp_netdev_enable_upcall(dp); | |
3622 | } | |
3623 | ||
ae7ad0a1 | 3624 | static void |
accf8626 AW |
3625 | dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd) |
3626 | { | |
3627 | ovs_mutex_lock(&pmd->cond_mutex); | |
14e3e12a | 3628 | atomic_store_relaxed(&pmd->reload, false); |
2788a1b1 | 3629 | pmd->last_reload_seq = seq_read(pmd->reload_seq); |
accf8626 AW |
3630 | xpthread_cond_signal(&pmd->cond); |
3631 | ovs_mutex_unlock(&pmd->cond_mutex); | |
3632 | } | |
3633 | ||
1c1e46ed | 3634 | /* Finds and refs the dp_netdev_pmd_thread on core 'core_id'. Returns |
546e57d4 DDP |
3635 | * the pointer if succeeds, otherwise, NULL (it can return NULL even if |
3636 | * 'core_id' is NON_PMD_CORE_ID). | |
1c1e46ed AW |
3637 | * |
3638 | * Caller must unrefs the returned reference. */ | |
65f13b50 | 3639 | static struct dp_netdev_pmd_thread * |
bd5131ba | 3640 | dp_netdev_get_pmd(struct dp_netdev *dp, unsigned core_id) |
65f13b50 AW |
3641 | { |
3642 | struct dp_netdev_pmd_thread *pmd; | |
55847abe | 3643 | const struct cmap_node *pnode; |
65f13b50 | 3644 | |
b19befae | 3645 | pnode = cmap_find(&dp->poll_threads, hash_int(core_id, 0)); |
1c1e46ed AW |
3646 | if (!pnode) { |
3647 | return NULL; | |
3648 | } | |
65f13b50 AW |
3649 | pmd = CONTAINER_OF(pnode, struct dp_netdev_pmd_thread, node); |
3650 | ||
1c1e46ed | 3651 | return dp_netdev_pmd_try_ref(pmd) ? pmd : NULL; |
65f13b50 AW |
3652 | } |
3653 | ||
f2eee189 AW |
3654 | /* Sets the 'struct dp_netdev_pmd_thread' for non-pmd threads. */ |
3655 | static void | |
3656 | dp_netdev_set_nonpmd(struct dp_netdev *dp) | |
e9985d6a | 3657 | OVS_REQUIRES(dp->port_mutex) |
f2eee189 AW |
3658 | { |
3659 | struct dp_netdev_pmd_thread *non_pmd; | |
3660 | ||
3661 | non_pmd = xzalloc(sizeof *non_pmd); | |
00873463 | 3662 | dp_netdev_configure_pmd(non_pmd, dp, NON_PMD_CORE_ID, OVS_NUMA_UNSPEC); |
f2eee189 AW |
3663 | } |
3664 | ||
1c1e46ed AW |
3665 | /* Caller must have valid pointer to 'pmd'. */ |
3666 | static bool | |
3667 | dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd) | |
3668 | { | |
3669 | return ovs_refcount_try_ref_rcu(&pmd->ref_cnt); | |
3670 | } | |
3671 | ||
3672 | static void | |
3673 | dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd) | |
3674 | { | |
3675 | if (pmd && ovs_refcount_unref(&pmd->ref_cnt) == 1) { | |
3676 | ovsrcu_postpone(dp_netdev_destroy_pmd, pmd); | |
3677 | } | |
3678 | } | |
3679 | ||
3680 | /* Given cmap position 'pos', tries to ref the next node. If try_ref() | |
3681 | * fails, keeps checking for next node until reaching the end of cmap. | |
3682 | * | |
3683 | * Caller must unrefs the returned reference. */ | |
3684 | static struct dp_netdev_pmd_thread * | |
3685 | dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos) | |
3686 | { | |
3687 | struct dp_netdev_pmd_thread *next; | |
3688 | ||
3689 | do { | |
3690 | struct cmap_node *node; | |
3691 | ||
3692 | node = cmap_next_position(&dp->poll_threads, pos); | |
3693 | next = node ? CONTAINER_OF(node, struct dp_netdev_pmd_thread, node) | |
3694 | : NULL; | |
3695 | } while (next && !dp_netdev_pmd_try_ref(next)); | |
3696 | ||
3697 | return next; | |
3698 | } | |
3699 | ||
65f13b50 | 3700 | /* Configures the 'pmd' based on the input argument. */ |
6c3eee82 | 3701 | static void |
65f13b50 | 3702 | dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp, |
00873463 | 3703 | unsigned core_id, int numa_id) |
65f13b50 AW |
3704 | { |
3705 | pmd->dp = dp; | |
65f13b50 AW |
3706 | pmd->core_id = core_id; |
3707 | pmd->numa_id = numa_id; | |
e32971b8 | 3708 | pmd->need_reload = false; |
1c1e46ed | 3709 | |
e32971b8 | 3710 | *CONST_CAST(int *, &pmd->static_tx_qid) = cmap_count(&dp->poll_threads); |
347ba9bb | 3711 | |
1c1e46ed | 3712 | ovs_refcount_init(&pmd->ref_cnt); |
65f13b50 | 3713 | latch_init(&pmd->exit_latch); |
2788a1b1 DDP |
3714 | pmd->reload_seq = seq_create(); |
3715 | pmd->last_reload_seq = seq_read(pmd->reload_seq); | |
14e3e12a | 3716 | atomic_init(&pmd->reload, false); |
accf8626 AW |
3717 | xpthread_cond_init(&pmd->cond, NULL); |
3718 | ovs_mutex_init(&pmd->cond_mutex); | |
1c1e46ed | 3719 | ovs_mutex_init(&pmd->flow_mutex); |
d0cca6c3 | 3720 | ovs_mutex_init(&pmd->port_mutex); |
1c1e46ed | 3721 | cmap_init(&pmd->flow_table); |
3453b4d6 JS |
3722 | cmap_init(&pmd->classifiers); |
3723 | pmd->next_optimization = time_msec() + DPCLS_OPTIMIZATION_INTERVAL; | |
947dc567 | 3724 | hmap_init(&pmd->poll_list); |
d0cca6c3 | 3725 | hmap_init(&pmd->tx_ports); |
57eebbb4 DDP |
3726 | hmap_init(&pmd->tnl_port_cache); |
3727 | hmap_init(&pmd->send_port_cache); | |
65f13b50 AW |
3728 | /* init the 'flow_cache' since there is no |
3729 | * actual thread created for NON_PMD_CORE_ID. */ | |
3730 | if (core_id == NON_PMD_CORE_ID) { | |
3731 | emc_cache_init(&pmd->flow_cache); | |
3732 | } | |
3733 | cmap_insert(&dp->poll_threads, CONST_CAST(struct cmap_node *, &pmd->node), | |
3734 | hash_int(core_id, 0)); | |
3735 | } | |
3736 | ||
1c1e46ed AW |
3737 | static void |
3738 | dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd) | |
3739 | { | |
3453b4d6 JS |
3740 | struct dpcls *cls; |
3741 | ||
1c1e46ed | 3742 | dp_netdev_pmd_flow_flush(pmd); |
57eebbb4 DDP |
3743 | hmap_destroy(&pmd->send_port_cache); |
3744 | hmap_destroy(&pmd->tnl_port_cache); | |
d0cca6c3 | 3745 | hmap_destroy(&pmd->tx_ports); |
947dc567 | 3746 | hmap_destroy(&pmd->poll_list); |
3453b4d6 JS |
3747 | /* All flows (including their dpcls_rules) have been deleted already */ |
3748 | CMAP_FOR_EACH (cls, node, &pmd->classifiers) { | |
3749 | dpcls_destroy(cls); | |
7c269972 | 3750 | ovsrcu_postpone(free, cls); |
3453b4d6 JS |
3751 | } |
3752 | cmap_destroy(&pmd->classifiers); | |
1c1e46ed AW |
3753 | cmap_destroy(&pmd->flow_table); |
3754 | ovs_mutex_destroy(&pmd->flow_mutex); | |
3755 | latch_destroy(&pmd->exit_latch); | |
2788a1b1 | 3756 | seq_destroy(pmd->reload_seq); |
1c1e46ed AW |
3757 | xpthread_cond_destroy(&pmd->cond); |
3758 | ovs_mutex_destroy(&pmd->cond_mutex); | |
d0cca6c3 | 3759 | ovs_mutex_destroy(&pmd->port_mutex); |
1c1e46ed AW |
3760 | free(pmd); |
3761 | } | |
3762 | ||
3763 | /* Stops the pmd thread, removes it from the 'dp->poll_threads', | |
3764 | * and unrefs the struct. */ | |
65f13b50 | 3765 | static void |
e4e74c3a | 3766 | dp_netdev_del_pmd(struct dp_netdev *dp, struct dp_netdev_pmd_thread *pmd) |
6c3eee82 | 3767 | { |
d0cca6c3 DDP |
3768 | /* NON_PMD_CORE_ID doesn't have a thread, so we don't have to synchronize, |
3769 | * but extra cleanup is necessary */ | |
65f13b50 | 3770 | if (pmd->core_id == NON_PMD_CORE_ID) { |
febf4a7a | 3771 | ovs_mutex_lock(&dp->non_pmd_mutex); |
65f13b50 | 3772 | emc_cache_uninit(&pmd->flow_cache); |
d0cca6c3 | 3773 | pmd_free_cached_ports(pmd); |
febf4a7a | 3774 | ovs_mutex_unlock(&dp->non_pmd_mutex); |
65f13b50 AW |
3775 | } else { |
3776 | latch_set(&pmd->exit_latch); | |
3777 | dp_netdev_reload_pmd__(pmd); | |
65f13b50 AW |
3778 | xpthread_join(pmd->thread, NULL); |
3779 | } | |
ae7ad0a1 | 3780 | |
d0cca6c3 | 3781 | dp_netdev_pmd_clear_ports(pmd); |
ae7ad0a1 | 3782 | |
e4e74c3a AW |
3783 | /* Purges the 'pmd''s flows after stopping the thread, but before |
3784 | * destroying the flows, so that the flow stats can be collected. */ | |
3785 | if (dp->dp_purge_cb) { | |
3786 | dp->dp_purge_cb(dp->dp_purge_aux, pmd->core_id); | |
3787 | } | |
65f13b50 | 3788 | cmap_remove(&pmd->dp->poll_threads, &pmd->node, hash_int(pmd->core_id, 0)); |
1c1e46ed | 3789 | dp_netdev_pmd_unref(pmd); |
65f13b50 | 3790 | } |
6c3eee82 | 3791 | |
e32971b8 DDP |
3792 | /* Destroys all pmd threads. If 'non_pmd' is true it also destroys the non pmd |
3793 | * thread. */ | |
65f13b50 | 3794 | static void |
e32971b8 | 3795 | dp_netdev_destroy_all_pmds(struct dp_netdev *dp, bool non_pmd) |
65f13b50 AW |
3796 | { |
3797 | struct dp_netdev_pmd_thread *pmd; | |
d916785c DDP |
3798 | struct dp_netdev_pmd_thread **pmd_list; |
3799 | size_t k = 0, n_pmds; | |
3800 | ||
e32971b8 | 3801 | n_pmds = cmap_count(&dp->poll_threads); |
d916785c | 3802 | pmd_list = xcalloc(n_pmds, sizeof *pmd_list); |
65f13b50 AW |
3803 | |
3804 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
e32971b8 | 3805 | if (!non_pmd && pmd->core_id == NON_PMD_CORE_ID) { |
b9584f21 DDP |
3806 | continue; |
3807 | } | |
d916785c DDP |
3808 | /* We cannot call dp_netdev_del_pmd(), since it alters |
3809 | * 'dp->poll_threads' (while we're iterating it) and it | |
3810 | * might quiesce. */ | |
3811 | ovs_assert(k < n_pmds); | |
3812 | pmd_list[k++] = pmd; | |
6c3eee82 | 3813 | } |
d916785c DDP |
3814 | |
3815 | for (size_t i = 0; i < k; i++) { | |
3816 | dp_netdev_del_pmd(dp, pmd_list[i]); | |
3817 | } | |
3818 | free(pmd_list); | |
65f13b50 | 3819 | } |
6c3eee82 | 3820 | |
d0cca6c3 DDP |
3821 | /* Deletes all rx queues from pmd->poll_list and all the ports from |
3822 | * pmd->tx_ports. */ | |
cc245ce8 | 3823 | static void |
d0cca6c3 | 3824 | dp_netdev_pmd_clear_ports(struct dp_netdev_pmd_thread *pmd) |
cc245ce8 IM |
3825 | { |
3826 | struct rxq_poll *poll; | |
d0cca6c3 | 3827 | struct tx_port *port; |
cc245ce8 | 3828 | |
d0cca6c3 | 3829 | ovs_mutex_lock(&pmd->port_mutex); |
947dc567 | 3830 | HMAP_FOR_EACH_POP (poll, node, &pmd->poll_list) { |
cc245ce8 IM |
3831 | free(poll); |
3832 | } | |
d0cca6c3 DDP |
3833 | HMAP_FOR_EACH_POP (port, node, &pmd->tx_ports) { |
3834 | free(port); | |
3835 | } | |
3836 | ovs_mutex_unlock(&pmd->port_mutex); | |
cc245ce8 IM |
3837 | } |
3838 | ||
e32971b8 | 3839 | /* Adds rx queue to poll_list of PMD thread, if it's not there already. */ |
b68872d8 | 3840 | static void |
e32971b8 DDP |
3841 | dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd, |
3842 | struct dp_netdev_rxq *rxq) | |
3843 | OVS_REQUIRES(pmd->port_mutex) | |
b68872d8 | 3844 | { |
e32971b8 DDP |
3845 | int qid = netdev_rxq_get_queue_id(rxq->rx); |
3846 | uint32_t hash = hash_2words(odp_to_u32(rxq->port->port_no), qid); | |
3847 | struct rxq_poll *poll; | |
b68872d8 | 3848 | |
e32971b8 DDP |
3849 | HMAP_FOR_EACH_WITH_HASH (poll, node, hash, &pmd->poll_list) { |
3850 | if (poll->rxq == rxq) { | |
3851 | /* 'rxq' is already polled by this thread. Do nothing. */ | |
3852 | return; | |
d0cca6c3 | 3853 | } |
cc245ce8 | 3854 | } |
cc245ce8 | 3855 | |
e32971b8 DDP |
3856 | poll = xmalloc(sizeof *poll); |
3857 | poll->rxq = rxq; | |
3858 | hmap_insert(&pmd->poll_list, &poll->node, hash); | |
b68872d8 | 3859 | |
e32971b8 | 3860 | pmd->need_reload = true; |
ae7ad0a1 IM |
3861 | } |
3862 | ||
e32971b8 | 3863 | /* Delete 'poll' from poll_list of PMD thread. */ |
ae7ad0a1 | 3864 | static void |
e32971b8 DDP |
3865 | dp_netdev_del_rxq_from_pmd(struct dp_netdev_pmd_thread *pmd, |
3866 | struct rxq_poll *poll) | |
d0cca6c3 | 3867 | OVS_REQUIRES(pmd->port_mutex) |
ae7ad0a1 | 3868 | { |
e32971b8 DDP |
3869 | hmap_remove(&pmd->poll_list, &poll->node); |
3870 | free(poll); | |
ae7ad0a1 | 3871 | |
e32971b8 | 3872 | pmd->need_reload = true; |
ae7ad0a1 IM |
3873 | } |
3874 | ||
d0cca6c3 DDP |
3875 | /* Add 'port' to the tx port cache of 'pmd', which must be reloaded for the |
3876 | * changes to take effect. */ | |
cc245ce8 | 3877 | static void |
d0cca6c3 DDP |
3878 | dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread *pmd, |
3879 | struct dp_netdev_port *port) | |
e32971b8 | 3880 | OVS_REQUIRES(pmd->port_mutex) |
d0cca6c3 | 3881 | { |
57eebbb4 DDP |
3882 | struct tx_port *tx; |
3883 | ||
e32971b8 DDP |
3884 | tx = tx_port_lookup(&pmd->tx_ports, port->port_no); |
3885 | if (tx) { | |
3886 | /* 'port' is already on this thread tx cache. Do nothing. */ | |
3887 | return; | |
3888 | } | |
3889 | ||
57eebbb4 | 3890 | tx = xzalloc(sizeof *tx); |
d0cca6c3 | 3891 | |
324c8374 IM |
3892 | tx->port = port; |
3893 | tx->qid = -1; | |
d0cca6c3 | 3894 | |
324c8374 | 3895 | hmap_insert(&pmd->tx_ports, &tx->node, hash_port_no(tx->port->port_no)); |
e32971b8 | 3896 | pmd->need_reload = true; |
d0cca6c3 DDP |
3897 | } |
3898 | ||
e32971b8 DDP |
3899 | /* Del 'tx' from the tx port cache of 'pmd', which must be reloaded for the |
3900 | * changes to take effect. */ | |
b9584f21 | 3901 | static void |
e32971b8 DDP |
3902 | dp_netdev_del_port_tx_from_pmd(struct dp_netdev_pmd_thread *pmd, |
3903 | struct tx_port *tx) | |
3904 | OVS_REQUIRES(pmd->port_mutex) | |
b9584f21 | 3905 | { |
e32971b8 DDP |
3906 | hmap_remove(&pmd->tx_ports, &tx->node); |
3907 | free(tx); | |
3908 | pmd->need_reload = true; | |
6c3eee82 BP |
3909 | } |
3910 | \f | |
b5cbbcf6 AZ |
3911 | static char * |
3912 | dpif_netdev_get_datapath_version(void) | |
3913 | { | |
3914 | return xstrdup("<built-in>"); | |
3915 | } | |
3916 | ||
72865317 | 3917 | static void |
1c1e46ed | 3918 | dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow, int cnt, int size, |
11bfdadd | 3919 | uint16_t tcp_flags, long long now) |
72865317 | 3920 | { |
eb94da30 | 3921 | uint16_t flags; |
72865317 | 3922 | |
eb94da30 DDP |
3923 | atomic_store_relaxed(&netdev_flow->stats.used, now); |
3924 | non_atomic_ullong_add(&netdev_flow->stats.packet_count, cnt); | |
3925 | non_atomic_ullong_add(&netdev_flow->stats.byte_count, size); | |
3926 | atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags); | |
3927 | flags |= tcp_flags; | |
3928 | atomic_store_relaxed(&netdev_flow->stats.tcp_flags, flags); | |
51852a57 BP |
3929 | } |
3930 | ||
3931 | static void | |
1c1e46ed AW |
3932 | dp_netdev_count_packet(struct dp_netdev_pmd_thread *pmd, |
3933 | enum dp_stat_type type, int cnt) | |
51852a57 | 3934 | { |
eb94da30 | 3935 | non_atomic_ullong_add(&pmd->stats.n[type], cnt); |
51852a57 BP |
3936 | } |
3937 | ||
623540e4 | 3938 | static int |
e14deea0 | 3939 | dp_netdev_upcall(struct dp_netdev_pmd_thread *pmd, struct dp_packet *packet_, |
7af12bd7 | 3940 | struct flow *flow, struct flow_wildcards *wc, ovs_u128 *ufid, |
623540e4 EJ |
3941 | enum dpif_upcall_type type, const struct nlattr *userdata, |
3942 | struct ofpbuf *actions, struct ofpbuf *put_actions) | |
3943 | { | |
1c1e46ed | 3944 | struct dp_netdev *dp = pmd->dp; |
623540e4 | 3945 | |
623540e4 EJ |
3946 | if (OVS_UNLIKELY(!dp->upcall_cb)) { |
3947 | return ENODEV; | |
3948 | } | |
3949 | ||
3950 | if (OVS_UNLIKELY(!VLOG_DROP_DBG(&upcall_rl))) { | |
3951 | struct ds ds = DS_EMPTY_INITIALIZER; | |
623540e4 | 3952 | char *packet_str; |
cf62fa4c | 3953 | struct ofpbuf key; |
5262eea1 JG |
3954 | struct odp_flow_key_parms odp_parms = { |
3955 | .flow = flow, | |
1dea1435 | 3956 | .mask = wc ? &wc->masks : NULL, |
2494ccd7 | 3957 | .support = dp_netdev_support, |
5262eea1 | 3958 | }; |
623540e4 EJ |
3959 | |
3960 | ofpbuf_init(&key, 0); | |
5262eea1 | 3961 | odp_flow_key_from_flow(&odp_parms, &key); |
cf62fa4c PS |
3962 | packet_str = ofp_packet_to_string(dp_packet_data(packet_), |
3963 | dp_packet_size(packet_)); | |
623540e4 | 3964 | |
6fd6ed71 | 3965 | odp_flow_key_format(key.data, key.size, &ds); |
623540e4 EJ |
3966 | |
3967 | VLOG_DBG("%s: %s upcall:\n%s\n%s", dp->name, | |
3968 | dpif_upcall_type_to_string(type), ds_cstr(&ds), packet_str); | |
3969 | ||
3970 | ofpbuf_uninit(&key); | |
3971 | free(packet_str); | |
6fd6ed71 | 3972 | |
623540e4 EJ |
3973 | ds_destroy(&ds); |
3974 | } | |
3975 | ||
8d8ab6c2 JG |
3976 | return dp->upcall_cb(packet_, flow, ufid, pmd->core_id, type, userdata, |
3977 | actions, wc, put_actions, dp->upcall_aux); | |
623540e4 EJ |
3978 | } |
3979 | ||
9bbf1c3d | 3980 | static inline uint32_t |
048963aa DDP |
3981 | dpif_netdev_packet_get_rss_hash(struct dp_packet *packet, |
3982 | const struct miniflow *mf) | |
9bbf1c3d | 3983 | { |
048963aa | 3984 | uint32_t hash, recirc_depth; |
9bbf1c3d | 3985 | |
f2f44f5d DDP |
3986 | if (OVS_LIKELY(dp_packet_rss_valid(packet))) { |
3987 | hash = dp_packet_get_rss_hash(packet); | |
3988 | } else { | |
9bbf1c3d | 3989 | hash = miniflow_hash_5tuple(mf, 0); |
2bc1bbd2 | 3990 | dp_packet_set_rss_hash(packet, hash); |
9bbf1c3d | 3991 | } |
048963aa DDP |
3992 | |
3993 | /* The RSS hash must account for the recirculation depth to avoid | |
3994 | * collisions in the exact match cache */ | |
3995 | recirc_depth = *recirc_depth_get_unsafe(); | |
3996 | if (OVS_UNLIKELY(recirc_depth)) { | |
3997 | hash = hash_finish(hash, recirc_depth); | |
3998 | dp_packet_set_rss_hash(packet, hash); | |
3999 | } | |
9bbf1c3d DDP |
4000 | return hash; |
4001 | } | |
4002 | ||
f7ce4811 | 4003 | struct packet_batch_per_flow { |
8cbf4f47 DDP |
4004 | unsigned int byte_count; |
4005 | uint16_t tcp_flags; | |
8cbf4f47 DDP |
4006 | struct dp_netdev_flow *flow; |
4007 | ||
1895cc8d | 4008 | struct dp_packet_batch array; |
8cbf4f47 DDP |
4009 | }; |
4010 | ||
4011 | static inline void | |
f7ce4811 PS |
4012 | packet_batch_per_flow_update(struct packet_batch_per_flow *batch, |
4013 | struct dp_packet *packet, | |
4014 | const struct miniflow *mf) | |
8cbf4f47 | 4015 | { |
cf62fa4c | 4016 | batch->byte_count += dp_packet_size(packet); |
1895cc8d PS |
4017 | batch->tcp_flags |= miniflow_get_tcp_flags(mf); |
4018 | batch->array.packets[batch->array.count++] = packet; | |
8cbf4f47 DDP |
4019 | } |
4020 | ||
4021 | static inline void | |
f7ce4811 PS |
4022 | packet_batch_per_flow_init(struct packet_batch_per_flow *batch, |
4023 | struct dp_netdev_flow *flow) | |
8cbf4f47 | 4024 | { |
11e5cf1f | 4025 | flow->batch = batch; |
8cbf4f47 | 4026 | |
11e5cf1f | 4027 | batch->flow = flow; |
1895cc8d | 4028 | dp_packet_batch_init(&batch->array); |
8cbf4f47 DDP |
4029 | batch->byte_count = 0; |
4030 | batch->tcp_flags = 0; | |
8cbf4f47 DDP |
4031 | } |
4032 | ||
4033 | static inline void | |
f7ce4811 PS |
4034 | packet_batch_per_flow_execute(struct packet_batch_per_flow *batch, |
4035 | struct dp_netdev_pmd_thread *pmd, | |
4036 | long long now) | |
8cbf4f47 DDP |
4037 | { |
4038 | struct dp_netdev_actions *actions; | |
4039 | struct dp_netdev_flow *flow = batch->flow; | |
4040 | ||
1895cc8d | 4041 | dp_netdev_flow_used(flow, batch->array.count, batch->byte_count, |
11bfdadd | 4042 | batch->tcp_flags, now); |
8cbf4f47 DDP |
4043 | |
4044 | actions = dp_netdev_flow_get_actions(flow); | |
4045 | ||
66e4ad8a | 4046 | dp_netdev_execute_actions(pmd, &batch->array, true, &flow->flow, |
324c8374 | 4047 | actions->actions, actions->size, now); |
8cbf4f47 DDP |
4048 | } |
4049 | ||
8aaa125d | 4050 | static inline void |
e14deea0 | 4051 | dp_netdev_queue_batches(struct dp_packet *pkt, |
9bbf1c3d | 4052 | struct dp_netdev_flow *flow, const struct miniflow *mf, |
f7ce4811 | 4053 | struct packet_batch_per_flow *batches, size_t *n_batches) |
9bbf1c3d | 4054 | { |
f7ce4811 | 4055 | struct packet_batch_per_flow *batch = flow->batch; |
11e5cf1f | 4056 | |
f9fe365b AZ |
4057 | if (OVS_UNLIKELY(!batch)) { |
4058 | batch = &batches[(*n_batches)++]; | |
f7ce4811 | 4059 | packet_batch_per_flow_init(batch, flow); |
9bbf1c3d DDP |
4060 | } |
4061 | ||
f7ce4811 | 4062 | packet_batch_per_flow_update(batch, pkt, mf); |
9bbf1c3d DDP |
4063 | } |
4064 | ||
9bbf1c3d | 4065 | /* Try to process all ('cnt') the 'packets' using only the exact match cache |
a90ed026 | 4066 | * 'pmd->flow_cache'. If a flow is not found for a packet 'packets[i]', the |
8aaa125d DDP |
4067 | * miniflow is copied into 'keys' and the packet pointer is moved at the |
4068 | * beginning of the 'packets' array. | |
9bbf1c3d DDP |
4069 | * |
4070 | * The function returns the number of packets that needs to be processed in the | |
4071 | * 'packets' array (they have been moved to the beginning of the vector). | |
a90ed026 DDP |
4072 | * |
4073 | * If 'md_is_valid' is false, the metadata in 'packets' is not valid and must be | |
4074 | * initialized by this function using 'port_no'. | |
9bbf1c3d DDP |
4075 | */ |
4076 | static inline size_t | |
72c84bc2 AZ |
4077 | emc_processing(struct dp_netdev_pmd_thread *pmd, |
4078 | struct dp_packet_batch *packets_, | |
1895cc8d | 4079 | struct netdev_flow_key *keys, |
f7ce4811 | 4080 | struct packet_batch_per_flow batches[], size_t *n_batches, |
a90ed026 | 4081 | bool md_is_valid, odp_port_t port_no) |
72865317 | 4082 | { |
65f13b50 | 4083 | struct emc_cache *flow_cache = &pmd->flow_cache; |
b89c678b | 4084 | struct netdev_flow_key *key = &keys[0]; |
72c84bc2 AZ |
4085 | size_t n_missed = 0, n_dropped = 0; |
4086 | struct dp_packet *packet; | |
4087 | const size_t size = dp_packet_batch_size(packets_); | |
4088 | int i; | |
8cbf4f47 | 4089 | |
72c84bc2 | 4090 | DP_PACKET_BATCH_REFILL_FOR_EACH (i, size, packet, packets_) { |
9bbf1c3d | 4091 | struct dp_netdev_flow *flow; |
9bbf1c3d | 4092 | |
5a2fed48 AZ |
4093 | if (OVS_UNLIKELY(dp_packet_size(packet) < ETH_HEADER_LEN)) { |
4094 | dp_packet_delete(packet); | |
3d88a620 | 4095 | n_dropped++; |
84d6d5eb EJ |
4096 | continue; |
4097 | } | |
8cbf4f47 | 4098 | |
72c84bc2 AZ |
4099 | if (i != size - 1) { |
4100 | struct dp_packet **packets = packets_->packets; | |
a90ed026 | 4101 | /* Prefetch next packet data and metadata. */ |
72a5e2b8 | 4102 | OVS_PREFETCH(dp_packet_data(packets[i+1])); |
a90ed026 | 4103 | pkt_metadata_prefetch_init(&packets[i+1]->md); |
72a5e2b8 DDP |
4104 | } |
4105 | ||
a90ed026 DDP |
4106 | if (!md_is_valid) { |
4107 | pkt_metadata_init(&packet->md, port_no); | |
4108 | } | |
5a2fed48 | 4109 | miniflow_extract(packet, &key->mf); |
d262ac2c | 4110 | key->len = 0; /* Not computed yet. */ |
5a2fed48 | 4111 | key->hash = dpif_netdev_packet_get_rss_hash(packet, &key->mf); |
9bbf1c3d | 4112 | |
d262ac2c | 4113 | flow = emc_lookup(flow_cache, key); |
8aaa125d | 4114 | if (OVS_LIKELY(flow)) { |
5a2fed48 | 4115 | dp_netdev_queue_batches(packet, flow, &key->mf, batches, |
8aaa125d DDP |
4116 | n_batches); |
4117 | } else { | |
d1aa0b94 | 4118 | /* Exact match cache missed. Group missed packets together at |
72c84bc2 AZ |
4119 | * the beginning of the 'packets' array. */ |
4120 | dp_packet_batch_refill(packets_, packet, i); | |
400486f7 DDP |
4121 | /* 'key[n_missed]' contains the key of the current packet and it |
4122 | * must be returned to the caller. The next key should be extracted | |
4123 | * to 'keys[n_missed + 1]'. */ | |
4124 | key = &keys[++n_missed]; | |
9bbf1c3d DDP |
4125 | } |
4126 | } | |
4127 | ||
72c84bc2 | 4128 | dp_netdev_count_packet(pmd, DP_STAT_EXACT_HIT, size - n_dropped - n_missed); |
4f150744 | 4129 | |
72c84bc2 | 4130 | return dp_packet_batch_size(packets_); |
9bbf1c3d DDP |
4131 | } |
4132 | ||
a260d966 PS |
4133 | static inline void |
4134 | handle_packet_upcall(struct dp_netdev_pmd_thread *pmd, struct dp_packet *packet, | |
4135 | const struct netdev_flow_key *key, | |
4136 | struct ofpbuf *actions, struct ofpbuf *put_actions, | |
324c8374 | 4137 | int *lost_cnt, long long now) |
a260d966 PS |
4138 | { |
4139 | struct ofpbuf *add_actions; | |
4140 | struct dp_packet_batch b; | |
4141 | struct match match; | |
4142 | ovs_u128 ufid; | |
4143 | int error; | |
4144 | ||
4145 | match.tun_md.valid = false; | |
4146 | miniflow_expand(&key->mf, &match.flow); | |
4147 | ||
4148 | ofpbuf_clear(actions); | |
4149 | ofpbuf_clear(put_actions); | |
4150 | ||
4151 | dpif_flow_hash(pmd->dp->dpif, &match.flow, sizeof match.flow, &ufid); | |
4152 | error = dp_netdev_upcall(pmd, packet, &match.flow, &match.wc, | |
4153 | &ufid, DPIF_UC_MISS, NULL, actions, | |
4154 | put_actions); | |
4155 | if (OVS_UNLIKELY(error && error != ENOSPC)) { | |
4156 | dp_packet_delete(packet); | |
4157 | (*lost_cnt)++; | |
4158 | return; | |
4159 | } | |
4160 | ||
4161 | /* The Netlink encoding of datapath flow keys cannot express | |
4162 | * wildcarding the presence of a VLAN tag. Instead, a missing VLAN | |
4163 | * tag is interpreted as exact match on the fact that there is no | |
4164 | * VLAN. Unless we refactor a lot of code that translates between | |
4165 | * Netlink and struct flow representations, we have to do the same | |
4166 | * here. */ | |
4167 | if (!match.wc.masks.vlan_tci) { | |
4168 | match.wc.masks.vlan_tci = htons(0xffff); | |
4169 | } | |
4170 | ||
4171 | /* We can't allow the packet batching in the next loop to execute | |
4172 | * the actions. Otherwise, if there are any slow path actions, | |
4173 | * we'll send the packet up twice. */ | |
72c84bc2 | 4174 | dp_packet_batch_init_packet(&b, packet); |
66e4ad8a | 4175 | dp_netdev_execute_actions(pmd, &b, true, &match.flow, |
324c8374 | 4176 | actions->data, actions->size, now); |
a260d966 PS |
4177 | |
4178 | add_actions = put_actions->size ? put_actions : actions; | |
4179 | if (OVS_LIKELY(error != ENOSPC)) { | |
4180 | struct dp_netdev_flow *netdev_flow; | |
4181 | ||
4182 | /* XXX: There's a race window where a flow covering this packet | |
4183 | * could have already been installed since we last did the flow | |
4184 | * lookup before upcall. This could be solved by moving the | |
4185 | * mutex lock outside the loop, but that's an awful long time | |
4186 | * to be locking everyone out of making flow installs. If we | |
4187 | * move to a per-core classifier, it would be reasonable. */ | |
4188 | ovs_mutex_lock(&pmd->flow_mutex); | |
3453b4d6 | 4189 | netdev_flow = dp_netdev_pmd_lookup_flow(pmd, key, NULL); |
a260d966 PS |
4190 | if (OVS_LIKELY(!netdev_flow)) { |
4191 | netdev_flow = dp_netdev_flow_add(pmd, &match, &ufid, | |
4192 | add_actions->data, | |
4193 | add_actions->size); | |
4194 | } | |
4195 | ovs_mutex_unlock(&pmd->flow_mutex); | |
4196 | ||
4197 | emc_insert(&pmd->flow_cache, key, netdev_flow); | |
4198 | } | |
4199 | } | |
4200 | ||
9bbf1c3d | 4201 | static inline void |
65f13b50 | 4202 | fast_path_processing(struct dp_netdev_pmd_thread *pmd, |
1895cc8d | 4203 | struct dp_packet_batch *packets_, |
8aaa125d | 4204 | struct netdev_flow_key *keys, |
324c8374 | 4205 | struct packet_batch_per_flow batches[], size_t *n_batches, |
3453b4d6 | 4206 | odp_port_t in_port, |
324c8374 | 4207 | long long now) |
9bbf1c3d | 4208 | { |
1895cc8d | 4209 | int cnt = packets_->count; |
1a0d5831 | 4210 | #if !defined(__CHECKER__) && !defined(_WIN32) |
9bbf1c3d DDP |
4211 | const size_t PKT_ARRAY_SIZE = cnt; |
4212 | #else | |
1a0d5831 | 4213 | /* Sparse or MSVC doesn't like variable length array. */ |
cd159f1a | 4214 | enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST }; |
9bbf1c3d | 4215 | #endif |
1895cc8d | 4216 | struct dp_packet **packets = packets_->packets; |
3453b4d6 | 4217 | struct dpcls *cls; |
0de8783a | 4218 | struct dpcls_rule *rules[PKT_ARRAY_SIZE]; |
65f13b50 AW |
4219 | struct dp_netdev *dp = pmd->dp; |
4220 | struct emc_cache *flow_cache = &pmd->flow_cache; | |
8aaa125d | 4221 | int miss_cnt = 0, lost_cnt = 0; |
3453b4d6 | 4222 | int lookup_cnt = 0, add_lookup_cnt; |
9bbf1c3d | 4223 | bool any_miss; |
8aaa125d | 4224 | size_t i; |
9bbf1c3d DDP |
4225 | |
4226 | for (i = 0; i < cnt; i++) { | |
0de8783a | 4227 | /* Key length is needed in all the cases, hash computed on demand. */ |
361d808d | 4228 | keys[i].len = netdev_flow_key_size(miniflow_n_values(&keys[i].mf)); |
9bbf1c3d | 4229 | } |
3453b4d6 JS |
4230 | /* Get the classifier for the in_port */ |
4231 | cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port); | |
4232 | if (OVS_LIKELY(cls)) { | |
4233 | any_miss = !dpcls_lookup(cls, keys, rules, cnt, &lookup_cnt); | |
4234 | } else { | |
4235 | any_miss = true; | |
4236 | memset(rules, 0, sizeof(rules)); | |
4237 | } | |
623540e4 EJ |
4238 | if (OVS_UNLIKELY(any_miss) && !fat_rwlock_tryrdlock(&dp->upcall_rwlock)) { |
4239 | uint64_t actions_stub[512 / 8], slow_stub[512 / 8]; | |
4240 | struct ofpbuf actions, put_actions; | |
623540e4 EJ |
4241 | |
4242 | ofpbuf_use_stub(&actions, actions_stub, sizeof actions_stub); | |
4243 | ofpbuf_use_stub(&put_actions, slow_stub, sizeof slow_stub); | |
4244 | ||
4245 | for (i = 0; i < cnt; i++) { | |
0de8783a | 4246 | struct dp_netdev_flow *netdev_flow; |
623540e4 | 4247 | |
0de8783a | 4248 | if (OVS_LIKELY(rules[i])) { |
623540e4 EJ |
4249 | continue; |
4250 | } | |
4251 | ||
4252 | /* It's possible that an earlier slow path execution installed | |
0de8783a | 4253 | * a rule covering this flow. In this case, it's a lot cheaper |
623540e4 | 4254 | * to catch it here than execute a miss. */ |
3453b4d6 JS |
4255 | netdev_flow = dp_netdev_pmd_lookup_flow(pmd, &keys[i], |
4256 | &add_lookup_cnt); | |
623540e4 | 4257 | if (netdev_flow) { |
3453b4d6 | 4258 | lookup_cnt += add_lookup_cnt; |
0de8783a | 4259 | rules[i] = &netdev_flow->cr; |
623540e4 EJ |
4260 | continue; |
4261 | } | |
4262 | ||
60fc3b7b | 4263 | miss_cnt++; |
324c8374 IM |
4264 | handle_packet_upcall(pmd, packets[i], &keys[i], &actions, |
4265 | &put_actions, &lost_cnt, now); | |
623540e4 EJ |
4266 | } |
4267 | ||
4268 | ofpbuf_uninit(&actions); | |
4269 | ofpbuf_uninit(&put_actions); | |
4270 | fat_rwlock_unlock(&dp->upcall_rwlock); | |
ac8c2081 | 4271 | } else if (OVS_UNLIKELY(any_miss)) { |
ac8c2081 | 4272 | for (i = 0; i < cnt; i++) { |
0de8783a | 4273 | if (OVS_UNLIKELY(!rules[i])) { |
e14deea0 | 4274 | dp_packet_delete(packets[i]); |
8aaa125d DDP |
4275 | lost_cnt++; |
4276 | miss_cnt++; | |
ac8c2081 DDP |
4277 | } |
4278 | } | |
623540e4 | 4279 | } |
84d6d5eb | 4280 | |
8cbf4f47 | 4281 | for (i = 0; i < cnt; i++) { |
e14deea0 | 4282 | struct dp_packet *packet = packets[i]; |
84d6d5eb | 4283 | struct dp_netdev_flow *flow; |
8cbf4f47 | 4284 | |
0de8783a | 4285 | if (OVS_UNLIKELY(!rules[i])) { |
84d6d5eb EJ |
4286 | continue; |
4287 | } | |
4288 | ||
84d6d5eb | 4289 | flow = dp_netdev_flow_cast(rules[i]); |
0de8783a | 4290 | |
0de8783a | 4291 | emc_insert(flow_cache, &keys[i], flow); |
8aaa125d | 4292 | dp_netdev_queue_batches(packet, flow, &keys[i].mf, batches, n_batches); |
8cbf4f47 DDP |
4293 | } |
4294 | ||
8aaa125d | 4295 | dp_netdev_count_packet(pmd, DP_STAT_MASKED_HIT, cnt - miss_cnt); |
3453b4d6 | 4296 | dp_netdev_count_packet(pmd, DP_STAT_LOOKUP_HIT, lookup_cnt); |
8aaa125d DDP |
4297 | dp_netdev_count_packet(pmd, DP_STAT_MISS, miss_cnt); |
4298 | dp_netdev_count_packet(pmd, DP_STAT_LOST, lost_cnt); | |
72865317 BP |
4299 | } |
4300 | ||
a90ed026 DDP |
4301 | /* Packets enter the datapath from a port (or from recirculation) here. |
4302 | * | |
4303 | * For performance reasons a caller may choose not to initialize the metadata | |
4304 | * in 'packets': in this case 'mdinit' is false and this function needs to | |
4305 | * initialize it using 'port_no'. If the metadata in 'packets' is already | |
4306 | * valid, 'md_is_valid' must be true and 'port_no' will be ignored. */ | |
adcf00ba | 4307 | static void |
a90ed026 | 4308 | dp_netdev_input__(struct dp_netdev_pmd_thread *pmd, |
1895cc8d | 4309 | struct dp_packet_batch *packets, |
a90ed026 | 4310 | bool md_is_valid, odp_port_t port_no) |
9bbf1c3d | 4311 | { |
1895cc8d | 4312 | int cnt = packets->count; |
1a0d5831 | 4313 | #if !defined(__CHECKER__) && !defined(_WIN32) |
9bbf1c3d DDP |
4314 | const size_t PKT_ARRAY_SIZE = cnt; |
4315 | #else | |
1a0d5831 | 4316 | /* Sparse or MSVC doesn't like variable length array. */ |
cd159f1a | 4317 | enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST }; |
9bbf1c3d | 4318 | #endif |
89176774 | 4319 | OVS_ALIGNED_VAR(CACHE_LINE_SIZE) struct netdev_flow_key keys[PKT_ARRAY_SIZE]; |
f7ce4811 | 4320 | struct packet_batch_per_flow batches[PKT_ARRAY_SIZE]; |
11bfdadd | 4321 | long long now = time_msec(); |
72c84bc2 | 4322 | size_t n_batches; |
3453b4d6 | 4323 | odp_port_t in_port; |
9bbf1c3d | 4324 | |
8aaa125d | 4325 | n_batches = 0; |
72c84bc2 | 4326 | emc_processing(pmd, packets, keys, batches, &n_batches, |
a90ed026 | 4327 | md_is_valid, port_no); |
72c84bc2 | 4328 | if (!dp_packet_batch_is_empty(packets)) { |
3453b4d6 JS |
4329 | /* Get ingress port from first packet's metadata. */ |
4330 | in_port = packets->packets[0]->md.in_port.odp_port; | |
4331 | fast_path_processing(pmd, packets, keys, batches, &n_batches, in_port, now); | |
8aaa125d DDP |
4332 | } |
4333 | ||
ad9f0581 BB |
4334 | /* All the flow batches need to be reset before any call to |
4335 | * packet_batch_per_flow_execute() as it could potentially trigger | |
4336 | * recirculation. When a packet matching flow ‘j’ happens to be | |
4337 | * recirculated, the nested call to dp_netdev_input__() could potentially | |
4338 | * classify the packet as matching another flow - say 'k'. It could happen | |
4339 | * that in the previous call to dp_netdev_input__() that same flow 'k' had | |
4340 | * already its own batches[k] still waiting to be served. So if its | |
4341 | * ‘batch’ member is not reset, the recirculated packet would be wrongly | |
4342 | * appended to batches[k] of the 1st call to dp_netdev_input__(). */ | |
72c84bc2 | 4343 | size_t i; |
603f2ce0 EJ |
4344 | for (i = 0; i < n_batches; i++) { |
4345 | batches[i].flow->batch = NULL; | |
4346 | } | |
4347 | ||
8aaa125d | 4348 | for (i = 0; i < n_batches; i++) { |
f7ce4811 | 4349 | packet_batch_per_flow_execute(&batches[i], pmd, now); |
9bbf1c3d DDP |
4350 | } |
4351 | } | |
4352 | ||
a90ed026 DDP |
4353 | static void |
4354 | dp_netdev_input(struct dp_netdev_pmd_thread *pmd, | |
1895cc8d | 4355 | struct dp_packet_batch *packets, |
a90ed026 DDP |
4356 | odp_port_t port_no) |
4357 | { | |
3453b4d6 | 4358 | dp_netdev_input__(pmd, packets, false, port_no); |
a90ed026 DDP |
4359 | } |
4360 | ||
4361 | static void | |
4362 | dp_netdev_recirculate(struct dp_netdev_pmd_thread *pmd, | |
1895cc8d | 4363 | struct dp_packet_batch *packets) |
a90ed026 | 4364 | { |
3453b4d6 | 4365 | dp_netdev_input__(pmd, packets, true, 0); |
a90ed026 DDP |
4366 | } |
4367 | ||
9080a111 | 4368 | struct dp_netdev_execute_aux { |
65f13b50 | 4369 | struct dp_netdev_pmd_thread *pmd; |
324c8374 | 4370 | long long now; |
66e4ad8a | 4371 | const struct flow *flow; |
9080a111 JR |
4372 | }; |
4373 | ||
e4e74c3a AW |
4374 | static void |
4375 | dpif_netdev_register_dp_purge_cb(struct dpif *dpif, dp_purge_callback *cb, | |
4376 | void *aux) | |
4377 | { | |
4378 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
4379 | dp->dp_purge_aux = aux; | |
4380 | dp->dp_purge_cb = cb; | |
4381 | } | |
4382 | ||
6b31e073 | 4383 | static void |
623540e4 EJ |
4384 | dpif_netdev_register_upcall_cb(struct dpif *dpif, upcall_callback *cb, |
4385 | void *aux) | |
6b31e073 RW |
4386 | { |
4387 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
623540e4 | 4388 | dp->upcall_aux = aux; |
6b31e073 RW |
4389 | dp->upcall_cb = cb; |
4390 | } | |
4391 | ||
324c8374 IM |
4392 | static void |
4393 | dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd, | |
4394 | long long now, bool purge) | |
4395 | { | |
4396 | struct tx_port *tx; | |
4397 | struct dp_netdev_port *port; | |
4398 | long long interval; | |
4399 | ||
57eebbb4 | 4400 | HMAP_FOR_EACH (tx, node, &pmd->send_port_cache) { |
9f7a3035 | 4401 | if (!tx->port->dynamic_txqs) { |
324c8374 IM |
4402 | continue; |
4403 | } | |
4404 | interval = now - tx->last_used; | |
4405 | if (tx->qid >= 0 && (purge || interval >= XPS_TIMEOUT_MS)) { | |
4406 | port = tx->port; | |
4407 | ovs_mutex_lock(&port->txq_used_mutex); | |
4408 | port->txq_used[tx->qid]--; | |
4409 | ovs_mutex_unlock(&port->txq_used_mutex); | |
4410 | tx->qid = -1; | |
4411 | } | |
4412 | } | |
4413 | } | |
4414 | ||
4415 | static int | |
4416 | dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread *pmd, | |
4417 | struct tx_port *tx, long long now) | |
4418 | { | |
4419 | struct dp_netdev_port *port; | |
4420 | long long interval; | |
4421 | int i, min_cnt, min_qid; | |
4422 | ||
4423 | if (OVS_UNLIKELY(!now)) { | |
4424 | now = time_msec(); | |
4425 | } | |
4426 | ||
4427 | interval = now - tx->last_used; | |
4428 | tx->last_used = now; | |
4429 | ||
4430 | if (OVS_LIKELY(tx->qid >= 0 && interval < XPS_TIMEOUT_MS)) { | |
4431 | return tx->qid; | |
4432 | } | |
4433 | ||
4434 | port = tx->port; | |
4435 | ||
4436 | ovs_mutex_lock(&port->txq_used_mutex); | |
4437 | if (tx->qid >= 0) { | |
4438 | port->txq_used[tx->qid]--; | |
4439 | tx->qid = -1; | |
4440 | } | |
4441 | ||
4442 | min_cnt = -1; | |
4443 | min_qid = 0; | |
4444 | for (i = 0; i < netdev_n_txq(port->netdev); i++) { | |
4445 | if (port->txq_used[i] < min_cnt || min_cnt == -1) { | |
4446 | min_cnt = port->txq_used[i]; | |
4447 | min_qid = i; | |
4448 | } | |
4449 | } | |
4450 | ||
4451 | port->txq_used[min_qid]++; | |
4452 | tx->qid = min_qid; | |
4453 | ||
4454 | ovs_mutex_unlock(&port->txq_used_mutex); | |
4455 | ||
4456 | dpif_netdev_xps_revalidate_pmd(pmd, now, false); | |
4457 | ||
4458 | VLOG_DBG("Core %d: New TX queue ID %d for port \'%s\'.", | |
4459 | pmd->core_id, tx->qid, netdev_get_name(tx->port->netdev)); | |
4460 | return min_qid; | |
4461 | } | |
4462 | ||
d0cca6c3 | 4463 | static struct tx_port * |
57eebbb4 DDP |
4464 | pmd_tnl_port_cache_lookup(const struct dp_netdev_pmd_thread *pmd, |
4465 | odp_port_t port_no) | |
4466 | { | |
4467 | return tx_port_lookup(&pmd->tnl_port_cache, port_no); | |
4468 | } | |
4469 | ||
4470 | static struct tx_port * | |
4471 | pmd_send_port_cache_lookup(const struct dp_netdev_pmd_thread *pmd, | |
4472 | odp_port_t port_no) | |
d0cca6c3 | 4473 | { |
57eebbb4 | 4474 | return tx_port_lookup(&pmd->send_port_cache, port_no); |
d0cca6c3 DDP |
4475 | } |
4476 | ||
a36de779 | 4477 | static int |
d0cca6c3 | 4478 | push_tnl_action(const struct dp_netdev_pmd_thread *pmd, |
1895cc8d PS |
4479 | const struct nlattr *attr, |
4480 | struct dp_packet_batch *batch) | |
a36de779 | 4481 | { |
d0cca6c3 | 4482 | struct tx_port *tun_port; |
a36de779 | 4483 | const struct ovs_action_push_tnl *data; |
4c742796 | 4484 | int err; |
a36de779 PS |
4485 | |
4486 | data = nl_attr_get(attr); | |
4487 | ||
57eebbb4 | 4488 | tun_port = pmd_tnl_port_cache_lookup(pmd, u32_to_odp(data->tnl_port)); |
a36de779 | 4489 | if (!tun_port) { |
4c742796 PS |
4490 | err = -EINVAL; |
4491 | goto error; | |
a36de779 | 4492 | } |
324c8374 | 4493 | err = netdev_push_header(tun_port->port->netdev, batch, data); |
4c742796 PS |
4494 | if (!err) { |
4495 | return 0; | |
4496 | } | |
4497 | error: | |
4498 | dp_packet_delete_batch(batch, true); | |
4499 | return err; | |
a36de779 PS |
4500 | } |
4501 | ||
66525ef3 PS |
4502 | static void |
4503 | dp_execute_userspace_action(struct dp_netdev_pmd_thread *pmd, | |
4504 | struct dp_packet *packet, bool may_steal, | |
4505 | struct flow *flow, ovs_u128 *ufid, | |
4506 | struct ofpbuf *actions, | |
324c8374 | 4507 | const struct nlattr *userdata, long long now) |
66525ef3 PS |
4508 | { |
4509 | struct dp_packet_batch b; | |
4510 | int error; | |
4511 | ||
4512 | ofpbuf_clear(actions); | |
4513 | ||
4514 | error = dp_netdev_upcall(pmd, packet, flow, NULL, ufid, | |
4515 | DPIF_UC_ACTION, userdata, actions, | |
4516 | NULL); | |
4517 | if (!error || error == ENOSPC) { | |
72c84bc2 | 4518 | dp_packet_batch_init_packet(&b, packet); |
66e4ad8a | 4519 | dp_netdev_execute_actions(pmd, &b, may_steal, flow, |
324c8374 | 4520 | actions->data, actions->size, now); |
66525ef3 PS |
4521 | } else if (may_steal) { |
4522 | dp_packet_delete(packet); | |
4523 | } | |
4524 | } | |
4525 | ||
a36de779 | 4526 | static void |
1895cc8d | 4527 | dp_execute_cb(void *aux_, struct dp_packet_batch *packets_, |
09f9da0b | 4528 | const struct nlattr *a, bool may_steal) |
9080a111 JR |
4529 | { |
4530 | struct dp_netdev_execute_aux *aux = aux_; | |
623540e4 | 4531 | uint32_t *depth = recirc_depth_get(); |
28e2fa02 DDP |
4532 | struct dp_netdev_pmd_thread *pmd = aux->pmd; |
4533 | struct dp_netdev *dp = pmd->dp; | |
09f9da0b | 4534 | int type = nl_attr_type(a); |
324c8374 | 4535 | long long now = aux->now; |
d0cca6c3 | 4536 | struct tx_port *p; |
9080a111 | 4537 | |
09f9da0b JR |
4538 | switch ((enum ovs_action_attr)type) { |
4539 | case OVS_ACTION_ATTR_OUTPUT: | |
57eebbb4 | 4540 | p = pmd_send_port_cache_lookup(pmd, nl_attr_get_odp_port(a)); |
26a5075b | 4541 | if (OVS_LIKELY(p)) { |
347ba9bb | 4542 | int tx_qid; |
324c8374 | 4543 | bool dynamic_txqs; |
347ba9bb | 4544 | |
324c8374 IM |
4545 | dynamic_txqs = p->port->dynamic_txqs; |
4546 | if (dynamic_txqs) { | |
4547 | tx_qid = dpif_netdev_xps_get_tx_qid(pmd, p, now); | |
4548 | } else { | |
82d765f6 | 4549 | tx_qid = pmd->static_tx_qid; |
324c8374 | 4550 | } |
347ba9bb | 4551 | |
324c8374 IM |
4552 | netdev_send(p->port->netdev, tx_qid, packets_, may_steal, |
4553 | dynamic_txqs); | |
ac8c2081 | 4554 | return; |
8a4e3a85 | 4555 | } |
09f9da0b JR |
4556 | break; |
4557 | ||
a36de779 PS |
4558 | case OVS_ACTION_ATTR_TUNNEL_PUSH: |
4559 | if (*depth < MAX_RECIRC_DEPTH) { | |
1895cc8d | 4560 | struct dp_packet_batch tnl_pkt; |
aaca4fe0 | 4561 | struct dp_packet_batch *orig_packets_ = packets_; |
a36de779 PS |
4562 | int err; |
4563 | ||
4564 | if (!may_steal) { | |
1895cc8d PS |
4565 | dp_packet_batch_clone(&tnl_pkt, packets_); |
4566 | packets_ = &tnl_pkt; | |
aaca4fe0 | 4567 | dp_packet_batch_reset_cutlen(orig_packets_); |
a36de779 PS |
4568 | } |
4569 | ||
aaca4fe0 WT |
4570 | dp_packet_batch_apply_cutlen(packets_); |
4571 | ||
d0cca6c3 | 4572 | err = push_tnl_action(pmd, a, packets_); |
a36de779 PS |
4573 | if (!err) { |
4574 | (*depth)++; | |
1895cc8d | 4575 | dp_netdev_recirculate(pmd, packets_); |
a36de779 | 4576 | (*depth)--; |
a36de779 PS |
4577 | } |
4578 | return; | |
4579 | } | |
4580 | break; | |
4581 | ||
4582 | case OVS_ACTION_ATTR_TUNNEL_POP: | |
4583 | if (*depth < MAX_RECIRC_DEPTH) { | |
aaca4fe0 | 4584 | struct dp_packet_batch *orig_packets_ = packets_; |
8611f9a4 | 4585 | odp_port_t portno = nl_attr_get_odp_port(a); |
a36de779 | 4586 | |
57eebbb4 | 4587 | p = pmd_tnl_port_cache_lookup(pmd, portno); |
a36de779 | 4588 | if (p) { |
1895cc8d | 4589 | struct dp_packet_batch tnl_pkt; |
a36de779 PS |
4590 | |
4591 | if (!may_steal) { | |
aaca4fe0 WT |
4592 | dp_packet_batch_clone(&tnl_pkt, packets_); |
4593 | packets_ = &tnl_pkt; | |
4594 | dp_packet_batch_reset_cutlen(orig_packets_); | |
a36de779 PS |
4595 | } |
4596 | ||
aaca4fe0 WT |
4597 | dp_packet_batch_apply_cutlen(packets_); |
4598 | ||
324c8374 | 4599 | netdev_pop_header(p->port->netdev, packets_); |
72c84bc2 | 4600 | if (dp_packet_batch_is_empty(packets_)) { |
1c8f98d9 PS |
4601 | return; |
4602 | } | |
9235b479 | 4603 | |
72c84bc2 AZ |
4604 | struct dp_packet *packet; |
4605 | DP_PACKET_BATCH_FOR_EACH (packet, packets_) { | |
4606 | packet->md.in_port.odp_port = portno; | |
a36de779 | 4607 | } |
9235b479 PS |
4608 | |
4609 | (*depth)++; | |
4610 | dp_netdev_recirculate(pmd, packets_); | |
4611 | (*depth)--; | |
a36de779 PS |
4612 | return; |
4613 | } | |
4614 | } | |
4615 | break; | |
4616 | ||
623540e4 EJ |
4617 | case OVS_ACTION_ATTR_USERSPACE: |
4618 | if (!fat_rwlock_tryrdlock(&dp->upcall_rwlock)) { | |
aaca4fe0 | 4619 | struct dp_packet_batch *orig_packets_ = packets_; |
623540e4 | 4620 | const struct nlattr *userdata; |
aaca4fe0 | 4621 | struct dp_packet_batch usr_pkt; |
623540e4 EJ |
4622 | struct ofpbuf actions; |
4623 | struct flow flow; | |
7af12bd7 | 4624 | ovs_u128 ufid; |
aaca4fe0 | 4625 | bool clone = false; |
4fc65926 | 4626 | |
623540e4 EJ |
4627 | userdata = nl_attr_find_nested(a, OVS_USERSPACE_ATTR_USERDATA); |
4628 | ofpbuf_init(&actions, 0); | |
8cbf4f47 | 4629 | |
aaca4fe0 WT |
4630 | if (packets_->trunc) { |
4631 | if (!may_steal) { | |
4632 | dp_packet_batch_clone(&usr_pkt, packets_); | |
4633 | packets_ = &usr_pkt; | |
aaca4fe0 WT |
4634 | clone = true; |
4635 | dp_packet_batch_reset_cutlen(orig_packets_); | |
4636 | } | |
4637 | ||
4638 | dp_packet_batch_apply_cutlen(packets_); | |
4639 | } | |
4640 | ||
72c84bc2 AZ |
4641 | struct dp_packet *packet; |
4642 | DP_PACKET_BATCH_FOR_EACH (packet, packets_) { | |
4643 | flow_extract(packet, &flow); | |
7af12bd7 | 4644 | dpif_flow_hash(dp->dpif, &flow, sizeof flow, &ufid); |
72c84bc2 | 4645 | dp_execute_userspace_action(pmd, packet, may_steal, &flow, |
324c8374 | 4646 | &ufid, &actions, userdata, now); |
db73f716 | 4647 | } |
aaca4fe0 WT |
4648 | |
4649 | if (clone) { | |
4650 | dp_packet_delete_batch(packets_, true); | |
4651 | } | |
4652 | ||
623540e4 EJ |
4653 | ofpbuf_uninit(&actions); |
4654 | fat_rwlock_unlock(&dp->upcall_rwlock); | |
6b31e073 | 4655 | |
ac8c2081 DDP |
4656 | return; |
4657 | } | |
09f9da0b | 4658 | break; |
572f732a | 4659 | |
adcf00ba AZ |
4660 | case OVS_ACTION_ATTR_RECIRC: |
4661 | if (*depth < MAX_RECIRC_DEPTH) { | |
1895cc8d | 4662 | struct dp_packet_batch recirc_pkts; |
572f732a | 4663 | |
28e2fa02 | 4664 | if (!may_steal) { |
1895cc8d PS |
4665 | dp_packet_batch_clone(&recirc_pkts, packets_); |
4666 | packets_ = &recirc_pkts; | |
28e2fa02 | 4667 | } |
8cbf4f47 | 4668 | |
72c84bc2 AZ |
4669 | struct dp_packet *packet; |
4670 | DP_PACKET_BATCH_FOR_EACH (packet, packets_) { | |
4671 | packet->md.recirc_id = nl_attr_get_u32(a); | |
8cbf4f47 | 4672 | } |
28e2fa02 DDP |
4673 | |
4674 | (*depth)++; | |
1895cc8d | 4675 | dp_netdev_recirculate(pmd, packets_); |
adcf00ba AZ |
4676 | (*depth)--; |
4677 | ||
ac8c2081 | 4678 | return; |
adcf00ba | 4679 | } |
ac8c2081 DDP |
4680 | |
4681 | VLOG_WARN("Packet dropped. Max recirculation depth exceeded."); | |
572f732a | 4682 | break; |
572f732a | 4683 | |
5cf3edb3 DDP |
4684 | case OVS_ACTION_ATTR_CT: { |
4685 | const struct nlattr *b; | |
4686 | bool commit = false; | |
4687 | unsigned int left; | |
4688 | uint16_t zone = 0; | |
4689 | const char *helper = NULL; | |
4690 | const uint32_t *setmark = NULL; | |
4691 | const struct ovs_key_ct_labels *setlabel = NULL; | |
4692 | ||
4693 | NL_ATTR_FOR_EACH_UNSAFE (b, left, nl_attr_get(a), | |
4694 | nl_attr_get_size(a)) { | |
4695 | enum ovs_ct_attr sub_type = nl_attr_type(b); | |
4696 | ||
4697 | switch(sub_type) { | |
4698 | case OVS_CT_ATTR_COMMIT: | |
4699 | commit = true; | |
4700 | break; | |
4701 | case OVS_CT_ATTR_ZONE: | |
4702 | zone = nl_attr_get_u16(b); | |
4703 | break; | |
4704 | case OVS_CT_ATTR_HELPER: | |
4705 | helper = nl_attr_get_string(b); | |
4706 | break; | |
4707 | case OVS_CT_ATTR_MARK: | |
4708 | setmark = nl_attr_get(b); | |
4709 | break; | |
4710 | case OVS_CT_ATTR_LABELS: | |
4711 | setlabel = nl_attr_get(b); | |
4712 | break; | |
4713 | case OVS_CT_ATTR_NAT: | |
4714 | case OVS_CT_ATTR_UNSPEC: | |
4715 | case __OVS_CT_ATTR_MAX: | |
4716 | OVS_NOT_REACHED(); | |
4717 | } | |
4718 | } | |
4719 | ||
66e4ad8a DDP |
4720 | conntrack_execute(&dp->conntrack, packets_, aux->flow->dl_type, commit, |
4721 | zone, setmark, setlabel, helper); | |
07659514 | 4722 | break; |
5cf3edb3 | 4723 | } |
07659514 | 4724 | |
09f9da0b JR |
4725 | case OVS_ACTION_ATTR_PUSH_VLAN: |
4726 | case OVS_ACTION_ATTR_POP_VLAN: | |
4727 | case OVS_ACTION_ATTR_PUSH_MPLS: | |
4728 | case OVS_ACTION_ATTR_POP_MPLS: | |
4729 | case OVS_ACTION_ATTR_SET: | |
6d670e7f | 4730 | case OVS_ACTION_ATTR_SET_MASKED: |
09f9da0b | 4731 | case OVS_ACTION_ATTR_SAMPLE: |
53e1d6f1 | 4732 | case OVS_ACTION_ATTR_HASH: |
09f9da0b | 4733 | case OVS_ACTION_ATTR_UNSPEC: |
aaca4fe0 | 4734 | case OVS_ACTION_ATTR_TRUNC: |
535e3acf | 4735 | case OVS_ACTION_ATTR_CLONE: |
09f9da0b JR |
4736 | case __OVS_ACTION_ATTR_MAX: |
4737 | OVS_NOT_REACHED(); | |
da546e07 | 4738 | } |
ac8c2081 | 4739 | |
1895cc8d | 4740 | dp_packet_delete_batch(packets_, may_steal); |
98403001 BP |
4741 | } |
4742 | ||
4edb9ae9 | 4743 | static void |
65f13b50 | 4744 | dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd, |
1895cc8d | 4745 | struct dp_packet_batch *packets, |
66e4ad8a | 4746 | bool may_steal, const struct flow *flow, |
324c8374 IM |
4747 | const struct nlattr *actions, size_t actions_len, |
4748 | long long now) | |
72865317 | 4749 | { |
66e4ad8a | 4750 | struct dp_netdev_execute_aux aux = { pmd, now, flow }; |
9080a111 | 4751 | |
1895cc8d | 4752 | odp_execute_actions(&aux, packets, may_steal, actions, |
8cbf4f47 | 4753 | actions_len, dp_execute_cb); |
72865317 BP |
4754 | } |
4755 | ||
4d4e68ed DDP |
4756 | struct dp_netdev_ct_dump { |
4757 | struct ct_dpif_dump_state up; | |
4758 | struct conntrack_dump dump; | |
4759 | struct conntrack *ct; | |
4760 | struct dp_netdev *dp; | |
4761 | }; | |
4762 | ||
4763 | static int | |
4764 | dpif_netdev_ct_dump_start(struct dpif *dpif, struct ct_dpif_dump_state **dump_, | |
4765 | const uint16_t *pzone) | |
4766 | { | |
4767 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
4768 | struct dp_netdev_ct_dump *dump; | |
4769 | ||
4770 | dump = xzalloc(sizeof *dump); | |
4771 | dump->dp = dp; | |
4772 | dump->ct = &dp->conntrack; | |
4773 | ||
4774 | conntrack_dump_start(&dp->conntrack, &dump->dump, pzone); | |
4775 | ||
4776 | *dump_ = &dump->up; | |
4777 | ||
4778 | return 0; | |
4779 | } | |
4780 | ||
4781 | static int | |
4782 | dpif_netdev_ct_dump_next(struct dpif *dpif OVS_UNUSED, | |
4783 | struct ct_dpif_dump_state *dump_, | |
4784 | struct ct_dpif_entry *entry) | |
4785 | { | |
4786 | struct dp_netdev_ct_dump *dump; | |
4787 | ||
4788 | INIT_CONTAINER(dump, dump_, up); | |
4789 | ||
4790 | return conntrack_dump_next(&dump->dump, entry); | |
4791 | } | |
4792 | ||
4793 | static int | |
4794 | dpif_netdev_ct_dump_done(struct dpif *dpif OVS_UNUSED, | |
4795 | struct ct_dpif_dump_state *dump_) | |
4796 | { | |
4797 | struct dp_netdev_ct_dump *dump; | |
4798 | int err; | |
4799 | ||
4800 | INIT_CONTAINER(dump, dump_, up); | |
4801 | ||
4802 | err = conntrack_dump_done(&dump->dump); | |
4803 | ||
4804 | free(dump); | |
4805 | ||
4806 | return err; | |
4807 | } | |
4808 | ||
5d9cbb4c DDP |
4809 | static int |
4810 | dpif_netdev_ct_flush(struct dpif *dpif, const uint16_t *zone) | |
4811 | { | |
4812 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
4813 | ||
4814 | return conntrack_flush(&dp->conntrack, zone); | |
4815 | } | |
4816 | ||
72865317 | 4817 | const struct dpif_class dpif_netdev_class = { |
72865317 | 4818 | "netdev", |
6553d06b | 4819 | dpif_netdev_init, |
2197d7ab | 4820 | dpif_netdev_enumerate, |
0aeaabc8 | 4821 | dpif_netdev_port_open_type, |
72865317 BP |
4822 | dpif_netdev_open, |
4823 | dpif_netdev_close, | |
7dab847a | 4824 | dpif_netdev_destroy, |
e4cfed38 PS |
4825 | dpif_netdev_run, |
4826 | dpif_netdev_wait, | |
72865317 | 4827 | dpif_netdev_get_stats, |
72865317 BP |
4828 | dpif_netdev_port_add, |
4829 | dpif_netdev_port_del, | |
3eb67853 | 4830 | dpif_netdev_port_set_config, |
72865317 BP |
4831 | dpif_netdev_port_query_by_number, |
4832 | dpif_netdev_port_query_by_name, | |
98403001 | 4833 | NULL, /* port_get_pid */ |
b0ec0f27 BP |
4834 | dpif_netdev_port_dump_start, |
4835 | dpif_netdev_port_dump_next, | |
4836 | dpif_netdev_port_dump_done, | |
72865317 BP |
4837 | dpif_netdev_port_poll, |
4838 | dpif_netdev_port_poll_wait, | |
72865317 | 4839 | dpif_netdev_flow_flush, |
ac64794a BP |
4840 | dpif_netdev_flow_dump_create, |
4841 | dpif_netdev_flow_dump_destroy, | |
4842 | dpif_netdev_flow_dump_thread_create, | |
4843 | dpif_netdev_flow_dump_thread_destroy, | |
704a1e09 | 4844 | dpif_netdev_flow_dump_next, |
1a0c894a | 4845 | dpif_netdev_operate, |
6b31e073 RW |
4846 | NULL, /* recv_set */ |
4847 | NULL, /* handlers_set */ | |
d4f6865c | 4848 | dpif_netdev_set_config, |
5bf93d67 | 4849 | dpif_netdev_queue_to_priority, |
6b31e073 RW |
4850 | NULL, /* recv */ |
4851 | NULL, /* recv_wait */ | |
4852 | NULL, /* recv_purge */ | |
e4e74c3a | 4853 | dpif_netdev_register_dp_purge_cb, |
6b31e073 RW |
4854 | dpif_netdev_register_upcall_cb, |
4855 | dpif_netdev_enable_upcall, | |
4856 | dpif_netdev_disable_upcall, | |
b5cbbcf6 | 4857 | dpif_netdev_get_datapath_version, |
4d4e68ed DDP |
4858 | dpif_netdev_ct_dump_start, |
4859 | dpif_netdev_ct_dump_next, | |
4860 | dpif_netdev_ct_dump_done, | |
5d9cbb4c | 4861 | dpif_netdev_ct_flush, |
72865317 | 4862 | }; |
614c4892 | 4863 | |
74cc3969 BP |
4864 | static void |
4865 | dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED, | |
4866 | const char *argv[], void *aux OVS_UNUSED) | |
4867 | { | |
e9985d6a | 4868 | struct dp_netdev_port *port; |
74cc3969 | 4869 | struct dp_netdev *dp; |
ff073a71 | 4870 | odp_port_t port_no; |
74cc3969 | 4871 | |
8a4e3a85 | 4872 | ovs_mutex_lock(&dp_netdev_mutex); |
74cc3969 BP |
4873 | dp = shash_find_data(&dp_netdevs, argv[1]); |
4874 | if (!dp || !dpif_netdev_class_is_dummy(dp->class)) { | |
8a4e3a85 | 4875 | ovs_mutex_unlock(&dp_netdev_mutex); |
74cc3969 BP |
4876 | unixctl_command_reply_error(conn, "unknown datapath or not a dummy"); |
4877 | return; | |
4878 | } | |
8a4e3a85 BP |
4879 | ovs_refcount_ref(&dp->ref_cnt); |
4880 | ovs_mutex_unlock(&dp_netdev_mutex); | |
74cc3969 | 4881 | |
59e6d833 | 4882 | ovs_mutex_lock(&dp->port_mutex); |
e9985d6a | 4883 | if (get_port_by_name(dp, argv[2], &port)) { |
74cc3969 | 4884 | unixctl_command_reply_error(conn, "unknown port"); |
8a4e3a85 | 4885 | goto exit; |
74cc3969 BP |
4886 | } |
4887 | ||
ff073a71 BP |
4888 | port_no = u32_to_odp(atoi(argv[3])); |
4889 | if (!port_no || port_no == ODPP_NONE) { | |
74cc3969 | 4890 | unixctl_command_reply_error(conn, "bad port number"); |
8a4e3a85 | 4891 | goto exit; |
74cc3969 | 4892 | } |
ff073a71 | 4893 | if (dp_netdev_lookup_port(dp, port_no)) { |
74cc3969 | 4894 | unixctl_command_reply_error(conn, "port number already in use"); |
8a4e3a85 | 4895 | goto exit; |
74cc3969 | 4896 | } |
59e6d833 | 4897 | |
e9985d6a DDP |
4898 | /* Remove port. */ |
4899 | hmap_remove(&dp->ports, &port->node); | |
e32971b8 | 4900 | reconfigure_datapath(dp); |
59e6d833 | 4901 | |
e9985d6a DDP |
4902 | /* Reinsert with new port number. */ |
4903 | port->port_no = port_no; | |
4904 | hmap_insert(&dp->ports, &port->node, hash_port_no(port_no)); | |
e32971b8 | 4905 | reconfigure_datapath(dp); |
59e6d833 | 4906 | |
d33ed218 | 4907 | seq_change(dp->port_seq); |
74cc3969 | 4908 | unixctl_command_reply(conn, NULL); |
8a4e3a85 BP |
4909 | |
4910 | exit: | |
59e6d833 | 4911 | ovs_mutex_unlock(&dp->port_mutex); |
8a4e3a85 | 4912 | dp_netdev_unref(dp); |
74cc3969 BP |
4913 | } |
4914 | ||
0cbfe35d BP |
4915 | static void |
4916 | dpif_dummy_register__(const char *type) | |
4917 | { | |
4918 | struct dpif_class *class; | |
4919 | ||
4920 | class = xmalloc(sizeof *class); | |
4921 | *class = dpif_netdev_class; | |
4922 | class->type = xstrdup(type); | |
4923 | dp_register_provider(class); | |
4924 | } | |
4925 | ||
8420c7ad BP |
4926 | static void |
4927 | dpif_dummy_override(const char *type) | |
4928 | { | |
65d43fdc YT |
4929 | int error; |
4930 | ||
4931 | /* | |
4932 | * Ignore EAFNOSUPPORT to allow --enable-dummy=system with | |
4933 | * a userland-only build. It's useful for testsuite. | |
4934 | */ | |
4935 | error = dp_unregister_provider(type); | |
4936 | if (error == 0 || error == EAFNOSUPPORT) { | |
8420c7ad BP |
4937 | dpif_dummy_register__(type); |
4938 | } | |
4939 | } | |
4940 | ||
614c4892 | 4941 | void |
8420c7ad | 4942 | dpif_dummy_register(enum dummy_level level) |
614c4892 | 4943 | { |
8420c7ad | 4944 | if (level == DUMMY_OVERRIDE_ALL) { |
0cbfe35d BP |
4945 | struct sset types; |
4946 | const char *type; | |
4947 | ||
4948 | sset_init(&types); | |
4949 | dp_enumerate_types(&types); | |
4950 | SSET_FOR_EACH (type, &types) { | |
8420c7ad | 4951 | dpif_dummy_override(type); |
0cbfe35d BP |
4952 | } |
4953 | sset_destroy(&types); | |
8420c7ad BP |
4954 | } else if (level == DUMMY_OVERRIDE_SYSTEM) { |
4955 | dpif_dummy_override("system"); | |
614c4892 | 4956 | } |
0cbfe35d BP |
4957 | |
4958 | dpif_dummy_register__("dummy"); | |
74cc3969 BP |
4959 | |
4960 | unixctl_command_register("dpif-dummy/change-port-number", | |
74467d5c | 4961 | "dp port new-number", |
74cc3969 | 4962 | 3, 3, dpif_dummy_change_port_number, NULL); |
614c4892 | 4963 | } |
0de8783a JR |
4964 | \f |
4965 | /* Datapath Classifier. */ | |
4966 | ||
4967 | /* A set of rules that all have the same fields wildcarded. */ | |
4968 | struct dpcls_subtable { | |
4969 | /* The fields are only used by writers. */ | |
4970 | struct cmap_node cmap_node OVS_GUARDED; /* Within dpcls 'subtables_map'. */ | |
4971 | ||
4972 | /* These fields are accessed by readers. */ | |
4973 | struct cmap rules; /* Contains "struct dpcls_rule"s. */ | |
3453b4d6 JS |
4974 | uint32_t hit_cnt; /* Number of match hits in subtable in current |
4975 | optimization interval. */ | |
0de8783a JR |
4976 | struct netdev_flow_key mask; /* Wildcards for fields (const). */ |
4977 | /* 'mask' must be the last field, additional space is allocated here. */ | |
4978 | }; | |
4979 | ||
4980 | /* Initializes 'cls' as a classifier that initially contains no classification | |
4981 | * rules. */ | |
4982 | static void | |
4983 | dpcls_init(struct dpcls *cls) | |
4984 | { | |
4985 | cmap_init(&cls->subtables_map); | |
da9cfca6 | 4986 | pvector_init(&cls->subtables); |
0de8783a JR |
4987 | } |
4988 | ||
4989 | static void | |
4990 | dpcls_destroy_subtable(struct dpcls *cls, struct dpcls_subtable *subtable) | |
4991 | { | |
3453b4d6 | 4992 | VLOG_DBG("Destroying subtable %p for in_port %d", subtable, cls->in_port); |
da9cfca6 | 4993 | pvector_remove(&cls->subtables, subtable); |
0de8783a JR |
4994 | cmap_remove(&cls->subtables_map, &subtable->cmap_node, |
4995 | subtable->mask.hash); | |
4996 | cmap_destroy(&subtable->rules); | |
4997 | ovsrcu_postpone(free, subtable); | |
4998 | } | |
4999 | ||
5000 | /* Destroys 'cls'. Rules within 'cls', if any, are not freed; this is the | |
5001 | * caller's responsibility. | |
5002 | * May only be called after all the readers have been terminated. */ | |
5003 | static void | |
5004 | dpcls_destroy(struct dpcls *cls) | |
5005 | { | |
5006 | if (cls) { | |
5007 | struct dpcls_subtable *subtable; | |
5008 | ||
5009 | CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) { | |
361d808d | 5010 | ovs_assert(cmap_count(&subtable->rules) == 0); |
0de8783a JR |
5011 | dpcls_destroy_subtable(cls, subtable); |
5012 | } | |
5013 | cmap_destroy(&cls->subtables_map); | |
da9cfca6 | 5014 | pvector_destroy(&cls->subtables); |
0de8783a JR |
5015 | } |
5016 | } | |
5017 | ||
5018 | static struct dpcls_subtable * | |
5019 | dpcls_create_subtable(struct dpcls *cls, const struct netdev_flow_key *mask) | |
5020 | { | |
5021 | struct dpcls_subtable *subtable; | |
5022 | ||
5023 | /* Need to add one. */ | |
caeb4906 JR |
5024 | subtable = xmalloc(sizeof *subtable |
5025 | - sizeof subtable->mask.mf + mask->len); | |
0de8783a | 5026 | cmap_init(&subtable->rules); |
3453b4d6 | 5027 | subtable->hit_cnt = 0; |
0de8783a JR |
5028 | netdev_flow_key_clone(&subtable->mask, mask); |
5029 | cmap_insert(&cls->subtables_map, &subtable->cmap_node, mask->hash); | |
3453b4d6 | 5030 | /* Add the new subtable at the end of the pvector (with no hits yet) */ |
da9cfca6 | 5031 | pvector_insert(&cls->subtables, subtable, 0); |
84dbfb2b | 5032 | VLOG_DBG("Creating %"PRIuSIZE". subtable %p for in_port %d", |
3453b4d6 | 5033 | cmap_count(&cls->subtables_map), subtable, cls->in_port); |
da9cfca6 | 5034 | pvector_publish(&cls->subtables); |
0de8783a JR |
5035 | |
5036 | return subtable; | |
5037 | } | |
5038 | ||
5039 | static inline struct dpcls_subtable * | |
5040 | dpcls_find_subtable(struct dpcls *cls, const struct netdev_flow_key *mask) | |
5041 | { | |
5042 | struct dpcls_subtable *subtable; | |
5043 | ||
5044 | CMAP_FOR_EACH_WITH_HASH (subtable, cmap_node, mask->hash, | |
5045 | &cls->subtables_map) { | |
5046 | if (netdev_flow_key_equal(&subtable->mask, mask)) { | |
5047 | return subtable; | |
5048 | } | |
5049 | } | |
5050 | return dpcls_create_subtable(cls, mask); | |
5051 | } | |
5052 | ||
3453b4d6 JS |
5053 | |
5054 | /* Periodically sort the dpcls subtable vectors according to hit counts */ | |
5055 | static void | |
5056 | dpcls_sort_subtable_vector(struct dpcls *cls) | |
5057 | { | |
5058 | struct pvector *pvec = &cls->subtables; | |
5059 | struct dpcls_subtable *subtable; | |
5060 | ||
5061 | PVECTOR_FOR_EACH (subtable, pvec) { | |
5062 | pvector_change_priority(pvec, subtable, subtable->hit_cnt); | |
5063 | subtable->hit_cnt = 0; | |
5064 | } | |
5065 | pvector_publish(pvec); | |
5066 | } | |
5067 | ||
5068 | static inline void | |
5069 | dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd) | |
5070 | { | |
5071 | struct dpcls *cls; | |
5072 | long long int now = time_msec(); | |
5073 | ||
5074 | if (now > pmd->next_optimization) { | |
5075 | /* Try to obtain the flow lock to block out revalidator threads. | |
5076 | * If not possible, just try next time. */ | |
5077 | if (!ovs_mutex_trylock(&pmd->flow_mutex)) { | |
5078 | /* Optimize each classifier */ | |
5079 | CMAP_FOR_EACH (cls, node, &pmd->classifiers) { | |
5080 | dpcls_sort_subtable_vector(cls); | |
5081 | } | |
5082 | ovs_mutex_unlock(&pmd->flow_mutex); | |
5083 | /* Start new measuring interval */ | |
5084 | pmd->next_optimization = now + DPCLS_OPTIMIZATION_INTERVAL; | |
5085 | } | |
5086 | } | |
5087 | } | |
5088 | ||
0de8783a JR |
5089 | /* Insert 'rule' into 'cls'. */ |
5090 | static void | |
5091 | dpcls_insert(struct dpcls *cls, struct dpcls_rule *rule, | |
5092 | const struct netdev_flow_key *mask) | |
5093 | { | |
5094 | struct dpcls_subtable *subtable = dpcls_find_subtable(cls, mask); | |
5095 | ||
3453b4d6 | 5096 | /* Refer to subtable's mask, also for later removal. */ |
0de8783a JR |
5097 | rule->mask = &subtable->mask; |
5098 | cmap_insert(&subtable->rules, &rule->cmap_node, rule->flow.hash); | |
5099 | } | |
5100 | ||
5101 | /* Removes 'rule' from 'cls', also destructing the 'rule'. */ | |
5102 | static void | |
5103 | dpcls_remove(struct dpcls *cls, struct dpcls_rule *rule) | |
5104 | { | |
5105 | struct dpcls_subtable *subtable; | |
5106 | ||
5107 | ovs_assert(rule->mask); | |
5108 | ||
3453b4d6 | 5109 | /* Get subtable from reference in rule->mask. */ |
0de8783a | 5110 | INIT_CONTAINER(subtable, rule->mask, mask); |
0de8783a JR |
5111 | if (cmap_remove(&subtable->rules, &rule->cmap_node, rule->flow.hash) |
5112 | == 0) { | |
3453b4d6 | 5113 | /* Delete empty subtable. */ |
0de8783a | 5114 | dpcls_destroy_subtable(cls, subtable); |
da9cfca6 | 5115 | pvector_publish(&cls->subtables); |
0de8783a JR |
5116 | } |
5117 | } | |
5118 | ||
361d808d JR |
5119 | /* Returns true if 'target' satisfies 'key' in 'mask', that is, if each 1-bit |
5120 | * in 'mask' the values in 'key' and 'target' are the same. */ | |
0de8783a JR |
5121 | static inline bool |
5122 | dpcls_rule_matches_key(const struct dpcls_rule *rule, | |
5123 | const struct netdev_flow_key *target) | |
5124 | { | |
09b0fa9c JR |
5125 | const uint64_t *keyp = miniflow_get_values(&rule->flow.mf); |
5126 | const uint64_t *maskp = miniflow_get_values(&rule->mask->mf); | |
5fcff47b | 5127 | uint64_t value; |
0de8783a | 5128 | |
5fcff47b JR |
5129 | NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, target, rule->flow.mf.map) { |
5130 | if (OVS_UNLIKELY((value & *maskp++) != *keyp++)) { | |
0de8783a JR |
5131 | return false; |
5132 | } | |
5133 | } | |
5134 | return true; | |
5135 | } | |
5136 | ||
5b1c9c78 FA |
5137 | /* For each miniflow in 'keys' performs a classifier lookup writing the result |
5138 | * into the corresponding slot in 'rules'. If a particular entry in 'keys' is | |
0de8783a JR |
5139 | * NULL it is skipped. |
5140 | * | |
5141 | * This function is optimized for use in the userspace datapath and therefore | |
5142 | * does not implement a lot of features available in the standard | |
5143 | * classifier_lookup() function. Specifically, it does not implement | |
5144 | * priorities, instead returning any rule which matches the flow. | |
5145 | * | |
5b1c9c78 | 5146 | * Returns true if all miniflows found a corresponding rule. */ |
0de8783a | 5147 | static bool |
3453b4d6 JS |
5148 | dpcls_lookup(struct dpcls *cls, const struct netdev_flow_key keys[], |
5149 | struct dpcls_rule **rules, const size_t cnt, | |
5150 | int *num_lookups_p) | |
0de8783a | 5151 | { |
5b1c9c78 | 5152 | /* The received 'cnt' miniflows are the search-keys that will be processed |
63906f18 BB |
5153 | * to find a matching entry into the available subtables. |
5154 | * The number of bits in map_type is equal to NETDEV_MAX_BURST. */ | |
5155 | typedef uint32_t map_type; | |
0de8783a | 5156 | #define MAP_BITS (sizeof(map_type) * CHAR_BIT) |
63906f18 | 5157 | BUILD_ASSERT_DECL(MAP_BITS >= NETDEV_MAX_BURST); |
0de8783a | 5158 | |
0de8783a JR |
5159 | struct dpcls_subtable *subtable; |
5160 | ||
63906f18 BB |
5161 | map_type keys_map = TYPE_MAXIMUM(map_type); /* Set all bits. */ |
5162 | map_type found_map; | |
5163 | uint32_t hashes[MAP_BITS]; | |
5164 | const struct cmap_node *nodes[MAP_BITS]; | |
5165 | ||
5166 | if (cnt != MAP_BITS) { | |
5167 | keys_map >>= MAP_BITS - cnt; /* Clear extra bits. */ | |
0de8783a JR |
5168 | } |
5169 | memset(rules, 0, cnt * sizeof *rules); | |
5170 | ||
3453b4d6 JS |
5171 | int lookups_match = 0, subtable_pos = 1; |
5172 | ||
5b1c9c78 FA |
5173 | /* The Datapath classifier - aka dpcls - is composed of subtables. |
5174 | * Subtables are dynamically created as needed when new rules are inserted. | |
5175 | * Each subtable collects rules with matches on a specific subset of packet | |
5176 | * fields as defined by the subtable's mask. We proceed to process every | |
5177 | * search-key against each subtable, but when a match is found for a | |
5178 | * search-key, the search for that key can stop because the rules are | |
5179 | * non-overlapping. */ | |
da9cfca6 | 5180 | PVECTOR_FOR_EACH (subtable, &cls->subtables) { |
63906f18 BB |
5181 | int i; |
5182 | ||
5183 | /* Compute hashes for the remaining keys. Each search-key is | |
5184 | * masked with the subtable's mask to avoid hashing the wildcarded | |
5185 | * bits. */ | |
5186 | ULLONG_FOR_EACH_1(i, keys_map) { | |
5187 | hashes[i] = netdev_flow_key_hash_in_mask(&keys[i], | |
5188 | &subtable->mask); | |
5189 | } | |
5190 | /* Lookup. */ | |
5191 | found_map = cmap_find_batch(&subtable->rules, keys_map, hashes, nodes); | |
5192 | /* Check results. When the i-th bit of found_map is set, it means | |
5193 | * that a set of nodes with a matching hash value was found for the | |
5194 | * i-th search-key. Due to possible hash collisions we need to check | |
5195 | * which of the found rules, if any, really matches our masked | |
5196 | * search-key. */ | |
5197 | ULLONG_FOR_EACH_1(i, found_map) { | |
5198 | struct dpcls_rule *rule; | |
5199 | ||
5200 | CMAP_NODE_FOR_EACH (rule, cmap_node, nodes[i]) { | |
5201 | if (OVS_LIKELY(dpcls_rule_matches_key(rule, &keys[i]))) { | |
5202 | rules[i] = rule; | |
5203 | /* Even at 20 Mpps the 32-bit hit_cnt cannot wrap | |
5204 | * within one second optimization interval. */ | |
5205 | subtable->hit_cnt++; | |
5206 | lookups_match += subtable_pos; | |
5207 | goto next; | |
0de8783a | 5208 | } |
0de8783a | 5209 | } |
63906f18 BB |
5210 | /* None of the found rules was a match. Reset the i-th bit to |
5211 | * keep searching this key in the next subtable. */ | |
5212 | ULLONG_SET0(found_map, i); /* Did not match. */ | |
5213 | next: | |
5214 | ; /* Keep Sparse happy. */ | |
0de8783a | 5215 | } |
63906f18 BB |
5216 | keys_map &= ~found_map; /* Clear the found rules. */ |
5217 | if (!keys_map) { | |
3453b4d6 JS |
5218 | if (num_lookups_p) { |
5219 | *num_lookups_p = lookups_match; | |
5220 | } | |
0de8783a JR |
5221 | return true; /* All found. */ |
5222 | } | |
3453b4d6 JS |
5223 | subtable_pos++; |
5224 | } | |
5225 | if (num_lookups_p) { | |
5226 | *num_lookups_p = lookups_match; | |
0de8783a JR |
5227 | } |
5228 | return false; /* Some misses. */ | |
5229 | } |