/*
 * Copyright (c) 2009-2014, 2016-2018 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "dpif-netdev.h"

#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <net/if.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <unistd.h>

#include "bitmap.h"
#include "cmap.h"
#include "conntrack.h"
#include "coverage.h"
#include "ct-dpif.h"
#include "csum.h"
#include "dp-packet.h"
#include "dpif.h"
#include "dpif-netdev-perf.h"
#include "dpif-provider.h"
#include "dummy.h"
#include "fat-rwlock.h"
#include "flow.h"
#include "hmapx.h"
#include "id-pool.h"
#include "ipf.h"
#include "latch.h"
#include "netdev.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "netlink.h"
#include "odp-execute.h"
#include "odp-util.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/match.h"
#include "openvswitch/ofp-parse.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/ofpbuf.h"
#include "openvswitch/shash.h"
#include "openvswitch/vlog.h"
#include "ovs-numa.h"
#include "ovs-rcu.h"
#include "packets.h"
#include "openvswitch/poll-loop.h"
#include "pvector.h"
#include "random.h"
#include "seq.h"
#include "smap.h"
#include "sset.h"
#include "timeval.h"
#include "tnl-neigh-cache.h"
#include "tnl-ports.h"
#include "unixctl.h"
#include "util.h"
#include "uuid.h"

VLOG_DEFINE_THIS_MODULE(dpif_netdev);

/* Auto Load Balancing Defaults */
#define ALB_ACCEPTABLE_IMPROVEMENT       25
#define ALB_PMD_LOAD_THRESHOLD           95
#define ALB_PMD_REBALANCE_POLL_INTERVAL  1 /* 1 Min */
#define MIN_TO_MSEC                  60000

#define FLOW_DUMP_MAX_BATCH 50
/* Use per thread recirc_depth to prevent recirculation loop. */
#define MAX_RECIRC_DEPTH 6
DEFINE_STATIC_PER_THREAD_DATA(uint32_t, recirc_depth, 0)

/* Use instant packet send by default. */
#define DEFAULT_TX_FLUSH_INTERVAL 0

/* Configuration parameters. */
enum { MAX_FLOWS = 65536 };     /* Maximum number of flows in flow table. */
enum { MAX_METERS = 65536 };    /* Maximum number of meters. */
enum { MAX_BANDS = 8 };         /* Maximum number of bands / meter. */
enum { N_METER_LOCKS = 64 };    /* Number of locks striped across meters. */

/* Protects against changes to 'dp_netdevs'. */
static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dp_netdev's. */
static struct shash dp_netdevs OVS_GUARDED_BY(dp_netdev_mutex)
    = SHASH_INITIALIZER(&dp_netdevs);

static struct vlog_rate_limit upcall_rl = VLOG_RATE_LIMIT_INIT(600, 600);

#define DP_NETDEV_CS_SUPPORTED_MASK (CS_NEW | CS_ESTABLISHED | CS_RELATED \
                                     | CS_INVALID | CS_REPLY_DIR | CS_TRACKED \
                                     | CS_SRC_NAT | CS_DST_NAT)
#define DP_NETDEV_CS_UNSUPPORTED_MASK (~(uint32_t)DP_NETDEV_CS_SUPPORTED_MASK)

static struct odp_support dp_netdev_support = {
    .max_vlan_headers = SIZE_MAX,
    .max_mpls_depth = SIZE_MAX,
    .recirc = true,
    .ct_state = true,
    .ct_zone = true,
    .ct_mark = true,
    .ct_label = true,
    .ct_state_nat = true,
    .ct_orig_tuple = true,
    .ct_orig_tuple6 = true,
};

/* Stores a miniflow with inline values */

struct netdev_flow_key {
    uint32_t hash;       /* Hash function differs for different users. */
    uint32_t len;        /* Length of the following miniflow (incl. map). */
    struct miniflow mf;
    uint64_t buf[FLOW_MAX_PACKET_U64S];
};

/* EMC cache and SMC cache compose the datapath flow cache (DFC)
 *
 * Exact match cache (EMC) for frequently used flows
 *
 * The cache uses a 32-bit hash of the packet (which can be the RSS hash) to
 * search its entries for a miniflow that matches exactly the miniflow of the
 * packet.  It stores the 'dpcls_rule' (rule) that matches the miniflow.
 *
 * A cache entry holds a reference to its 'dp_netdev_flow'.
 *
 * A miniflow with a given hash can be in one of EM_FLOW_HASH_SEGS different
 * entries.  The 32-bit hash is split into EM_FLOW_HASH_SEGS values (each of
 * them is EM_FLOW_HASH_SHIFT bits wide and the remainder is thrown away).
 * Each value is the index of a cache entry where the miniflow could be.
 *
 *
 * Signature match cache (SMC)
 *
 * This cache stores a 16-bit signature for each flow without storing keys,
 * along with the corresponding 16-bit flow_table index of the
 * 'dp_netdev_flow'.  Each flow thus occupies 32 bits, which is much more
 * memory-efficient than the EMC.  SMC uses a set-associative design in which
 * each bucket contains SMC_ENTRY_PER_BUCKET entries.  Since a 16-bit
 * flow_table index is used, any flows beyond the first 2^16 dp_netdev_flows
 * cannot be indexed by a 16-bit value and are therefore missed by the SMC.
 *
 *
 * Thread-safety
 * =============
 *
 * Each pmd_thread has its own private exact match cache.
 * If dp_netdev_input is not called from a pmd thread, a mutex is used.
 */

#define EM_FLOW_HASH_SHIFT 13
#define EM_FLOW_HASH_ENTRIES (1u << EM_FLOW_HASH_SHIFT)
#define EM_FLOW_HASH_MASK (EM_FLOW_HASH_ENTRIES - 1)
#define EM_FLOW_HASH_SEGS 2

/* SMC uses a set-associative design.  A bucket contains a set of entries that
 * a flow item can occupy.  For now, it uses one hash function rather than two
 * as for the EMC design. */
#define SMC_ENTRY_PER_BUCKET 4
#define SMC_ENTRIES (1u << 20)
#define SMC_BUCKET_CNT (SMC_ENTRIES / SMC_ENTRY_PER_BUCKET)
#define SMC_MASK (SMC_BUCKET_CNT - 1)

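/* Sizing implied by the constants above: the EMC holds EM_FLOW_HASH_ENTRIES
 * == 1 << 13 == 8192 entries per PMD thread, while the SMC holds 1 << 20
 * entries packed into SMC_ENTRIES / SMC_ENTRY_PER_BUCKET == 262144 buckets
 * of 4 slots each, so SMC_MASK selects a bucket from the low 18 bits of the
 * hash. */
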
/* Default EMC insert probability is 1 / DEFAULT_EM_FLOW_INSERT_INV_PROB */
#define DEFAULT_EM_FLOW_INSERT_INV_PROB 100
#define DEFAULT_EM_FLOW_INSERT_MIN (UINT32_MAX /                     \
                                    DEFAULT_EM_FLOW_INSERT_INV_PROB)

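/* Sketch of how the threshold above is meant to be applied (the actual
 * insertion helper appears later in this file; 'key' and 'flow' stand in for
 * the packet's miniflow key and its matched flow):
 *
 *     uint32_t min = pmd->ctx.emc_insert_min;
 *
 *     if (min && random_uint32() <= min) {
 *         emc_insert(&(pmd->flow_cache).emc_cache, key, flow);
 *     }
 *
 * With the default of UINT32_MAX / 100 this admits roughly 1 in 100 misses,
 * which limits EMC thrashing when many flows compete for 8192 slots. */
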
struct emc_entry {
    struct dp_netdev_flow *flow;
    struct netdev_flow_key key;   /* key.hash used for emc hash value. */
};

struct emc_cache {
    struct emc_entry entries[EM_FLOW_HASH_ENTRIES];
    int sweep_idx;                /* For emc_cache_slow_sweep(). */
};

struct smc_bucket {
    uint16_t sig[SMC_ENTRY_PER_BUCKET];
    uint16_t flow_idx[SMC_ENTRY_PER_BUCKET];
};

/* Signature match cache, as distinct from the EMC cache. */
struct smc_cache {
    struct smc_bucket buckets[SMC_BUCKET_CNT];
};

struct dfc_cache {
    struct emc_cache emc_cache;
    struct smc_cache smc_cache;
};

/* Iterate in the exact match cache through every entry that might contain a
 * miniflow with hash 'HASH'. */
#define EMC_FOR_EACH_POS_WITH_HASH(EMC, CURRENT_ENTRY, HASH)                 \
    for (uint32_t i__ = 0, srch_hash__ = (HASH);                             \
         (CURRENT_ENTRY) = &(EMC)->entries[srch_hash__ & EM_FLOW_HASH_MASK], \
         i__ < EM_FLOW_HASH_SEGS;                                            \
         i__++, srch_hash__ >>= EM_FLOW_HASH_SHIFT)

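/* Usage sketch for the iteration macro above, loosely following the EMC
 * lookup defined later in this file:
 *
 *     struct emc_entry *entry;
 *
 *     EMC_FOR_EACH_POS_WITH_HASH (cache, entry, key->hash) {
 *         if (entry->key.hash == key->hash && emc_entry_alive(entry)) {
 *             ... compare the full miniflow, then use entry->flow ...
 *         }
 *     }
 *
 * Each hash thus probes EM_FLOW_HASH_SEGS (2) candidate slots, indexed by
 * successive EM_FLOW_HASH_SHIFT-bit slices of the 32-bit hash. */
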
/* Simple non-wildcarding single-priority classifier. */

/* Time in microseconds between successive optimizations of the dpcls
 * subtable vector. */
#define DPCLS_OPTIMIZATION_INTERVAL 1000000LL

/* Time in microseconds of the interval in which rxq processing cycles used
 * in rxq to pmd assignments is measured and stored. */
#define PMD_RXQ_INTERVAL_LEN 10000000LL

/* Number of intervals for which cycles are stored
 * and used during rxq to pmd assignment. */
#define PMD_RXQ_INTERVAL_MAX 6

struct dpcls {
    struct cmap_node node;      /* Within dp_netdev_pmd_thread.classifiers */
    odp_port_t in_port;
    struct cmap subtables_map;
    struct pvector subtables;
};

/* A rule to be inserted to the classifier. */
struct dpcls_rule {
    struct cmap_node cmap_node;   /* Within struct dpcls_subtable 'rules'. */
    struct netdev_flow_key *mask; /* Subtable's mask. */
    struct netdev_flow_key flow;  /* Matching key. */
    /* 'flow' must be the last field, additional space is allocated here. */
};

/* Data structure to keep packet order until fast-path processing. */
struct dp_packet_flow_map {
    struct dp_packet *packet;
    struct dp_netdev_flow *flow;
    uint16_t tcp_flags;
};

static void dpcls_init(struct dpcls *);
static void dpcls_destroy(struct dpcls *);
static void dpcls_sort_subtable_vector(struct dpcls *);
static void dpcls_insert(struct dpcls *, struct dpcls_rule *,
                         const struct netdev_flow_key *mask);
static void dpcls_remove(struct dpcls *, struct dpcls_rule *);
static bool dpcls_lookup(struct dpcls *cls,
                         const struct netdev_flow_key *keys[],
                         struct dpcls_rule **rules, size_t cnt,
                         int *num_lookups_p);
static bool dpcls_rule_matches_key(const struct dpcls_rule *rule,
                                   const struct netdev_flow_key *target);

/* Set of supported meter flags */
#define DP_SUPPORTED_METER_FLAGS_MASK \
    (OFPMF13_STATS | OFPMF13_PKTPS | OFPMF13_KBPS | OFPMF13_BURST)

/* Set of supported meter band types */
#define DP_SUPPORTED_METER_BAND_TYPES           \
    ( 1 << OFPMBT13_DROP )

struct dp_meter_band {
    struct ofputil_meter_band up; /* type, prec_level, pad, rate, burst_size */
    uint32_t bucket; /* In 1/1000 packets (for PKTPS), or in bits (for KBPS) */
    uint64_t packet_count;
    uint64_t byte_count;
};

struct dp_meter {
    uint16_t flags;
    uint16_t n_bands;
    uint32_t max_delta_t;
    uint64_t used;
    uint64_t packet_count;
    uint64_t byte_count;
    struct dp_meter_band bands[];
};

struct pmd_auto_lb {
    bool auto_lb_requested;     /* Auto load balancing requested by user. */
    bool is_enabled;            /* Current status of Auto load balancing. */
    uint64_t rebalance_intvl;
    uint64_t rebalance_poll_timer;
};

/* Datapath based on the network device interface from netdev.h.
 *
 *
 * Thread-safety
 * =============
 *
 * Some members, marked 'const', are immutable.  Accessing other members
 * requires synchronization, as noted in more detail below.
 *
 * Acquisition order is, from outermost to innermost:
 *
 *    dp_netdev_mutex (global)
 *    port_mutex
 *    non_pmd_mutex
 */
struct dp_netdev {
    const struct dpif_class *const class;
    const char *const name;
    struct dpif *dpif;
    struct ovs_refcount ref_cnt;
    atomic_flag destroyed;

    /* Ports.
     *
     * Any lookup into 'ports' or any access to the dp_netdev_ports found
     * through 'ports' requires taking 'port_mutex'. */
    struct ovs_mutex port_mutex;
    struct hmap ports;
    struct seq *port_seq;       /* Incremented whenever a port changes. */

    /* The time that a packet can wait in output batch for sending. */
    atomic_uint32_t tx_flush_interval;

    /* Meters. */
    struct ovs_mutex meter_locks[N_METER_LOCKS];
    struct dp_meter *meters[MAX_METERS]; /* Meter bands. */

    /* Probability of EMC insertions is a factor of 'emc_insert_min'. */
    OVS_ALIGNED_VAR(CACHE_LINE_SIZE) atomic_uint32_t emc_insert_min;
    /* Enable collection of PMD performance metrics. */
    atomic_bool pmd_perf_metrics;
    /* Enable the SMC cache from ovsdb config */
    atomic_bool smc_enable_db;

    /* Protects access to ofproto-dpif-upcall interface during revalidator
     * thread synchronization. */
    struct fat_rwlock upcall_rwlock;
    upcall_callback *upcall_cb;  /* Callback function for executing upcalls. */
    void *upcall_aux;

    /* Callback function for notifying the purging of dp flows (during
     * resetting pmd deletion). */
    dp_purge_callback *dp_purge_cb;
    void *dp_purge_aux;

    /* Stores all 'struct dp_netdev_pmd_thread's. */
    struct cmap poll_threads;
    /* id pool for per thread static_tx_qid. */
    struct id_pool *tx_qid_pool;
    struct ovs_mutex tx_qid_pool_mutex;
    /* Use measured cycles for rxq to pmd assignment. */
    bool pmd_rxq_assign_cyc;

    /* Protects the access of the 'struct dp_netdev_pmd_thread'
     * instance for non-pmd thread. */
    struct ovs_mutex non_pmd_mutex;

    /* Each pmd thread will store its pointer to
     * 'struct dp_netdev_pmd_thread' in 'per_pmd_key'. */
    ovsthread_key_t per_pmd_key;

    struct seq *reconfigure_seq;
    uint64_t last_reconfigure_seq;

    /* Cpu mask for pin of pmd threads. */
    char *pmd_cmask;

    uint64_t last_tnl_conf_seq;

    struct conntrack conntrack;
    struct pmd_auto_lb pmd_alb;
};

static void meter_lock(const struct dp_netdev *dp, uint32_t meter_id)
    OVS_ACQUIRES(dp->meter_locks[meter_id % N_METER_LOCKS])
{
    ovs_mutex_lock(&dp->meter_locks[meter_id % N_METER_LOCKS]);
}

static void meter_unlock(const struct dp_netdev *dp, uint32_t meter_id)
    OVS_RELEASES(dp->meter_locks[meter_id % N_METER_LOCKS])
{
    ovs_mutex_unlock(&dp->meter_locks[meter_id % N_METER_LOCKS]);
}

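/* Lock-striping sketch: the mutex guarding a meter is selected by
 * 'meter_id % N_METER_LOCKS', so code touching 'dp->meters[id]' brackets the
 * access as, for example:
 *
 *     meter_lock(dp, meter_id);
 *     ... read or update dp->meters[meter_id] ...
 *     meter_unlock(dp, meter_id);
 *
 * Meters whose ids collide modulo N_METER_LOCKS share a lock, which is safe
 * but can serialize updates to unrelated meters. */
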
static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp,
                                                    odp_port_t)
    OVS_REQUIRES(dp->port_mutex);

enum rxq_cycles_counter_type {
    RXQ_CYCLES_PROC_CURR,       /* Cycles spent successfully polling and
                                   processing packets during the current
                                   interval. */
    RXQ_CYCLES_PROC_HIST,       /* Total cycles of all intervals that are used
                                   during rxq to pmd assignment. */
    RXQ_N_CYCLES
};

enum {
    DP_NETDEV_FLOW_OFFLOAD_OP_ADD,
    DP_NETDEV_FLOW_OFFLOAD_OP_MOD,
    DP_NETDEV_FLOW_OFFLOAD_OP_DEL,
};

struct dp_flow_offload_item {
    struct dp_netdev_pmd_thread *pmd;
    struct dp_netdev_flow *flow;
    int op;
    struct match match;
    struct nlattr *actions;
    size_t actions_len;

    struct ovs_list node;
};

struct dp_flow_offload {
    struct ovs_mutex mutex;
    struct ovs_list list;
    pthread_cond_t cond;
};

static struct dp_flow_offload dp_flow_offload = {
    .mutex = OVS_MUTEX_INITIALIZER,
    .list = OVS_LIST_INITIALIZER(&dp_flow_offload.list),
};

static struct ovsthread_once offload_thread_once
    = OVSTHREAD_ONCE_INITIALIZER;

#define XPS_TIMEOUT 500000LL    /* In microseconds. */

/* Contained by struct dp_netdev_port's 'rxqs' member. */
struct dp_netdev_rxq {
    struct dp_netdev_port *port;
    struct netdev_rxq *rx;
    unsigned core_id;                  /* Core to which this queue should be
                                          pinned.  OVS_CORE_UNSPEC if the
                                          queue doesn't need to be pinned to a
                                          particular core. */
    unsigned intrvl_idx;               /* Write index for 'cycles_intrvl'. */
    struct dp_netdev_pmd_thread *pmd;  /* pmd thread that polls this queue. */
    bool is_vhost;                     /* Is rxq of a vhost port. */

    /* Counters of cycles spent successfully polling and processing pkts. */
    atomic_ullong cycles[RXQ_N_CYCLES];
    /* We store PMD_RXQ_INTERVAL_MAX intervals of data for an rxq and then
       sum them to yield the cycles used for an rxq. */
    atomic_ullong cycles_intrvl[PMD_RXQ_INTERVAL_MAX];
};

/* A port in a netdev-based datapath. */
struct dp_netdev_port {
    odp_port_t port_no;
    bool dynamic_txqs;          /* If true XPS will be used. */
    bool need_reconfigure;      /* True if we should reconfigure netdev. */
    struct netdev *netdev;
    struct hmap_node node;      /* Node in dp_netdev's 'ports'. */
    struct netdev_saved_flags *sf;
    struct dp_netdev_rxq *rxqs;
    unsigned n_rxq;             /* Number of elements in 'rxqs' */
    unsigned *txq_used;         /* Number of threads that use each tx queue. */
    struct ovs_mutex txq_used_mutex;
    bool emc_enabled;           /* If true EMC will be used. */
    char *type;                 /* Port type as requested by user. */
    char *rxq_affinity_list;    /* Requested affinity of rx queues. */
};

/* Contained by struct dp_netdev_flow's 'stats' member.  */
struct dp_netdev_flow_stats {
    atomic_llong used;             /* Last used time, in monotonic msecs. */
    atomic_ullong packet_count;    /* Number of packets matched. */
    atomic_ullong byte_count;      /* Number of bytes matched. */
    atomic_uint16_t tcp_flags;     /* Bitwise-OR of seen tcp_flags values. */
};

/* A flow in 'dp_netdev_pmd_thread's 'flow_table'.
 *
 *
 * Thread-safety
 * =============
 *
 * Except near the beginning or ending of its lifespan, rule 'rule' belongs to
 * its pmd thread's classifier.  The text below calls this classifier 'cls'.
 *
 * Motivation
 * ----------
 *
 * The thread safety rules described here for "struct dp_netdev_flow" are
 * motivated by two goals:
 *
 *    - Prevent threads that read members of "struct dp_netdev_flow" from
 *      reading bad data due to changes by some thread concurrently modifying
 *      those members.
 *
 *    - Prevent two threads making changes to members of a given "struct
 *      dp_netdev_flow" from interfering with each other.
 *
 *
 * Rules
 * -----
 *
 * A flow 'flow' may be accessed without a risk of being freed during an RCU
 * grace period.  Code that needs to hold onto a flow for a while
 * should try incrementing 'flow->ref_cnt' with dp_netdev_flow_ref().
 *
 * 'flow->ref_cnt' protects 'flow' from being freed.  It doesn't protect the
 * flow from being deleted from 'cls' and it doesn't protect members of 'flow'
 * from modification.
 *
 * Some members, marked 'const', are immutable.  Accessing other members
 * requires synchronization, as noted in more detail below.
 */
struct dp_netdev_flow {
    const struct flow flow;      /* Unmasked flow that created this entry. */
    /* Hash table index by unmasked flow. */
    const struct cmap_node node; /* In owning dp_netdev_pmd_thread's */
                                 /* 'flow_table'. */
    const struct cmap_node mark_node; /* In owning flow_mark's mark_to_flow */
    const ovs_u128 ufid;         /* Unique flow identifier. */
    const ovs_u128 mega_ufid;    /* Unique mega flow identifier. */
    const unsigned pmd_id;       /* The 'core_id' of pmd thread owning this */
                                 /* flow. */

    /* Number of references.
     * The classifier owns one reference.
     * Any thread trying to keep a rule from being freed should hold its own
     * reference. */
    struct ovs_refcount ref_cnt;

    bool dead;
    uint32_t mark;               /* Unique flow mark assigned to a flow */

    /* Statistics. */
    struct dp_netdev_flow_stats stats;

    /* Actions. */
    OVSRCU_TYPE(struct dp_netdev_actions *) actions;

    /* While processing a group of input packets, the datapath uses the next
     * member to store a pointer to the output batch for the flow.  It is
     * reset after the batch has been sent out (See dp_netdev_queue_batches(),
     * packet_batch_per_flow_init() and packet_batch_per_flow_execute()). */
    struct packet_batch_per_flow *batch;

    /* Packet classification. */
    struct dpcls_rule cr;        /* In owning dp_netdev's 'cls'. */
    /* 'cr' must be the last member. */
};

static void dp_netdev_flow_unref(struct dp_netdev_flow *);
static bool dp_netdev_flow_ref(struct dp_netdev_flow *);
static int dpif_netdev_flow_from_nlattrs(const struct nlattr *, uint32_t,
                                         struct flow *, bool);

/* A set of datapath actions within a "struct dp_netdev_flow".
 *
 *
 * Thread-safety
 * =============
 *
 * A struct dp_netdev_actions 'actions' is protected with RCU. */
struct dp_netdev_actions {
    /* These members are immutable: they do not change during the struct's
     * lifetime.  */
    unsigned int size;          /* Size of 'actions', in bytes. */
    struct nlattr actions[];    /* Sequence of OVS_ACTION_ATTR_* attributes. */
};

struct dp_netdev_actions *dp_netdev_actions_create(const struct nlattr *,
                                                   size_t);
struct dp_netdev_actions *dp_netdev_flow_get_actions(
    const struct dp_netdev_flow *);
static void dp_netdev_actions_free(struct dp_netdev_actions *);

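/* RCU usage sketch for the actions above: readers obtain the current action
 * list via dp_netdev_flow_get_actions() inside an RCU read section, while a
 * writer installs a replacement and defers freeing the old list, e.g.
 *
 *     ovsrcu_set(&flow->actions, new_actions);
 *     ovsrcu_postpone(dp_netdev_actions_free, old_actions);
 *
 * so that in-flight readers never observe a freed 'actions' array. */
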
struct polled_queue {
    struct dp_netdev_rxq *rxq;
    odp_port_t port_no;
    bool emc_enabled;
};

/* Contained by struct dp_netdev_pmd_thread's 'poll_list' member. */
struct rxq_poll {
    struct dp_netdev_rxq *rxq;
    struct hmap_node node;
};

/* Contained by struct dp_netdev_pmd_thread's 'send_port_cache',
 * 'tnl_port_cache' or 'tx_ports'. */
struct tx_port {
    struct dp_netdev_port *port;
    int qid;
    long long last_used;
    struct hmap_node node;
    long long flush_time;
    struct dp_packet_batch output_pkts;
    struct dp_netdev_rxq *output_pkts_rxqs[NETDEV_MAX_BURST];
};

/* A set of properties for the current processing loop that is not directly
 * associated with the pmd thread itself, but with the packets being
 * processed or the short-term system configuration (for example, time).
 * Contained by struct dp_netdev_pmd_thread's 'ctx' member. */
struct dp_netdev_pmd_thread_ctx {
    /* Latest measured time.  See 'pmd_thread_ctx_time_update()'. */
    long long now;
    /* RX queue from which last packet was received. */
    struct dp_netdev_rxq *last_rxq;
    /* EMC insertion probability context for the current processing cycle. */
    uint32_t emc_insert_min;
};

/* PMD: Poll mode drivers.  A PMD accesses devices via polling to eliminate
 * the performance overhead of interrupt processing.  Therefore netdev can
 * not implement rx-wait for these devices.  dpif-netdev needs to poll
 * these devices to check for recv buffer.  pmd-thread does polling for
 * devices assigned to itself.
 *
 * DPDK uses PMDs for accessing NICs.
 *
 * Note, instance with cpu core id NON_PMD_CORE_ID will be reserved for
 * I/O of all non-pmd threads.  There will be no actual thread created
 * for the instance.
 *
 * Each struct has its own flow cache and classifier per managed ingress port.
 * For packets received on an ingress port, a lookup is done on the
 * corresponding PMD thread's flow cache and, in case of a miss, lookup is
 * performed in the corresponding classifier of the port.  Packets are
 * executed with the found actions in either case.
 * */
struct dp_netdev_pmd_thread {
    struct dp_netdev *dp;
    struct ovs_refcount ref_cnt;    /* Every reference must be refcount'ed. */
    struct cmap_node node;          /* In 'dp->poll_threads'. */

    pthread_cond_t cond;            /* For synchronizing pmd thread reload. */
    struct ovs_mutex cond_mutex;    /* Mutex for condition variable. */

    /* Per thread exact-match cache.  Note, the instance for cpu core
     * NON_PMD_CORE_ID can be accessed by multiple threads, and thus
     * needs to be protected by 'non_pmd_mutex'.  Every other instance
     * will only be accessed by its own pmd thread. */
    OVS_ALIGNED_VAR(CACHE_LINE_SIZE) struct dfc_cache flow_cache;

    /* Flow-Table and classifiers
     *
     * Writers of 'flow_table' must take the 'flow_mutex'.  Corresponding
     * changes to 'classifiers' must be made while still holding the
     * 'flow_mutex'.
     */
    struct ovs_mutex flow_mutex;
    struct cmap flow_table OVS_GUARDED; /* Flow table. */

    /* One classifier per in_port polled by the pmd */
    struct cmap classifiers;
    /* Periodically sort subtable vectors according to hit frequencies */
    long long int next_optimization;
    /* End of the next time interval for which processing cycles
       are stored for each polled rxq. */
    long long int rxq_next_cycle_store;

    /* Last interval timestamp. */
    uint64_t intrvl_tsc_prev;
    /* Last interval cycles. */
    atomic_ullong intrvl_cycles;

    /* Current context of the PMD thread. */
    struct dp_netdev_pmd_thread_ctx ctx;

    struct latch exit_latch;        /* For terminating the pmd thread. */
    struct seq *reload_seq;
    uint64_t last_reload_seq;
    atomic_bool reload;             /* Do we need to reload ports? */
    pthread_t thread;
    unsigned core_id;               /* CPU core id of this pmd thread. */
    int numa_id;                    /* numa node id of this pmd thread. */
    bool isolated;

    /* Queue id used by this pmd thread to send packets on all netdevs if
     * XPS disabled for this netdev.  All static_tx_qid's are unique and less
     * than 'cmap_count(dp->poll_threads)'. */
    uint32_t static_tx_qid;

    /* Number of filled output batches. */
    int n_output_batches;

    struct ovs_mutex port_mutex;    /* Mutex for 'poll_list' and 'tx_ports'. */
    /* List of rx queues to poll. */
    struct hmap poll_list OVS_GUARDED;
    /* Map of 'tx_port's used for transmission.  Written by the main thread,
     * read by the pmd thread. */
    struct hmap tx_ports OVS_GUARDED;

    /* These are thread-local copies of 'tx_ports'.  One contains only tunnel
     * ports (that support push_tunnel/pop_tunnel), the other contains ports
     * with at least one txq (that support send).  A port can be in both.
     *
     * There are two separate maps to make sure that we don't try to execute
     * OUTPUT on a device which has 0 txqs or PUSH/POP on a non-tunnel device.
     *
     * The instances for cpu core NON_PMD_CORE_ID can be accessed by multiple
     * threads, and thus need to be protected by 'non_pmd_mutex'.  Every
     * other instance will only be accessed by its own pmd thread. */
    struct hmap tnl_port_cache;
    struct hmap send_port_cache;

    /* Keep track of detailed PMD performance statistics. */
    struct pmd_perf_stats perf_stats;

    /* Stats from previous iteration used by automatic pmd
     * load balance logic. */
    uint64_t prev_stats[PMD_N_STATS];
    atomic_count pmd_overloaded;

    /* Set to true if the pmd thread needs to be reloaded. */
    bool need_reload;
};

/* Interface to netdev-based datapath. */
struct dpif_netdev {
    struct dpif dpif;
    struct dp_netdev *dp;
    uint64_t last_port_seq;
};

static int get_port_by_number(struct dp_netdev *dp, odp_port_t port_no,
                              struct dp_netdev_port **portp)
    OVS_REQUIRES(dp->port_mutex);
static int get_port_by_name(struct dp_netdev *dp, const char *devname,
                            struct dp_netdev_port **portp)
    OVS_REQUIRES(dp->port_mutex);
static void dp_netdev_free(struct dp_netdev *)
    OVS_REQUIRES(dp_netdev_mutex);
static int do_add_port(struct dp_netdev *dp, const char *devname,
                       const char *type, odp_port_t port_no)
    OVS_REQUIRES(dp->port_mutex);
static void do_del_port(struct dp_netdev *dp, struct dp_netdev_port *)
    OVS_REQUIRES(dp->port_mutex);
static int dpif_netdev_open(const struct dpif_class *, const char *name,
                            bool create, struct dpif **);
static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
                                      struct dp_packet_batch *,
                                      bool should_steal,
                                      const struct flow *flow,
                                      const struct nlattr *actions,
                                      size_t actions_len);
static void dp_netdev_input(struct dp_netdev_pmd_thread *,
                            struct dp_packet_batch *, odp_port_t port_no);
static void dp_netdev_recirculate(struct dp_netdev_pmd_thread *,
                                  struct dp_packet_batch *);

static void dp_netdev_disable_upcall(struct dp_netdev *);
static void dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd,
                                    struct dp_netdev *dp, unsigned core_id,
                                    int numa_id);
static void dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_set_nonpmd(struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex);

static void *pmd_thread_main(void *);
static struct dp_netdev_pmd_thread *dp_netdev_get_pmd(struct dp_netdev *dp,
                                                      unsigned core_id);
static struct dp_netdev_pmd_thread *
dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos);
static void dp_netdev_del_pmd(struct dp_netdev *dp,
                              struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_destroy_all_pmds(struct dp_netdev *dp, bool non_pmd);
static void dp_netdev_pmd_clear_ports(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread *pmd,
                                         struct dp_netdev_port *port)
    OVS_REQUIRES(pmd->port_mutex);
static void dp_netdev_del_port_tx_from_pmd(struct dp_netdev_pmd_thread *pmd,
                                           struct tx_port *tx)
    OVS_REQUIRES(pmd->port_mutex);
static void dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd,
                                     struct dp_netdev_rxq *rxq)
    OVS_REQUIRES(pmd->port_mutex);
static void dp_netdev_del_rxq_from_pmd(struct dp_netdev_pmd_thread *pmd,
                                       struct rxq_poll *poll)
    OVS_REQUIRES(pmd->port_mutex);
static int
dp_netdev_pmd_flush_output_packets(struct dp_netdev_pmd_thread *pmd,
                                   bool force);

static void reconfigure_datapath(struct dp_netdev *dp)
    OVS_REQUIRES(dp->port_mutex);
static bool dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd);
static void pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd)
    OVS_REQUIRES(pmd->port_mutex);
static inline void
dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
                           struct polled_queue *poll_list, int poll_cnt);
static void
dp_netdev_rxq_set_cycles(struct dp_netdev_rxq *rx,
                         enum rxq_cycles_counter_type type,
                         unsigned long long cycles);
static uint64_t
dp_netdev_rxq_get_cycles(struct dp_netdev_rxq *rx,
                         enum rxq_cycles_counter_type type);
static void
dp_netdev_rxq_set_intrvl_cycles(struct dp_netdev_rxq *rx,
                                unsigned long long cycles);
static uint64_t
dp_netdev_rxq_get_intrvl_cycles(struct dp_netdev_rxq *rx, unsigned idx);
static void
dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
                               bool purge);
static int dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread *pmd,
                                      struct tx_port *tx);

static inline bool emc_entry_alive(struct emc_entry *ce);
static void emc_clear_entry(struct emc_entry *ce);
static void smc_clear_entry(struct smc_bucket *b, int idx);

static void dp_netdev_request_reconfigure(struct dp_netdev *dp);
static inline bool
pmd_perf_metrics_enabled(const struct dp_netdev_pmd_thread *pmd);
static void queue_netdev_flow_del(struct dp_netdev_pmd_thread *pmd,
                                  struct dp_netdev_flow *flow);

static void
emc_cache_init(struct emc_cache *flow_cache)
{
    int i;

    flow_cache->sweep_idx = 0;
    for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
        flow_cache->entries[i].flow = NULL;
        flow_cache->entries[i].key.hash = 0;
        flow_cache->entries[i].key.len = sizeof(struct miniflow);
        flowmap_init(&flow_cache->entries[i].key.mf.map);
    }
}

static void
smc_cache_init(struct smc_cache *smc_cache)
{
    int i, j;
    for (i = 0; i < SMC_BUCKET_CNT; i++) {
        for (j = 0; j < SMC_ENTRY_PER_BUCKET; j++) {
            smc_cache->buckets[i].flow_idx[j] = UINT16_MAX;
        }
    }
}

static void
dfc_cache_init(struct dfc_cache *flow_cache)
{
    emc_cache_init(&flow_cache->emc_cache);
    smc_cache_init(&flow_cache->smc_cache);
}

static void
emc_cache_uninit(struct emc_cache *flow_cache)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
        emc_clear_entry(&flow_cache->entries[i]);
    }
}

static void
smc_cache_uninit(struct smc_cache *smc)
{
    int i, j;

    for (i = 0; i < SMC_BUCKET_CNT; i++) {
        for (j = 0; j < SMC_ENTRY_PER_BUCKET; j++) {
            smc_clear_entry(&(smc->buckets[i]), j);
        }
    }
}

static void
dfc_cache_uninit(struct dfc_cache *flow_cache)
{
    smc_cache_uninit(&flow_cache->smc_cache);
    emc_cache_uninit(&flow_cache->emc_cache);
}

/* Check and clear dead flow references slowly (one entry at each
 * invocation).  */
static void
emc_cache_slow_sweep(struct emc_cache *flow_cache)
{
    struct emc_entry *entry = &flow_cache->entries[flow_cache->sweep_idx];

    if (!emc_entry_alive(entry)) {
        emc_clear_entry(entry);
    }
    flow_cache->sweep_idx = (flow_cache->sweep_idx + 1) & EM_FLOW_HASH_MASK;
}

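/* At one entry per invocation, a full sweep of the EMC takes
 * EM_FLOW_HASH_ENTRIES (8192) calls, so stale flow references age out
 * gradually instead of stalling a PMD iteration on a full-cache scan. */
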
/* Updates the time in a PMD thread's context and should be called in three
 * cases:
 *
 *     1. PMD structure initialization:
 *         - dp_netdev_configure_pmd()
 *
 *     2. Before processing of the new packet batch:
 *         - dpif_netdev_execute()
 *         - dp_netdev_process_rxq_port()
 *
 *     3. At least once per polling iteration in main polling threads if no
 *        packets received on current iteration:
 *         - dpif_netdev_run()
 *         - pmd_thread_main()
 *
 * 'pmd->ctx.now' should be used without update in all other cases if
 * possible. */
static inline void
pmd_thread_ctx_time_update(struct dp_netdev_pmd_thread *pmd)
{
    pmd->ctx.now = time_usec();
}

/* Returns true if 'dpif' is a netdev or dummy dpif, false otherwise. */
bool
dpif_is_netdev(const struct dpif *dpif)
{
    return dpif->dpif_class->open == dpif_netdev_open;
}

static struct dpif_netdev *
dpif_netdev_cast(const struct dpif *dpif)
{
    ovs_assert(dpif_is_netdev(dpif));
    return CONTAINER_OF(dpif, struct dpif_netdev, dpif);
}

static struct dp_netdev *
get_dp_netdev(const struct dpif *dpif)
{
    return dpif_netdev_cast(dpif)->dp;
}

enum pmd_info_type {
    PMD_INFO_SHOW_STATS,  /* Show how cpu cycles are spent. */
    PMD_INFO_CLEAR_STATS, /* Set the cycles count to 0. */
    PMD_INFO_SHOW_RXQ,    /* Show poll lists of pmd threads. */
    PMD_INFO_PERF_SHOW,   /* Show pmd performance details. */
};

static void
format_pmd_thread(struct ds *reply, struct dp_netdev_pmd_thread *pmd)
{
    ds_put_cstr(reply, (pmd->core_id == NON_PMD_CORE_ID)
                        ? "main thread" : "pmd thread");
    if (pmd->numa_id != OVS_NUMA_UNSPEC) {
        ds_put_format(reply, " numa_id %d", pmd->numa_id);
    }
    if (pmd->core_id != OVS_CORE_UNSPEC && pmd->core_id != NON_PMD_CORE_ID) {
        ds_put_format(reply, " core_id %u", pmd->core_id);
    }
    ds_put_cstr(reply, ":\n");
}

static void
pmd_info_show_stats(struct ds *reply,
                    struct dp_netdev_pmd_thread *pmd)
{
    uint64_t stats[PMD_N_STATS];
    uint64_t total_cycles, total_packets;
    double passes_per_pkt = 0;
    double lookups_per_hit = 0;
    double packets_per_batch = 0;

    pmd_perf_read_counters(&pmd->perf_stats, stats);
    total_cycles = stats[PMD_CYCLES_ITER_IDLE]
                         + stats[PMD_CYCLES_ITER_BUSY];
    total_packets = stats[PMD_STAT_RECV];

    format_pmd_thread(reply, pmd);

    if (total_packets > 0) {
        passes_per_pkt = (total_packets + stats[PMD_STAT_RECIRC])
                            / (double) total_packets;
    }
    if (stats[PMD_STAT_MASKED_HIT] > 0) {
        lookups_per_hit = stats[PMD_STAT_MASKED_LOOKUP]
                            / (double) stats[PMD_STAT_MASKED_HIT];
    }
    if (stats[PMD_STAT_SENT_BATCHES] > 0) {
        packets_per_batch = stats[PMD_STAT_SENT_PKTS]
                            / (double) stats[PMD_STAT_SENT_BATCHES];
    }

    ds_put_format(reply,
                  "  packets received: %"PRIu64"\n"
                  "  packet recirculations: %"PRIu64"\n"
                  "  avg. datapath passes per packet: %.02f\n"
                  "  emc hits: %"PRIu64"\n"
                  "  smc hits: %"PRIu64"\n"
                  "  megaflow hits: %"PRIu64"\n"
                  "  avg. subtable lookups per megaflow hit: %.02f\n"
                  "  miss with success upcall: %"PRIu64"\n"
                  "  miss with failed upcall: %"PRIu64"\n"
                  "  avg. packets per output batch: %.02f\n",
                  total_packets, stats[PMD_STAT_RECIRC],
                  passes_per_pkt, stats[PMD_STAT_EXACT_HIT],
                  stats[PMD_STAT_SMC_HIT],
                  stats[PMD_STAT_MASKED_HIT], lookups_per_hit,
                  stats[PMD_STAT_MISS], stats[PMD_STAT_LOST],
                  packets_per_batch);

    if (total_cycles == 0) {
        return;
    }

    ds_put_format(reply,
                  "  idle cycles: %"PRIu64" (%.02f%%)\n"
                  "  processing cycles: %"PRIu64" (%.02f%%)\n",
                  stats[PMD_CYCLES_ITER_IDLE],
                  stats[PMD_CYCLES_ITER_IDLE] / (double) total_cycles * 100,
                  stats[PMD_CYCLES_ITER_BUSY],
                  stats[PMD_CYCLES_ITER_BUSY] / (double) total_cycles * 100);

    if (total_packets == 0) {
        return;
    }

    ds_put_format(reply,
                  "  avg cycles per packet: %.02f (%"PRIu64"/%"PRIu64")\n",
                  total_cycles / (double) total_packets,
                  total_cycles, total_packets);

    ds_put_format(reply,
                  "  avg processing cycles per packet: "
                  "%.02f (%"PRIu64"/%"PRIu64")\n",
                  stats[PMD_CYCLES_ITER_BUSY] / (double) total_packets,
                  stats[PMD_CYCLES_ITER_BUSY], total_packets);
}

static void
pmd_info_show_perf(struct ds *reply,
                   struct dp_netdev_pmd_thread *pmd,
                   struct pmd_perf_params *par)
{
    if (pmd->core_id != NON_PMD_CORE_ID) {
        char *time_str =
                xastrftime_msec("%H:%M:%S.###", time_wall_msec(), true);
        long long now = time_msec();
        double duration = (now - pmd->perf_stats.start_ms) / 1000.0;

        ds_put_cstr(reply, "\n");
        ds_put_format(reply, "Time: %s\n", time_str);
        ds_put_format(reply, "Measurement duration: %.3f s\n", duration);
        ds_put_cstr(reply, "\n");
        format_pmd_thread(reply, pmd);
        ds_put_cstr(reply, "\n");
        pmd_perf_format_overall_stats(reply, &pmd->perf_stats, duration);
        if (pmd_perf_metrics_enabled(pmd)) {
            /* Prevent parallel clearing of perf metrics. */
            ovs_mutex_lock(&pmd->perf_stats.clear_mutex);
            if (par->histograms) {
                ds_put_cstr(reply, "\n");
                pmd_perf_format_histograms(reply, &pmd->perf_stats);
            }
            if (par->iter_hist_len > 0) {
                ds_put_cstr(reply, "\n");
                pmd_perf_format_iteration_history(reply, &pmd->perf_stats,
                                                  par->iter_hist_len);
            }
            if (par->ms_hist_len > 0) {
                ds_put_cstr(reply, "\n");
                pmd_perf_format_ms_history(reply, &pmd->perf_stats,
                                           par->ms_hist_len);
            }
            ovs_mutex_unlock(&pmd->perf_stats.clear_mutex);
        }
        free(time_str);
    }
}

static int
compare_poll_list(const void *a_, const void *b_)
{
    const struct rxq_poll *a = a_;
    const struct rxq_poll *b = b_;

    const char *namea = netdev_rxq_get_name(a->rxq->rx);
    const char *nameb = netdev_rxq_get_name(b->rxq->rx);

    int cmp = strcmp(namea, nameb);
    if (!cmp) {
        return netdev_rxq_get_queue_id(a->rxq->rx)
               - netdev_rxq_get_queue_id(b->rxq->rx);
    } else {
        return cmp;
    }
}

static void
sorted_poll_list(struct dp_netdev_pmd_thread *pmd, struct rxq_poll **list,
                 size_t *n)
    OVS_REQUIRES(pmd->port_mutex)
{
    struct rxq_poll *ret, *poll;
    size_t i;

    *n = hmap_count(&pmd->poll_list);
    if (!*n) {
        ret = NULL;
    } else {
        ret = xcalloc(*n, sizeof *ret);
        i = 0;
        HMAP_FOR_EACH (poll, node, &pmd->poll_list) {
            ret[i] = *poll;
            i++;
        }
        ovs_assert(i == *n);
        qsort(ret, *n, sizeof *ret, compare_poll_list);
    }

    *list = ret;
}

static void
pmd_info_show_rxq(struct ds *reply, struct dp_netdev_pmd_thread *pmd)
{
    if (pmd->core_id != NON_PMD_CORE_ID) {
        struct rxq_poll *list;
        size_t n_rxq;
        uint64_t total_cycles = 0;

        ds_put_format(reply,
                      "pmd thread numa_id %d core_id %u:\n  isolated : %s\n",
                      pmd->numa_id, pmd->core_id, (pmd->isolated)
                                                  ? "true" : "false");

        ovs_mutex_lock(&pmd->port_mutex);
        sorted_poll_list(pmd, &list, &n_rxq);

        /* Get the total pmd cycles for an interval. */
        atomic_read_relaxed(&pmd->intrvl_cycles, &total_cycles);
        /* Estimate the cycles to cover all intervals. */
        total_cycles *= PMD_RXQ_INTERVAL_MAX;

        for (int i = 0; i < n_rxq; i++) {
            struct dp_netdev_rxq *rxq = list[i].rxq;
            const char *name = netdev_rxq_get_name(rxq->rx);
            uint64_t proc_cycles = 0;

            for (int j = 0; j < PMD_RXQ_INTERVAL_MAX; j++) {
                proc_cycles += dp_netdev_rxq_get_intrvl_cycles(rxq, j);
            }
            ds_put_format(reply, "  port: %-16s  queue-id: %2d", name,
                          netdev_rxq_get_queue_id(list[i].rxq->rx));
            ds_put_format(reply, "  pmd usage: ");
            if (total_cycles) {
                ds_put_format(reply, "%2"PRIu64"",
                              proc_cycles * 100 / total_cycles);
                ds_put_cstr(reply, " %");
            } else {
                ds_put_format(reply, "%s", "NOT AVAIL");
            }
            ds_put_cstr(reply, "\n");
        }
        ovs_mutex_unlock(&pmd->port_mutex);
        free(list);
    }
}

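/* Note: the "pmd usage" figure printed above is proc_cycles * 100 /
 * total_cycles, where total_cycles extrapolates one measured interval over
 * PMD_RXQ_INTERVAL_MAX intervals; while no interval data is available yet
 * (total_cycles == 0) the command prints "NOT AVAIL" instead. */
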
static int
compare_poll_thread_list(const void *a_, const void *b_)
{
    const struct dp_netdev_pmd_thread *a, *b;

    a = *(struct dp_netdev_pmd_thread **)a_;
    b = *(struct dp_netdev_pmd_thread **)b_;

    if (a->core_id < b->core_id) {
        return -1;
    }
    if (a->core_id > b->core_id) {
        return 1;
    }
    return 0;
}

/* Create a sorted list of pmd's from the dp->poll_threads cmap. We can use
 * this list, as long as we do not go to quiescent state. */
static void
sorted_poll_thread_list(struct dp_netdev *dp,
                        struct dp_netdev_pmd_thread ***list,
                        size_t *n)
{
    struct dp_netdev_pmd_thread *pmd;
    struct dp_netdev_pmd_thread **pmd_list;
    size_t k = 0, n_pmds;

    n_pmds = cmap_count(&dp->poll_threads);
    pmd_list = xcalloc(n_pmds, sizeof *pmd_list);

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        if (k >= n_pmds) {
            break;
        }
        pmd_list[k++] = pmd;
    }

    qsort(pmd_list, k, sizeof *pmd_list, compare_poll_thread_list);

    *list = pmd_list;
    *n = k;
}

static void
dpif_netdev_pmd_rebalance(struct unixctl_conn *conn, int argc,
                          const char *argv[], void *aux OVS_UNUSED)
{
    struct ds reply = DS_EMPTY_INITIALIZER;
    struct dp_netdev *dp = NULL;

    ovs_mutex_lock(&dp_netdev_mutex);

    if (argc == 2) {
        dp = shash_find_data(&dp_netdevs, argv[1]);
    } else if (shash_count(&dp_netdevs) == 1) {
        /* There's only one datapath */
        dp = shash_first(&dp_netdevs)->data;
    }

    if (!dp) {
        ovs_mutex_unlock(&dp_netdev_mutex);
        unixctl_command_reply_error(conn,
                                    "please specify an existing datapath");
        return;
    }

    dp_netdev_request_reconfigure(dp);
    ovs_mutex_unlock(&dp_netdev_mutex);
    ds_put_cstr(&reply, "pmd rxq rebalance requested.\n");
    unixctl_command_reply(conn, ds_cstr(&reply));
    ds_destroy(&reply);
}

6553d06b DDP |
1255 | static void |
1256 | dpif_netdev_pmd_info(struct unixctl_conn *conn, int argc, const char *argv[], | |
1257 | void *aux) | |
1258 | { | |
1259 | struct ds reply = DS_EMPTY_INITIALIZER; | |
34d8e04b | 1260 | struct dp_netdev_pmd_thread **pmd_list; |
6553d06b DDP |
1261 | struct dp_netdev *dp = NULL; |
1262 | enum pmd_info_type type = *(enum pmd_info_type *) aux; | |
82a48ead JS |
1263 | unsigned int core_id; |
1264 | bool filter_on_pmd = false; | |
1265 | size_t n; | |
6553d06b DDP |
1266 | |
1267 | ovs_mutex_lock(&dp_netdev_mutex); | |
1268 | ||
82a48ead | 1269 | while (argc > 1) { |
79f36875 | 1270 | if (!strcmp(argv[1], "-pmd") && argc > 2) { |
82a48ead JS |
1271 | if (str_to_uint(argv[2], 10, &core_id)) { |
1272 | filter_on_pmd = true; | |
1273 | } | |
1274 | argc -= 2; | |
1275 | argv += 2; | |
1276 | } else { | |
1277 | dp = shash_find_data(&dp_netdevs, argv[1]); | |
1278 | argc -= 1; | |
1279 | argv += 1; | |
1280 | } | |
6553d06b DDP |
1281 | } |
1282 | ||
1283 | if (!dp) { | |
82a48ead JS |
1284 | if (shash_count(&dp_netdevs) == 1) { |
1285 | /* There's only one datapath */ | |
1286 | dp = shash_first(&dp_netdevs)->data; | |
1287 | } else { | |
1288 | ovs_mutex_unlock(&dp_netdev_mutex); | |
1289 | unixctl_command_reply_error(conn, | |
1290 | "please specify an existing datapath"); | |
1291 | return; | |
1292 | } | |
6553d06b DDP |
1293 | } |
1294 | ||
34d8e04b EC |
1295 | sorted_poll_thread_list(dp, &pmd_list, &n); |
1296 | for (size_t i = 0; i < n; i++) { | |
1297 | struct dp_netdev_pmd_thread *pmd = pmd_list[i]; | |
1298 | if (!pmd) { | |
1299 | break; | |
1300 | } | |
82a48ead JS |
1301 | if (filter_on_pmd && pmd->core_id != core_id) { |
1302 | continue; | |
1303 | } | |
ce179f11 IM |
1304 | if (type == PMD_INFO_SHOW_RXQ) { |
1305 | pmd_info_show_rxq(&reply, pmd); | |
82a48ead JS |
1306 | } else if (type == PMD_INFO_CLEAR_STATS) { |
1307 | pmd_perf_stats_clear(&pmd->perf_stats); | |
1308 | } else if (type == PMD_INFO_SHOW_STATS) { | |
1309 | pmd_info_show_stats(&reply, pmd); | |
79f36875 JS |
1310 | } else if (type == PMD_INFO_PERF_SHOW) { |
1311 | pmd_info_show_perf(&reply, pmd, (struct pmd_perf_params *)aux); | |
6553d06b DDP |
1312 | } |
1313 | } | |
34d8e04b | 1314 | free(pmd_list); |
6553d06b DDP |
1315 | |
1316 | ovs_mutex_unlock(&dp_netdev_mutex); | |
1317 | ||
1318 | unixctl_command_reply(conn, ds_cstr(&reply)); | |
1319 | ds_destroy(&reply); | |
1320 | } | |
79f36875 JS |
1321 | |
1322 | static void | |
1323 | pmd_perf_show_cmd(struct unixctl_conn *conn, int argc, | |
1324 | const char *argv[], | |
1325 | void *aux OVS_UNUSED) | |
1326 | { | |
1327 | struct pmd_perf_params par; | |
1328 | long int it_hist = 0, ms_hist = 0; | |
1329 | par.histograms = true; | |
1330 | ||
1331 | while (argc > 1) { | |
1332 | if (!strcmp(argv[1], "-nh")) { | |
1333 | par.histograms = false; | |
1334 | argc -= 1; | |
1335 | argv += 1; | |
1336 | } else if (!strcmp(argv[1], "-it") && argc > 2) { | |
1337 | it_hist = strtol(argv[2], NULL, 10); | |
1338 | if (it_hist < 0) { | |
1339 | it_hist = 0; | |
1340 | } else if (it_hist > HISTORY_LEN) { | |
1341 | it_hist = HISTORY_LEN; | |
1342 | } | |
1343 | argc -= 2; | |
1344 | argv += 2; | |
1345 | } else if (!strcmp(argv[1], "-ms") && argc > 2) { | |
1346 | ms_hist = strtol(argv[2], NULL, 10); | |
1347 | if (ms_hist < 0) { | |
1348 | ms_hist = 0; | |
1349 | } else if (ms_hist > HISTORY_LEN) { | |
1350 | ms_hist = HISTORY_LEN; | |
1351 | } | |
1352 | argc -= 2; | |
1353 | argv += 2; | |
1354 | } else { | |
1355 | break; | |
1356 | } | |
1357 | } | |
1358 | par.iter_hist_len = it_hist; | |
1359 | par.ms_hist_len = ms_hist; | |
1360 | par.command_type = PMD_INFO_PERF_SHOW; | |
1361 | dpif_netdev_pmd_info(conn, argc, argv, &par); | |
1362 | } | |
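/* An illustrative invocation, following the option parsing above:
 *
 *     ovs-appctl dpif-netdev/pmd-perf-show -nh -it 100 -pmd 3
 *
 * suppresses the histograms and shows the most recent 100 iterations
 * (clamped to [0, HISTORY_LEN]) for the PMD running on core 3. */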
6553d06b DDP |
1363 | \f |
1364 | static int | |
1365 | dpif_netdev_init(void) | |
1366 | { | |
1367 | static enum pmd_info_type show_aux = PMD_INFO_SHOW_STATS, | |
ce179f11 IM |
1368 | clear_aux = PMD_INFO_CLEAR_STATS, |
1369 | poll_aux = PMD_INFO_SHOW_RXQ; | |
6553d06b | 1370 | |
82a48ead JS |
1371 | unixctl_command_register("dpif-netdev/pmd-stats-show", "[-pmd core] [dp]", |
1372 | 0, 3, dpif_netdev_pmd_info, | |
6553d06b | 1373 | (void *)&show_aux); |
82a48ead JS |
1374 | unixctl_command_register("dpif-netdev/pmd-stats-clear", "[-pmd core] [dp]", |
1375 | 0, 3, dpif_netdev_pmd_info, | |
6553d06b | 1376 | (void *)&clear_aux); |
82a48ead JS |
1377 | unixctl_command_register("dpif-netdev/pmd-rxq-show", "[-pmd core] [dp]", |
1378 | 0, 3, dpif_netdev_pmd_info, | |
ce179f11 | 1379 | (void *)&poll_aux); |
79f36875 JS |
1380 | unixctl_command_register("dpif-netdev/pmd-perf-show", |
1381 | "[-nh] [-it iter-history-len]" | |
1382 | " [-ms ms-history-len]" | |
1383 | " [-pmd core] [dp]", | |
1384 | 0, 8, pmd_perf_show_cmd, | |
1385 | NULL); | |
cd995c73 KT |
1386 | unixctl_command_register("dpif-netdev/pmd-rxq-rebalance", "[dp]", |
1387 | 0, 1, dpif_netdev_pmd_rebalance, | |
1388 | NULL); | |
7178fefb JS |
1389 | unixctl_command_register("dpif-netdev/pmd-perf-log-set", |
1390 | "on|off [-b before] [-a after] [-e|-ne] " | |
1391 | "[-us usec] [-q qlen]", | |
1392 | 0, 10, pmd_perf_log_set_cmd, | |
1393 | NULL); | |
6553d06b DDP |
1394 | return 0; |
1395 | } | |
72865317 | 1396 | |
2197d7ab | 1397 | static int |
2240af25 DDP |
1398 | dpif_netdev_enumerate(struct sset *all_dps, |
1399 | const struct dpif_class *dpif_class) | |
2197d7ab GL |
1400 | { |
1401 | struct shash_node *node; | |
1402 | ||
97be1538 | 1403 | ovs_mutex_lock(&dp_netdev_mutex); |
2197d7ab | 1404 | SHASH_FOR_EACH(node, &dp_netdevs) { |
2240af25 DDP |
1405 | struct dp_netdev *dp = node->data; |
1406 | if (dpif_class != dp->class) { | |
1407 | /* 'dp_netdevs' contains both "netdev" and "dummy" dpifs. | |
1408 | * If the class doesn't match, skip this dpif. */ | |
1409 | continue; | |
1410 | } | |
2197d7ab GL |
1411 | sset_add(all_dps, node->name); |
1412 | } | |
97be1538 | 1413 | ovs_mutex_unlock(&dp_netdev_mutex); |
5279f8fd | 1414 | |
2197d7ab GL |
1415 | return 0; |
1416 | } | |
1417 | ||
add90f6f EJ |
1418 | static bool |
1419 | dpif_netdev_class_is_dummy(const struct dpif_class *class) | |
1420 | { | |
1421 | return class != &dpif_netdev_class; | |
1422 | } | |
1423 | ||
0aeaabc8 JP |
1424 | static const char * |
1425 | dpif_netdev_port_open_type(const struct dpif_class *class, const char *type) | |
1426 | { | |
1427 | return strcmp(type, "internal") ? type | |
e98d0cb3 | 1428 | : dpif_netdev_class_is_dummy(class) ? "dummy-internal" |
0aeaabc8 JP |
1429 | : "tap"; |
1430 | } | |
1431 | ||
72865317 BP |
1432 | static struct dpif * |
1433 | create_dpif_netdev(struct dp_netdev *dp) | |
1434 | { | |
462278db | 1435 | uint16_t netflow_id = hash_string(dp->name, 0); |
72865317 | 1436 | struct dpif_netdev *dpif; |
72865317 | 1437 | |
6a8267c5 | 1438 | ovs_refcount_ref(&dp->ref_cnt); |
72865317 | 1439 | |
72865317 | 1440 | dpif = xmalloc(sizeof *dpif); |
614c4892 | 1441 | dpif_init(&dpif->dpif, dp->class, dp->name, netflow_id >> 8, netflow_id); |
72865317 | 1442 | dpif->dp = dp; |
d33ed218 | 1443 | dpif->last_port_seq = seq_read(dp->port_seq); |
72865317 BP |
1444 | |
1445 | return &dpif->dpif; | |
1446 | } | |
1447 | ||
4e022ec0 AW |
1448 | /* Choose an unused, non-zero port number and return it on success. |
1449 | * Return ODPP_NONE on failure. */ | |
1450 | static odp_port_t | |
e44768b7 | 1451 | choose_port(struct dp_netdev *dp, const char *name) |
59e6d833 | 1452 | OVS_REQUIRES(dp->port_mutex) |
e44768b7 | 1453 | { |
4e022ec0 | 1454 | uint32_t port_no; |
e44768b7 JP |
1455 | |
1456 | if (dp->class != &dpif_netdev_class) { | |
1457 | const char *p; | |
1458 | int start_no = 0; | |
1459 | ||
1460 | /* If the port name begins with "br", start the number search at | |
1461 | * 100 to make writing tests easier. */ | |
1462 | if (!strncmp(name, "br", 2)) { | |
1463 | start_no = 100; | |
1464 | } | |
1465 | ||
1466 | /* If the port name contains a number, try to assign that port number. | |
1467 | * This can make writing unit tests easier because port numbers are | |
1468 | * predictable. */ | |
1469 | for (p = name; *p != '\0'; p++) { | |
1470 | if (isdigit((unsigned char) *p)) { | |
1471 | port_no = start_no + strtol(p, NULL, 10); | |
ff073a71 BP |
1472 | if (port_no > 0 && port_no != odp_to_u32(ODPP_NONE) |
1473 | && !dp_netdev_lookup_port(dp, u32_to_odp(port_no))) { | |
4e022ec0 | 1474 | return u32_to_odp(port_no); |
e44768b7 JP |
1475 | } |
1476 | break; | |
1477 | } | |
1478 | } | |
1479 | } | |
1480 | ||
ff073a71 BP |
1481 | for (port_no = 1; port_no <= UINT16_MAX; port_no++) { |
1482 | if (!dp_netdev_lookup_port(dp, u32_to_odp(port_no))) { | |
4e022ec0 | 1483 | return u32_to_odp(port_no); |
e44768b7 JP |
1484 | } |
1485 | } | |
1486 | ||
4e022ec0 | 1487 | return ODPP_NONE; |
e44768b7 JP |
1488 | } |
1489 | ||
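/* For example (numbers follow the rules above; dummy datapaths only): a
 * port named "eth2" is first tried at number 2, and one named "br0" at
 * 100 + 0 = 100.  If that number is already taken, or the name contains
 * no digit, the loop falls back to the first free number from 1 up. */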
72865317 | 1490 | static int |
614c4892 BP |
1491 | create_dp_netdev(const char *name, const struct dpif_class *class, |
1492 | struct dp_netdev **dpp) | |
8a4e3a85 | 1493 | OVS_REQUIRES(dp_netdev_mutex) |
72865317 BP |
1494 | { |
1495 | struct dp_netdev *dp; | |
1496 | int error; | |
72865317 | 1497 | |
462278db | 1498 | dp = xzalloc(sizeof *dp); |
8a4e3a85 BP |
1499 | shash_add(&dp_netdevs, name, dp); |
1500 | ||
1501 | *CONST_CAST(const struct dpif_class **, &dp->class) = class; | |
1502 | *CONST_CAST(const char **, &dp->name) = xstrdup(name); | |
6a8267c5 | 1503 | ovs_refcount_init(&dp->ref_cnt); |
1a65ba85 | 1504 | atomic_flag_clear(&dp->destroyed); |
8a4e3a85 | 1505 | |
59e6d833 | 1506 | ovs_mutex_init(&dp->port_mutex); |
e9985d6a | 1507 | hmap_init(&dp->ports); |
d33ed218 | 1508 | dp->port_seq = seq_create(); |
6b31e073 RW |
1509 | fat_rwlock_init(&dp->upcall_rwlock); |
1510 | ||
a6a426d6 IM |
1511 | dp->reconfigure_seq = seq_create(); |
1512 | dp->last_reconfigure_seq = seq_read(dp->reconfigure_seq); | |
1513 | ||
4b27db64 JR |
1514 | for (int i = 0; i < N_METER_LOCKS; ++i) { |
1515 | ovs_mutex_init_adaptive(&dp->meter_locks[i]); | |
1516 | } | |
1517 | ||
6b31e073 RW |
1518 | /* Disable upcalls by default. */ |
1519 | dp_netdev_disable_upcall(dp); | |
623540e4 | 1520 | dp->upcall_aux = NULL; |
6b31e073 | 1521 | dp->upcall_cb = NULL; |
e44768b7 | 1522 | |
5cf3edb3 DDP |
1523 | conntrack_init(&dp->conntrack); |
1524 | ||
4c30b246 | 1525 | atomic_init(&dp->emc_insert_min, DEFAULT_EM_FLOW_INSERT_MIN); |
c71ea3c4 | 1526 | atomic_init(&dp->tx_flush_interval, DEFAULT_TX_FLUSH_INTERVAL); |
4c30b246 | 1527 | |
65f13b50 | 1528 | cmap_init(&dp->poll_threads); |
e77c97b9 | 1529 | dp->pmd_rxq_assign_cyc = true; |
140dd699 IM |
1530 | |
1531 | ovs_mutex_init(&dp->tx_qid_pool_mutex); | |
1532 | /* We need 1 Tx queue for each possible core + 1 for non-PMD threads. */ | |
1533 | dp->tx_qid_pool = id_pool_create(0, ovs_numa_get_n_cores() + 1); | |
1534 | ||
65f13b50 AW |
1535 | ovs_mutex_init_recursive(&dp->non_pmd_mutex); |
1536 | ovsthread_key_create(&dp->per_pmd_key, NULL); | |
1537 | ||
e9985d6a | 1538 | ovs_mutex_lock(&dp->port_mutex); |
140dd699 IM |
1539 | /* The non-PMD thread will be created before all other threads and will | |
1540 | * allocate static_tx_qid = 0. */ | |
f2eee189 | 1541 | dp_netdev_set_nonpmd(dp); |
65f13b50 | 1542 | |
a3e8437a TLSC |
1543 | error = do_add_port(dp, name, dpif_netdev_port_open_type(dp->class, |
1544 | "internal"), | |
1545 | ODPP_LOCAL); | |
59e6d833 | 1546 | ovs_mutex_unlock(&dp->port_mutex); |
72865317 BP |
1547 | if (error) { |
1548 | dp_netdev_free(dp); | |
462278db | 1549 | return error; |
72865317 BP |
1550 | } |
1551 | ||
a36de779 | 1552 | dp->last_tnl_conf_seq = seq_read(tnl_conf_seq); |
462278db | 1553 | *dpp = dp; |
72865317 BP |
1554 | return 0; |
1555 | } | |
1556 | ||
a6a426d6 IM |
1557 | static void |
1558 | dp_netdev_request_reconfigure(struct dp_netdev *dp) | |
1559 | { | |
1560 | seq_change(dp->reconfigure_seq); | |
1561 | } | |
1562 | ||
1563 | static bool | |
1564 | dp_netdev_is_reconf_required(struct dp_netdev *dp) | |
1565 | { | |
1566 | return seq_read(dp->reconfigure_seq) != dp->last_reconfigure_seq; | |
1567 | } | |
1568 | ||
72865317 | 1569 | static int |
614c4892 | 1570 | dpif_netdev_open(const struct dpif_class *class, const char *name, |
4a387741 | 1571 | bool create, struct dpif **dpifp) |
72865317 | 1572 | { |
462278db | 1573 | struct dp_netdev *dp; |
5279f8fd | 1574 | int error; |
462278db | 1575 | |
97be1538 | 1576 | ovs_mutex_lock(&dp_netdev_mutex); |
462278db BP |
1577 | dp = shash_find_data(&dp_netdevs, name); |
1578 | if (!dp) { | |
5279f8fd | 1579 | error = create ? create_dp_netdev(name, class, &dp) : ENODEV; |
72865317 | 1580 | } else { |
5279f8fd BP |
1581 | error = (dp->class != class ? EINVAL |
1582 | : create ? EEXIST | |
1583 | : 0); | |
1584 | } | |
1585 | if (!error) { | |
1586 | *dpifp = create_dpif_netdev(dp); | |
6b31e073 | 1587 | dp->dpif = *dpifp; |
72865317 | 1588 | } |
97be1538 | 1589 | ovs_mutex_unlock(&dp_netdev_mutex); |
462278db | 1590 | |
5279f8fd | 1591 | return error; |
72865317 BP |
1592 | } |
1593 | ||
88ace79b DDP |
1594 | static void |
1595 | dp_netdev_destroy_upcall_lock(struct dp_netdev *dp) | |
1596 | OVS_NO_THREAD_SAFETY_ANALYSIS | |
1597 | { | |
1598 | /* Check that upcalls are disabled, i.e. that the rwlock is taken */ | |
1599 | ovs_assert(fat_rwlock_tryrdlock(&dp->upcall_rwlock)); | |
1600 | ||
1601 | /* Before freeing a lock we should release it */ | |
1602 | fat_rwlock_unlock(&dp->upcall_rwlock); | |
1603 | fat_rwlock_destroy(&dp->upcall_rwlock); | |
1604 | } | |
1605 | ||
4b27db64 JR |
1606 | static void |
1607 | dp_delete_meter(struct dp_netdev *dp, uint32_t meter_id) | |
1608 | OVS_REQUIRES(dp->meter_locks[meter_id % N_METER_LOCKS]) | |
1609 | { | |
1610 | if (dp->meters[meter_id]) { | |
1611 | free(dp->meters[meter_id]); | |
1612 | dp->meters[meter_id] = NULL; | |
1613 | } | |
1614 | } | |
1615 | ||
8a4e3a85 BP |
1616 | /* Requires dp_netdev_mutex so that we can't get a new reference to 'dp' |
1617 | * through the 'dp_netdevs' shash while freeing 'dp'. */ | |
1ba530f4 BP |
1618 | static void |
1619 | dp_netdev_free(struct dp_netdev *dp) | |
8a4e3a85 | 1620 | OVS_REQUIRES(dp_netdev_mutex) |
1ba530f4 | 1621 | { |
e9985d6a | 1622 | struct dp_netdev_port *port, *next; |
4ad28026 | 1623 | |
8a4e3a85 BP |
1624 | shash_find_and_delete(&dp_netdevs, dp->name); |
1625 | ||
59e6d833 | 1626 | ovs_mutex_lock(&dp->port_mutex); |
e9985d6a | 1627 | HMAP_FOR_EACH_SAFE (port, next, node, &dp->ports) { |
c40b890f | 1628 | do_del_port(dp, port); |
1ba530f4 | 1629 | } |
59e6d833 | 1630 | ovs_mutex_unlock(&dp->port_mutex); |
4b27db64 | 1631 | |
e32971b8 | 1632 | dp_netdev_destroy_all_pmds(dp, true); |
d916785c | 1633 | cmap_destroy(&dp->poll_threads); |
51852a57 | 1634 | |
140dd699 IM |
1635 | ovs_mutex_destroy(&dp->tx_qid_pool_mutex); |
1636 | id_pool_destroy(dp->tx_qid_pool); | |
1637 | ||
b9584f21 DDP |
1638 | ovs_mutex_destroy(&dp->non_pmd_mutex); |
1639 | ovsthread_key_delete(dp->per_pmd_key); | |
1640 | ||
1641 | conntrack_destroy(&dp->conntrack); | |
1642 | ||
1643 | ||
a6a426d6 IM |
1644 | seq_destroy(dp->reconfigure_seq); |
1645 | ||
d33ed218 | 1646 | seq_destroy(dp->port_seq); |
e9985d6a | 1647 | hmap_destroy(&dp->ports); |
3186ea46 | 1648 | ovs_mutex_destroy(&dp->port_mutex); |
88ace79b DDP |
1649 | |
1650 | /* Upcalls must be disabled at this point */ | |
1651 | dp_netdev_destroy_upcall_lock(dp); | |
9bbf1c3d | 1652 | |
4b27db64 JR |
1653 | int i; |
1654 | ||
1655 | for (i = 0; i < MAX_METERS; ++i) { | |
1656 | meter_lock(dp, i); | |
1657 | dp_delete_meter(dp, i); | |
1658 | meter_unlock(dp, i); | |
1659 | } | |
1660 | for (i = 0; i < N_METER_LOCKS; ++i) { | |
1661 | ovs_mutex_destroy(&dp->meter_locks[i]); | |
1662 | } | |
1663 | ||
f2eee189 | 1664 | free(dp->pmd_cmask); |
8a4e3a85 | 1665 | free(CONST_CAST(char *, dp->name)); |
72865317 BP |
1666 | free(dp); |
1667 | } | |
1668 | ||
8a4e3a85 BP |
1669 | static void |
1670 | dp_netdev_unref(struct dp_netdev *dp) | |
1671 | { | |
1672 | if (dp) { | |
1673 | /* Take dp_netdev_mutex so that, if dp->ref_cnt falls to zero, we can't | |
1674 | * get a new reference to 'dp' through the 'dp_netdevs' shash. */ | |
1675 | ovs_mutex_lock(&dp_netdev_mutex); | |
24f83812 | 1676 | if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) { |
8a4e3a85 BP |
1677 | dp_netdev_free(dp); |
1678 | } | |
1679 | ovs_mutex_unlock(&dp_netdev_mutex); | |
1680 | } | |
1681 | } | |
1682 | ||
72865317 BP |
1683 | static void |
1684 | dpif_netdev_close(struct dpif *dpif) | |
1685 | { | |
1686 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
5279f8fd | 1687 | |
8a4e3a85 | 1688 | dp_netdev_unref(dp); |
72865317 BP |
1689 | free(dpif); |
1690 | } | |
1691 | ||
1692 | static int | |
7dab847a | 1693 | dpif_netdev_destroy(struct dpif *dpif) |
72865317 BP |
1694 | { |
1695 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
5279f8fd | 1696 | |
6a8267c5 | 1697 | if (!atomic_flag_test_and_set(&dp->destroyed)) { |
24f83812 | 1698 | if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) { |
6a8267c5 BP |
1699 | /* Can't happen: 'dpif' still owns a reference to 'dp'. */ |
1700 | OVS_NOT_REACHED(); | |
1701 | } | |
1702 | } | |
5279f8fd | 1703 | |
72865317 BP |
1704 | return 0; |
1705 | } | |
1706 | ||
eb94da30 DDP |
1707 | /* Add 'n' to the atomic variable 'var' non-atomically and using relaxed |
1708 | * load/store semantics. While the increment is not atomic, the load and | |
1709 | * store operations are, making it impossible to read inconsistent values. | |
1710 | * | |
1711 | * This is used to update thread local stats counters. */ | |
1712 | static void | |
1713 | non_atomic_ullong_add(atomic_ullong *var, unsigned long long n) | |
1714 | { | |
1715 | unsigned long long tmp; | |
1716 | ||
1717 | atomic_read_relaxed(var, &tmp); | |
1718 | tmp += n; | |
1719 | atomic_store_relaxed(var, tmp); | |
1720 | } | |
1721 | ||
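/* A minimal usage sketch, assuming a counter with a single writer (the
 * variable name is hypothetical):
 *
 *     static atomic_ullong n_polled;
 *
 *     non_atomic_ullong_add(&n_polled, 1);         // owner thread only
 *
 *     unsigned long long snapshot;
 *     atomic_read_relaxed(&n_polled, &snapshot);   // any thread
 *
 * The single writer is what makes the unlocked read-modify-write safe:
 * concurrent readers may observe a slightly stale count, never a torn
 * one. */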
72865317 | 1722 | static int |
a8d9304d | 1723 | dpif_netdev_get_stats(const struct dpif *dpif, struct dpif_dp_stats *stats) |
72865317 BP |
1724 | { |
1725 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
1c1e46ed | 1726 | struct dp_netdev_pmd_thread *pmd; |
82a48ead | 1727 | uint64_t pmd_stats[PMD_N_STATS]; |
8a4e3a85 | 1728 | |
1c1e46ed AW |
1729 | stats->n_flows = stats->n_hit = stats->n_missed = stats->n_lost = 0; |
1730 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
1731 | stats->n_flows += cmap_count(&pmd->flow_table); | |
82a48ead JS |
1732 | pmd_perf_read_counters(&pmd->perf_stats, pmd_stats); |
1733 | stats->n_hit += pmd_stats[PMD_STAT_EXACT_HIT]; | |
60d8ccae | 1734 | stats->n_hit += pmd_stats[PMD_STAT_SMC_HIT]; |
82a48ead JS |
1735 | stats->n_hit += pmd_stats[PMD_STAT_MASKED_HIT]; |
1736 | stats->n_missed += pmd_stats[PMD_STAT_MISS]; | |
1737 | stats->n_lost += pmd_stats[PMD_STAT_LOST]; | |
51852a57 | 1738 | } |
1ce3fa06 | 1739 | stats->n_masks = UINT32_MAX; |
847108dc | 1740 | stats->n_mask_hit = UINT64_MAX; |
5279f8fd | 1741 | |
72865317 BP |
1742 | return 0; |
1743 | } | |
1744 | ||
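/* Note that 'n_hit' above deliberately folds all three lookup tiers, the
 * exact match cache (EMC), the signature match cache (SMC) and the masked
 * dpcls, into a single counter: dpif callers only distinguish hit, missed
 * and lost, not which datapath cache produced the hit. */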
e4cfed38 | 1745 | static void |
65f13b50 | 1746 | dp_netdev_reload_pmd__(struct dp_netdev_pmd_thread *pmd) |
e4cfed38 | 1747 | { |
accf8626 | 1748 | if (pmd->core_id == NON_PMD_CORE_ID) { |
d0cca6c3 DDP |
1749 | ovs_mutex_lock(&pmd->dp->non_pmd_mutex); |
1750 | ovs_mutex_lock(&pmd->port_mutex); | |
1751 | pmd_load_cached_ports(pmd); | |
1752 | ovs_mutex_unlock(&pmd->port_mutex); | |
1753 | ovs_mutex_unlock(&pmd->dp->non_pmd_mutex); | |
accf8626 AW |
1754 | return; |
1755 | } | |
1756 | ||
1757 | ovs_mutex_lock(&pmd->cond_mutex); | |
2788a1b1 | 1758 | seq_change(pmd->reload_seq); |
14e3e12a | 1759 | atomic_store_relaxed(&pmd->reload, true); |
accf8626 AW |
1760 | ovs_mutex_cond_wait(&pmd->cond, &pmd->cond_mutex); |
1761 | ovs_mutex_unlock(&pmd->cond_mutex); | |
65f13b50 | 1762 | } |
e4cfed38 | 1763 | |
59e6d833 BP |
1764 | static uint32_t |
1765 | hash_port_no(odp_port_t port_no) | |
1766 | { | |
1767 | return hash_int(odp_to_u32(port_no), 0); | |
1768 | } | |
1769 | ||
72865317 | 1770 | static int |
a3e8437a | 1771 | port_create(const char *devname, const char *type, |
b8d29252 | 1772 | odp_port_t port_no, struct dp_netdev_port **portp) |
72865317 | 1773 | { |
4b609110 | 1774 | struct netdev_saved_flags *sf; |
72865317 | 1775 | struct dp_netdev_port *port; |
2499a8ce | 1776 | enum netdev_flags flags; |
b8d29252 | 1777 | struct netdev *netdev; |
e32971b8 | 1778 | int error; |
72865317 | 1779 | |
b8d29252 | 1780 | *portp = NULL; |
72865317 BP |
1781 | |
1782 | /* Open and validate network device. */ | |
a3e8437a | 1783 | error = netdev_open(devname, type, &netdev); |
72865317 | 1784 | if (error) { |
b8d29252 | 1785 | return error; |
72865317 | 1786 | } |
72865317 BP |
1787 | /* XXX reject non-Ethernet devices */ |
1788 | ||
2499a8ce AC |
1789 | netdev_get_flags(netdev, &flags); |
1790 | if (flags & NETDEV_LOOPBACK) { | |
1791 | VLOG_ERR("%s: cannot add a loopback device", devname); | |
d17f4f08 | 1792 | error = EINVAL; |
b8d29252 | 1793 | goto out; |
2499a8ce AC |
1794 | } |
1795 | ||
e32971b8 DDP |
1796 | error = netdev_turn_flags_on(netdev, NETDEV_PROMISC, &sf); |
1797 | if (error) { | |
1798 | VLOG_ERR("%s: cannot set promisc flag", devname); | |
1799 | goto out; | |
324c8374 IM |
1800 | } |
1801 | ||
e4cfed38 | 1802 | port = xzalloc(sizeof *port); |
35303d71 | 1803 | port->port_no = port_no; |
e4cfed38 PS |
1804 | port->netdev = netdev; |
1805 | port->type = xstrdup(type); | |
4b609110 | 1806 | port->sf = sf; |
2fbadeb6 | 1807 | port->emc_enabled = true; |
e32971b8 DDP |
1808 | port->need_reconfigure = true; |
1809 | ovs_mutex_init(&port->txq_used_mutex); | |
e4cfed38 | 1810 | |
b8d29252 | 1811 | *portp = port; |
72865317 BP |
1812 | |
1813 | return 0; | |
d17f4f08 | 1814 | |
d17f4f08 | 1815 | out: |
b8d29252 | 1816 | netdev_close(netdev); |
d17f4f08 | 1817 | return error; |
72865317 BP |
1818 | } |
1819 | ||
b8d29252 DDP |
1820 | static int |
1821 | do_add_port(struct dp_netdev *dp, const char *devname, const char *type, | |
1822 | odp_port_t port_no) | |
1823 | OVS_REQUIRES(dp->port_mutex) | |
1824 | { | |
1825 | struct dp_netdev_port *port; | |
1826 | int error; | |
1827 | ||
1828 | /* Reject devices already in 'dp'. */ | |
1829 | if (!get_port_by_name(dp, devname, &port)) { | |
1830 | return EEXIST; | |
1831 | } | |
1832 | ||
a3e8437a | 1833 | error = port_create(devname, type, port_no, &port); |
b8d29252 DDP |
1834 | if (error) { |
1835 | return error; | |
1836 | } | |
1837 | ||
e9985d6a | 1838 | hmap_insert(&dp->ports, &port->node, hash_port_no(port_no)); |
b8d29252 DDP |
1839 | seq_change(dp->port_seq); |
1840 | ||
e32971b8 DDP |
1841 | reconfigure_datapath(dp); |
1842 | ||
b8d29252 DDP |
1843 | return 0; |
1844 | } | |
1845 | ||
247527db BP |
1846 | static int |
1847 | dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev, | |
4e022ec0 | 1848 | odp_port_t *port_nop) |
247527db BP |
1849 | { |
1850 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
3aa30359 BP |
1851 | char namebuf[NETDEV_VPORT_NAME_BUFSIZE]; |
1852 | const char *dpif_port; | |
4e022ec0 | 1853 | odp_port_t port_no; |
5279f8fd | 1854 | int error; |
247527db | 1855 | |
59e6d833 | 1856 | ovs_mutex_lock(&dp->port_mutex); |
3aa30359 | 1857 | dpif_port = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf); |
4e022ec0 | 1858 | if (*port_nop != ODPP_NONE) { |
ff073a71 BP |
1859 | port_no = *port_nop; |
1860 | error = dp_netdev_lookup_port(dp, *port_nop) ? EBUSY : 0; | |
232dfa4a | 1861 | } else { |
3aa30359 | 1862 | port_no = choose_port(dp, dpif_port); |
5279f8fd | 1863 | error = port_no == ODPP_NONE ? EFBIG : 0; |
232dfa4a | 1864 | } |
5279f8fd | 1865 | if (!error) { |
247527db | 1866 | *port_nop = port_no; |
5279f8fd | 1867 | error = do_add_port(dp, dpif_port, netdev_get_type(netdev), port_no); |
247527db | 1868 | } |
59e6d833 | 1869 | ovs_mutex_unlock(&dp->port_mutex); |
5279f8fd BP |
1870 | |
1871 | return error; | |
72865317 BP |
1872 | } |
1873 | ||
1874 | static int | |
4e022ec0 | 1875 | dpif_netdev_port_del(struct dpif *dpif, odp_port_t port_no) |
72865317 BP |
1876 | { |
1877 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
5279f8fd BP |
1878 | int error; |
1879 | ||
59e6d833 | 1880 | ovs_mutex_lock(&dp->port_mutex); |
c40b890f BP |
1881 | if (port_no == ODPP_LOCAL) { |
1882 | error = EINVAL; | |
1883 | } else { | |
1884 | struct dp_netdev_port *port; | |
1885 | ||
1886 | error = get_port_by_number(dp, port_no, &port); | |
1887 | if (!error) { | |
1888 | do_del_port(dp, port); | |
1889 | } | |
1890 | } | |
59e6d833 | 1891 | ovs_mutex_unlock(&dp->port_mutex); |
5279f8fd BP |
1892 | |
1893 | return error; | |
72865317 BP |
1894 | } |
1895 | ||
1896 | static bool | |
4e022ec0 | 1897 | is_valid_port_number(odp_port_t port_no) |
72865317 | 1898 | { |
ff073a71 BP |
1899 | return port_no != ODPP_NONE; |
1900 | } | |
1901 | ||
1902 | static struct dp_netdev_port * | |
1903 | dp_netdev_lookup_port(const struct dp_netdev *dp, odp_port_t port_no) | |
e9985d6a | 1904 | OVS_REQUIRES(dp->port_mutex) |
ff073a71 BP |
1905 | { |
1906 | struct dp_netdev_port *port; | |
1907 | ||
e9985d6a | 1908 | HMAP_FOR_EACH_WITH_HASH (port, node, hash_port_no(port_no), &dp->ports) { |
35303d71 | 1909 | if (port->port_no == port_no) { |
ff073a71 BP |
1910 | return port; |
1911 | } | |
1912 | } | |
1913 | return NULL; | |
72865317 BP |
1914 | } |
1915 | ||
1916 | static int | |
1917 | get_port_by_number(struct dp_netdev *dp, | |
4e022ec0 | 1918 | odp_port_t port_no, struct dp_netdev_port **portp) |
e9985d6a | 1919 | OVS_REQUIRES(dp->port_mutex) |
72865317 BP |
1920 | { |
1921 | if (!is_valid_port_number(port_no)) { | |
1922 | *portp = NULL; | |
1923 | return EINVAL; | |
1924 | } else { | |
ff073a71 | 1925 | *portp = dp_netdev_lookup_port(dp, port_no); |
0f6a066f | 1926 | return *portp ? 0 : ENODEV; |
72865317 BP |
1927 | } |
1928 | } | |
1929 | ||
b284085e | 1930 | static void |
62453dad | 1931 | port_destroy(struct dp_netdev_port *port) |
b284085e | 1932 | { |
62453dad DDP |
1933 | if (!port) { |
1934 | return; | |
b284085e | 1935 | } |
b284085e | 1936 | |
62453dad DDP |
1937 | netdev_close(port->netdev); |
1938 | netdev_restore_flags(port->sf); | |
accf8626 | 1939 | |
62453dad | 1940 | for (unsigned i = 0; i < port->n_rxq; i++) { |
947dc567 | 1941 | netdev_rxq_close(port->rxqs[i].rx); |
b284085e | 1942 | } |
324c8374 | 1943 | ovs_mutex_destroy(&port->txq_used_mutex); |
3eb67853 | 1944 | free(port->rxq_affinity_list); |
324c8374 | 1945 | free(port->txq_used); |
3eb67853 | 1946 | free(port->rxqs); |
62453dad DDP |
1947 | free(port->type); |
1948 | free(port); | |
b284085e PS |
1949 | } |
1950 | ||
72865317 BP |
1951 | static int |
1952 | get_port_by_name(struct dp_netdev *dp, | |
1953 | const char *devname, struct dp_netdev_port **portp) | |
59e6d833 | 1954 | OVS_REQUIRES(dp->port_mutex) |
72865317 BP |
1955 | { |
1956 | struct dp_netdev_port *port; | |
1957 | ||
e9985d6a | 1958 | HMAP_FOR_EACH (port, node, &dp->ports) { |
3efb6063 | 1959 | if (!strcmp(netdev_get_name(port->netdev), devname)) { |
72865317 BP |
1960 | *portp = port; |
1961 | return 0; | |
1962 | } | |
1963 | } | |
0f6a066f DDP |
1964 | |
1965 | /* Callers of dpif_netdev_port_query_by_name() expect ENODEV for a non | |
1966 | * existing port. */ | |
1967 | return ENODEV; | |
72865317 BP |
1968 | } |
1969 | ||
b9584f21 | 1970 | /* Returns 'true' if there is a port with pmd netdev. */ |
65f13b50 | 1971 | static bool |
b9584f21 | 1972 | has_pmd_port(struct dp_netdev *dp) |
e9985d6a | 1973 | OVS_REQUIRES(dp->port_mutex) |
65f13b50 AW |
1974 | { |
1975 | struct dp_netdev_port *port; | |
1976 | ||
e9985d6a | 1977 | HMAP_FOR_EACH (port, node, &dp->ports) { |
5dd57e80 | 1978 | if (netdev_is_pmd(port->netdev)) { |
b9584f21 | 1979 | return true; |
65f13b50 AW |
1980 | } |
1981 | } | |
1982 | ||
1983 | return false; | |
1984 | } | |
1985 | ||
c40b890f BP |
1986 | static void |
1987 | do_del_port(struct dp_netdev *dp, struct dp_netdev_port *port) | |
59e6d833 | 1988 | OVS_REQUIRES(dp->port_mutex) |
72865317 | 1989 | { |
e9985d6a | 1990 | hmap_remove(&dp->ports, &port->node); |
d33ed218 | 1991 | seq_change(dp->port_seq); |
d0cca6c3 | 1992 | |
e32971b8 | 1993 | reconfigure_datapath(dp); |
72865317 | 1994 | |
62453dad | 1995 | port_destroy(port); |
72865317 BP |
1996 | } |
1997 | ||
1998 | static void | |
4c738a8d BP |
1999 | answer_port_query(const struct dp_netdev_port *port, |
2000 | struct dpif_port *dpif_port) | |
72865317 | 2001 | { |
3efb6063 | 2002 | dpif_port->name = xstrdup(netdev_get_name(port->netdev)); |
0cbfe35d | 2003 | dpif_port->type = xstrdup(port->type); |
35303d71 | 2004 | dpif_port->port_no = port->port_no; |
72865317 BP |
2005 | } |
2006 | ||
2007 | static int | |
4e022ec0 | 2008 | dpif_netdev_port_query_by_number(const struct dpif *dpif, odp_port_t port_no, |
4c738a8d | 2009 | struct dpif_port *dpif_port) |
72865317 BP |
2010 | { |
2011 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
2012 | struct dp_netdev_port *port; | |
2013 | int error; | |
2014 | ||
e9985d6a | 2015 | ovs_mutex_lock(&dp->port_mutex); |
72865317 | 2016 | error = get_port_by_number(dp, port_no, &port); |
4afba28d | 2017 | if (!error && dpif_port) { |
4c738a8d | 2018 | answer_port_query(port, dpif_port); |
72865317 | 2019 | } |
e9985d6a | 2020 | ovs_mutex_unlock(&dp->port_mutex); |
5279f8fd | 2021 | |
72865317 BP |
2022 | return error; |
2023 | } | |
2024 | ||
2025 | static int | |
2026 | dpif_netdev_port_query_by_name(const struct dpif *dpif, const char *devname, | |
4c738a8d | 2027 | struct dpif_port *dpif_port) |
72865317 BP |
2028 | { |
2029 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
2030 | struct dp_netdev_port *port; | |
2031 | int error; | |
2032 | ||
59e6d833 | 2033 | ovs_mutex_lock(&dp->port_mutex); |
72865317 | 2034 | error = get_port_by_name(dp, devname, &port); |
4afba28d | 2035 | if (!error && dpif_port) { |
4c738a8d | 2036 | answer_port_query(port, dpif_port); |
72865317 | 2037 | } |
59e6d833 | 2038 | ovs_mutex_unlock(&dp->port_mutex); |
5279f8fd | 2039 | |
72865317 BP |
2040 | return error; |
2041 | } | |
2042 | ||
61e7deb1 BP |
2043 | static void |
2044 | dp_netdev_flow_free(struct dp_netdev_flow *flow) | |
2045 | { | |
61e7deb1 | 2046 | dp_netdev_actions_free(dp_netdev_flow_get_actions(flow)); |
61e7deb1 BP |
2047 | free(flow); |
2048 | } | |
2049 | ||
ed79f89a DDP |
2050 | static void dp_netdev_flow_unref(struct dp_netdev_flow *flow) |
2051 | { | |
2052 | if (ovs_refcount_unref_relaxed(&flow->ref_cnt) == 1) { | |
2053 | ovsrcu_postpone(dp_netdev_flow_free, flow); | |
2054 | } | |
2055 | } | |
2056 | ||
70e5ed6f JS |
2057 | static uint32_t |
2058 | dp_netdev_flow_hash(const ovs_u128 *ufid) | |
2059 | { | |
2060 | return ufid->u32[0]; | |
2061 | } | |
2062 | ||
3453b4d6 JS |
2063 | static inline struct dpcls * |
2064 | dp_netdev_pmd_lookup_dpcls(struct dp_netdev_pmd_thread *pmd, | |
2065 | odp_port_t in_port) | |
2066 | { | |
2067 | struct dpcls *cls; | |
2068 | uint32_t hash = hash_port_no(in_port); | |
2069 | CMAP_FOR_EACH_WITH_HASH (cls, node, hash, &pmd->classifiers) { | |
2070 | if (cls->in_port == in_port) { | |
2071 | /* Port classifier exists already */ | |
2072 | return cls; | |
2073 | } | |
2074 | } | |
2075 | return NULL; | |
2076 | } | |
2077 | ||
2078 | static inline struct dpcls * | |
2079 | dp_netdev_pmd_find_dpcls(struct dp_netdev_pmd_thread *pmd, | |
2080 | odp_port_t in_port) | |
2081 | OVS_REQUIRES(pmd->flow_mutex) | |
2082 | { | |
2083 | struct dpcls *cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port); | |
2084 | uint32_t hash = hash_port_no(in_port); | |
2085 | ||
2086 | if (!cls) { | |
2087 | /* Create new classifier for in_port */ | |
2088 | cls = xmalloc(sizeof(*cls)); | |
2089 | dpcls_init(cls); | |
2090 | cls->in_port = in_port; | |
2091 | cmap_insert(&pmd->classifiers, &cls->node, hash); | |
2092 | VLOG_DBG("Creating dpcls %p for in_port %d", cls, in_port); | |
2093 | } | |
2094 | return cls; | |
2095 | } | |
2096 | ||
241bad15 YL |
2097 | #define MAX_FLOW_MARK (UINT32_MAX - 1) |
2098 | #define INVALID_FLOW_MARK (UINT32_MAX) | |
2099 | ||
2100 | struct megaflow_to_mark_data { | |
2101 | const struct cmap_node node; | |
2102 | ovs_u128 mega_ufid; | |
2103 | uint32_t mark; | |
2104 | }; | |
2105 | ||
2106 | struct flow_mark { | |
2107 | struct cmap megaflow_to_mark; | |
2108 | struct cmap mark_to_flow; | |
2109 | struct id_pool *pool; | |
241bad15 YL |
2110 | }; |
2111 | ||
2112 | static struct flow_mark flow_mark = { | |
2113 | .megaflow_to_mark = CMAP_INITIALIZER, | |
2114 | .mark_to_flow = CMAP_INITIALIZER, | |
241bad15 YL |
2115 | }; |
2116 | ||
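/* A sketch of the two-level mapping (the ufid and mark values are
 * illustrative only):
 *
 *     megaflow_to_mark:  mega_ufid A -> mark 7               (1:1)
 *     mark_to_flow:      mark 7      -> flow on PMD 0
 *                                    -> flow on PMD 1        (1:N)
 *
 * Per-PMD dp_netdev_flow instances installed from the same megaflow share
 * one mark, so a mark reported by the NIC can be resolved to the right
 * flow on whichever PMD receives the packet (see mark_to_flow_find()). */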
2117 | static uint32_t | |
2118 | flow_mark_alloc(void) | |
2119 | { | |
2120 | uint32_t mark; | |
2121 | ||
2122 | if (!flow_mark.pool) { | |
2123 | /* The pool hasn't been initialized yet, so do it here. */ | |
2124 | flow_mark.pool = id_pool_create(0, MAX_FLOW_MARK); | |
2125 | } | |
2126 | ||
2127 | if (id_pool_alloc_id(flow_mark.pool, &mark)) { | |
2128 | return mark; | |
2129 | } | |
2130 | ||
2131 | return INVALID_FLOW_MARK; | |
2132 | } | |
2133 | ||
2134 | static void | |
2135 | flow_mark_free(uint32_t mark) | |
2136 | { | |
2137 | id_pool_free_id(flow_mark.pool, mark); | |
2138 | } | |
2139 | ||
2140 | /* Associate a megaflow with a mark; this is a 1:1 mapping. */ | |
2141 | static void | |
2142 | megaflow_to_mark_associate(const ovs_u128 *mega_ufid, uint32_t mark) | |
2143 | { | |
2144 | size_t hash = dp_netdev_flow_hash(mega_ufid); | |
2145 | struct megaflow_to_mark_data *data = xzalloc(sizeof(*data)); | |
2146 | ||
2147 | data->mega_ufid = *mega_ufid; | |
2148 | data->mark = mark; | |
2149 | ||
2150 | cmap_insert(&flow_mark.megaflow_to_mark, | |
2151 | CONST_CAST(struct cmap_node *, &data->node), hash); | |
2152 | } | |
2153 | ||
2154 | /* Disassociate a megaflow from its mark. */ | |
2155 | static void | |
2156 | megaflow_to_mark_disassociate(const ovs_u128 *mega_ufid) | |
2157 | { | |
2158 | size_t hash = dp_netdev_flow_hash(mega_ufid); | |
2159 | struct megaflow_to_mark_data *data; | |
2160 | ||
2161 | CMAP_FOR_EACH_WITH_HASH (data, node, hash, &flow_mark.megaflow_to_mark) { | |
2162 | if (ovs_u128_equals(*mega_ufid, data->mega_ufid)) { | |
2163 | cmap_remove(&flow_mark.megaflow_to_mark, | |
2164 | CONST_CAST(struct cmap_node *, &data->node), hash); | |
5752eae4 | 2165 | ovsrcu_postpone(free, data); |
241bad15 YL |
2166 | return; |
2167 | } | |
2168 | } | |
2169 | ||
2170 | VLOG_WARN("Masked ufid "UUID_FMT" is not associated with a mark?\n", | |
2171 | UUID_ARGS((struct uuid *)mega_ufid)); | |
2172 | } | |
2173 | ||
2174 | static inline uint32_t | |
2175 | megaflow_to_mark_find(const ovs_u128 *mega_ufid) | |
2176 | { | |
2177 | size_t hash = dp_netdev_flow_hash(mega_ufid); | |
2178 | struct megaflow_to_mark_data *data; | |
2179 | ||
2180 | CMAP_FOR_EACH_WITH_HASH (data, node, hash, &flow_mark.megaflow_to_mark) { | |
2181 | if (ovs_u128_equals(*mega_ufid, data->mega_ufid)) { | |
2182 | return data->mark; | |
2183 | } | |
2184 | } | |
2185 | ||
2186 | VLOG_WARN("Mark id for ufid "UUID_FMT" was not found\n", | |
2187 | UUID_ARGS((struct uuid *)mega_ufid)); | |
2188 | return INVALID_FLOW_MARK; | |
2189 | } | |
2190 | ||
2191 | /* associate mark with a flow, which is 1:N mapping */ | |
2192 | static void | |
2193 | mark_to_flow_associate(const uint32_t mark, struct dp_netdev_flow *flow) | |
2194 | { | |
2195 | dp_netdev_flow_ref(flow); | |
2196 | ||
2197 | cmap_insert(&flow_mark.mark_to_flow, | |
2198 | CONST_CAST(struct cmap_node *, &flow->mark_node), | |
2199 | hash_int(mark, 0)); | |
2200 | flow->mark = mark; | |
2201 | ||
2202 | VLOG_DBG("Associated dp_netdev flow %p with mark %u\n", flow, mark); | |
2203 | } | |
2204 | ||
2205 | static bool | |
2206 | flow_mark_has_no_ref(uint32_t mark) | |
2207 | { | |
2208 | struct dp_netdev_flow *flow; | |
2209 | ||
2210 | CMAP_FOR_EACH_WITH_HASH (flow, mark_node, hash_int(mark, 0), | |
2211 | &flow_mark.mark_to_flow) { | |
2212 | if (flow->mark == mark) { | |
2213 | return false; | |
2214 | } | |
2215 | } | |
2216 | ||
2217 | return true; | |
2218 | } | |
2219 | ||
2220 | static int | |
2221 | mark_to_flow_disassociate(struct dp_netdev_pmd_thread *pmd, | |
2222 | struct dp_netdev_flow *flow) | |
2223 | { | |
2224 | int ret = 0; | |
2225 | uint32_t mark = flow->mark; | |
2226 | struct cmap_node *mark_node = CONST_CAST(struct cmap_node *, | |
2227 | &flow->mark_node); | |
2228 | ||
2229 | cmap_remove(&flow_mark.mark_to_flow, mark_node, hash_int(mark, 0)); | |
2230 | flow->mark = INVALID_FLOW_MARK; | |
2231 | ||
2232 | /* | |
2233 | * If no flow references the mark any more, remove the flow from | |
2234 | * hardware and free the mark. | |
2235 | */ | |
2236 | if (flow_mark_has_no_ref(mark)) { | |
2237 | struct dp_netdev_port *port; | |
2238 | odp_port_t in_port = flow->flow.in_port.odp_port; | |
2239 | ||
2240 | ovs_mutex_lock(&pmd->dp->port_mutex); | |
2241 | port = dp_netdev_lookup_port(pmd->dp, in_port); | |
2242 | if (port) { | |
2243 | ret = netdev_flow_del(port->netdev, &flow->mega_ufid, NULL); | |
2244 | } | |
2245 | ovs_mutex_unlock(&pmd->dp->port_mutex); | |
2246 | ||
2247 | flow_mark_free(mark); | |
2248 | VLOG_DBG("Freed flow mark %u\n", mark); | |
2249 | ||
2250 | megaflow_to_mark_disassociate(&flow->mega_ufid); | |
2251 | } | |
2252 | dp_netdev_flow_unref(flow); | |
2253 | ||
2254 | return ret; | |
2255 | } | |
2256 | ||
2257 | static void | |
2258 | flow_mark_flush(struct dp_netdev_pmd_thread *pmd) | |
2259 | { | |
2260 | struct dp_netdev_flow *flow; | |
2261 | ||
2262 | CMAP_FOR_EACH (flow, mark_node, &flow_mark.mark_to_flow) { | |
2263 | if (flow->pmd_id == pmd->core_id) { | |
02bb2824 | 2264 | queue_netdev_flow_del(pmd, flow); |
241bad15 YL |
2265 | } |
2266 | } | |
2267 | } | |
2268 | ||
aab96ec4 YL |
2269 | static struct dp_netdev_flow * |
2270 | mark_to_flow_find(const struct dp_netdev_pmd_thread *pmd, | |
2271 | const uint32_t mark) | |
2272 | { | |
2273 | struct dp_netdev_flow *flow; | |
2274 | ||
2275 | CMAP_FOR_EACH_WITH_HASH (flow, mark_node, hash_int(mark, 0), | |
2276 | &flow_mark.mark_to_flow) { | |
2277 | if (flow->mark == mark && flow->pmd_id == pmd->core_id && | |
2278 | flow->dead == false) { | |
2279 | return flow; | |
2280 | } | |
2281 | } | |
2282 | ||
2283 | return NULL; | |
2284 | } | |
2285 | ||
02bb2824 YL |
2286 | static struct dp_flow_offload_item * |
2287 | dp_netdev_alloc_flow_offload(struct dp_netdev_pmd_thread *pmd, | |
2288 | struct dp_netdev_flow *flow, | |
2289 | int op) | |
2290 | { | |
2291 | struct dp_flow_offload_item *offload; | |
2292 | ||
2293 | offload = xzalloc(sizeof(*offload)); | |
2294 | offload->pmd = pmd; | |
2295 | offload->flow = flow; | |
2296 | offload->op = op; | |
2297 | ||
2298 | dp_netdev_flow_ref(flow); | |
2299 | dp_netdev_pmd_try_ref(pmd); | |
2300 | ||
2301 | return offload; | |
2302 | } | |
2303 | ||
2304 | static void | |
2305 | dp_netdev_free_flow_offload(struct dp_flow_offload_item *offload) | |
2306 | { | |
2307 | dp_netdev_pmd_unref(offload->pmd); | |
2308 | dp_netdev_flow_unref(offload->flow); | |
2309 | ||
2310 | free(offload->actions); | |
2311 | free(offload); | |
2312 | } | |
2313 | ||
2314 | static void | |
2315 | dp_netdev_append_flow_offload(struct dp_flow_offload_item *offload) | |
2316 | { | |
2317 | ovs_mutex_lock(&dp_flow_offload.mutex); | |
2318 | ovs_list_push_back(&dp_flow_offload.list, &offload->node); | |
2319 | xpthread_cond_signal(&dp_flow_offload.cond); | |
2320 | ovs_mutex_unlock(&dp_flow_offload.mutex); | |
2321 | } | |
2322 | ||
2323 | static int | |
2324 | dp_netdev_flow_offload_del(struct dp_flow_offload_item *offload) | |
2325 | { | |
2326 | return mark_to_flow_disassociate(offload->pmd, offload->flow); | |
2327 | } | |
2328 | ||
2329 | /* | |
2330 | * There are two flow offload operations here: addition and modification. | |
2331 | * | |
2332 | * For flow addition, this function does: | |
2333 | * 1. allocate a new flow mark id | |
2334 | * 2. perform the hardware flow offload | |
2335 | * 3. associate the flow mark with the flow and the mega flow | |
2336 | * | |
2337 | * For flow modification, both the flow mark and the associations are | |
2338 | * still valid, so only step 2, the hardware offload itself, is needed. | |
2339 | */ | |
2340 | static int | |
2341 | dp_netdev_flow_offload_put(struct dp_flow_offload_item *offload) | |
2342 | { | |
2343 | struct dp_netdev_port *port; | |
2344 | struct dp_netdev_pmd_thread *pmd = offload->pmd; | |
2345 | struct dp_netdev_flow *flow = offload->flow; | |
2346 | odp_port_t in_port = flow->flow.in_port.odp_port; | |
2347 | bool modification = offload->op == DP_NETDEV_FLOW_OFFLOAD_OP_MOD; | |
2348 | struct offload_info info; | |
2349 | uint32_t mark; | |
2350 | int ret; | |
2351 | ||
2352 | if (flow->dead) { | |
2353 | return -1; | |
2354 | } | |
2355 | ||
2356 | if (modification) { | |
2357 | mark = flow->mark; | |
2358 | ovs_assert(mark != INVALID_FLOW_MARK); | |
2359 | } else { | |
2360 | /* | |
2361 | * If a mega flow has already been offloaded (from other PMD | |
2362 | * instances), do not offload it again. | |
2363 | */ | |
2364 | mark = megaflow_to_mark_find(&flow->mega_ufid); | |
2365 | if (mark != INVALID_FLOW_MARK) { | |
2366 | VLOG_DBG("Flow has already been offloaded with mark %u\n", mark); | |
2367 | if (flow->mark != INVALID_FLOW_MARK) { | |
2368 | ovs_assert(flow->mark == mark); | |
2369 | } else { | |
2370 | mark_to_flow_associate(mark, flow); | |
2371 | } | |
2372 | return 0; | |
2373 | } | |
2374 | ||
2375 | mark = flow_mark_alloc(); | |
2376 | if (mark == INVALID_FLOW_MARK) { | |
2377 | VLOG_ERR("Failed to allocate flow mark!\n"); | |
2378 | } | |
2379 | } | |
2380 | info.flow_mark = mark; | |
2381 | ||
2382 | ovs_mutex_lock(&pmd->dp->port_mutex); | |
2383 | port = dp_netdev_lookup_port(pmd->dp, in_port); | |
2384 | if (!port) { | |
2385 | ovs_mutex_unlock(&pmd->dp->port_mutex); | |
2386 | return -1; | |
2387 | } | |
2388 | ret = netdev_flow_put(port->netdev, &offload->match, | |
2389 | CONST_CAST(struct nlattr *, offload->actions), | |
2390 | offload->actions_len, &flow->mega_ufid, &info, | |
2391 | NULL); | |
2392 | ovs_mutex_unlock(&pmd->dp->port_mutex); | |
2393 | ||
2394 | if (ret) { | |
2395 | if (!modification) { | |
2396 | flow_mark_free(mark); | |
2397 | } else { | |
2398 | mark_to_flow_disassociate(pmd, flow); | |
2399 | } | |
2400 | return -1; | |
2401 | } | |
2402 | ||
2403 | if (!modification) { | |
2404 | megaflow_to_mark_associate(&flow->mega_ufid, mark); | |
2405 | mark_to_flow_associate(mark, flow); | |
2406 | } | |
2407 | ||
2408 | return 0; | |
2409 | } | |
2410 | ||
2411 | static void * | |
2412 | dp_netdev_flow_offload_main(void *data OVS_UNUSED) | |
2413 | { | |
2414 | struct dp_flow_offload_item *offload; | |
2415 | struct ovs_list *list; | |
2416 | const char *op; | |
2417 | int ret; | |
2418 | ||
2419 | for (;;) { | |
2420 | ovs_mutex_lock(&dp_flow_offload.mutex); | |
2421 | if (ovs_list_is_empty(&dp_flow_offload.list)) { | |
2422 | ovsrcu_quiesce_start(); | |
2423 | ovs_mutex_cond_wait(&dp_flow_offload.cond, | |
2424 | &dp_flow_offload.mutex); | |
6c95dbf9 | 2425 | ovsrcu_quiesce_end(); |
02bb2824 YL |
2426 | } |
2427 | list = ovs_list_pop_front(&dp_flow_offload.list); | |
2428 | offload = CONTAINER_OF(list, struct dp_flow_offload_item, node); | |
2429 | ovs_mutex_unlock(&dp_flow_offload.mutex); | |
2430 | ||
2431 | switch (offload->op) { | |
2432 | case DP_NETDEV_FLOW_OFFLOAD_OP_ADD: | |
2433 | op = "add"; | |
2434 | ret = dp_netdev_flow_offload_put(offload); | |
2435 | break; | |
2436 | case DP_NETDEV_FLOW_OFFLOAD_OP_MOD: | |
2437 | op = "modify"; | |
2438 | ret = dp_netdev_flow_offload_put(offload); | |
2439 | break; | |
2440 | case DP_NETDEV_FLOW_OFFLOAD_OP_DEL: | |
2441 | op = "delete"; | |
2442 | ret = dp_netdev_flow_offload_del(offload); | |
2443 | break; | |
2444 | default: | |
2445 | OVS_NOT_REACHED(); | |
2446 | } | |
2447 | ||
2448 | VLOG_DBG("%s to %s netdev flow\n", | |
2449 | ret == 0 ? "succeeded" : "failed", op); | |
2450 | dp_netdev_free_flow_offload(offload); | |
2451 | } | |
2452 | ||
2453 | return NULL; | |
2454 | } | |
2455 | ||
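/* In short, this is a single-consumer work queue: producers enqueue items
 * with dp_netdev_append_flow_offload() (mutex plus condition variable),
 * and this lone offload thread dequeues and executes them, keeping slow
 * hardware offload calls off the packet-processing threads.  Quiescing
 * inside the empty-queue wait lets RCU reclamation proceed while there is
 * no offload work. */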
2456 | static void | |
2457 | queue_netdev_flow_del(struct dp_netdev_pmd_thread *pmd, | |
2458 | struct dp_netdev_flow *flow) | |
2459 | { | |
2460 | struct dp_flow_offload_item *offload; | |
2461 | ||
2462 | if (ovsthread_once_start(&offload_thread_once)) { | |
2463 | xpthread_cond_init(&dp_flow_offload.cond, NULL); | |
2464 | ovs_thread_create("dp_netdev_flow_offload", | |
2465 | dp_netdev_flow_offload_main, NULL); | |
2466 | ovsthread_once_done(&offload_thread_once); | |
2467 | } | |
2468 | ||
2469 | offload = dp_netdev_alloc_flow_offload(pmd, flow, | |
2470 | DP_NETDEV_FLOW_OFFLOAD_OP_DEL); | |
2471 | dp_netdev_append_flow_offload(offload); | |
2472 | } | |
2473 | ||
2474 | static void | |
2475 | queue_netdev_flow_put(struct dp_netdev_pmd_thread *pmd, | |
2476 | struct dp_netdev_flow *flow, struct match *match, | |
2477 | const struct nlattr *actions, size_t actions_len) | |
2478 | { | |
2479 | struct dp_flow_offload_item *offload; | |
2480 | int op; | |
2481 | ||
2482 | if (!netdev_is_flow_api_enabled()) { | |
2483 | return; | |
2484 | } | |
2485 | ||
2486 | if (ovsthread_once_start(&offload_thread_once)) { | |
2487 | xpthread_cond_init(&dp_flow_offload.cond, NULL); | |
2488 | ovs_thread_create("dp_netdev_flow_offload", | |
2489 | dp_netdev_flow_offload_main, NULL); | |
2490 | ovsthread_once_done(&offload_thread_once); | |
2491 | } | |
2492 | ||
2493 | if (flow->mark != INVALID_FLOW_MARK) { | |
2494 | op = DP_NETDEV_FLOW_OFFLOAD_OP_MOD; | |
2495 | } else { | |
2496 | op = DP_NETDEV_FLOW_OFFLOAD_OP_ADD; | |
2497 | } | |
2498 | offload = dp_netdev_alloc_flow_offload(pmd, flow, op); | |
2499 | offload->match = *match; | |
2500 | offload->actions = xmalloc(actions_len); | |
2501 | memcpy(offload->actions, actions, actions_len); | |
2502 | offload->actions_len = actions_len; | |
2503 | ||
2504 | dp_netdev_append_flow_offload(offload); | |
2505 | } | |
2506 | ||
72865317 | 2507 | static void |
1c1e46ed AW |
2508 | dp_netdev_pmd_remove_flow(struct dp_netdev_pmd_thread *pmd, |
2509 | struct dp_netdev_flow *flow) | |
2510 | OVS_REQUIRES(pmd->flow_mutex) | |
72865317 | 2511 | { |
9f361d6b | 2512 | struct cmap_node *node = CONST_CAST(struct cmap_node *, &flow->node); |
3453b4d6 JS |
2513 | struct dpcls *cls; |
2514 | odp_port_t in_port = flow->flow.in_port.odp_port; | |
2c0ea78f | 2515 | |
3453b4d6 JS |
2516 | cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port); |
2517 | ovs_assert(cls != NULL); | |
2518 | dpcls_remove(cls, &flow->cr); | |
1c1e46ed | 2519 | cmap_remove(&pmd->flow_table, node, dp_netdev_flow_hash(&flow->ufid)); |
241bad15 | 2520 | if (flow->mark != INVALID_FLOW_MARK) { |
02bb2824 | 2521 | queue_netdev_flow_del(pmd, flow); |
241bad15 | 2522 | } |
9bbf1c3d | 2523 | flow->dead = true; |
ed79f89a DDP |
2524 | |
2525 | dp_netdev_flow_unref(flow); | |
72865317 BP |
2526 | } |
2527 | ||
2528 | static void | |
1c1e46ed | 2529 | dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd) |
72865317 | 2530 | { |
78c8df12 | 2531 | struct dp_netdev_flow *netdev_flow; |
72865317 | 2532 | |
1c1e46ed AW |
2533 | ovs_mutex_lock(&pmd->flow_mutex); |
2534 | CMAP_FOR_EACH (netdev_flow, node, &pmd->flow_table) { | |
2535 | dp_netdev_pmd_remove_flow(pmd, netdev_flow); | |
72865317 | 2536 | } |
1c1e46ed | 2537 | ovs_mutex_unlock(&pmd->flow_mutex); |
72865317 BP |
2538 | } |
2539 | ||
2540 | static int | |
2541 | dpif_netdev_flow_flush(struct dpif *dpif) | |
2542 | { | |
2543 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
1c1e46ed AW |
2544 | struct dp_netdev_pmd_thread *pmd; |
2545 | ||
2546 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
2547 | dp_netdev_pmd_flow_flush(pmd); | |
2548 | } | |
5279f8fd | 2549 | |
72865317 BP |
2550 | return 0; |
2551 | } | |
2552 | ||
b0ec0f27 | 2553 | struct dp_netdev_port_state { |
e9985d6a | 2554 | struct hmap_position position; |
4c738a8d | 2555 | char *name; |
b0ec0f27 BP |
2556 | }; |
2557 | ||
2558 | static int | |
2559 | dpif_netdev_port_dump_start(const struct dpif *dpif OVS_UNUSED, void **statep) | |
2560 | { | |
2561 | *statep = xzalloc(sizeof(struct dp_netdev_port_state)); | |
2562 | return 0; | |
2563 | } | |
2564 | ||
72865317 | 2565 | static int |
b0ec0f27 | 2566 | dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_, |
4c738a8d | 2567 | struct dpif_port *dpif_port) |
72865317 | 2568 | { |
b0ec0f27 | 2569 | struct dp_netdev_port_state *state = state_; |
72865317 | 2570 | struct dp_netdev *dp = get_dp_netdev(dpif); |
e9985d6a | 2571 | struct hmap_node *node; |
ff073a71 | 2572 | int retval; |
72865317 | 2573 | |
e9985d6a DDP |
2574 | ovs_mutex_lock(&dp->port_mutex); |
2575 | node = hmap_at_position(&dp->ports, &state->position); | |
ff073a71 BP |
2576 | if (node) { |
2577 | struct dp_netdev_port *port; | |
5279f8fd | 2578 | |
ff073a71 BP |
2579 | port = CONTAINER_OF(node, struct dp_netdev_port, node); |
2580 | ||
2581 | free(state->name); | |
2582 | state->name = xstrdup(netdev_get_name(port->netdev)); | |
2583 | dpif_port->name = state->name; | |
2584 | dpif_port->type = port->type; | |
35303d71 | 2585 | dpif_port->port_no = port->port_no; |
ff073a71 BP |
2586 | |
2587 | retval = 0; | |
2588 | } else { | |
2589 | retval = EOF; | |
72865317 | 2590 | } |
e9985d6a | 2591 | ovs_mutex_unlock(&dp->port_mutex); |
5279f8fd | 2592 | |
ff073a71 | 2593 | return retval; |
b0ec0f27 BP |
2594 | } |
2595 | ||
2596 | static int | |
4c738a8d | 2597 | dpif_netdev_port_dump_done(const struct dpif *dpif OVS_UNUSED, void *state_) |
b0ec0f27 | 2598 | { |
4c738a8d BP |
2599 | struct dp_netdev_port_state *state = state_; |
2600 | free(state->name); | |
b0ec0f27 BP |
2601 | free(state); |
2602 | return 0; | |
72865317 BP |
2603 | } |
2604 | ||
2605 | static int | |
67a4917b | 2606 | dpif_netdev_port_poll(const struct dpif *dpif_, char **devnamep OVS_UNUSED) |
72865317 BP |
2607 | { |
2608 | struct dpif_netdev *dpif = dpif_netdev_cast(dpif_); | |
d33ed218 | 2609 | uint64_t new_port_seq; |
5279f8fd BP |
2610 | int error; |
2611 | ||
d33ed218 BP |
2612 | new_port_seq = seq_read(dpif->dp->port_seq); |
2613 | if (dpif->last_port_seq != new_port_seq) { | |
2614 | dpif->last_port_seq = new_port_seq; | |
5279f8fd | 2615 | error = ENOBUFS; |
72865317 | 2616 | } else { |
5279f8fd | 2617 | error = EAGAIN; |
72865317 | 2618 | } |
5279f8fd BP |
2619 | |
2620 | return error; | |
72865317 BP |
2621 | } |
2622 | ||
2623 | static void | |
2624 | dpif_netdev_port_poll_wait(const struct dpif *dpif_) | |
2625 | { | |
2626 | struct dpif_netdev *dpif = dpif_netdev_cast(dpif_); | |
5279f8fd | 2627 | |
d33ed218 | 2628 | seq_wait(dpif->dp->port_seq, dpif->last_port_seq); |
8a4e3a85 BP |
2629 | } |
2630 | ||
2631 | static struct dp_netdev_flow * | |
0de8783a | 2632 | dp_netdev_flow_cast(const struct dpcls_rule *cr) |
8a4e3a85 BP |
2633 | { |
2634 | return cr ? CONTAINER_OF(cr, struct dp_netdev_flow, cr) : NULL; | |
72865317 BP |
2635 | } |
2636 | ||
9bbf1c3d DDP |
2637 | static bool dp_netdev_flow_ref(struct dp_netdev_flow *flow) |
2638 | { | |
2639 | return ovs_refcount_try_ref_rcu(&flow->ref_cnt); | |
2640 | } | |
2641 | ||
79df317f DDP |
2642 | /* netdev_flow_key utilities. |
2643 | * | |
2644 | * netdev_flow_key is basically a miniflow. We use these functions | |
2645 | * (netdev_flow_key_clone, netdev_flow_key_equal, ...) instead of the miniflow | |
2646 | * functions (miniflow_clone_inline, miniflow_equal, ...), because: | |
2647 | * | |
2648 | * - Since we are dealing exclusively with miniflows created by | |
2649 | * miniflow_extract(), if the map is different the miniflow is different. | |
2650 | * Therefore we can be faster by comparing the map and the miniflow in a | |
2651 | * single memcmp(). | |
5fcff47b | 2652 | * - These functions can be inlined by the compiler. */ |
79df317f | 2653 | |
361d808d | 2654 | /* Given the number of bits set in miniflow's maps, returns the size of the |
caeb4906 | 2655 | * 'netdev_flow_key.mf' */ |
361d808d JR |
2656 | static inline size_t |
2657 | netdev_flow_key_size(size_t flow_u64s) | |
79df317f | 2658 | { |
361d808d | 2659 | return sizeof(struct miniflow) + MINIFLOW_VALUES_SIZE(flow_u64s); |
79df317f DDP |
2660 | } |
2661 | ||
79df317f DDP |
2662 | static inline bool |
2663 | netdev_flow_key_equal(const struct netdev_flow_key *a, | |
0de8783a JR |
2664 | const struct netdev_flow_key *b) |
2665 | { | |
caeb4906 JR |
2666 | /* 'b->len' may not be set yet. */ | |
2667 | return a->hash == b->hash && !memcmp(&a->mf, &b->mf, a->len); | |
0de8783a JR |
2668 | } |
2669 | ||
2670 | /* Used to compare 'netdev_flow_key' in the exact match cache to a miniflow. | |
d79a39fe | 2671 | * The maps are compared bitwise, so both 'key->mf' and 'mf' must have been |
0de8783a JR |
2672 | * generated by miniflow_extract. */ |
2673 | static inline bool | |
2674 | netdev_flow_key_equal_mf(const struct netdev_flow_key *key, | |
2675 | const struct miniflow *mf) | |
79df317f | 2676 | { |
caeb4906 | 2677 | return !memcmp(&key->mf, mf, key->len); |
79df317f DDP |
2678 | } |
2679 | ||
2680 | static inline void | |
2681 | netdev_flow_key_clone(struct netdev_flow_key *dst, | |
0de8783a JR |
2682 | const struct netdev_flow_key *src) |
2683 | { | |
caeb4906 JR |
2684 | memcpy(dst, src, |
2685 | offsetof(struct netdev_flow_key, mf) + src->len); | |
0de8783a JR |
2686 | } |
2687 | ||
0de8783a JR |
2688 | /* Initialize a netdev_flow_key 'mask' from 'match'. */ |
2689 | static inline void | |
2690 | netdev_flow_mask_init(struct netdev_flow_key *mask, | |
2691 | const struct match *match) | |
2692 | { | |
09b0fa9c | 2693 | uint64_t *dst = miniflow_values(&mask->mf); |
5fcff47b | 2694 | struct flowmap fmap; |
0de8783a | 2695 | uint32_t hash = 0; |
5fcff47b | 2696 | size_t idx; |
0de8783a JR |
2697 | |
2698 | /* Only check masks that make sense for the flow. */ | |
5fcff47b JR |
2699 | flow_wc_map(&match->flow, &fmap); |
2700 | flowmap_init(&mask->mf.map); | |
0de8783a | 2701 | |
5fcff47b JR |
2702 | FLOWMAP_FOR_EACH_INDEX(idx, fmap) { |
2703 | uint64_t mask_u64 = flow_u64_value(&match->wc.masks, idx); | |
0de8783a | 2704 | |
5fcff47b JR |
2705 | if (mask_u64) { |
2706 | flowmap_set(&mask->mf.map, idx, 1); | |
2707 | *dst++ = mask_u64; | |
2708 | hash = hash_add64(hash, mask_u64); | |
0de8783a | 2709 | } |
0de8783a JR |
2710 | } |
2711 | ||
5fcff47b | 2712 | map_t map; |
0de8783a | 2713 | |
5fcff47b JR |
2714 | FLOWMAP_FOR_EACH_MAP (map, mask->mf.map) { |
2715 | hash = hash_add64(hash, map); | |
2716 | } | |
0de8783a | 2717 | |
5fcff47b | 2718 | size_t n = dst - miniflow_get_values(&mask->mf); |
0de8783a | 2719 | |
d70e8c28 | 2720 | mask->hash = hash_finish(hash, n * 8); |
0de8783a JR |
2721 | mask->len = netdev_flow_key_size(n); |
2722 | } | |
2723 | ||
361d808d | 2724 | /* Initializes 'dst' as a copy of 'flow' masked with 'mask'. */ |
0de8783a JR |
2725 | static inline void |
2726 | netdev_flow_key_init_masked(struct netdev_flow_key *dst, | |
2727 | const struct flow *flow, | |
2728 | const struct netdev_flow_key *mask) | |
79df317f | 2729 | { |
09b0fa9c JR |
2730 | uint64_t *dst_u64 = miniflow_values(&dst->mf); |
2731 | const uint64_t *mask_u64 = miniflow_get_values(&mask->mf); | |
0de8783a | 2732 | uint32_t hash = 0; |
d70e8c28 | 2733 | uint64_t value; |
0de8783a JR |
2734 | |
2735 | dst->len = mask->len; | |
361d808d | 2736 | dst->mf = mask->mf; /* Copy maps. */ |
0de8783a | 2737 | |
5fcff47b | 2738 | FLOW_FOR_EACH_IN_MAPS(value, flow, mask->mf.map) { |
d70e8c28 JR |
2739 | *dst_u64 = value & *mask_u64++; |
2740 | hash = hash_add64(hash, *dst_u64++); | |
0de8783a | 2741 | } |
09b0fa9c JR |
2742 | dst->hash = hash_finish(hash, |
2743 | (dst_u64 - miniflow_get_values(&dst->mf)) * 8); | |
0de8783a JR |
2744 | } |
2745 | ||
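/* A worked example with illustrative values: if the mask built by
 * netdev_flow_mask_init() selects only the in_port u64 and the packet has
 * in_port 5, then 'dst' stores the single value (5 & mask_u64) and
 * 'dst->hash' covers just that value, so packets differing only in
 * unmasked fields yield identical masked keys and hashes. */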
5fcff47b JR |
2746 | /* Iterate through netdev_flow_key TNL u64 values specified by 'FLOWMAP'. */ |
2747 | #define NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(VALUE, KEY, FLOWMAP) \ | |
2748 | MINIFLOW_FOR_EACH_IN_FLOWMAP(VALUE, &(KEY)->mf, FLOWMAP) | |
0de8783a JR |
2749 | |
2750 | /* Returns a hash value for the bits of 'key' where there are 1-bits in | |
2751 | * 'mask'. */ | |
2752 | static inline uint32_t | |
2753 | netdev_flow_key_hash_in_mask(const struct netdev_flow_key *key, | |
2754 | const struct netdev_flow_key *mask) | |
2755 | { | |
09b0fa9c | 2756 | const uint64_t *p = miniflow_get_values(&mask->mf); |
0de8783a | 2757 | uint32_t hash = 0; |
5fcff47b | 2758 | uint64_t value; |
0de8783a | 2759 | |
5fcff47b JR |
2760 | NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, key, mask->mf.map) { |
2761 | hash = hash_add64(hash, value & *p++); | |
0de8783a JR |
2762 | } |
2763 | ||
09b0fa9c | 2764 | return hash_finish(hash, (p - miniflow_get_values(&mask->mf)) * 8); |
79df317f DDP |
2765 | } |
2766 | ||
9bbf1c3d DDP |
2767 | static inline bool |
2768 | emc_entry_alive(struct emc_entry *ce) | |
2769 | { | |
2770 | return ce->flow && !ce->flow->dead; | |
2771 | } | |
2772 | ||
2773 | static void | |
2774 | emc_clear_entry(struct emc_entry *ce) | |
2775 | { | |
2776 | if (ce->flow) { | |
2777 | dp_netdev_flow_unref(ce->flow); | |
2778 | ce->flow = NULL; | |
2779 | } | |
2780 | } | |
2781 | ||
2782 | static inline void | |
2783 | emc_change_entry(struct emc_entry *ce, struct dp_netdev_flow *flow, | |
0de8783a | 2784 | const struct netdev_flow_key *key) |
9bbf1c3d DDP |
2785 | { |
2786 | if (ce->flow != flow) { | |
2787 | if (ce->flow) { | |
2788 | dp_netdev_flow_unref(ce->flow); | |
2789 | } | |
2790 | ||
2791 | if (dp_netdev_flow_ref(flow)) { | |
2792 | ce->flow = flow; | |
2793 | } else { | |
2794 | ce->flow = NULL; | |
2795 | } | |
2796 | } | |
0de8783a JR |
2797 | if (key) { |
2798 | netdev_flow_key_clone(&ce->key, key); | |
9bbf1c3d DDP |
2799 | } |
2800 | } | |
2801 | ||
2802 | static inline void | |
0de8783a | 2803 | emc_insert(struct emc_cache *cache, const struct netdev_flow_key *key, |
9bbf1c3d DDP |
2804 | struct dp_netdev_flow *flow) |
2805 | { | |
2806 | struct emc_entry *to_be_replaced = NULL; | |
2807 | struct emc_entry *current_entry; | |
2808 | ||
0de8783a JR |
2809 | EMC_FOR_EACH_POS_WITH_HASH(cache, current_entry, key->hash) { |
2810 | if (netdev_flow_key_equal(¤t_entry->key, key)) { | |
9bbf1c3d | 2811 | /* We found the entry with the 'mf' miniflow */ |
0de8783a | 2812 | emc_change_entry(current_entry, flow, NULL); |
9bbf1c3d DDP |
2813 | return; |
2814 | } | |
2815 | ||
2816 | /* Replacement policy: put the flow in an empty (not alive) entry, or, | |
2817 | * if all candidates are alive, in the entry with the lowest key hash. */ | |
2818 | if (!to_be_replaced | |
2819 | || (emc_entry_alive(to_be_replaced) | |
2820 | && !emc_entry_alive(current_entry)) | |
0de8783a | 2821 | || current_entry->key.hash < to_be_replaced->key.hash) { |
9bbf1c3d DDP |
2822 | to_be_replaced = current_entry; |
2823 | } | |
2824 | } | |
2825 | /* We didn't find the miniflow in the cache. | |
2826 | * The 'to_be_replaced' entry is where the new flow will be stored */ | |
2827 | ||
0de8783a | 2828 | emc_change_entry(to_be_replaced, flow, key); |
9bbf1c3d DDP |
2829 | } |
2830 | ||
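/* Sketch of the EMC victim selection above, with a simplified entry type
 * ('alive' flag plus key hash) standing in for struct emc_entry.  The
 * condition is copied from emc_insert(): a dead slot is preferred over a
 * live one, and a lower key hash also wins, which serves as a cheap
 * pseudo-random choice among the probed positions. */
#include <stdbool.h>
#include <stdint.h>

struct toy_emc_entry {
    bool alive;
    uint32_t hash;
};

static struct toy_emc_entry *
toy_pick_victim(struct toy_emc_entry *entries, int n)
{
    struct toy_emc_entry *victim = NULL;

    for (int i = 0; i < n; i++) {
        struct toy_emc_entry *e = &entries[i];

        if (!victim
            || (victim->alive && !e->alive)     /* Prefer dead slots. */
            || e->hash < victim->hash) {        /* Lower hash wins. */
            victim = e;
        }
    }
    return victim;
}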
4c30b246 CL |
2831 | static inline void |
2832 | emc_probabilistic_insert(struct dp_netdev_pmd_thread *pmd, | |
2833 | const struct netdev_flow_key *key, | |
2834 | struct dp_netdev_flow *flow) | |
2835 | { | |
2836 | /* Insert an entry into the EMC based on probability value 'min'. By | |
2837 | * default the value is UINT32_MAX / 100 which yields an insertion | |
2838 | * probability of 1/100, i.e. 1%. */ | |
2839 | ||
2fbadeb6 | 2840 | uint32_t min = pmd->ctx.emc_insert_min; |
4c30b246 | 2841 | |
656238ee | 2842 | if (min && random_uint32() <= min) { |
60d8ccae | 2843 | emc_insert(&(pmd->flow_cache).emc_cache, key, flow); |
4c30b246 CL |
2844 | } |
2845 | } | |
2846 | ||
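/* Standalone sketch of the probability encoding used above: an inverse
 * probability N maps to the threshold UINT32_MAX / N, so a uniform 32-bit
 * random draw is <= the threshold roughly once every N calls, and N == 0
 * (threshold 0) disables insertion entirely.  rand() here is only a
 * stand-in for the OVS random_uint32(), so the draw is not perfectly
 * uniform. */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

static uint32_t
toy_inv_prob_to_min(uint32_t inv_prob)
{
    return inv_prob ? UINT32_MAX / inv_prob : 0;   /* 0 -> never insert. */
}

static bool
toy_should_insert(uint32_t min)
{
    uint32_t r = ((uint32_t) rand() << 16) ^ (uint32_t) rand();

    return min && r <= min;                        /* ~1/N of the time. */
}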
9bbf1c3d | 2847 | static inline struct dp_netdev_flow * |
0de8783a | 2848 | emc_lookup(struct emc_cache *cache, const struct netdev_flow_key *key) |
9bbf1c3d DDP |
2849 | { |
2850 | struct emc_entry *current_entry; | |
2851 | ||
0de8783a JR |
2852 | EMC_FOR_EACH_POS_WITH_HASH(cache, current_entry, key->hash) { |
2853 | if (current_entry->key.hash == key->hash | |
2854 | && emc_entry_alive(current_entry) | |
2855 | && netdev_flow_key_equal_mf(¤t_entry->key, &key->mf)) { | |
9bbf1c3d | 2856 | |
0de8783a | 2857 | /* We found the entry with the 'key->mf' miniflow */ |
9bbf1c3d DDP |
2858 | return current_entry->flow; |
2859 | } | |
2860 | } | |
2861 | ||
2862 | return NULL; | |
2863 | } | |
2864 | ||
60d8ccae YW |
2865 | static inline const struct cmap_node * |
2866 | smc_entry_get(struct dp_netdev_pmd_thread *pmd, const uint32_t hash) | |
2867 | { | |
2868 | struct smc_cache *cache = &(pmd->flow_cache).smc_cache; | |
2869 | struct smc_bucket *bucket = &cache->buckets[hash & SMC_MASK]; | |
2870 | uint16_t sig = hash >> 16; | |
2871 | uint16_t index = UINT16_MAX; | |
2872 | ||
2873 | for (int i = 0; i < SMC_ENTRY_PER_BUCKET; i++) { | |
2874 | if (bucket->sig[i] == sig) { | |
2875 | index = bucket->flow_idx[i]; | |
2876 | break; | |
2877 | } | |
2878 | } | |
2879 | if (index != UINT16_MAX) { | |
2880 | return cmap_find_by_index(&pmd->flow_table, index); | |
2881 | } | |
2882 | return NULL; | |
2883 | } | |
2884 | ||
2885 | static void | |
2886 | smc_clear_entry(struct smc_bucket *b, int idx) | |
2887 | { | |
2888 | b->flow_idx[idx] = UINT16_MAX; | |
2889 | } | |
2890 | ||
2891 | /* Insert the flow_table index into SMC. Insertion may fail when 1) SMC is | |
2892 | * turned off, 2) the flow_table index is larger than uint16_t can handle. | |
2893 | * If an SMC entry with the same signature already exists, its index is | |
2894 | * updated. Otherwise an empty entry is taken if one is available; if | |
2895 | * there is neither an empty entry nor a matching signature, a random | |
2896 | * entry from the hashed bucket is evicted. */ | |
2897 | static inline void | |
2898 | smc_insert(struct dp_netdev_pmd_thread *pmd, | |
2899 | const struct netdev_flow_key *key, | |
2900 | uint32_t hash) | |
2901 | { | |
2902 | struct smc_cache *smc_cache = &(pmd->flow_cache).smc_cache; | |
2903 | struct smc_bucket *bucket = &smc_cache->buckets[key->hash & SMC_MASK]; | |
2904 | uint16_t index; | |
2905 | uint32_t cmap_index; | |
2906 | bool smc_enable_db; | |
2907 | int i; | |
2908 | ||
2909 | atomic_read_relaxed(&pmd->dp->smc_enable_db, &smc_enable_db); | |
2910 | if (!smc_enable_db) { | |
2911 | return; | |
2912 | } | |
2913 | ||
2914 | cmap_index = cmap_find_index(&pmd->flow_table, hash); | |
2915 | index = (cmap_index >= UINT16_MAX) ? UINT16_MAX : (uint16_t)cmap_index; | |
2916 | ||
2917 | /* If the index is larger than SMC can handle (uint16_t), we don't | |
2918 | * insert */ | |
2919 | if (index == UINT16_MAX) { | |
2920 | return; | |
2921 | } | |
2922 | ||
2923 | /* If an entry with the same signature already exists, update the index. */ | |
2924 | uint16_t sig = key->hash >> 16; | |
2925 | for (i = 0; i < SMC_ENTRY_PER_BUCKET; i++) { | |
2926 | if (bucket->sig[i] == sig) { | |
2927 | bucket->flow_idx[i] = index; | |
2928 | return; | |
2929 | } | |
2930 | } | |
2931 | /* If there is an empty entry, occupy it. */ | |
2932 | for (i = 0; i < SMC_ENTRY_PER_BUCKET; i++) { | |
2933 | if (bucket->flow_idx[i] == UINT16_MAX) { | |
2934 | bucket->sig[i] = sig; | |
2935 | bucket->flow_idx[i] = index; | |
2936 | return; | |
2937 | } | |
2938 | } | |
2939 | /* Otherwise, pick a random entry. */ | |
2940 | i = random_uint32() % SMC_ENTRY_PER_BUCKET; | |
2941 | bucket->sig[i] = sig; | |
2942 | bucket->flow_idx[i] = index; | |
2943 | } | |
2944 | ||
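/* Toy model of the SMC addressing scheme above: the low bits of the
 * 32-bit hash select the bucket and the high 16 bits form the signature;
 * UINT16_MAX marks an empty slot, which is also why smc_insert() above
 * refuses flow table indexes that do not fit in a uint16_t.  The sizes
 * here are demo assumptions, not the OVS constants, and slots must be
 * initialized to UINT16_MAX before use, as the datapath does when the
 * cache is set up. */
#include <stdint.h>

#define TOY_SMC_ENTRIES 4
#define TOY_SMC_BUCKETS 256                 /* Power of two. */
#define TOY_SMC_MASK (TOY_SMC_BUCKETS - 1)

struct toy_smc_bucket {
    uint16_t sig[TOY_SMC_ENTRIES];
    uint16_t flow_idx[TOY_SMC_ENTRIES];
};

static int
toy_smc_lookup(const struct toy_smc_bucket *buckets, uint32_t hash)
{
    const struct toy_smc_bucket *b = &buckets[hash & TOY_SMC_MASK];
    uint16_t sig = hash >> 16;

    for (int i = 0; i < TOY_SMC_ENTRIES; i++) {
        if (b->sig[i] == sig && b->flow_idx[i] != UINT16_MAX) {
            return b->flow_idx[i];          /* Candidate flow table index. */
        }
    }
    return -1;                              /* Signature miss. */
}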
72865317 | 2945 | static struct dp_netdev_flow * |
3453b4d6 JS |
2946 | dp_netdev_pmd_lookup_flow(struct dp_netdev_pmd_thread *pmd, |
2947 | const struct netdev_flow_key *key, | |
2948 | int *lookup_num_p) | |
2c0ea78f | 2949 | { |
3453b4d6 | 2950 | struct dpcls *cls; |
0de8783a | 2951 | struct dpcls_rule *rule; |
f825fdd4 BP |
2952 | odp_port_t in_port = u32_to_odp(MINIFLOW_GET_U32(&key->mf, |
2953 | in_port.odp_port)); | |
3453b4d6 | 2954 | struct dp_netdev_flow *netdev_flow = NULL; |
2c0ea78f | 2955 | |
3453b4d6 JS |
2956 | cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port); |
2957 | if (OVS_LIKELY(cls)) { | |
60d8ccae | 2958 | dpcls_lookup(cls, &key, &rule, 1, lookup_num_p); |
3453b4d6 JS |
2959 | netdev_flow = dp_netdev_flow_cast(rule); |
2960 | } | |
8a4e3a85 | 2961 | return netdev_flow; |
2c0ea78f GS |
2962 | } |
2963 | ||
2964 | static struct dp_netdev_flow * | |
1c1e46ed AW |
2965 | dp_netdev_pmd_find_flow(const struct dp_netdev_pmd_thread *pmd, |
2966 | const ovs_u128 *ufidp, const struct nlattr *key, | |
2967 | size_t key_len) | |
72865317 | 2968 | { |
1763b4b8 | 2969 | struct dp_netdev_flow *netdev_flow; |
70e5ed6f JS |
2970 | struct flow flow; |
2971 | ovs_u128 ufid; | |
2972 | ||
2973 | /* If a UFID is not provided, determine one based on the key. */ | |
2974 | if (!ufidp && key && key_len | |
f0fb825a | 2975 | && !dpif_netdev_flow_from_nlattrs(key, key_len, &flow, false)) { |
1c1e46ed | 2976 | dpif_flow_hash(pmd->dp->dpif, &flow, sizeof flow, &ufid); |
70e5ed6f JS |
2977 | ufidp = &ufid; |
2978 | } | |
72865317 | 2979 | |
70e5ed6f JS |
2980 | if (ufidp) { |
2981 | CMAP_FOR_EACH_WITH_HASH (netdev_flow, node, dp_netdev_flow_hash(ufidp), | |
1c1e46ed | 2982 | &pmd->flow_table) { |
2ff8484b | 2983 | if (ovs_u128_equals(netdev_flow->ufid, *ufidp)) { |
70e5ed6f JS |
2984 | return netdev_flow; |
2985 | } | |
72865317 BP |
2986 | } |
2987 | } | |
8a4e3a85 | 2988 | |
72865317 BP |
2989 | return NULL; |
2990 | } | |
2991 | ||
2992 | static void | |
eb94da30 | 2993 | get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow_, |
1763b4b8 | 2994 | struct dpif_flow_stats *stats) |
feebdea2 | 2995 | { |
eb94da30 DDP |
2996 | struct dp_netdev_flow *netdev_flow; |
2997 | unsigned long long n; | |
2998 | long long used; | |
2999 | uint16_t flags; | |
3000 | ||
3001 | netdev_flow = CONST_CAST(struct dp_netdev_flow *, netdev_flow_); | |
3002 | ||
3003 | atomic_read_relaxed(&netdev_flow->stats.packet_count, &n); | |
3004 | stats->n_packets = n; | |
3005 | atomic_read_relaxed(&netdev_flow->stats.byte_count, &n); | |
3006 | stats->n_bytes = n; | |
3007 | atomic_read_relaxed(&netdev_flow->stats.used, &used); | |
3008 | stats->used = used; | |
3009 | atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags); | |
3010 | stats->tcp_flags = flags; | |
72865317 BP |
3011 | } |
3012 | ||
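/* Sketch of the relaxed-atomic stats snapshot pattern above, written
 * against C11 stdatomic instead of the OVS atomic wrappers.  Relaxed
 * loads are sufficient because each counter is written only by its
 * owning PMD thread; the reader just needs tear-free values, not any
 * ordering between the counters. */
#include <stdatomic.h>

struct toy_flow_stats {
    atomic_ullong packet_count;
    atomic_ullong byte_count;
};

static void
toy_stats_read(struct toy_flow_stats *s,
               unsigned long long *pkts, unsigned long long *bytes)
{
    *pkts = atomic_load_explicit(&s->packet_count, memory_order_relaxed);
    *bytes = atomic_load_explicit(&s->byte_count, memory_order_relaxed);
}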
7af12bd7 JS |
3013 | /* Converts to the dpif_flow format, using 'key_buf' and 'mask_buf' for |
3014 | * storing the netlink-formatted key/mask. 'key_buf' may be the same as | |
3015 | * 'mask_buf'. Actions will be returned without copying, by relying on RCU to | |
3016 | * protect them. */ | |
6fe09f8c | 3017 | static void |
70e5ed6f | 3018 | dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow *netdev_flow, |
7af12bd7 | 3019 | struct ofpbuf *key_buf, struct ofpbuf *mask_buf, |
64bb477f | 3020 | struct dpif_flow *flow, bool terse) |
6fe09f8c | 3021 | { |
64bb477f JS |
3022 | if (terse) { |
3023 | memset(flow, 0, sizeof *flow); | |
3024 | } else { | |
3025 | struct flow_wildcards wc; | |
3026 | struct dp_netdev_actions *actions; | |
3027 | size_t offset; | |
5262eea1 JG |
3028 | struct odp_flow_key_parms odp_parms = { |
3029 | .flow = &netdev_flow->flow, | |
3030 | .mask = &wc.masks, | |
2494ccd7 | 3031 | .support = dp_netdev_support, |
5262eea1 | 3032 | }; |
64bb477f JS |
3033 | |
3034 | miniflow_expand(&netdev_flow->cr.mask->mf, &wc.masks); | |
f4b835bb JR |
3035 | /* in_port is exact matched, but we have left it out of the mask for |
3036 | * optimization reasons. Add in_port back to the mask. */ | |
3037 | wc.masks.in_port.odp_port = ODPP_NONE; | |
64bb477f JS |
3038 | |
3039 | /* Key */ | |
6fd6ed71 | 3040 | offset = key_buf->size; |
64bb477f | 3041 | flow->key = ofpbuf_tail(key_buf); |
5262eea1 | 3042 | odp_flow_key_from_flow(&odp_parms, key_buf); |
6fd6ed71 | 3043 | flow->key_len = key_buf->size - offset; |
64bb477f JS |
3044 | |
3045 | /* Mask */ | |
6fd6ed71 | 3046 | offset = mask_buf->size; |
64bb477f | 3047 | flow->mask = ofpbuf_tail(mask_buf); |
ec1f6f32 | 3048 | odp_parms.key_buf = key_buf; |
5262eea1 | 3049 | odp_flow_key_from_mask(&odp_parms, mask_buf); |
6fd6ed71 | 3050 | flow->mask_len = mask_buf->size - offset; |
64bb477f JS |
3051 | |
3052 | /* Actions */ | |
3053 | actions = dp_netdev_flow_get_actions(netdev_flow); | |
3054 | flow->actions = actions->actions; | |
3055 | flow->actions_len = actions->size; | |
3056 | } | |
6fe09f8c | 3057 | |
70e5ed6f JS |
3058 | flow->ufid = netdev_flow->ufid; |
3059 | flow->ufid_present = true; | |
1c1e46ed | 3060 | flow->pmd_id = netdev_flow->pmd_id; |
6fe09f8c | 3061 | get_dpif_flow_stats(netdev_flow, &flow->stats); |
0d6b401c GT |
3062 | |
3063 | flow->attrs.offloaded = false; | |
3064 | flow->attrs.dp_layer = "ovs"; | |
6fe09f8c JS |
3065 | } |
3066 | ||
36956a7d | 3067 | static int |
8c301900 JR |
3068 | dpif_netdev_mask_from_nlattrs(const struct nlattr *key, uint32_t key_len, |
3069 | const struct nlattr *mask_key, | |
3070 | uint32_t mask_key_len, const struct flow *flow, | |
f0fb825a | 3071 | struct flow_wildcards *wc, bool probe) |
8c301900 | 3072 | { |
ca8d3442 DDP |
3073 | enum odp_key_fitness fitness; |
3074 | ||
8d8ab6c2 | 3075 | fitness = odp_flow_key_to_mask(mask_key, mask_key_len, wc, flow); |
ca8d3442 | 3076 | if (fitness) { |
f0fb825a EG |
3077 | if (!probe) { |
3078 | /* This should not happen: it indicates that | |
3079 | * odp_flow_key_from_mask() and odp_flow_key_to_mask() | |
3080 | * disagree on the acceptable form of a mask. Log the problem | |
3081 | * as an error, with enough details to enable debugging. */ | |
3082 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); | |
3083 | ||
3084 | if (!VLOG_DROP_ERR(&rl)) { | |
3085 | struct ds s; | |
3086 | ||
3087 | ds_init(&s); | |
3088 | odp_flow_format(key, key_len, mask_key, mask_key_len, NULL, &s, | |
3089 | true); | |
3090 | VLOG_ERR("internal error parsing flow mask %s (%s)", | |
3091 | ds_cstr(&s), odp_key_fitness_to_string(fitness)); | |
3092 | ds_destroy(&s); | |
3093 | } | |
8c301900 | 3094 | } |
ca8d3442 DDP |
3095 | |
3096 | return EINVAL; | |
8c301900 JR |
3097 | } |
3098 | ||
3099 | return 0; | |
3100 | } | |
3101 | ||
3102 | static int | |
3103 | dpif_netdev_flow_from_nlattrs(const struct nlattr *key, uint32_t key_len, | |
f0fb825a | 3104 | struct flow *flow, bool probe) |
36956a7d | 3105 | { |
8d8ab6c2 | 3106 | if (odp_flow_key_to_flow(key, key_len, flow)) { |
f0fb825a EG |
3107 | if (!probe) { |
3108 | /* This should not happen: it indicates that | |
3109 | * odp_flow_key_from_flow() and odp_flow_key_to_flow() disagree on | |
3110 | * the acceptable form of a flow. Log the problem as an error, | |
3111 | * with enough details to enable debugging. */ | |
3112 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); | |
3113 | ||
3114 | if (!VLOG_DROP_ERR(&rl)) { | |
3115 | struct ds s; | |
3116 | ||
3117 | ds_init(&s); | |
3118 | odp_flow_format(key, key_len, NULL, 0, NULL, &s, true); | |
3119 | VLOG_ERR("internal error parsing flow key %s", ds_cstr(&s)); | |
3120 | ds_destroy(&s); | |
3121 | } | |
36956a7d BP |
3122 | } |
3123 | ||
3124 | return EINVAL; | |
3125 | } | |
3126 | ||
5cf3edb3 | 3127 | if (flow->ct_state & DP_NETDEV_CS_UNSUPPORTED_MASK) { |
07659514 JS |
3128 | return EINVAL; |
3129 | } | |
3130 | ||
36956a7d BP |
3131 | return 0; |
3132 | } | |
3133 | ||
72865317 | 3134 | static int |
6fe09f8c | 3135 | dpif_netdev_flow_get(const struct dpif *dpif, const struct dpif_flow_get *get) |
72865317 BP |
3136 | { |
3137 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
1763b4b8 | 3138 | struct dp_netdev_flow *netdev_flow; |
1c1e46ed | 3139 | struct dp_netdev_pmd_thread *pmd; |
c673049c IM |
3140 | struct hmapx to_find = HMAPX_INITIALIZER(&to_find); |
3141 | struct hmapx_node *node; | |
3142 | int error = EINVAL; | |
3143 | ||
3144 | if (get->pmd_id == PMD_ID_NULL) { | |
3145 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
3146 | if (dp_netdev_pmd_try_ref(pmd) && !hmapx_add(&to_find, pmd)) { | |
3147 | dp_netdev_pmd_unref(pmd); | |
3148 | } | |
3149 | } | |
3150 | } else { | |
3151 | pmd = dp_netdev_get_pmd(dp, get->pmd_id); | |
3152 | if (!pmd) { | |
3153 | goto out; | |
3154 | } | |
3155 | hmapx_add(&to_find, pmd); | |
1c1e46ed AW |
3156 | } |
3157 | ||
c673049c IM |
3158 | if (!hmapx_count(&to_find)) { |
3159 | goto out; | |
72865317 | 3160 | } |
1c1e46ed | 3161 | |
c673049c IM |
3162 | HMAPX_FOR_EACH (node, &to_find) { |
3163 | pmd = (struct dp_netdev_pmd_thread *) node->data; | |
3164 | netdev_flow = dp_netdev_pmd_find_flow(pmd, get->ufid, get->key, | |
3165 | get->key_len); | |
3166 | if (netdev_flow) { | |
3167 | dp_netdev_flow_to_dpif_flow(netdev_flow, get->buffer, get->buffer, | |
3168 | get->flow, false); | |
3169 | error = 0; | |
3170 | break; | |
3171 | } else { | |
3172 | error = ENOENT; | |
3173 | } | |
3174 | } | |
bc4a05c6 | 3175 | |
c673049c IM |
3176 | HMAPX_FOR_EACH (node, &to_find) { |
3177 | pmd = (struct dp_netdev_pmd_thread *) node->data; | |
3178 | dp_netdev_pmd_unref(pmd); | |
3179 | } | |
3180 | out: | |
3181 | hmapx_destroy(&to_find); | |
5279f8fd | 3182 | return error; |
72865317 BP |
3183 | } |
3184 | ||
241bad15 YL |
3185 | static void |
3186 | dp_netdev_get_mega_ufid(const struct match *match, ovs_u128 *mega_ufid) | |
3187 | { | |
3188 | struct flow masked_flow; | |
3189 | size_t i; | |
3190 | ||
3191 | for (i = 0; i < sizeof(struct flow); i++) { | |
3192 | ((uint8_t *)&masked_flow)[i] = ((uint8_t *)&match->flow)[i] & | |
3193 | ((uint8_t *)&match->wc)[i]; | |
3194 | } | |
3195 | dpif_flow_hash(NULL, &masked_flow, sizeof(struct flow), mega_ufid); | |
3196 | } | |
3197 | ||
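/* Standalone sketch of the mega-UFID derivation above: the flow is
 * masked byte by byte and the result hashed, so every flow matching the
 * same megaflow (same bits under the same mask) maps to one identifier.
 * FNV-1a stands in for dpif_flow_hash() here; the real hash differs. */
#include <stdint.h>
#include <stddef.h>

static uint64_t
toy_mega_id(const uint8_t *flow, const uint8_t *mask, size_t len)
{
    uint64_t h = UINT64_C(0xcbf29ce484222325);      /* FNV offset basis. */

    for (size_t i = 0; i < len; i++) {
        h = (h ^ (flow[i] & mask[i])) * UINT64_C(0x100000001b3);
    }
    return h;   /* Flows equal under 'mask' share the same mega ID. */
}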
0de8783a | 3198 | static struct dp_netdev_flow * |
1c1e46ed AW |
3199 | dp_netdev_flow_add(struct dp_netdev_pmd_thread *pmd, |
3200 | struct match *match, const ovs_u128 *ufid, | |
ae2ceebd | 3201 | const struct nlattr *actions, size_t actions_len) |
1c1e46ed | 3202 | OVS_REQUIRES(pmd->flow_mutex) |
72865317 | 3203 | { |
0de8783a JR |
3204 | struct dp_netdev_flow *flow; |
3205 | struct netdev_flow_key mask; | |
3453b4d6 | 3206 | struct dpcls *cls; |
f4b835bb JR |
3207 | |
3208 | /* Make sure in_port is exact matched before we read it. */ | |
3209 | ovs_assert(match->wc.masks.in_port.odp_port == ODPP_NONE); | |
3453b4d6 | 3210 | odp_port_t in_port = match->flow.in_port.odp_port; |
ed79f89a | 3211 | |
f4b835bb JR |
3212 | /* As we select the dpcls based on the port number, each netdev flow |
3213 | * belonging to the same dpcls will have the same odp_port value. | |
3214 | * For performance reasons we wildcard odp_port here in the mask. In the | |
3215 | * typical case dp_hash is also wildcarded, and the resulting 8-byte | |
3216 | * chunk {dp_hash, in_port} will be ignored by netdev_flow_mask_init() and | |
3217 | * will not be part of the subtable mask. | |
3218 | * This will speed up the hash computation during dpcls_lookup() because | |
3219 | * there is one less call to hash_add64() in this case. */ | |
3220 | match->wc.masks.in_port.odp_port = 0; | |
0de8783a | 3221 | netdev_flow_mask_init(&mask, match); |
f4b835bb JR |
3222 | match->wc.masks.in_port.odp_port = ODPP_NONE; |
3223 | ||
0de8783a | 3224 | /* Make sure wc does not have metadata. */ |
5fcff47b JR |
3225 | ovs_assert(!FLOWMAP_HAS_FIELD(&mask.mf.map, metadata) |
3226 | && !FLOWMAP_HAS_FIELD(&mask.mf.map, regs)); | |
679ba04c | 3227 | |
0de8783a | 3228 | /* Do not allocate extra space. */ |
caeb4906 | 3229 | flow = xmalloc(sizeof *flow - sizeof flow->cr.flow.mf + mask.len); |
1c1e46ed | 3230 | memset(&flow->stats, 0, sizeof flow->stats); |
0de8783a | 3231 | flow->dead = false; |
11e5cf1f | 3232 | flow->batch = NULL; |
241bad15 | 3233 | flow->mark = INVALID_FLOW_MARK; |
bd5131ba | 3234 | *CONST_CAST(unsigned *, &flow->pmd_id) = pmd->core_id; |
0de8783a | 3235 | *CONST_CAST(struct flow *, &flow->flow) = match->flow; |
70e5ed6f | 3236 | *CONST_CAST(ovs_u128 *, &flow->ufid) = *ufid; |
0de8783a | 3237 | ovs_refcount_init(&flow->ref_cnt); |
0de8783a | 3238 | ovsrcu_set(&flow->actions, dp_netdev_actions_create(actions, actions_len)); |
2c0ea78f | 3239 | |
241bad15 | 3240 | dp_netdev_get_mega_ufid(match, CONST_CAST(ovs_u128 *, &flow->mega_ufid)); |
0de8783a | 3241 | netdev_flow_key_init_masked(&flow->cr.flow, &match->flow, &mask); |
3453b4d6 | 3242 | |
f4b835bb | 3243 | /* Select dpcls for in_port. Relies on in_port to be exact match. */ |
3453b4d6 JS |
3244 | cls = dp_netdev_pmd_find_dpcls(pmd, in_port); |
3245 | dpcls_insert(cls, &flow->cr, &mask); | |
72865317 | 3246 | |
4c75aaab EJ |
3247 | cmap_insert(&pmd->flow_table, CONST_CAST(struct cmap_node *, &flow->node), |
3248 | dp_netdev_flow_hash(&flow->ufid)); | |
3249 | ||
02bb2824 | 3250 | queue_netdev_flow_put(pmd, flow, match, actions, actions_len); |
241bad15 | 3251 | |
beb75a40 | 3252 | if (OVS_UNLIKELY(!VLOG_DROP_DBG((&upcall_rl)))) { |
623540e4 | 3253 | struct ds ds = DS_EMPTY_INITIALIZER; |
9044f2c1 JG |
3254 | struct ofpbuf key_buf, mask_buf; |
3255 | struct odp_flow_key_parms odp_parms = { | |
3256 | .flow = &match->flow, | |
3257 | .mask = &match->wc.masks, | |
3258 | .support = dp_netdev_support, | |
3259 | }; | |
3260 | ||
3261 | ofpbuf_init(&key_buf, 0); | |
3262 | ofpbuf_init(&mask_buf, 0); | |
623540e4 | 3263 | |
9044f2c1 JG |
3264 | odp_flow_key_from_flow(&odp_parms, &key_buf); |
3265 | odp_parms.key_buf = &key_buf; | |
3266 | odp_flow_key_from_mask(&odp_parms, &mask_buf); | |
0de8783a | 3267 | |
623540e4 | 3268 | ds_put_cstr(&ds, "flow_add: "); |
70e5ed6f JS |
3269 | odp_format_ufid(ufid, &ds); |
3270 | ds_put_cstr(&ds, " "); | |
9044f2c1 JG |
3271 | odp_flow_format(key_buf.data, key_buf.size, |
3272 | mask_buf.data, mask_buf.size, | |
3273 | NULL, &ds, false); | |
623540e4 | 3274 | ds_put_cstr(&ds, ", actions:"); |
0722f341 | 3275 | format_odp_actions(&ds, actions, actions_len, NULL); |
623540e4 | 3276 | |
beb75a40 | 3277 | VLOG_DBG("%s", ds_cstr(&ds)); |
623540e4 | 3278 | |
9044f2c1 JG |
3279 | ofpbuf_uninit(&key_buf); |
3280 | ofpbuf_uninit(&mask_buf); | |
beb75a40 JS |
3281 | |
3282 | /* Add a printout of the actual match installed. */ | |
3283 | struct match m; | |
3284 | ds_clear(&ds); | |
3285 | ds_put_cstr(&ds, "flow match: "); | |
3286 | miniflow_expand(&flow->cr.flow.mf, &m.flow); | |
3287 | miniflow_expand(&flow->cr.mask->mf, &m.wc.masks); | |
b2f4b622 | 3288 | memset(&m.tun_md, 0, sizeof m.tun_md); |
beb75a40 JS |
3289 | match_format(&m, NULL, &ds, OFP_DEFAULT_PRIORITY); |
3290 | ||
3291 | VLOG_DBG("%s", ds_cstr(&ds)); | |
3292 | ||
623540e4 EJ |
3293 | ds_destroy(&ds); |
3294 | } | |
3295 | ||
0de8783a | 3296 | return flow; |
72865317 BP |
3297 | } |
3298 | ||
72865317 | 3299 | static int |
f5d317a1 DDP |
3300 | flow_put_on_pmd(struct dp_netdev_pmd_thread *pmd, |
3301 | struct netdev_flow_key *key, | |
3302 | struct match *match, | |
3303 | ovs_u128 *ufid, | |
3304 | const struct dpif_flow_put *put, | |
3305 | struct dpif_flow_stats *stats) | |
72865317 | 3306 | { |
1763b4b8 | 3307 | struct dp_netdev_flow *netdev_flow; |
f5d317a1 | 3308 | int error = 0; |
72865317 | 3309 | |
f5d317a1 DDP |
3310 | if (stats) { |
3311 | memset(stats, 0, sizeof *stats); | |
70e5ed6f JS |
3312 | } |
3313 | ||
1c1e46ed | 3314 | ovs_mutex_lock(&pmd->flow_mutex); |
f5d317a1 | 3315 | netdev_flow = dp_netdev_pmd_lookup_flow(pmd, key, NULL); |
1763b4b8 | 3316 | if (!netdev_flow) { |
89625d1e | 3317 | if (put->flags & DPIF_FP_CREATE) { |
1c1e46ed | 3318 | if (cmap_count(&pmd->flow_table) < MAX_FLOWS) { |
f5d317a1 | 3319 | dp_netdev_flow_add(pmd, match, ufid, put->actions, |
70e5ed6f | 3320 | put->actions_len); |
0de8783a | 3321 | error = 0; |
72865317 | 3322 | } else { |
5279f8fd | 3323 | error = EFBIG; |
72865317 BP |
3324 | } |
3325 | } else { | |
5279f8fd | 3326 | error = ENOENT; |
72865317 BP |
3327 | } |
3328 | } else { | |
beb75a40 | 3329 | if (put->flags & DPIF_FP_MODIFY) { |
8a4e3a85 BP |
3330 | struct dp_netdev_actions *new_actions; |
3331 | struct dp_netdev_actions *old_actions; | |
3332 | ||
3333 | new_actions = dp_netdev_actions_create(put->actions, | |
3334 | put->actions_len); | |
3335 | ||
61e7deb1 BP |
3336 | old_actions = dp_netdev_flow_get_actions(netdev_flow); |
3337 | ovsrcu_set(&netdev_flow->actions, new_actions); | |
679ba04c | 3338 | |
02bb2824 YL |
3339 | queue_netdev_flow_put(pmd, netdev_flow, match, |
3340 | put->actions, put->actions_len); | |
241bad15 | 3341 | |
f5d317a1 DDP |
3342 | if (stats) { |
3343 | get_dpif_flow_stats(netdev_flow, stats); | |
a84cb64a BP |
3344 | } |
3345 | if (put->flags & DPIF_FP_ZERO_STATS) { | |
97447f55 DDP |
3346 | /* XXX: The userspace datapath uses thread local statistics |
3347 | * (for flows), which should be updated only by the owning | |
3348 | * thread. Since we cannot write on stats memory here, | |
3349 | * we choose not to support this flag. Please note: | |
3350 | * - This feature is currently used only by dpctl commands with | |
3351 | * option --clear. | |
3352 | * - Should the need arise, this operation can be implemented | |
3353 | * by keeping a base value (to be updated here) for each | |
3354 | * counter, and subtracting it before outputting the stats */ | |
3355 | error = EOPNOTSUPP; | |
72865317 | 3356 | } |
8a4e3a85 | 3357 | |
61e7deb1 | 3358 | ovsrcu_postpone(dp_netdev_actions_free, old_actions); |
2c0ea78f | 3359 | } else if (put->flags & DPIF_FP_CREATE) { |
5279f8fd | 3360 | error = EEXIST; |
2c0ea78f GS |
3361 | } else { |
3362 | /* Overlapping flow. */ | |
3363 | error = EINVAL; | |
72865317 BP |
3364 | } |
3365 | } | |
1c1e46ed | 3366 | ovs_mutex_unlock(&pmd->flow_mutex); |
5279f8fd | 3367 | return error; |
72865317 BP |
3368 | } |
3369 | ||
72865317 | 3370 | static int |
f5d317a1 | 3371 | dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put) |
72865317 BP |
3372 | { |
3373 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
beb75a40 | 3374 | struct netdev_flow_key key, mask; |
1c1e46ed | 3375 | struct dp_netdev_pmd_thread *pmd; |
f5d317a1 DDP |
3376 | struct match match; |
3377 | ovs_u128 ufid; | |
3378 | int error; | |
f0fb825a | 3379 | bool probe = put->flags & DPIF_FP_PROBE; |
72865317 | 3380 | |
f5d317a1 DDP |
3381 | if (put->stats) { |
3382 | memset(put->stats, 0, sizeof *put->stats); | |
3383 | } | |
f0fb825a EG |
3384 | error = dpif_netdev_flow_from_nlattrs(put->key, put->key_len, &match.flow, |
3385 | probe); | |
f5d317a1 DDP |
3386 | if (error) { |
3387 | return error; | |
3388 | } | |
3389 | error = dpif_netdev_mask_from_nlattrs(put->key, put->key_len, | |
3390 | put->mask, put->mask_len, | |
f0fb825a | 3391 | &match.flow, &match.wc, probe); |
f5d317a1 DDP |
3392 | if (error) { |
3393 | return error; | |
1c1e46ed AW |
3394 | } |
3395 | ||
f5d317a1 DDP |
3396 | if (put->ufid) { |
3397 | ufid = *put->ufid; | |
3398 | } else { | |
3399 | dpif_flow_hash(dpif, &match.flow, sizeof match.flow, &ufid); | |
3400 | } | |
3401 | ||
35fe9efb IM |
3402 | /* The Netlink encoding of datapath flow keys cannot express |
3403 | * wildcarding the presence of a VLAN tag. Instead, a missing VLAN | |
3404 | * tag is interpreted as exact match on the fact that there is no | |
3405 | * VLAN. Unless we refactor a lot of code that translates between | |
3406 | * Netlink and struct flow representations, we have to do the same | |
3407 | * here. This must be in sync with 'match' in handle_packet_upcall(). */ | |
3408 | if (!match.wc.masks.vlans[0].tci) { | |
3409 | match.wc.masks.vlans[0].tci = htons(0xffff); | |
3410 | } | |
3411 | ||
f5d317a1 | 3412 | /* Must produce a netdev_flow_key for lookup. |
beb75a40 JS |
3413 | * Use the same method as employed to create the key when adding |
3414 | * the flow to the dpcls to make sure they match. */ | |
3415 | netdev_flow_mask_init(&mask, &match); | |
3416 | netdev_flow_key_init_masked(&key, &match.flow, &mask); | |
f5d317a1 DDP |
3417 | |
3418 | if (put->pmd_id == PMD_ID_NULL) { | |
3419 | if (cmap_count(&dp->poll_threads) == 0) { | |
3420 | return EINVAL; | |
3421 | } | |
3422 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
3423 | struct dpif_flow_stats pmd_stats; | |
3424 | int pmd_error; | |
3425 | ||
3426 | pmd_error = flow_put_on_pmd(pmd, &key, &match, &ufid, put, | |
3427 | &pmd_stats); | |
3428 | if (pmd_error) { | |
3429 | error = pmd_error; | |
3430 | } else if (put->stats) { | |
3431 | put->stats->n_packets += pmd_stats.n_packets; | |
3432 | put->stats->n_bytes += pmd_stats.n_bytes; | |
3433 | put->stats->used = MAX(put->stats->used, pmd_stats.used); | |
3434 | put->stats->tcp_flags |= pmd_stats.tcp_flags; | |
3435 | } | |
3436 | } | |
3437 | } else { | |
3438 | pmd = dp_netdev_get_pmd(dp, put->pmd_id); | |
3439 | if (!pmd) { | |
3440 | return EINVAL; | |
3441 | } | |
3442 | error = flow_put_on_pmd(pmd, &key, &match, &ufid, put, put->stats); | |
3443 | dp_netdev_pmd_unref(pmd); | |
3444 | } | |
3445 | ||
3446 | return error; | |
3447 | } | |
3448 | ||
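/* Sketch of the stats merge used above when pmd_id is PMD_ID_NULL and
 * the operation runs on every PMD thread: packet and byte counts add up,
 * 'used' keeps the most recent timestamp via MAX, and TCP flags
 * accumulate with OR.  'struct toy_stats' is a stand-in for struct
 * dpif_flow_stats. */
#include <stdint.h>

struct toy_stats {
    uint64_t n_packets;
    uint64_t n_bytes;
    long long used;
    uint16_t tcp_flags;
};

static void
toy_stats_merge(struct toy_stats *dst, const struct toy_stats *src)
{
    dst->n_packets += src->n_packets;
    dst->n_bytes += src->n_bytes;
    dst->used = dst->used >= src->used ? dst->used : src->used;
    dst->tcp_flags |= src->tcp_flags;
}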
3449 | static int | |
3450 | flow_del_on_pmd(struct dp_netdev_pmd_thread *pmd, | |
3451 | struct dpif_flow_stats *stats, | |
3452 | const struct dpif_flow_del *del) | |
3453 | { | |
3454 | struct dp_netdev_flow *netdev_flow; | |
3455 | int error = 0; | |
3456 | ||
1c1e46ed AW |
3457 | ovs_mutex_lock(&pmd->flow_mutex); |
3458 | netdev_flow = dp_netdev_pmd_find_flow(pmd, del->ufid, del->key, | |
3459 | del->key_len); | |
1763b4b8 | 3460 | if (netdev_flow) { |
f5d317a1 DDP |
3461 | if (stats) { |
3462 | get_dpif_flow_stats(netdev_flow, stats); | |
feebdea2 | 3463 | } |
1c1e46ed | 3464 | dp_netdev_pmd_remove_flow(pmd, netdev_flow); |
72865317 | 3465 | } else { |
5279f8fd | 3466 | error = ENOENT; |
72865317 | 3467 | } |
1c1e46ed | 3468 | ovs_mutex_unlock(&pmd->flow_mutex); |
f5d317a1 DDP |
3469 | |
3470 | return error; | |
3471 | } | |
3472 | ||
3473 | static int | |
3474 | dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del) | |
3475 | { | |
3476 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
3477 | struct dp_netdev_pmd_thread *pmd; | |
3478 | int error = 0; | |
3479 | ||
3480 | if (del->stats) { | |
3481 | memset(del->stats, 0, sizeof *del->stats); | |
3482 | } | |
3483 | ||
3484 | if (del->pmd_id == PMD_ID_NULL) { | |
3485 | if (cmap_count(&dp->poll_threads) == 0) { | |
3486 | return EINVAL; | |
3487 | } | |
3488 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
3489 | struct dpif_flow_stats pmd_stats; | |
3490 | int pmd_error; | |
3491 | ||
3492 | pmd_error = flow_del_on_pmd(pmd, &pmd_stats, del); | |
3493 | if (pmd_error) { | |
3494 | error = pmd_error; | |
3495 | } else if (del->stats) { | |
3496 | del->stats->n_packets += pmd_stats.n_packets; | |
3497 | del->stats->n_bytes += pmd_stats.n_bytes; | |
3498 | del->stats->used = MAX(del->stats->used, pmd_stats.used); | |
3499 | del->stats->tcp_flags |= pmd_stats.tcp_flags; | |
3500 | } | |
3501 | } | |
3502 | } else { | |
3503 | pmd = dp_netdev_get_pmd(dp, del->pmd_id); | |
3504 | if (!pmd) { | |
3505 | return EINVAL; | |
3506 | } | |
3507 | error = flow_del_on_pmd(pmd, del->stats, del); | |
3508 | dp_netdev_pmd_unref(pmd); | |
3509 | } | |
3510 | ||
5279f8fd BP |
3511 | |
3512 | return error; | |
72865317 BP |
3513 | } |
3514 | ||
ac64794a BP |
3515 | struct dpif_netdev_flow_dump { |
3516 | struct dpif_flow_dump up; | |
1c1e46ed AW |
3517 | struct cmap_position poll_thread_pos; |
3518 | struct cmap_position flow_pos; | |
3519 | struct dp_netdev_pmd_thread *cur_pmd; | |
d2ad7ef1 JS |
3520 | int status; |
3521 | struct ovs_mutex mutex; | |
e723fd32 JS |
3522 | }; |
3523 | ||
ac64794a BP |
3524 | static struct dpif_netdev_flow_dump * |
3525 | dpif_netdev_flow_dump_cast(struct dpif_flow_dump *dump) | |
72865317 | 3526 | { |
ac64794a | 3527 | return CONTAINER_OF(dump, struct dpif_netdev_flow_dump, up); |
e723fd32 JS |
3528 | } |
3529 | ||
ac64794a | 3530 | static struct dpif_flow_dump * |
7e8b7199 | 3531 | dpif_netdev_flow_dump_create(const struct dpif *dpif_, bool terse, |
a692410a | 3532 | struct dpif_flow_dump_types *types OVS_UNUSED) |
e723fd32 | 3533 | { |
ac64794a | 3534 | struct dpif_netdev_flow_dump *dump; |
e723fd32 | 3535 | |
1c1e46ed | 3536 | dump = xzalloc(sizeof *dump); |
ac64794a | 3537 | dpif_flow_dump_init(&dump->up, dpif_); |
64bb477f | 3538 | dump->up.terse = terse; |
ac64794a BP |
3539 | ovs_mutex_init(&dump->mutex); |
3540 | ||
3541 | return &dump->up; | |
e723fd32 JS |
3542 | } |
3543 | ||
3544 | static int | |
ac64794a | 3545 | dpif_netdev_flow_dump_destroy(struct dpif_flow_dump *dump_) |
e723fd32 | 3546 | { |
ac64794a | 3547 | struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_); |
e723fd32 | 3548 | |
ac64794a BP |
3549 | ovs_mutex_destroy(&dump->mutex); |
3550 | free(dump); | |
704a1e09 BP |
3551 | return 0; |
3552 | } | |
3553 | ||
ac64794a BP |
3554 | struct dpif_netdev_flow_dump_thread { |
3555 | struct dpif_flow_dump_thread up; | |
3556 | struct dpif_netdev_flow_dump *dump; | |
8bb113da RW |
3557 | struct odputil_keybuf keybuf[FLOW_DUMP_MAX_BATCH]; |
3558 | struct odputil_keybuf maskbuf[FLOW_DUMP_MAX_BATCH]; | |
ac64794a BP |
3559 | }; |
3560 | ||
3561 | static struct dpif_netdev_flow_dump_thread * | |
3562 | dpif_netdev_flow_dump_thread_cast(struct dpif_flow_dump_thread *thread) | |
3563 | { | |
3564 | return CONTAINER_OF(thread, struct dpif_netdev_flow_dump_thread, up); | |
3565 | } | |
3566 | ||
3567 | static struct dpif_flow_dump_thread * | |
3568 | dpif_netdev_flow_dump_thread_create(struct dpif_flow_dump *dump_) | |
3569 | { | |
3570 | struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_); | |
3571 | struct dpif_netdev_flow_dump_thread *thread; | |
3572 | ||
3573 | thread = xmalloc(sizeof *thread); | |
3574 | dpif_flow_dump_thread_init(&thread->up, &dump->up); | |
3575 | thread->dump = dump; | |
3576 | return &thread->up; | |
3577 | } | |
3578 | ||
3579 | static void | |
3580 | dpif_netdev_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_) | |
3581 | { | |
3582 | struct dpif_netdev_flow_dump_thread *thread | |
3583 | = dpif_netdev_flow_dump_thread_cast(thread_); | |
3584 | ||
3585 | free(thread); | |
3586 | } | |
3587 | ||
704a1e09 | 3588 | static int |
ac64794a | 3589 | dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_, |
8bb113da | 3590 | struct dpif_flow *flows, int max_flows) |
ac64794a BP |
3591 | { |
3592 | struct dpif_netdev_flow_dump_thread *thread | |
3593 | = dpif_netdev_flow_dump_thread_cast(thread_); | |
3594 | struct dpif_netdev_flow_dump *dump = thread->dump; | |
8bb113da | 3595 | struct dp_netdev_flow *netdev_flows[FLOW_DUMP_MAX_BATCH]; |
8bb113da RW |
3596 | int n_flows = 0; |
3597 | int i; | |
14608a15 | 3598 | |
ac64794a | 3599 | ovs_mutex_lock(&dump->mutex); |
8bb113da | 3600 | if (!dump->status) { |
1c1e46ed AW |
3601 | struct dpif_netdev *dpif = dpif_netdev_cast(thread->up.dpif); |
3602 | struct dp_netdev *dp = get_dp_netdev(&dpif->dpif); | |
3603 | struct dp_netdev_pmd_thread *pmd = dump->cur_pmd; | |
3604 | int flow_limit = MIN(max_flows, FLOW_DUMP_MAX_BATCH); | |
3605 | ||
3606 | /* On the first call to dump_next(), extract the first pmd thread. | |
3607 | * If there is no pmd thread, return immediately. */ | |
3608 | if (!pmd) { | |
3609 | pmd = dp_netdev_pmd_get_next(dp, &dump->poll_thread_pos); | |
3610 | if (!pmd) { | |
3611 | ovs_mutex_unlock(&dump->mutex); | |
3612 | return n_flows; | |
8bb113da | 3613 | |
8bb113da | 3614 | } |
d2ad7ef1 | 3615 | } |
1c1e46ed AW |
3616 | |
3617 | do { | |
3618 | for (n_flows = 0; n_flows < flow_limit; n_flows++) { | |
3619 | struct cmap_node *node; | |
3620 | ||
3621 | node = cmap_next_position(&pmd->flow_table, &dump->flow_pos); | |
3622 | if (!node) { | |
3623 | break; | |
3624 | } | |
3625 | netdev_flows[n_flows] = CONTAINER_OF(node, | |
3626 | struct dp_netdev_flow, | |
3627 | node); | |
3628 | } | |
3629 | /* When finished dumping the current pmd thread, move on | |
3630 | * to the next. */ | |
3631 | if (n_flows < flow_limit) { | |
3632 | memset(&dump->flow_pos, 0, sizeof dump->flow_pos); | |
3633 | dp_netdev_pmd_unref(pmd); | |
3634 | pmd = dp_netdev_pmd_get_next(dp, &dump->poll_thread_pos); | |
3635 | if (!pmd) { | |
3636 | dump->status = EOF; | |
3637 | break; | |
3638 | } | |
3639 | } | |
3640 | /* Keep the reference for the next caller. */ | |
3641 | dump->cur_pmd = pmd; | |
3642 | ||
3643 | /* If the current dump is empty, do not exit the loop, since the | |
3644 | * remaining pmds could have flows to be dumped. Just dump again | |
3645 | * on the new 'pmd'. */ | |
3646 | } while (!n_flows); | |
8a4e3a85 | 3647 | } |
ac64794a | 3648 | ovs_mutex_unlock(&dump->mutex); |
ac64794a | 3649 | |
8bb113da RW |
3650 | for (i = 0; i < n_flows; i++) { |
3651 | struct odputil_keybuf *maskbuf = &thread->maskbuf[i]; | |
3652 | struct odputil_keybuf *keybuf = &thread->keybuf[i]; | |
3653 | struct dp_netdev_flow *netdev_flow = netdev_flows[i]; | |
3654 | struct dpif_flow *f = &flows[i]; | |
7af12bd7 | 3655 | struct ofpbuf key, mask; |
8bb113da | 3656 | |
7af12bd7 JS |
3657 | ofpbuf_use_stack(&key, keybuf, sizeof *keybuf); |
3658 | ofpbuf_use_stack(&mask, maskbuf, sizeof *maskbuf); | |
64bb477f JS |
3659 | dp_netdev_flow_to_dpif_flow(netdev_flow, &key, &mask, f, |
3660 | dump->up.terse); | |
8bb113da | 3661 | } |
feebdea2 | 3662 | |
8bb113da | 3663 | return n_flows; |
72865317 BP |
3664 | } |
3665 | ||
3666 | static int | |
758c456d | 3667 | dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute) |
65f13b50 | 3668 | OVS_NO_THREAD_SAFETY_ANALYSIS |
72865317 BP |
3669 | { |
3670 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
65f13b50 | 3671 | struct dp_netdev_pmd_thread *pmd; |
1895cc8d | 3672 | struct dp_packet_batch pp; |
72865317 | 3673 | |
cf62fa4c PS |
3674 | if (dp_packet_size(execute->packet) < ETH_HEADER_LEN || |
3675 | dp_packet_size(execute->packet) > UINT16_MAX) { | |
72865317 BP |
3676 | return EINVAL; |
3677 | } | |
3678 | ||
65f13b50 AW |
3679 | /* Tries finding the 'pmd'. If NULL is returned, that means |
3680 | * the current thread is a non-pmd thread and should use | |
b19befae | 3681 | * dp_netdev_get_pmd(dp, NON_PMD_CORE_ID). */ |
65f13b50 AW |
3682 | pmd = ovsthread_getspecific(dp->per_pmd_key); |
3683 | if (!pmd) { | |
b19befae | 3684 | pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID); |
546e57d4 DDP |
3685 | if (!pmd) { |
3686 | return EBUSY; | |
3687 | } | |
65f13b50 AW |
3688 | } |
3689 | ||
05267613 AZ |
3690 | if (execute->probe) { |
3691 | /* If this is part of a probe, Drop the packet, since executing | |
3692 | * the action may actually cause spurious packets be sent into | |
3693 | * the network. */ | |
d1ce9c20 YS |
3694 | if (pmd->core_id == NON_PMD_CORE_ID) { |
3695 | dp_netdev_pmd_unref(pmd); | |
3696 | } | |
05267613 AZ |
3697 | return 0; |
3698 | } | |
3699 | ||
65f13b50 AW |
3700 | /* If the current thread is non-pmd thread, acquires |
3701 | * the 'non_pmd_mutex'. */ | |
3702 | if (pmd->core_id == NON_PMD_CORE_ID) { | |
3703 | ovs_mutex_lock(&dp->non_pmd_mutex); | |
3704 | } | |
1c1e46ed | 3705 | |
2fbadeb6 IM |
3706 | /* Update current time in PMD context. We don't care about EMC insertion |
3707 | * probability, because we are on a slow path. */ | |
b010be17 IM |
3708 | pmd_thread_ctx_time_update(pmd); |
3709 | ||
36d8de17 DDP |
3710 | /* The action processing expects the RSS hash to be valid, because |
3711 | * it's always initialized at the beginning of datapath processing. | |
3712 | * In this case, though, 'execute->packet' may not have gone through | |
3713 | * the datapath at all, it may have been generated by the upper layer | |
3714 | * (OpenFlow packet-out, BFD frame, ...). */ | |
3715 | if (!dp_packet_rss_valid(execute->packet)) { | |
3716 | dp_packet_set_rss_hash(execute->packet, | |
3717 | flow_hash_5tuple(execute->flow, 0)); | |
3718 | } | |
3719 | ||
72c84bc2 | 3720 | dp_packet_batch_init_packet(&pp, execute->packet); |
9f17f104 | 3721 | pp.do_not_steal = true; |
66e4ad8a | 3722 | dp_netdev_execute_actions(pmd, &pp, false, execute->flow, |
b010be17 | 3723 | execute->actions, execute->actions_len); |
c71ea3c4 | 3724 | dp_netdev_pmd_flush_output_packets(pmd, true); |
36d8de17 | 3725 | |
65f13b50 AW |
3726 | if (pmd->core_id == NON_PMD_CORE_ID) { |
3727 | ovs_mutex_unlock(&dp->non_pmd_mutex); | |
e9985d6a | 3728 | dp_netdev_pmd_unref(pmd); |
65f13b50 | 3729 | } |
8a4e3a85 | 3730 | |
758c456d | 3731 | return 0; |
72865317 BP |
3732 | } |
3733 | ||
1a0c894a | 3734 | static void |
57924fc9 SB |
3735 | dpif_netdev_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops, |
3736 | enum dpif_offload_type offload_type OVS_UNUSED) | |
1a0c894a BP |
3737 | { |
3738 | size_t i; | |
3739 | ||
3740 | for (i = 0; i < n_ops; i++) { | |
3741 | struct dpif_op *op = ops[i]; | |
3742 | ||
3743 | switch (op->type) { | |
3744 | case DPIF_OP_FLOW_PUT: | |
fa37affa | 3745 | op->error = dpif_netdev_flow_put(dpif, &op->flow_put); |
1a0c894a BP |
3746 | break; |
3747 | ||
3748 | case DPIF_OP_FLOW_DEL: | |
fa37affa | 3749 | op->error = dpif_netdev_flow_del(dpif, &op->flow_del); |
1a0c894a BP |
3750 | break; |
3751 | ||
3752 | case DPIF_OP_EXECUTE: | |
fa37affa | 3753 | op->error = dpif_netdev_execute(dpif, &op->execute); |
1a0c894a | 3754 | break; |
6fe09f8c JS |
3755 | |
3756 | case DPIF_OP_FLOW_GET: | |
fa37affa | 3757 | op->error = dpif_netdev_flow_get(dpif, &op->flow_get); |
6fe09f8c | 3758 | break; |
1a0c894a BP |
3759 | } |
3760 | } | |
3761 | } | |
3762 | ||
5bf84282 NK |
3763 | /* Enable or disable PMD auto load balancing. */ | |
3764 | static void | |
3765 | set_pmd_auto_lb(struct dp_netdev *dp) | |
3766 | { | |
3767 | unsigned int cnt = 0; | |
3768 | struct dp_netdev_pmd_thread *pmd; | |
3769 | struct pmd_auto_lb *pmd_alb = &dp->pmd_alb; | |
3770 | ||
3771 | bool enable_alb = false; | |
3772 | bool multi_rxq = false; | |
3773 | bool pmd_rxq_assign_cyc = dp->pmd_rxq_assign_cyc; | |
3774 | ||
3775 | /* Ensure that there are at least 2 non-isolated PMDs and | |
3776 | * one of them is polling more than one rxq. */ | |
3777 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
3778 | if (pmd->core_id == NON_PMD_CORE_ID || pmd->isolated) { | |
3779 | continue; | |
3780 | } | |
3781 | ||
3782 | if (hmap_count(&pmd->poll_list) > 1) { | |
3783 | multi_rxq = true; | |
3784 | } | |
3785 | if (cnt && multi_rxq) { | |
3786 | enable_alb = true; | |
3787 | break; | |
3788 | } | |
3789 | cnt++; | |
3790 | } | |
3791 | ||
3792 | /* Enable auto LB if it is requested and cycle-based assignment is used. */ | |
3793 | enable_alb = enable_alb && pmd_rxq_assign_cyc && | |
3794 | pmd_alb->auto_lb_requested; | |
3795 | ||
3796 | if (pmd_alb->is_enabled != enable_alb) { | |
3797 | pmd_alb->is_enabled = enable_alb; | |
3798 | if (pmd_alb->is_enabled) { | |
3799 | VLOG_INFO("PMD auto load balance is enabled " | |
3800 | "(with rebalance interval:%"PRIu64" msec)", | |
3801 | pmd_alb->rebalance_intvl); | |
3802 | } else { | |
3803 | pmd_alb->rebalance_poll_timer = 0; | |
3804 | VLOG_INFO("PMD auto load balance is disabled"); | |
3805 | } | |
3806 | } | |
3807 | ||
3808 | } | |
3809 | ||
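/* The gating logic above, collapsed into a single predicate for
 * illustration; the inputs are assumed to be precomputed from the
 * poll_threads walk.  Rebalancing is pointless unless there are at least
 * two non-isolated PMDs (somewhere to move an rxq to) and at least one
 * of them polls more than one rxq (something to move). */
#include <stdbool.h>

static bool
toy_alb_should_enable(bool requested, bool assign_by_cycles,
                      int n_nonisolated_pmds, bool any_pmd_multi_rxq)
{
    return requested && assign_by_cycles
           && n_nonisolated_pmds >= 2 && any_pmd_multi_rxq;
}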
d4f6865c DDP |
3810 | /* Applies datapath configuration from the database. Some of the changes are |
3811 | * actually applied in dpif_netdev_run(). */ | |
f2eee189 | 3812 | static int |
d4f6865c | 3813 | dpif_netdev_set_config(struct dpif *dpif, const struct smap *other_config) |
f2eee189 AW |
3814 | { |
3815 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
d4f6865c | 3816 | const char *cmask = smap_get(other_config, "pmd-cpu-mask"); |
e77c97b9 KT |
3817 | const char *pmd_rxq_assign = smap_get_def(other_config, "pmd-rxq-assign", |
3818 | "cycles"); | |
4c30b246 CL |
3819 | unsigned long long insert_prob = |
3820 | smap_get_ullong(other_config, "emc-insert-inv-prob", | |
3821 | DEFAULT_EM_FLOW_INSERT_INV_PROB); | |
3822 | uint32_t insert_min, cur_min; | |
c71ea3c4 | 3823 | uint32_t tx_flush_interval, cur_tx_flush_interval; |
5bf84282 | 3824 | uint64_t rebalance_intvl; |
c71ea3c4 IM |
3825 | |
3826 | tx_flush_interval = smap_get_int(other_config, "tx-flush-interval", | |
3827 | DEFAULT_TX_FLUSH_INTERVAL); | |
3828 | atomic_read_relaxed(&dp->tx_flush_interval, &cur_tx_flush_interval); | |
3829 | if (tx_flush_interval != cur_tx_flush_interval) { | |
3830 | atomic_store_relaxed(&dp->tx_flush_interval, tx_flush_interval); | |
3831 | VLOG_INFO("Flushing interval for tx queues set to %"PRIu32" us", | |
3832 | tx_flush_interval); | |
3833 | } | |
f2eee189 | 3834 | |
a6a426d6 IM |
3835 | if (!nullable_string_is_equal(dp->pmd_cmask, cmask)) { |
3836 | free(dp->pmd_cmask); | |
3837 | dp->pmd_cmask = nullable_xstrdup(cmask); | |
3838 | dp_netdev_request_reconfigure(dp); | |
f2eee189 AW |
3839 | } |
3840 | ||
4c30b246 CL |
3841 | atomic_read_relaxed(&dp->emc_insert_min, &cur_min); |
3842 | if (insert_prob <= UINT32_MAX) { | |
3843 | insert_min = insert_prob == 0 ? 0 : UINT32_MAX / insert_prob; | |
3844 | } else { | |
3845 | insert_min = DEFAULT_EM_FLOW_INSERT_MIN; | |
3846 | insert_prob = DEFAULT_EM_FLOW_INSERT_INV_PROB; | |
3847 | } | |
3848 | ||
3849 | if (insert_min != cur_min) { | |
3850 | atomic_store_relaxed(&dp->emc_insert_min, insert_min); | |
3851 | if (insert_min == 0) { | |
2fbadeb6 | 3852 | VLOG_INFO("EMC insertion probability changed to zero"); |
4c30b246 CL |
3853 | } else { |
3854 | VLOG_INFO("EMC insertion probability changed to 1/%llu (~%.2f%%)", | |
3855 | insert_prob, (100 / (float)insert_prob)); | |
3856 | } | |
3857 | } | |
3858 | ||
79f36875 JS |
3859 | bool perf_enabled = smap_get_bool(other_config, "pmd-perf-metrics", false); |
3860 | bool cur_perf_enabled; | |
3861 | atomic_read_relaxed(&dp->pmd_perf_metrics, &cur_perf_enabled); | |
3862 | if (perf_enabled != cur_perf_enabled) { | |
3863 | atomic_store_relaxed(&dp->pmd_perf_metrics, perf_enabled); | |
3864 | if (perf_enabled) { | |
3865 | VLOG_INFO("PMD performance metrics collection enabled"); | |
3866 | } else { | |
3867 | VLOG_INFO("PMD performance metrics collection disabled"); | |
3868 | } | |
3869 | } | |
3870 | ||
60d8ccae YW |
3871 | bool smc_enable = smap_get_bool(other_config, "smc-enable", false); |
3872 | bool cur_smc; | |
3873 | atomic_read_relaxed(&dp->smc_enable_db, &cur_smc); | |
3874 | if (smc_enable != cur_smc) { | |
3875 | atomic_store_relaxed(&dp->smc_enable_db, smc_enable); | |
3876 | if (smc_enable) { | |
3877 | VLOG_INFO("SMC cache is enabled"); | |
3878 | } else { | |
3879 | VLOG_INFO("SMC cache is disabled"); | |
3880 | } | |
3881 | } | |
e77c97b9 KT |
3882 | |
3883 | bool pmd_rxq_assign_cyc = !strcmp(pmd_rxq_assign, "cycles"); | |
3884 | if (!pmd_rxq_assign_cyc && strcmp(pmd_rxq_assign, "roundrobin")) { | |
3885 | VLOG_WARN("Unsupported Rxq to PMD assignment mode in pmd-rxq-assign. " | |
3886 | "Defaulting to 'cycles'."); | |
3887 | pmd_rxq_assign_cyc = true; | |
3888 | pmd_rxq_assign = "cycles"; | |
3889 | } | |
3890 | if (dp->pmd_rxq_assign_cyc != pmd_rxq_assign_cyc) { | |
3891 | dp->pmd_rxq_assign_cyc = pmd_rxq_assign_cyc; | |
3892 | VLOG_INFO("Rxq to PMD assignment mode changed to: \'%s\'.", | |
3893 | pmd_rxq_assign); | |
3894 | dp_netdev_request_reconfigure(dp); | |
3895 | } | |
5bf84282 NK |
3896 | |
3897 | struct pmd_auto_lb *pmd_alb = &dp->pmd_alb; | |
3898 | pmd_alb->auto_lb_requested = smap_get_bool(other_config, "pmd-auto-lb", | |
3899 | false); | |
3900 | ||
3901 | rebalance_intvl = smap_get_int(other_config, "pmd-auto-lb-rebal-interval", | |
3902 | ALB_PMD_REBALANCE_POLL_INTERVAL); | |
3903 | ||
3904 | /* Input is in minutes; convert it to msec. */ | |
3905 | rebalance_intvl = | |
3906 | rebalance_intvl ? rebalance_intvl * MIN_TO_MSEC : MIN_TO_MSEC; | |
3907 | ||
3908 | if (pmd_alb->rebalance_intvl != rebalance_intvl) { | |
3909 | pmd_alb->rebalance_intvl = rebalance_intvl; | |
3910 | } | |
3911 | ||
3912 | set_pmd_auto_lb(dp); | |
f2eee189 AW |
3913 | return 0; |
3914 | } | |
3915 | ||
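/* Sketch of the rebalance-interval normalization above: the database
 * value is in minutes, the datapath stores milliseconds, and zero falls
 * back to the one-minute default.  MIN_TO_MSEC is assumed to be 60000
 * here, matching one minute. */
#include <stdint.h>

#define TOY_MIN_TO_MSEC 60000

static uint64_t
toy_rebalance_msec(uint64_t interval_min)
{
    return interval_min ? interval_min * TOY_MIN_TO_MSEC
                        : TOY_MIN_TO_MSEC;
}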
3eb67853 IM |
3916 | /* Parses affinity list and returns result in 'core_ids'. */ |
3917 | static int | |
3918 | parse_affinity_list(const char *affinity_list, unsigned *core_ids, int n_rxq) | |
3919 | { | |
3920 | unsigned i; | |
3921 | char *list, *copy, *key, *value; | |
3922 | int error = 0; | |
3923 | ||
3924 | for (i = 0; i < n_rxq; i++) { | |
51c37a56 | 3925 | core_ids[i] = OVS_CORE_UNSPEC; |
3eb67853 IM |
3926 | } |
3927 | ||
3928 | if (!affinity_list) { | |
3929 | return 0; | |
3930 | } | |
3931 | ||
3932 | list = copy = xstrdup(affinity_list); | |
3933 | ||
3934 | while (ofputil_parse_key_value(&list, &key, &value)) { | |
3935 | int rxq_id, core_id; | |
3936 | ||
3937 | if (!str_to_int(key, 0, &rxq_id) || rxq_id < 0 | |
3938 | || !str_to_int(value, 0, &core_id) || core_id < 0) { | |
3939 | error = EINVAL; | |
3940 | break; | |
3941 | } | |
3942 | ||
3943 | if (rxq_id < n_rxq) { | |
3944 | core_ids[rxq_id] = core_id; | |
3945 | } | |
3946 | } | |
3947 | ||
3948 | free(copy); | |
3949 | return error; | |
3950 | } | |
3951 | ||
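/* Self-contained sketch of the "<rxq-id>:<core-id>" list format that
 * parse_affinity_list() above consumes: "0:3,1:7" pins rxq 0 to core 3
 * and rxq 1 to core 7, while rxq 2 stays unpinned.  This demo assumes
 * well-formed input and uses UINT_MAX in place of OVS_CORE_UNSPEC. */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
    const char *p = "0:3,1:7";
    unsigned core_ids[3] = { UINT_MAX, UINT_MAX, UINT_MAX };

    while (*p) {
        char *end;
        unsigned long rxq = strtoul(p, &end, 10);         /* <rxq-id> */
        unsigned long core = strtoul(end + 1, &end, 10);  /* <core-id> */

        if (rxq < 3) {
            core_ids[rxq] = (unsigned) core;
        }
        p = (*end == ',') ? end + 1 : end;
    }
    printf("%u %u %u\n", core_ids[0], core_ids[1], core_ids[2]);
    return 0;   /* Prints: 3 7 4294967295 */
}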
3952 | /* Parses 'affinity_list' and applies configuration if it is valid. */ | |
3953 | static int | |
3954 | dpif_netdev_port_set_rxq_affinity(struct dp_netdev_port *port, | |
3955 | const char *affinity_list) | |
3956 | { | |
3957 | unsigned *core_ids, i; | |
3958 | int error = 0; | |
3959 | ||
3960 | core_ids = xmalloc(port->n_rxq * sizeof *core_ids); | |
3961 | if (parse_affinity_list(affinity_list, core_ids, port->n_rxq)) { | |
3962 | error = EINVAL; | |
3963 | goto exit; | |
3964 | } | |
3965 | ||
3966 | for (i = 0; i < port->n_rxq; i++) { | |
3967 | port->rxqs[i].core_id = core_ids[i]; | |
3968 | } | |
3969 | ||
3970 | exit: | |
3971 | free(core_ids); | |
3972 | return error; | |
3973 | } | |
3974 | ||
2fbadeb6 IM |
3975 | /* Returns 'true' if one of the 'port's RX queues exists in 'poll_list' |
3976 | * of given PMD thread. */ | |
3977 | static bool | |
3978 | dpif_netdev_pmd_polls_port(struct dp_netdev_pmd_thread *pmd, | |
3979 | struct dp_netdev_port *port) | |
3980 | OVS_EXCLUDED(pmd->port_mutex) | |
3981 | { | |
3982 | struct rxq_poll *poll; | |
3983 | bool found = false; | |
3984 | ||
3985 | ovs_mutex_lock(&pmd->port_mutex); | |
3986 | HMAP_FOR_EACH (poll, node, &pmd->poll_list) { | |
3987 | if (port == poll->rxq->port) { | |
3988 | found = true; | |
3989 | break; | |
3990 | } | |
3991 | } | |
3992 | ovs_mutex_unlock(&pmd->port_mutex); | |
3993 | return found; | |
3994 | } | |
3995 | ||
3996 | /* Updates port configuration from the database. The changes are actually | |
3997 | * applied in dpif_netdev_run(). */ | |
3eb67853 IM |
3998 | static int |
3999 | dpif_netdev_port_set_config(struct dpif *dpif, odp_port_t port_no, | |
4000 | const struct smap *cfg) | |
4001 | { | |
4002 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
4003 | struct dp_netdev_port *port; | |
4004 | int error = 0; | |
4005 | const char *affinity_list = smap_get(cfg, "pmd-rxq-affinity"); | |
2fbadeb6 | 4006 | bool emc_enabled = smap_get_bool(cfg, "emc-enable", true); |
3eb67853 IM |
4007 | |
4008 | ovs_mutex_lock(&dp->port_mutex); | |
4009 | error = get_port_by_number(dp, port_no, &port); | |
2fbadeb6 IM |
4010 | if (error) { |
4011 | goto unlock; | |
4012 | } | |
4013 | ||
4014 | if (emc_enabled != port->emc_enabled) { | |
4015 | struct dp_netdev_pmd_thread *pmd; | |
4016 | struct ds ds = DS_EMPTY_INITIALIZER; | |
4017 | uint32_t cur_min, insert_prob; | |
4018 | ||
4019 | port->emc_enabled = emc_enabled; | |
4020 | /* Mark for reload all the threads that polls this port and request | |
4021 | * for reconfiguration for the actual reloading of threads. */ | |
4022 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
4023 | if (dpif_netdev_pmd_polls_port(pmd, port)) { | |
4024 | pmd->need_reload = true; | |
4025 | } | |
4026 | } | |
4027 | dp_netdev_request_reconfigure(dp); | |
4028 | ||
4029 | ds_put_format(&ds, "%s: EMC has been %s.", | |
4030 | netdev_get_name(port->netdev), | |
4031 | (emc_enabled) ? "enabled" : "disabled"); | |
4032 | if (emc_enabled) { | |
4033 | ds_put_cstr(&ds, " Current insertion probability is "); | |
4034 | atomic_read_relaxed(&dp->emc_insert_min, &cur_min); | |
4035 | if (!cur_min) { | |
4036 | ds_put_cstr(&ds, "zero."); | |
4037 | } else { | |
4038 | insert_prob = UINT32_MAX / cur_min; | |
4039 | ds_put_format(&ds, "1/%"PRIu32" (~%.2f%%).", | |
4040 | insert_prob, 100 / (float) insert_prob); | |
4041 | } | |
4042 | } | |
4043 | VLOG_INFO("%s", ds_cstr(&ds)); | |
4044 | ds_destroy(&ds); | |
4045 | } | |
4046 | ||
4047 | /* Checking for RXq affinity changes. */ | |
4048 | if (!netdev_is_pmd(port->netdev) | |
3eb67853 IM |
4049 | || nullable_string_is_equal(affinity_list, port->rxq_affinity_list)) { |
4050 | goto unlock; | |
4051 | } | |
4052 | ||
4053 | error = dpif_netdev_port_set_rxq_affinity(port, affinity_list); | |
4054 | if (error) { | |
4055 | goto unlock; | |
4056 | } | |
4057 | free(port->rxq_affinity_list); | |
4058 | port->rxq_affinity_list = nullable_xstrdup(affinity_list); | |
4059 | ||
4060 | dp_netdev_request_reconfigure(dp); | |
4061 | unlock: | |
4062 | ovs_mutex_unlock(&dp->port_mutex); | |
4063 | return error; | |
4064 | } | |
4065 | ||
5bf93d67 EJ |
4066 | static int |
4067 | dpif_netdev_queue_to_priority(const struct dpif *dpif OVS_UNUSED, | |
4068 | uint32_t queue_id, uint32_t *priority) | |
4069 | { | |
4070 | *priority = queue_id; | |
4071 | return 0; | |
4072 | } | |
4073 | ||
72865317 | 4074 | \f |
9ff55ae2 | 4075 | /* Creates and returns a new 'struct dp_netdev_actions', whose actions are |
1401f6de | 4076 | * a copy of the 'size' bytes of the 'actions' input parameter. */ |
a84cb64a BP |
4077 | struct dp_netdev_actions * |
4078 | dp_netdev_actions_create(const struct nlattr *actions, size_t size) | |
4079 | { | |
4080 | struct dp_netdev_actions *netdev_actions; | |
4081 | ||
9ff55ae2 DDP |
4082 | netdev_actions = xmalloc(sizeof *netdev_actions + size); |
4083 | memcpy(netdev_actions->actions, actions, size); | |
a84cb64a BP |
4084 | netdev_actions->size = size; |
4085 | ||
4086 | return netdev_actions; | |
4087 | } | |
4088 | ||
a84cb64a | 4089 | struct dp_netdev_actions * |
61e7deb1 | 4090 | dp_netdev_flow_get_actions(const struct dp_netdev_flow *flow) |
a84cb64a | 4091 | { |
61e7deb1 | 4092 | return ovsrcu_get(struct dp_netdev_actions *, &flow->actions); |
a84cb64a BP |
4093 | } |
4094 | ||
61e7deb1 BP |
4095 | static void |
4096 | dp_netdev_actions_free(struct dp_netdev_actions *actions) | |
a84cb64a | 4097 | { |
61e7deb1 | 4098 | free(actions); |
a84cb64a BP |
4099 | } |
4100 | \f | |
a19896ab JS |
4101 | static void |
4102 | dp_netdev_rxq_set_cycles(struct dp_netdev_rxq *rx, | |
4103 | enum rxq_cycles_counter_type type, | |
4104 | unsigned long long cycles) | |
a2ac666d | 4105 | { |
a19896ab | 4106 | atomic_store_relaxed(&rx->cycles[type], cycles); |
a2ac666d CL |
4107 | } |
4108 | ||
4809891b | 4109 | static void |
a19896ab | 4110 | dp_netdev_rxq_add_cycles(struct dp_netdev_rxq *rx, |
4809891b KT |
4111 | enum rxq_cycles_counter_type type, |
4112 | unsigned long long cycles) | |
4113 | { | |
a19896ab | 4114 | non_atomic_ullong_add(&rx->cycles[type], cycles); |
4809891b KT |
4115 | } |
4116 | ||
4117 | static uint64_t | |
4118 | dp_netdev_rxq_get_cycles(struct dp_netdev_rxq *rx, | |
4119 | enum rxq_cycles_counter_type type) | |
4120 | { | |
4121 | unsigned long long processing_cycles; | |
4122 | atomic_read_relaxed(&rx->cycles[type], &processing_cycles); | |
4123 | return processing_cycles; | |
4124 | } | |
4125 | ||
4126 | static void | |
4127 | dp_netdev_rxq_set_intrvl_cycles(struct dp_netdev_rxq *rx, | |
4128 | unsigned long long cycles) | |
4129 | { | |
4ee87ad3 BP |
4130 | unsigned int idx = rx->intrvl_idx++ % PMD_RXQ_INTERVAL_MAX; |
4131 | atomic_store_relaxed(&rx->cycles_intrvl[idx], cycles); | |
4809891b KT |
4132 | } |
4133 | ||
655856ef KT |
4134 | static uint64_t |
4135 | dp_netdev_rxq_get_intrvl_cycles(struct dp_netdev_rxq *rx, unsigned idx) | |
4136 | { | |
4137 | unsigned long long processing_cycles; | |
4138 | atomic_read_relaxed(&rx->cycles_intrvl[idx], &processing_cycles); | |
4139 | return processing_cycles; | |
4140 | } | |
4141 | ||
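/* Sketch of the per-rxq cycles ring above: a fixed window of the most
 * recent interval measurements, overwritten round-robin; summing or
 * averaging the ring gives the load estimate used when assigning rxqs
 * to PMD threads.  TOY_INTERVALS stands in for PMD_RXQ_INTERVAL_MAX. */
#include <stdint.h>

#define TOY_INTERVALS 6

struct toy_rxq_cycles {
    uint64_t intrvl[TOY_INTERVALS];
    unsigned idx;
};

static void
toy_store_interval(struct toy_rxq_cycles *c, uint64_t cycles)
{
    c->intrvl[c->idx++ % TOY_INTERVALS] = cycles;   /* Drop the oldest. */
}

static uint64_t
toy_mean_cycles(const struct toy_rxq_cycles *c)
{
    uint64_t sum = 0;

    for (int i = 0; i < TOY_INTERVALS; i++) {
        sum += c->intrvl[i];
    }
    return sum / TOY_INTERVALS;
}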
79f36875 JS |
4142 | #if ATOMIC_ALWAYS_LOCK_FREE_8B |
4143 | static inline bool | |
4144 | pmd_perf_metrics_enabled(const struct dp_netdev_pmd_thread *pmd) | |
4145 | { | |
4146 | bool pmd_perf_enabled; | |
4147 | atomic_read_relaxed(&pmd->dp->pmd_perf_metrics, &pmd_perf_enabled); | |
4148 | return pmd_perf_enabled; | |
4149 | } | |
4150 | #else | |
4151 | /* If stores and reads of 64-bit integers are not atomic, the full PMD | |
4152 | * performance metrics are not available as locked access to 64-bit | |
4153 | * integers would be prohibitively expensive. */ | |
4154 | static inline bool | |
4155 | pmd_perf_metrics_enabled(const struct dp_netdev_pmd_thread *pmd OVS_UNUSED) | |
4156 | { | |
4157 | return false; | |
4158 | } | |
4159 | #endif | |
4160 | ||
c71ea3c4 | 4161 | static int |
009e0033 IM |
4162 | dp_netdev_pmd_flush_output_on_port(struct dp_netdev_pmd_thread *pmd, |
4163 | struct tx_port *p) | |
4164 | { | |
58ed6df0 | 4165 | int i; |
009e0033 | 4166 | int tx_qid; |
cc4891f3 | 4167 | int output_cnt; |
009e0033 | 4168 | bool dynamic_txqs; |
58ed6df0 IM |
4169 | struct cycle_timer timer; |
4170 | uint64_t cycles; | |
c71ea3c4 | 4171 | uint32_t tx_flush_interval; |
58ed6df0 IM |
4172 | |
4173 | cycle_timer_start(&pmd->perf_stats, &timer); | |
009e0033 IM |
4174 | |
4175 | dynamic_txqs = p->port->dynamic_txqs; | |
4176 | if (dynamic_txqs) { | |
4177 | tx_qid = dpif_netdev_xps_get_tx_qid(pmd, p); | |
4178 | } else { | |
4179 | tx_qid = pmd->static_tx_qid; | |
4180 | } | |
4181 | ||
cc4891f3 | 4182 | output_cnt = dp_packet_batch_size(&p->output_pkts); |
58ed6df0 | 4183 | ovs_assert(output_cnt > 0); |
cc4891f3 | 4184 | |
b30896c9 | 4185 | netdev_send(p->port->netdev, tx_qid, &p->output_pkts, dynamic_txqs); |
009e0033 | 4186 | dp_packet_batch_init(&p->output_pkts); |
cc4891f3 | 4187 | |
c71ea3c4 IM |
4188 | /* Update time of the next flush. */ |
4189 | atomic_read_relaxed(&pmd->dp->tx_flush_interval, &tx_flush_interval); | |
4190 | p->flush_time = pmd->ctx.now + tx_flush_interval; | |
4191 | ||
4192 | ovs_assert(pmd->n_output_batches > 0); | |
4193 | pmd->n_output_batches--; | |
4194 | ||
82a48ead JS |
4195 | pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_SENT_PKTS, output_cnt); |
4196 | pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_SENT_BATCHES, 1); | |
58ed6df0 IM |
4197 | |
4198 | /* Distribute send cycles evenly among transmitted packets and assign to | |
4199 | * their respective rx queues. */ | |
4200 | cycles = cycle_timer_stop(&pmd->perf_stats, &timer) / output_cnt; | |
4201 | for (i = 0; i < output_cnt; i++) { | |
4202 | if (p->output_pkts_rxqs[i]) { | |
4203 | dp_netdev_rxq_add_cycles(p->output_pkts_rxqs[i], | |
4204 | RXQ_CYCLES_PROC_CURR, cycles); | |
4205 | } | |
4206 | } | |
c71ea3c4 IM |
4207 | |
4208 | return output_cnt; | |
009e0033 IM |
4209 | } |
4210 | ||
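/* Sketch of the even cycles split above: the measured send cost is
 * divided by the batch size and one share is credited back to each
 * packet's source rxq, so tx work shows up in the same per-rxq load
 * numbers as rx processing.  The rxq is reduced to a plain counter
 * here, and 'n_pkts' must be nonzero, mirroring the output_cnt
 * assertion above. */
#include <stdint.h>
#include <stddef.h>

static void
toy_credit_tx_cycles(uint64_t *rxq_cycles[], size_t n_pkts,
                     uint64_t total_cycles)
{
    uint64_t share = total_cycles / n_pkts;

    for (size_t i = 0; i < n_pkts; i++) {
        if (rxq_cycles[i]) {            /* NULL: packet had no source rxq. */
            *rxq_cycles[i] += share;
        }
    }
}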
c71ea3c4 IM |
4211 | static int |
4212 | dp_netdev_pmd_flush_output_packets(struct dp_netdev_pmd_thread *pmd, | |
4213 | bool force) | |
009e0033 IM |
4214 | { |
4215 | struct tx_port *p; | |
c71ea3c4 IM |
4216 | int output_cnt = 0; |
4217 | ||
4218 | if (!pmd->n_output_batches) { | |
4219 | return 0; | |
4220 | } | |
009e0033 IM |
4221 | |
4222 | HMAP_FOR_EACH (p, node, &pmd->send_port_cache) { | |
c71ea3c4 IM |
4223 | if (!dp_packet_batch_is_empty(&p->output_pkts) |
4224 | && (force || pmd->ctx.now >= p->flush_time)) { | |
4225 | output_cnt += dp_netdev_pmd_flush_output_on_port(pmd, p); | |
009e0033 IM |
4226 | } |
4227 | } | |
c71ea3c4 | 4228 | return output_cnt; |
009e0033 IM |
4229 | } |
4230 | ||
a2ac666d | 4231 | static int |
65f13b50 | 4232 | dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd, |
a19896ab | 4233 | struct dp_netdev_rxq *rxq, |
947dc567 | 4234 | odp_port_t port_no) |
e4cfed38 | 4235 | { |
79f36875 | 4236 | struct pmd_perf_stats *s = &pmd->perf_stats; |
1895cc8d | 4237 | struct dp_packet_batch batch; |
a19896ab | 4238 | struct cycle_timer timer; |
1895cc8d | 4239 | int error; |
79f36875 JS |
4240 | int batch_cnt = 0; |
4241 | int rem_qlen = 0, *qlen_p = NULL; | |
58ed6df0 | 4242 | uint64_t cycles; |
e4cfed38 | 4243 | |
a19896ab JS |
4244 | /* Measure duration for polling and processing rx burst. */ |
4245 | cycle_timer_start(&pmd->perf_stats, &timer); | |
58ed6df0 IM |
4246 | |
4247 | pmd->ctx.last_rxq = rxq; | |
1895cc8d | 4248 | dp_packet_batch_init(&batch); |
58ed6df0 | 4249 | |
79f36875 JS |
4250 | /* Fetch the rx queue length only for vhostuser ports. */ |
4251 | if (pmd_perf_metrics_enabled(pmd) && rxq->is_vhost) { | |
4252 | qlen_p = &rem_qlen; | |
4253 | } | |
4254 | ||
4255 | error = netdev_rxq_recv(rxq->rx, &batch, qlen_p); | |
e4cfed38 | 4256 | if (!error) { |
a19896ab | 4257 | /* At least one packet received. */ |
3c33f0ff | 4258 | *recirc_depth_get() = 0; |
009e0033 | 4259 | pmd_thread_ctx_time_update(pmd); |
a2ac666d | 4260 | batch_cnt = batch.count; |
79f36875 JS |
4261 | if (pmd_perf_metrics_enabled(pmd)) { |
4262 | /* Update batch histogram. */ | |
4263 | s->current.batches++; | |
4264 | histogram_add_sample(&s->pkts_per_batch, batch_cnt); | |
4265 | /* Update the maximum vhost rx queue fill level. */ | |
4266 | if (rxq->is_vhost && rem_qlen >= 0) { | |
4267 | uint32_t qfill = batch_cnt + rem_qlen; | |
4268 | if (qfill > s->current.max_vhost_qfill) { | |
4269 | s->current.max_vhost_qfill = qfill; | |
4270 | } | |
4271 | } | |
4272 | } | |
4273 | /* Process packet batch. */ | |
947dc567 | 4274 | dp_netdev_input(pmd, &batch, port_no); |
e4cfed38 | 4275 | |
a19896ab | 4276 | /* Assign processing cycles to rx queue. */ |
58ed6df0 | 4277 | cycles = cycle_timer_stop(&pmd->perf_stats, &timer); |
a19896ab JS |
4278 | dp_netdev_rxq_add_cycles(rxq, RXQ_CYCLES_PROC_CURR, cycles); |
4279 | ||
79f36875 | 4280 | dp_netdev_pmd_flush_output_packets(pmd, false); |
a19896ab JS |
4281 | } else { |
4282 | /* Discard cycles. */ | |
4283 | cycle_timer_stop(&pmd->perf_stats, &timer); | |
4284 | if (error != EAGAIN && error != EOPNOTSUPP) { | |
4285 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); | |
4286 | ||
4287 | VLOG_ERR_RL(&rl, "error receiving data from %s: %s", | |
4288 | netdev_rxq_get_name(rxq->rx), ovs_strerror(error)); | |
4289 | } | |
e4cfed38 | 4290 | } |
a2ac666d | 4291 | |
58ed6df0 IM |
4292 | pmd->ctx.last_rxq = NULL; |
4293 | ||
79f36875 | 4294 | return batch_cnt; |
e4cfed38 PS |
4295 | } |
4296 | ||
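/* Worked numbers for the vhost fill level above (values hypothetical):
 * a poll that returns batch_cnt = 32 packets while rem_qlen = 96 packets
 * are still queued gives qfill = 128; only the per-interval maximum is
 * retained in 's->current.max_vhost_qfill' for the performance report. */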
e32971b8 DDP |
4297 | static struct tx_port * |
4298 | tx_port_lookup(const struct hmap *hmap, odp_port_t port_no) | |
4299 | { | |
4300 | struct tx_port *tx; | |
4301 | ||
4302 | HMAP_FOR_EACH_IN_BUCKET (tx, node, hash_port_no(port_no), hmap) { | |
4303 | if (tx->port->port_no == port_no) { | |
4304 | return tx; | |
4305 | } | |
4306 | } | |
4307 | ||
4308 | return NULL; | |
4309 | } | |
4310 | ||
dc36593c DDP |
4311 | static int |
4312 | port_reconfigure(struct dp_netdev_port *port) | |
4313 | { | |
4314 | struct netdev *netdev = port->netdev; | |
dc36593c DDP |
4315 | int i, err; |
4316 | ||
dc36593c DDP |
4317 | /* Closes the existing 'rxq's. */ |
4318 | for (i = 0; i < port->n_rxq; i++) { | |
947dc567 DDP |
4319 | netdev_rxq_close(port->rxqs[i].rx); |
4320 | port->rxqs[i].rx = NULL; | |
dc36593c | 4321 | } |
4809891b | 4322 | unsigned last_nrxq = port->n_rxq; |
dc36593c DDP |
4323 | port->n_rxq = 0; |
4324 | ||
050c60bf | 4325 | /* Allows 'netdev' to apply the pending configuration changes. */ |
606f6650 | 4326 | if (netdev_is_reconf_required(netdev) || port->need_reconfigure) { |
e32971b8 DDP |
4327 | err = netdev_reconfigure(netdev); |
4328 | if (err && (err != EOPNOTSUPP)) { | |
4329 | VLOG_ERR("Failed to set interface %s new configuration", | |
4330 | netdev_get_name(netdev)); | |
4331 | return err; | |
4332 | } | |
dc36593c | 4333 | } |
050c60bf | 4334 | /* If the netdev_reconfigure() above succeeds, reopens the 'rxq's. */ |
3eb67853 IM |
4335 | port->rxqs = xrealloc(port->rxqs, |
4336 | sizeof *port->rxqs * netdev_n_rxq(netdev)); | |
324c8374 IM |
4337 | /* Realloc 'used' counters for tx queues. */ |
4338 | free(port->txq_used); | |
4339 | port->txq_used = xcalloc(netdev_n_txq(netdev), sizeof *port->txq_used); | |
4340 | ||
dc36593c | 4341 | for (i = 0; i < netdev_n_rxq(netdev); i++) { |
38259bd7 BP |
4342 | bool new_queue = i >= last_nrxq; |
4343 | if (new_queue) { | |
4344 | memset(&port->rxqs[i], 0, sizeof port->rxqs[i]); | |
4345 | } | |
4346 | ||
947dc567 | 4347 | port->rxqs[i].port = port; |
79f36875 | 4348 | port->rxqs[i].is_vhost = !strncmp(port->type, "dpdkvhost", 9); |
38259bd7 | 4349 | |
947dc567 | 4350 | err = netdev_rxq_open(netdev, &port->rxqs[i].rx, i); |
dc36593c DDP |
4351 | if (err) { |
4352 | return err; | |
4353 | } | |
4354 | port->n_rxq++; | |
4355 | } | |
4356 | ||
3eb67853 IM |
4357 | /* Parse affinity list to apply configuration for new queues. */ |
4358 | dpif_netdev_port_set_rxq_affinity(port, port->rxq_affinity_list); | |
4359 | ||
606f6650 EC |
4360 | /* If reconfiguration was successful mark it as such, so we can use it */ |
4361 | port->need_reconfigure = false; | |
4362 | ||
dc36593c DDP |
4363 | return 0; |
4364 | } | |
4365 | ||
e32971b8 DDP |
4366 | struct rr_numa_list { |
4367 | struct hmap numas; /* Contains 'struct rr_numa' */ | |
4368 | }; | |
4369 | ||
4370 | struct rr_numa { | |
4371 | struct hmap_node node; | |
4372 | ||
4373 | int numa_id; | |
4374 | ||
4375 | /* Non-isolated pmds on numa node 'numa_id'. */ |
4376 | struct dp_netdev_pmd_thread **pmds; | |
4377 | int n_pmds; | |
4378 | ||
4379 | int cur_index; | |
79da1e41 | 4380 | bool idx_inc; |
e32971b8 DDP |
4381 | }; |
4382 | ||
4383 | static struct rr_numa * | |
4384 | rr_numa_list_lookup(struct rr_numa_list *rr, int numa_id) | |
4385 | { | |
4386 | struct rr_numa *numa; | |
4387 | ||
4388 | HMAP_FOR_EACH_WITH_HASH (numa, node, hash_int(numa_id, 0), &rr->numas) { | |
4389 | if (numa->numa_id == numa_id) { | |
4390 | return numa; | |
4391 | } | |
4392 | } | |
4393 | ||
4394 | return NULL; | |
4395 | } | |
4396 | ||
c37813fd BM |
4397 | /* Returns the next node in numa list following 'numa' in round-robin fashion. |
4398 | * Returns first node if 'numa' is a null pointer or the last node in 'rr'. | |
4399 | * Returns NULL if 'rr' numa list is empty. */ | |
4400 | static struct rr_numa * | |
4401 | rr_numa_list_next(struct rr_numa_list *rr, const struct rr_numa *numa) | |
4402 | { | |
4403 | struct hmap_node *node = NULL; | |
4404 | ||
4405 | if (numa) { | |
4406 | node = hmap_next(&rr->numas, &numa->node); | |
4407 | } | |
4408 | if (!node) { | |
4409 | node = hmap_first(&rr->numas); | |
4410 | } | |
4411 | ||
4412 | return (node) ? CONTAINER_OF(node, struct rr_numa, node) : NULL; | |
4413 | } | |
4414 | ||
e32971b8 DDP |
4415 | static void |
4416 | rr_numa_list_populate(struct dp_netdev *dp, struct rr_numa_list *rr) | |
4417 | { | |
4418 | struct dp_netdev_pmd_thread *pmd; | |
4419 | struct rr_numa *numa; | |
4420 | ||
4421 | hmap_init(&rr->numas); | |
4422 | ||
4423 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
4424 | if (pmd->core_id == NON_PMD_CORE_ID || pmd->isolated) { | |
4425 | continue; | |
4426 | } | |
4427 | ||
4428 | numa = rr_numa_list_lookup(rr, pmd->numa_id); | |
4429 | if (!numa) { | |
4430 | numa = xzalloc(sizeof *numa); | |
4431 | numa->numa_id = pmd->numa_id; | |
4432 | hmap_insert(&rr->numas, &numa->node, hash_int(pmd->numa_id, 0)); | |
4433 | } | |
4434 | numa->n_pmds++; | |
4435 | numa->pmds = xrealloc(numa->pmds, numa->n_pmds * sizeof *numa->pmds); | |
4436 | numa->pmds[numa->n_pmds - 1] = pmd; | |
79da1e41 KT |
4437 | /* At least one pmd, so initialise 'cur_index' and 'idx_inc'. */ |
4438 | numa->cur_index = 0; | |
4439 | numa->idx_inc = true; | |
e32971b8 DDP |
4440 | } |
4441 | } | |
4442 | ||
e77c97b9 KT |
4443 | /* |
4444 | * Returns the next pmd from the numa node. | |
4445 | * | |
4446 | * If 'updown' is 'true' it will alternate between selecting the next pmd in | |
4447 | * either an up or down walk, switching between up/down when the first or last | |
4448 | * core is reached. e.g. 1,2,3,3,2,1,1,2... | |
4449 | * | |
4450 | * If 'updown' is 'false' it will select the next pmd wrapping around when last | |
4451 | * core reached. e.g. 1,2,3,1,2,3,1,2... | |
4452 | */ | |
e32971b8 | 4453 | static struct dp_netdev_pmd_thread * |
e77c97b9 | 4454 | rr_numa_get_pmd(struct rr_numa *numa, bool updown) |
e32971b8 | 4455 | { |
79da1e41 KT |
4456 | int numa_idx = numa->cur_index; |
4457 | ||
4458 | if (numa->idx_inc == true) { | |
4459 | /* Incrementing through list of pmds. */ | |
4460 | if (numa->cur_index == numa->n_pmds-1) { | |
4461 | /* Reached the last pmd. */ | |
e77c97b9 KT |
4462 | if (updown) { |
4463 | numa->idx_inc = false; | |
4464 | } else { | |
4465 | numa->cur_index = 0; | |
4466 | } | |
79da1e41 KT |
4467 | } else { |
4468 | numa->cur_index++; | |
4469 | } | |
4470 | } else { | |
4471 | /* Decrementing through list of pmds. */ | |
4472 | if (numa->cur_index == 0) { | |
4473 | /* Reached the first pmd. */ | |
4474 | numa->idx_inc = true; | |
4475 | } else { | |
4476 | numa->cur_index--; | |
4477 | } | |
4478 | } | |
4479 | return numa->pmds[numa_idx]; | |
e32971b8 DDP |
4480 | } |
4481 | ||
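/* A self-contained sketch of the pick order rr_numa_get_pmd() produces
 * for a numa node with three pmds (indices 0, 1, 2).  It mirrors the
 * 'cur_index'/'idx_inc' logic above for illustration only: 'order'
 * receives 0,1,2,2,1,0,0,1 when 'updown' is true and 0,1,2,0,1,2,0,1
 * otherwise. */
static inline void
example_rr_walk(bool updown, int order[8])
{
    int n_pmds = 3;
    int cur = 0;
    bool inc = true;

    for (int step = 0; step < 8; step++) {
        order[step] = cur;
        if (inc) {
            if (cur == n_pmds - 1) {
                if (updown) {
                    inc = false;   /* Reverse at the last pmd. */
                } else {
                    cur = 0;       /* Wrap around to the first pmd. */
                }
            } else {
                cur++;
            }
        } else {
            if (cur == 0) {
                inc = true;        /* Reverse at the first pmd. */
            } else {
                cur--;
            }
        }
    }
}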
4482 | static void | |
4483 | rr_numa_list_destroy(struct rr_numa_list *rr) | |
4484 | { | |
4485 | struct rr_numa *numa; | |
4486 | ||
4487 | HMAP_FOR_EACH_POP (numa, node, &rr->numas) { | |
4488 | free(numa->pmds); | |
4489 | free(numa); | |
4490 | } | |
4491 | hmap_destroy(&rr->numas); | |
4492 | } | |
4493 | ||
655856ef KT |
4494 | /* Sorts rx queues in descending order of the processing cycles they consume. */ |
4495 | static int | |
cc131ac1 | 4496 | compare_rxq_cycles(const void *a, const void *b) |
655856ef | 4497 | { |
28080276 KT |
4498 | struct dp_netdev_rxq *qa; |
4499 | struct dp_netdev_rxq *qb; | |
8368866e | 4500 | uint64_t cycles_qa, cycles_qb; |
655856ef KT |
4501 | |
4502 | qa = *(struct dp_netdev_rxq **) a; | |
4503 | qb = *(struct dp_netdev_rxq **) b; | |
4504 | ||
8368866e KT |
4505 | cycles_qa = dp_netdev_rxq_get_cycles(qa, RXQ_CYCLES_PROC_HIST); |
4506 | cycles_qb = dp_netdev_rxq_get_cycles(qb, RXQ_CYCLES_PROC_HIST); | |
655856ef | 4507 | |
8368866e KT |
4508 | if (cycles_qa != cycles_qb) { |
4509 | return (cycles_qa < cycles_qb) ? 1 : -1; | |
a130f1a8 KT |
4510 | } else { |
4511 | /* Cycles are the same so tiebreak on port/queue id. | |
4512 | * Tiebreaking (as opposed to return 0) ensures consistent | |
4513 | * sort results across multiple OSes. */ |
f0aa3801 BP |
4514 | uint32_t port_qa = odp_to_u32(qa->port->port_no); |
4515 | uint32_t port_qb = odp_to_u32(qb->port->port_no); | |
4516 | if (port_qa != port_qb) { | |
4517 | return port_qa > port_qb ? 1 : -1; | |
a130f1a8 KT |
4518 | } else { |
4519 | return netdev_rxq_get_queue_id(qa->rx) | |
4520 | - netdev_rxq_get_queue_id(qb->rx); | |
4521 | } | |
655856ef | 4522 | } |
655856ef KT |
4523 | } |
4524 | ||
e32971b8 DDP |
4525 | /* Assigns pmds to queues. If 'pinned' is true, assigns pmds to pinned |
4526 | * queues and marks those pmds as isolated. Otherwise, assigns non-isolated |
4527 | * pmds to unpinned queues. |
4528 | * | |
4529 | * The function doesn't touch the pmd threads, it just stores the assignment | |
4530 | * in the 'pmd' member of each rxq. */ | |
4531 | static void | |
4532 | rxq_scheduling(struct dp_netdev *dp, bool pinned) OVS_REQUIRES(dp->port_mutex) | |
4533 | { | |
4534 | struct dp_netdev_port *port; | |
4535 | struct rr_numa_list rr; | |
c37813fd | 4536 | struct rr_numa *non_local_numa = NULL; |
655856ef | 4537 | struct dp_netdev_rxq ** rxqs = NULL; |
97bf8f47 | 4538 | int n_rxqs = 0; |
655856ef KT |
4539 | struct rr_numa *numa = NULL; |
4540 | int numa_id; | |
e77c97b9 | 4541 | bool assign_cyc = dp->pmd_rxq_assign_cyc; |
e32971b8 DDP |
4542 | |
4543 | HMAP_FOR_EACH (port, node, &dp->ports) { | |
e32971b8 DDP |
4544 | if (!netdev_is_pmd(port->netdev)) { |
4545 | continue; | |
4546 | } | |
4547 | ||
e32971b8 DDP |
4548 | for (int qid = 0; qid < port->n_rxq; qid++) { |
4549 | struct dp_netdev_rxq *q = &port->rxqs[qid]; | |
4550 | ||
4551 | if (pinned && q->core_id != OVS_CORE_UNSPEC) { | |
4552 | struct dp_netdev_pmd_thread *pmd; | |
4553 | ||
4554 | pmd = dp_netdev_get_pmd(dp, q->core_id); | |
4555 | if (!pmd) { | |
4556 | VLOG_WARN("There is no PMD thread on core %d. Queue " | |
4557 | "%d on port \'%s\' will not be polled.", | |
4558 | q->core_id, qid, netdev_get_name(port->netdev)); | |
4559 | } else { | |
4560 | q->pmd = pmd; | |
4561 | pmd->isolated = true; | |
4562 | dp_netdev_pmd_unref(pmd); | |
4563 | } | |
4564 | } else if (!pinned && q->core_id == OVS_CORE_UNSPEC) { | |
8368866e KT |
4565 | uint64_t cycle_hist = 0; |
4566 | ||
655856ef KT |
4567 | if (n_rxqs == 0) { |
4568 | rxqs = xmalloc(sizeof *rxqs); | |
e32971b8 | 4569 | } else { |
655856ef | 4570 | rxqs = xrealloc(rxqs, sizeof *rxqs * (n_rxqs + 1)); |
e32971b8 | 4571 | } |
8368866e | 4572 | |
e77c97b9 KT |
4573 | if (assign_cyc) { |
4574 | /* Sum the queue intervals and store the cycle history. */ | |
4575 | for (unsigned i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) { | |
4576 | cycle_hist += dp_netdev_rxq_get_intrvl_cycles(q, i); | |
4577 | } | |
4578 | dp_netdev_rxq_set_cycles(q, RXQ_CYCLES_PROC_HIST, | |
4579 | cycle_hist); | |
4580 | } | |
655856ef KT |
4581 | /* Store the queue. */ |
4582 | rxqs[n_rxqs++] = q; | |
e32971b8 DDP |
4583 | } |
4584 | } | |
4585 | } | |
4586 | ||
e77c97b9 | 4587 | if (n_rxqs > 1 && assign_cyc) { |
655856ef KT |
4588 | /* Sort the queues in order of the processing cycles |
4589 | * they consumed during their last pmd interval. */ | |
cc131ac1 | 4590 | qsort(rxqs, n_rxqs, sizeof *rxqs, compare_rxq_cycles); |
655856ef KT |
4591 | } |
4592 | ||
4593 | rr_numa_list_populate(dp, &rr); | |
4594 | /* Assign the sorted queues to pmds in round robin. */ | |
97bf8f47 | 4595 | for (int i = 0; i < n_rxqs; i++) { |
655856ef KT |
4596 | numa_id = netdev_get_numa_id(rxqs[i]->port->netdev); |
4597 | numa = rr_numa_list_lookup(&rr, numa_id); | |
4598 | if (!numa) { | |
4599 | /* There are no pmds on the queue's local NUMA node. | |
4600 | Round robin on the NUMA nodes that do have pmds. */ | |
4601 | non_local_numa = rr_numa_list_next(&rr, non_local_numa); | |
4602 | if (!non_local_numa) { | |
4603 | VLOG_ERR("There is no available (non-isolated) pmd " | |
4604 | "thread for port \'%s\' queue %d. This queue " | |
4605 | "will not be polled. Is pmd-cpu-mask set to " | |
4606 | "zero? Or are all PMDs isolated to other " | |
4607 | "queues?", netdev_rxq_get_name(rxqs[i]->rx), | |
4608 | netdev_rxq_get_queue_id(rxqs[i]->rx)); | |
4609 | continue; | |
4610 | } | |
e77c97b9 | 4611 | rxqs[i]->pmd = rr_numa_get_pmd(non_local_numa, assign_cyc); |
655856ef KT |
4612 | VLOG_WARN("There's no available (non-isolated) pmd thread " |
4613 | "on numa node %d. Queue %d on port \'%s\' will " | |
4614 | "be assigned to the pmd on core %d " | |
4615 | "(numa node %d). Expect reduced performance.", | |
4616 | numa_id, netdev_rxq_get_queue_id(rxqs[i]->rx), | |
4617 | netdev_rxq_get_name(rxqs[i]->rx), | |
4618 | rxqs[i]->pmd->core_id, rxqs[i]->pmd->numa_id); | |
4619 | } else { | |
e77c97b9 KT |
4620 | rxqs[i]->pmd = rr_numa_get_pmd(numa, assign_cyc); |
4621 | if (assign_cyc) { | |
4622 | VLOG_INFO("Core %d on numa node %d assigned port \'%s\' " | |
4623 | "rx queue %d " | |
4624 | "(measured processing cycles %"PRIu64").", | |
4625 | rxqs[i]->pmd->core_id, numa_id, | |
4626 | netdev_rxq_get_name(rxqs[i]->rx), | |
4627 | netdev_rxq_get_queue_id(rxqs[i]->rx), | |
4628 | dp_netdev_rxq_get_cycles(rxqs[i], | |
4629 | RXQ_CYCLES_PROC_HIST)); | |
4630 | } else { | |
4631 | VLOG_INFO("Core %d on numa node %d assigned port \'%s\' " | |
4632 | "rx queue %d.", rxqs[i]->pmd->core_id, numa_id, | |
4633 | netdev_rxq_get_name(rxqs[i]->rx), | |
4634 | netdev_rxq_get_queue_id(rxqs[i]->rx)); | |
4635 | } | |
655856ef KT |
4636 | } |
4637 | } | |
4638 | ||
e32971b8 | 4639 | rr_numa_list_destroy(&rr); |
655856ef | 4640 | free(rxqs); |
e32971b8 DDP |
4641 | } |
4642 | ||
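/* Illustrative driving of the two scheduling passes above from the CLI
 * (interface name hypothetical; the other_config keys are the standard
 * knobs this scheduler consumes):
 *
 *     # Pin rx queue 0 to core 3 and queue 1 to core 7.  Pinned queues
 *     # are placed by rxq_scheduling(dp, true) and their pmds become
 *     # isolated.
 *     ovs-vsctl set interface dpdk-p0 \
 *         other_config:pmd-rxq-affinity="0:3,1:7"
 *
 *     # Unpinned queues are placed by rxq_scheduling(dp, false), either
 *     # by measured cycles ("cycles", the default) or round-robin.
 *     ovs-vsctl set Open_vSwitch . other_config:pmd-rxq-assign=cycles
 */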
140dd699 IM |
4643 | static void |
4644 | reload_affected_pmds(struct dp_netdev *dp) | |
4645 | { | |
4646 | struct dp_netdev_pmd_thread *pmd; | |
4647 | ||
4648 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
4649 | if (pmd->need_reload) { | |
241bad15 | 4650 | flow_mark_flush(pmd); |
140dd699 IM |
4651 | dp_netdev_reload_pmd__(pmd); |
4652 | pmd->need_reload = false; | |
4653 | } | |
4654 | } | |
4655 | } | |
4656 | ||
6e3c6fa4 DDP |
4657 | static void |
4658 | reconfigure_pmd_threads(struct dp_netdev *dp) | |
4659 | OVS_REQUIRES(dp->port_mutex) | |
4660 | { | |
e32971b8 DDP |
4661 | struct dp_netdev_pmd_thread *pmd; |
4662 | struct ovs_numa_dump *pmd_cores; | |
140dd699 IM |
4663 | struct ovs_numa_info_core *core; |
4664 | struct hmapx to_delete = HMAPX_INITIALIZER(&to_delete); | |
4665 | struct hmapx_node *node; | |
e32971b8 | 4666 | bool changed = false; |
140dd699 | 4667 | bool need_to_adjust_static_tx_qids = false; |
e32971b8 DDP |
4668 | |
4669 | /* The pmd threads should be started only if there's a pmd port in the | |
4670 | * datapath. If the user didn't provide any "pmd-cpu-mask", we start | |
4671 | * NR_PMD_THREADS per numa node. */ | |
4672 | if (!has_pmd_port(dp)) { | |
4673 | pmd_cores = ovs_numa_dump_n_cores_per_numa(0); | |
4674 | } else if (dp->pmd_cmask && dp->pmd_cmask[0]) { | |
4675 | pmd_cores = ovs_numa_dump_cores_with_cmask(dp->pmd_cmask); | |
4676 | } else { | |
4677 | pmd_cores = ovs_numa_dump_n_cores_per_numa(NR_PMD_THREADS); | |
4678 | } | |
4679 | ||
140dd699 IM |
4680 | /* We need to adjust 'static_tx_qid's only if we're reducing number of |
4681 | * PMD threads. Otherwise, new threads will allocate all the freed ids. */ | |
4682 | if (ovs_numa_dump_count(pmd_cores) < cmap_count(&dp->poll_threads) - 1) { | |
4683 | /* Adjustment is required to keep 'static_tx_qid's sequential and | |
4684 | * avoid possible issues, for example, imbalanced tx queue usage | |
4685 | * and unnecessary locking caused by remapping on netdev level. */ | |
4686 | need_to_adjust_static_tx_qids = true; | |
4687 | } | |
4688 | ||
4689 | /* Check for unwanted pmd threads */ | |
4690 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
4691 | if (pmd->core_id == NON_PMD_CORE_ID) { | |
4692 | continue; | |
4693 | } | |
4694 | if (!ovs_numa_dump_contains_core(pmd_cores, pmd->numa_id, | |
4695 | pmd->core_id)) { | |
4696 | hmapx_add(&to_delete, pmd); | |
4697 | } else if (need_to_adjust_static_tx_qids) { | |
4698 | pmd->need_reload = true; | |
e32971b8 DDP |
4699 | } |
4700 | } | |
4701 | ||
140dd699 IM |
4702 | HMAPX_FOR_EACH (node, &to_delete) { |
4703 | pmd = (struct dp_netdev_pmd_thread *) node->data; | |
4704 | VLOG_INFO("PMD thread on numa_id: %d, core id: %2d destroyed.", | |
4705 | pmd->numa_id, pmd->core_id); | |
4706 | dp_netdev_del_pmd(dp, pmd); | |
4707 | } | |
4708 | changed = !hmapx_is_empty(&to_delete); | |
4709 | hmapx_destroy(&to_delete); | |
e32971b8 | 4710 | |
140dd699 IM |
4711 | if (need_to_adjust_static_tx_qids) { |
4712 | /* 'static_tx_qid's are not sequential now. | |
4713 | * Reload remaining threads to fix this. */ | |
4714 | reload_affected_pmds(dp); | |
4715 | } | |
e32971b8 | 4716 | |
140dd699 IM |
4717 | /* Check for required new pmd threads */ |
4718 | FOR_EACH_CORE_ON_DUMP(core, pmd_cores) { | |
4719 | pmd = dp_netdev_get_pmd(dp, core->core_id); | |
4720 | if (!pmd) { | |
4721 | pmd = xzalloc(sizeof *pmd); | |
e32971b8 | 4722 | dp_netdev_configure_pmd(pmd, dp, core->core_id, core->numa_id); |
e32971b8 | 4723 | pmd->thread = ovs_thread_create("pmd", pmd_thread_main, pmd); |
140dd699 IM |
4724 | VLOG_INFO("PMD thread on numa_id: %d, core id: %2d created.", |
4725 | pmd->numa_id, pmd->core_id); | |
4726 | changed = true; | |
4727 | } else { | |
4728 | dp_netdev_pmd_unref(pmd); | |
e32971b8 | 4729 | } |
140dd699 IM |
4730 | } |
4731 | ||
4732 | if (changed) { | |
4733 | struct ovs_numa_info_numa *numa; | |
e32971b8 DDP |
4734 | |
4735 | /* Log the number of pmd threads per numa node. */ | |
4736 | FOR_EACH_NUMA_ON_DUMP (numa, pmd_cores) { | |
140dd699 | 4737 | VLOG_INFO("There are %"PRIuSIZE" pmd threads on numa node %d", |
e32971b8 DDP |
4738 | numa->n_cores, numa->numa_id); |
4739 | } | |
4740 | } | |
4741 | ||
4742 | ovs_numa_dump_destroy(pmd_cores); | |
4743 | } | |
4744 | ||
e32971b8 DDP |
4745 | static void |
4746 | pmd_remove_stale_ports(struct dp_netdev *dp, | |
4747 | struct dp_netdev_pmd_thread *pmd) | |
4748 | OVS_EXCLUDED(pmd->port_mutex) | |
4749 | OVS_REQUIRES(dp->port_mutex) | |
4750 | { | |
4751 | struct rxq_poll *poll, *poll_next; | |
4752 | struct tx_port *tx, *tx_next; | |
4753 | ||
4754 | ovs_mutex_lock(&pmd->port_mutex); | |
4755 | HMAP_FOR_EACH_SAFE (poll, poll_next, node, &pmd->poll_list) { | |
4756 | struct dp_netdev_port *port = poll->rxq->port; | |
4757 | ||
4758 | if (port->need_reconfigure | |
4759 | || !hmap_contains(&dp->ports, &port->node)) { | |
4760 | dp_netdev_del_rxq_from_pmd(pmd, poll); | |
4761 | } | |
4762 | } | |
4763 | HMAP_FOR_EACH_SAFE (tx, tx_next, node, &pmd->tx_ports) { | |
4764 | struct dp_netdev_port *port = tx->port; | |
4765 | ||
4766 | if (port->need_reconfigure | |
4767 | || !hmap_contains(&dp->ports, &port->node)) { | |
4768 | dp_netdev_del_port_tx_from_pmd(pmd, tx); | |
4769 | } | |
4770 | } | |
4771 | ovs_mutex_unlock(&pmd->port_mutex); | |
4772 | } | |
4773 | ||
4774 | /* Must be called each time a port is added/removed or the cmask changes. | |
4775 | * This creates and destroys pmd threads, reconfigures ports, opens their | |
4776 | * rxqs and assigns all rxqs/txqs to pmd threads. */ | |
4777 | static void | |
4778 | reconfigure_datapath(struct dp_netdev *dp) | |
4779 | OVS_REQUIRES(dp->port_mutex) | |
4780 | { | |
4781 | struct dp_netdev_pmd_thread *pmd; | |
4782 | struct dp_netdev_port *port; | |
4783 | int wanted_txqs; | |
6e3c6fa4 | 4784 | |
a6a426d6 IM |
4785 | dp->last_reconfigure_seq = seq_read(dp->reconfigure_seq); |
4786 | ||
e32971b8 DDP |
4787 | /* Step 1: Adjust the pmd threads based on the datapath ports, the cores |
4788 | * on the system and the user configuration. */ | |
4789 | reconfigure_pmd_threads(dp); | |
6e3c6fa4 | 4790 | |
e32971b8 | 4791 | wanted_txqs = cmap_count(&dp->poll_threads); |
324c8374 | 4792 | |
e32971b8 DDP |
4793 | /* The number of pmd threads might have changed, or a port can be new: |
4794 | * adjust the txqs. */ | |
4795 | HMAP_FOR_EACH (port, node, &dp->ports) { | |
4796 | netdev_set_tx_multiq(port->netdev, wanted_txqs); | |
324c8374 IM |
4797 | } |
4798 | ||
e32971b8 DDP |
4799 | /* Step 2: Remove from the pmd threads ports that have been removed or |
4800 | * need reconfiguration. */ | |
4801 | ||
4802 | /* Check for all the ports that need reconfiguration. We cache this in | |
85a4f238 IM |
4803 | * 'port->need_reconfigure', because netdev_is_reconf_required() can |
4804 | * change at any time. */ | |
e32971b8 DDP |
4805 | HMAP_FOR_EACH (port, node, &dp->ports) { |
4806 | if (netdev_is_reconf_required(port->netdev)) { | |
4807 | port->need_reconfigure = true; | |
4808 | } | |
4809 | } | |
4810 | ||
4811 | /* Remove from the pmd threads all the ports that have been deleted or | |
4812 | * need reconfiguration. */ | |
4813 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
4814 | pmd_remove_stale_ports(dp, pmd); | |
4815 | } | |
4816 | ||
4817 | /* Reload affected pmd threads. We must wait for the pmd threads before | |
4818 | * reconfiguring the ports, because a port cannot be reconfigured while | |
4819 | * it's being used. */ | |
4820 | reload_affected_pmds(dp); | |
4821 | ||
4822 | /* Step 3: Reconfigure ports. */ | |
4823 | ||
4824 | /* We only reconfigure the ports that we determined above, because they're | |
4825 | * not being used by any pmd thread at the moment. If a port fails to | |
4826 | * reconfigure we remove it from the datapath. */ | |
f582b6df BP |
4827 | struct dp_netdev_port *next_port; |
4828 | HMAP_FOR_EACH_SAFE (port, next_port, node, &dp->ports) { | |
dc36593c | 4829 | int err; |
6e3c6fa4 | 4830 | |
e32971b8 DDP |
4831 | if (!port->need_reconfigure) { |
4832 | continue; | |
4833 | } | |
4834 | ||
dc36593c DDP |
4835 | err = port_reconfigure(port); |
4836 | if (err) { | |
4837 | hmap_remove(&dp->ports, &port->node); | |
4838 | seq_change(dp->port_seq); | |
4839 | port_destroy(port); | |
324c8374 | 4840 | } else { |
e32971b8 | 4841 | port->dynamic_txqs = netdev_n_txq(port->netdev) < wanted_txqs; |
6e3c6fa4 DDP |
4842 | } |
4843 | } | |
e32971b8 DDP |
4844 | |
4845 | /* Step 4: Compute new rxq scheduling. We don't touch the pmd threads | |
4846 | * for now, we just update the 'pmd' pointer in each rxq to point to the | |
4847 | * wanted thread according to the scheduling policy. */ | |
4848 | ||
4849 | /* Reset all the pmd threads to non-isolated. */ |
4850 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
4851 | pmd->isolated = false; | |
4852 | } | |
4853 | ||
4854 | /* Reset all the queues to unassigned. */ |
4855 | HMAP_FOR_EACH (port, node, &dp->ports) { | |
4856 | for (int i = 0; i < port->n_rxq; i++) { | |
4857 | port->rxqs[i].pmd = NULL; | |
4858 | } | |
4859 | } | |
4860 | ||
4861 | /* Add pinned queues and mark pmd threads isolated. */ | |
4862 | rxq_scheduling(dp, true); | |
4863 | ||
4864 | /* Add non-pinned queues. */ | |
4865 | rxq_scheduling(dp, false); | |
4866 | ||
4867 | /* Step 5: Remove queues not compliant with new scheduling. */ | |
4868 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
4869 | struct rxq_poll *poll, *poll_next; | |
4870 | ||
4871 | ovs_mutex_lock(&pmd->port_mutex); | |
4872 | HMAP_FOR_EACH_SAFE (poll, poll_next, node, &pmd->poll_list) { | |
4873 | if (poll->rxq->pmd != pmd) { | |
4874 | dp_netdev_del_rxq_from_pmd(pmd, poll); | |
4875 | } | |
4876 | } | |
4877 | ovs_mutex_unlock(&pmd->port_mutex); | |
4878 | } | |
4879 | ||
4880 | /* Reload affected pmd threads. We must wait for the pmd threads to remove | |
4881 | * the old queues before re-adding them; otherwise a queue could be polled by |
4882 | * two threads at the same time. */ | |
4883 | reload_affected_pmds(dp); | |
4884 | ||
4885 | /* Step 6: Add queues from scheduling, if they're not there already. */ | |
4886 | HMAP_FOR_EACH (port, node, &dp->ports) { | |
4887 | if (!netdev_is_pmd(port->netdev)) { | |
4888 | continue; | |
4889 | } | |
4890 | ||
4891 | for (int qid = 0; qid < port->n_rxq; qid++) { | |
4892 | struct dp_netdev_rxq *q = &port->rxqs[qid]; | |
4893 | ||
4894 | if (q->pmd) { | |
4895 | ovs_mutex_lock(&q->pmd->port_mutex); | |
4896 | dp_netdev_add_rxq_to_pmd(q->pmd, q); | |
4897 | ovs_mutex_unlock(&q->pmd->port_mutex); | |
4898 | } | |
4899 | } | |
4900 | } | |
4901 | ||
4902 | /* Add every port to the tx cache of every pmd thread, if it's not | |
4903 | * there already and if this pmd has at least one rxq to poll. */ | |
4904 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
4905 | ovs_mutex_lock(&pmd->port_mutex); | |
4906 | if (hmap_count(&pmd->poll_list) || pmd->core_id == NON_PMD_CORE_ID) { | |
4907 | HMAP_FOR_EACH (port, node, &dp->ports) { | |
4908 | dp_netdev_add_port_tx_to_pmd(pmd, port); | |
4909 | } | |
4910 | } | |
4911 | ovs_mutex_unlock(&pmd->port_mutex); | |
4912 | } | |
4913 | ||
4914 | /* Reload affected pmd threads. */ | |
4915 | reload_affected_pmds(dp); | |
5bf84282 NK |
4916 | |
4917 | /* Check if PMD Auto LB is to be enabled */ | |
4918 | set_pmd_auto_lb(dp); | |
6e3c6fa4 DDP |
4919 | } |
4920 | ||
050c60bf DDP |
4921 | /* Returns true if one of the netdevs in 'dp' requires a reconfiguration */ |
4922 | static bool | |
4923 | ports_require_restart(const struct dp_netdev *dp) | |
4924 | OVS_REQUIRES(dp->port_mutex) | |
4925 | { | |
4926 | struct dp_netdev_port *port; | |
4927 | ||
4928 | HMAP_FOR_EACH (port, node, &dp->ports) { | |
4929 | if (netdev_is_reconf_required(port->netdev)) { | |
4930 | return true; | |
4931 | } | |
4932 | } | |
4933 | ||
4934 | return false; | |
4935 | } | |
4936 | ||
5bf84282 NK |
4937 | /* Calculates variance in the values stored in array 'a'. 'n' is the number |
4938 | * of elements in the array to be considered when calculating the variance. |
4939 | * Usage example: data array 'a' contains the processing load of each pmd and |
4940 | * 'n' is the number of PMDs. It returns the variance in the processing load |
4941 | * of the PMDs. */ |
4942 | static uint64_t | |
4943 | variance(uint64_t a[], int n) | |
4944 | { | |
4945 | /* Compute mean (average of elements). */ | |
4946 | uint64_t sum = 0; | |
4947 | uint64_t mean = 0; | |
4948 | uint64_t sqDiff = 0; | |
4949 | ||
4950 | if (!n) { | |
4951 | return 0; | |
4952 | } | |
4953 | ||
4954 | for (int i = 0; i < n; i++) { | |
4955 | sum += a[i]; | |
4956 | } | |
4957 | ||
4958 | if (sum) { | |
4959 | mean = sum / n; | |
4960 | ||
4961 | /* Compute sum squared differences with mean. */ | |
4962 | for (int i = 0; i < n; i++) { | |
4963 | sqDiff += (a[i] - mean)*(a[i] - mean); | |
4964 | } | |
4965 | } | |
4966 | return (sqDiff ? (sqDiff / n) : 0); | |
4967 | } | |
4968 | ||
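/* A short usage sketch for variance() above (loads hypothetical): pmd
 * usages of {90, 30, 60} percent have mean 60, so the population
 * variance is (30^2 + 30^2 + 0^2) / 3 = 600.  The unsigned wrap-around
 * in (a[i] - mean) is harmless because squaring restores the correct
 * value modulo 2^64, and integer division truncates any remainder. */
static inline uint64_t
example_variance(void)
{
    uint64_t usage[] = { 90, 30, 60 };

    return variance(usage, 3);  /* Yields 600. */
}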
4969 | ||
4970 | /* Performs a dry run of rxq assignment to the PMDs, storing the predicted |
4971 | * variance in PMD usage in '*predicted_variance'. Returns true on success. */ |
4972 | static bool | |
4973 | get_dry_run_variance(struct dp_netdev *dp, uint32_t *core_list, | |
4974 | uint32_t num_pmds, uint64_t *predicted_variance) | |
4975 | OVS_REQUIRES(dp->port_mutex) | |
4976 | { | |
4977 | struct dp_netdev_port *port; | |
4978 | struct dp_netdev_pmd_thread *pmd; | |
4979 | struct dp_netdev_rxq **rxqs = NULL; | |
4980 | struct rr_numa *numa = NULL; | |
4981 | struct rr_numa_list rr; | |
4982 | int n_rxqs = 0; | |
4983 | bool ret = false; | |
4984 | uint64_t *pmd_usage; | |
4985 | ||
4986 | if (!predicted_variance) { | |
4987 | return ret; | |
4988 | } | |
4989 | ||
4990 | pmd_usage = xcalloc(num_pmds, sizeof(uint64_t)); | |
4991 | ||
4992 | HMAP_FOR_EACH (port, node, &dp->ports) { | |
4993 | if (!netdev_is_pmd(port->netdev)) { | |
4994 | continue; | |
4995 | } | |
4996 | ||
4997 | for (int qid = 0; qid < port->n_rxq; qid++) { | |
4998 | struct dp_netdev_rxq *q = &port->rxqs[qid]; | |
4999 | uint64_t cycle_hist = 0; | |
5000 | ||
5001 | if (q->pmd->isolated) { | |
5002 | continue; | |
5003 | } | |
5004 | ||
5005 | if (n_rxqs == 0) { | |
5006 | rxqs = xmalloc(sizeof *rxqs); | |
5007 | } else { | |
5008 | rxqs = xrealloc(rxqs, sizeof *rxqs * (n_rxqs + 1)); | |
5009 | } | |
5010 | ||
5011 | /* Sum the queue intervals and store the cycle history. */ | |
5012 | for (unsigned i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) { | |
5013 | cycle_hist += dp_netdev_rxq_get_intrvl_cycles(q, i); | |
5014 | } | |
5015 | dp_netdev_rxq_set_cycles(q, RXQ_CYCLES_PROC_HIST, | |
5016 | cycle_hist); | |
5017 | /* Store the queue. */ | |
5018 | rxqs[n_rxqs++] = q; | |
5019 | } | |
5020 | } | |
5021 | if (n_rxqs > 1) { | |
5022 | /* Sort the queues in order of the processing cycles | |
5023 | * they consumed during their last pmd interval. */ | |
5024 | qsort(rxqs, n_rxqs, sizeof *rxqs, compare_rxq_cycles); | |
5025 | } | |
5026 | rr_numa_list_populate(dp, &rr); | |
5027 | ||
5028 | for (int i = 0; i < n_rxqs; i++) { | |
5029 | int numa_id = netdev_get_numa_id(rxqs[i]->port->netdev); | |
5030 | numa = rr_numa_list_lookup(&rr, numa_id); | |
5031 | if (!numa) { | |
5032 | /* Abort if cross NUMA polling. */ | |
5033 | VLOG_DBG("PMD auto lb dry run." | |
5034 | " Aborting due to cross-numa polling."); | |
5035 | goto cleanup; | |
5036 | } | |
5037 | ||
5038 | pmd = rr_numa_get_pmd(numa, true); | |
5039 | VLOG_DBG("PMD auto lb dry run. Predicted: Core %d on numa node %d " | |
5040 | "to be assigned port \'%s\' rx queue %d " | |
5041 | "(measured processing cycles %"PRIu64").", | |
5042 | pmd->core_id, numa_id, | |
5043 | netdev_rxq_get_name(rxqs[i]->rx), | |
5044 | netdev_rxq_get_queue_id(rxqs[i]->rx), | |
5045 | dp_netdev_rxq_get_cycles(rxqs[i], RXQ_CYCLES_PROC_HIST)); | |
5046 | ||
5047 | for (int id = 0; id < num_pmds; id++) { | |
5048 | if (pmd->core_id == core_list[id]) { | |
5049 | /* Add the processing cycles of rxq to pmd polling it. */ | |
5050 | pmd_usage[id] += dp_netdev_rxq_get_cycles(rxqs[i], | |
5051 | RXQ_CYCLES_PROC_HIST); | |
5052 | } | |
5053 | } | |
5054 | } | |
5055 | ||
5056 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
5057 | uint64_t total_cycles = 0; | |
5058 | ||
5059 | if ((pmd->core_id == NON_PMD_CORE_ID) || pmd->isolated) { | |
5060 | continue; | |
5061 | } | |
5062 | ||
5063 | /* Get the total pmd cycles for an interval. */ | |
5064 | atomic_read_relaxed(&pmd->intrvl_cycles, &total_cycles); | |
5065 | /* Estimate the cycles to cover all intervals. */ | |
5066 | total_cycles *= PMD_RXQ_INTERVAL_MAX; | |
5067 | for (int id = 0; id < num_pmds; id++) { | |
5068 | if (pmd->core_id == core_list[id]) { | |
5069 | if (pmd_usage[id]) { | |
5070 | pmd_usage[id] = (pmd_usage[id] * 100) / total_cycles; | |
5071 | } | |
5072 | VLOG_DBG("PMD auto lb dry run. Predicted: Core %d, " | |
5073 | "usage %"PRIu64"", pmd->core_id, pmd_usage[id]); | |
5074 | } | |
5075 | } | |
5076 | } | |
5077 | *predicted_variance = variance(pmd_usage, num_pmds); | |
5078 | ret = true; | |
5079 | ||
5080 | cleanup: | |
5081 | rr_numa_list_destroy(&rr); | |
5082 | free(rxqs); | |
5083 | free(pmd_usage); | |
5084 | return ret; | |
5085 | } | |
5086 | ||
5087 | /* Does the dry run of Rxq assignment to PMDs and returns true if it gives | |
5088 | * better distribution of load on PMDs. */ | |
5089 | static bool | |
5090 | pmd_rebalance_dry_run(struct dp_netdev *dp) | |
5091 | OVS_REQUIRES(dp->port_mutex) | |
5092 | { | |
5093 | struct dp_netdev_pmd_thread *pmd; | |
5094 | uint64_t *curr_pmd_usage; | |
5095 | ||
5096 | uint64_t curr_variance; | |
5097 | uint64_t new_variance; | |
5098 | uint64_t improvement = 0; | |
5099 | uint32_t num_pmds; | |
5100 | uint32_t *pmd_corelist; | |
5101 | struct rxq_poll *poll, *poll_next; | |
5102 | bool ret; | |
5103 | ||
5104 | num_pmds = cmap_count(&dp->poll_threads); | |
5105 | ||
5106 | if (num_pmds > 1) { | |
5107 | curr_pmd_usage = xcalloc(num_pmds, sizeof(uint64_t)); | |
5108 | pmd_corelist = xcalloc(num_pmds, sizeof(uint32_t)); | |
5109 | } else { | |
5110 | return false; | |
5111 | } | |
5112 | ||
5113 | num_pmds = 0; | |
5114 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
5115 | uint64_t total_cycles = 0; | |
5116 | uint64_t total_proc = 0; | |
5117 | ||
5118 | if ((pmd->core_id == NON_PMD_CORE_ID) || pmd->isolated) { | |
5119 | continue; | |
5120 | } | |
5121 | ||
5122 | /* Get the total pmd cycles for an interval. */ | |
5123 | atomic_read_relaxed(&pmd->intrvl_cycles, &total_cycles); | |
5124 | /* Estimate the cycles to cover all intervals. */ | |
5125 | total_cycles *= PMD_RXQ_INTERVAL_MAX; | |
5126 | ||
5127 | HMAP_FOR_EACH_SAFE (poll, poll_next, node, &pmd->poll_list) { | |
5128 | uint64_t proc_cycles = 0; | |
5129 | for (unsigned i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) { | |
5130 | proc_cycles += dp_netdev_rxq_get_intrvl_cycles(poll->rxq, i); | |
5131 | } | |
5132 | total_proc += proc_cycles; | |
5133 | } | |
5134 | if (total_proc) { | |
5135 | curr_pmd_usage[num_pmds] = (total_proc * 100) / total_cycles; | |
5136 | } | |
5137 | ||
5138 | VLOG_DBG("PMD auto lb dry run. Current: Core %d, usage %"PRIu64"", | |
5139 | pmd->core_id, curr_pmd_usage[num_pmds]); | |
5140 | ||
5141 | if (atomic_count_get(&pmd->pmd_overloaded)) { | |
5142 | atomic_count_set(&pmd->pmd_overloaded, 0); | |
5143 | } | |
5144 | ||
5145 | pmd_corelist[num_pmds] = pmd->core_id; | |
5146 | num_pmds++; | |
5147 | } | |
5148 | ||
5149 | curr_variance = variance(curr_pmd_usage, num_pmds); | |
5150 | ret = get_dry_run_variance(dp, pmd_corelist, num_pmds, &new_variance); | |
5151 | ||
5152 | if (ret) { | |
5153 | VLOG_DBG("PMD auto lb dry run. Current PMD variance: %"PRIu64"," | |
5154 | " Predicted PMD variance: %"PRIu64"", | |
5155 | curr_variance, new_variance); | |
5156 | ||
5157 | if (new_variance < curr_variance) { | |
5158 | improvement = | |
5159 | ((curr_variance - new_variance) * 100) / curr_variance; | |
5160 | } | |
5161 | if (improvement < ALB_ACCEPTABLE_IMPROVEMENT) { | |
5162 | ret = false; | |
5163 | } | |
5164 | } | |
5165 | ||
5166 | free(curr_pmd_usage); | |
5167 | free(pmd_corelist); | |
5168 | return ret; | |
5169 | } | |
5170 | ||
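/* Worked numbers for the acceptance test above (variances hypothetical):
 * a current variance of 400 against a predicted variance of 100 scores
 * an improvement of ((400 - 100) * 100) / 400 = 75, which clears the
 * ALB_ACCEPTABLE_IMPROVEMENT threshold of 25, so the dry run recommends
 * rebalancing.  A prediction of 350 would score only 12 and be
 * rejected. */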
5171 | ||
a36de779 PS |
5172 | /* Return true if needs to revalidate datapath flows. */ |
5173 | static bool | |
e4cfed38 PS |
5174 | dpif_netdev_run(struct dpif *dpif) |
5175 | { | |
5176 | struct dp_netdev_port *port; | |
5177 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
546e57d4 | 5178 | struct dp_netdev_pmd_thread *non_pmd; |
a36de779 | 5179 | uint64_t new_tnl_seq; |
c71ea3c4 | 5180 | bool need_to_flush = true; |
5bf84282 NK |
5181 | bool pmd_rebalance = false; |
5182 | long long int now = time_msec(); | |
5183 | struct dp_netdev_pmd_thread *pmd; | |
e4cfed38 | 5184 | |
e9985d6a | 5185 | ovs_mutex_lock(&dp->port_mutex); |
546e57d4 DDP |
5186 | non_pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID); |
5187 | if (non_pmd) { | |
5188 | ovs_mutex_lock(&dp->non_pmd_mutex); | |
5189 | HMAP_FOR_EACH (port, node, &dp->ports) { | |
5190 | if (!netdev_is_pmd(port->netdev)) { | |
5191 | int i; | |
55c955bd | 5192 | |
2fbadeb6 IM |
5193 | if (port->emc_enabled) { |
5194 | atomic_read_relaxed(&dp->emc_insert_min, | |
5195 | &non_pmd->ctx.emc_insert_min); | |
5196 | } else { | |
5197 | non_pmd->ctx.emc_insert_min = 0; | |
5198 | } | |
5199 | ||
546e57d4 | 5200 | for (i = 0; i < port->n_rxq; i++) { |
c71ea3c4 IM |
5201 | if (dp_netdev_process_rxq_port(non_pmd, |
5202 | &port->rxqs[i], | |
5203 | port->port_no)) { | |
5204 | need_to_flush = false; | |
5205 | } | |
546e57d4 | 5206 | } |
55c955bd | 5207 | } |
e4cfed38 | 5208 | } |
c71ea3c4 IM |
5209 | if (need_to_flush) { |
5210 | /* We didn't receive anything in the process loop. | |
5211 | * Check if we need to send something. | |
5212 | * There was no time updates on current iteration. */ | |
5213 | pmd_thread_ctx_time_update(non_pmd); | |
5214 | dp_netdev_pmd_flush_output_packets(non_pmd, false); | |
5215 | } | |
5216 | ||
b010be17 | 5217 | dpif_netdev_xps_revalidate_pmd(non_pmd, false); |
546e57d4 | 5218 | ovs_mutex_unlock(&dp->non_pmd_mutex); |
6e3c6fa4 | 5219 | |
546e57d4 DDP |
5220 | dp_netdev_pmd_unref(non_pmd); |
5221 | } | |
1c1e46ed | 5222 | |
5bf84282 NK |
5223 | struct pmd_auto_lb *pmd_alb = &dp->pmd_alb; |
5224 | if (pmd_alb->is_enabled) { | |
5225 | if (!pmd_alb->rebalance_poll_timer) { | |
5226 | pmd_alb->rebalance_poll_timer = now; | |
5227 | } else if ((pmd_alb->rebalance_poll_timer + | |
5228 | pmd_alb->rebalance_intvl) < now) { | |
5229 | pmd_alb->rebalance_poll_timer = now; | |
5230 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
5231 | if (atomic_count_get(&pmd->pmd_overloaded) >= | |
5232 | PMD_RXQ_INTERVAL_MAX) { | |
5233 | pmd_rebalance = true; | |
5234 | break; | |
5235 | } | |
5236 | } | |
5237 | ||
5238 | if (pmd_rebalance && | |
5239 | !dp_netdev_is_reconf_required(dp) && | |
5240 | !ports_require_restart(dp) && | |
5241 | pmd_rebalance_dry_run(dp)) { | |
5242 | VLOG_INFO("PMD auto lb dry run." | |
5243 | " requesting datapath reconfigure."); | |
5244 | dp_netdev_request_reconfigure(dp); | |
5245 | } | |
5246 | } | |
5247 | } | |
5248 | ||
a6a426d6 | 5249 | if (dp_netdev_is_reconf_required(dp) || ports_require_restart(dp)) { |
e32971b8 | 5250 | reconfigure_datapath(dp); |
6e3c6fa4 DDP |
5251 | } |
5252 | ovs_mutex_unlock(&dp->port_mutex); | |
5253 | ||
53902038 | 5254 | tnl_neigh_cache_run(); |
7f9b8504 | 5255 | tnl_port_map_run(); |
a36de779 PS |
5256 | new_tnl_seq = seq_read(tnl_conf_seq); |
5257 | ||
5258 | if (dp->last_tnl_conf_seq != new_tnl_seq) { | |
5259 | dp->last_tnl_conf_seq = new_tnl_seq; | |
5260 | return true; | |
5261 | } | |
5262 | return false; | |
e4cfed38 PS |
5263 | } |
5264 | ||
5265 | static void | |
5266 | dpif_netdev_wait(struct dpif *dpif) | |
5267 | { | |
5268 | struct dp_netdev_port *port; | |
5269 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
5270 | ||
59e6d833 | 5271 | ovs_mutex_lock(&dp_netdev_mutex); |
e9985d6a DDP |
5272 | ovs_mutex_lock(&dp->port_mutex); |
5273 | HMAP_FOR_EACH (port, node, &dp->ports) { | |
050c60bf | 5274 | netdev_wait_reconf_required(port->netdev); |
55c955bd PS |
5275 | if (!netdev_is_pmd(port->netdev)) { |
5276 | int i; | |
5277 | ||
490e82af | 5278 | for (i = 0; i < port->n_rxq; i++) { |
947dc567 | 5279 | netdev_rxq_wait(port->rxqs[i].rx); |
55c955bd | 5280 | } |
e4cfed38 PS |
5281 | } |
5282 | } | |
e9985d6a | 5283 | ovs_mutex_unlock(&dp->port_mutex); |
59e6d833 | 5284 | ovs_mutex_unlock(&dp_netdev_mutex); |
a36de779 | 5285 | seq_wait(tnl_conf_seq, dp->last_tnl_conf_seq); |
e4cfed38 PS |
5286 | } |
5287 | ||
d0cca6c3 DDP |
5288 | static void |
5289 | pmd_free_cached_ports(struct dp_netdev_pmd_thread *pmd) | |
5290 | { | |
5291 | struct tx_port *tx_port_cached; | |
5292 | ||
c71ea3c4 IM |
5293 | /* Flush all the queued packets. */ |
5294 | dp_netdev_pmd_flush_output_packets(pmd, true); | |
324c8374 | 5295 | /* Free all used tx queue ids. */ |
b010be17 | 5296 | dpif_netdev_xps_revalidate_pmd(pmd, true); |
324c8374 | 5297 | |
57eebbb4 DDP |
5298 | HMAP_FOR_EACH_POP (tx_port_cached, node, &pmd->tnl_port_cache) { |
5299 | free(tx_port_cached); | |
5300 | } | |
5301 | HMAP_FOR_EACH_POP (tx_port_cached, node, &pmd->send_port_cache) { | |
d0cca6c3 DDP |
5302 | free(tx_port_cached); |
5303 | } | |
5304 | } | |
5305 | ||
5306 | /* Copies ports from 'pmd->tx_ports' (shared with the main thread) to | |
899363ed BB |
5307 | * thread-local copies. A port is copied to 'pmd->tnl_port_cache' if it is |
5308 | * a tunnel device and to 'pmd->send_port_cache' if it has at least one |
5309 | * txq; a tunnel port with txqs lands in both caches. */ |
d0cca6c3 DDP |
5310 | static void |
5311 | pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd) | |
5312 | OVS_REQUIRES(pmd->port_mutex) | |
5313 | { | |
5314 | struct tx_port *tx_port, *tx_port_cached; | |
5315 | ||
5316 | pmd_free_cached_ports(pmd); | |
57eebbb4 DDP |
5317 | hmap_shrink(&pmd->send_port_cache); |
5318 | hmap_shrink(&pmd->tnl_port_cache); | |
d0cca6c3 DDP |
5319 | |
5320 | HMAP_FOR_EACH (tx_port, node, &pmd->tx_ports) { | |
57eebbb4 DDP |
5321 | if (netdev_has_tunnel_push_pop(tx_port->port->netdev)) { |
5322 | tx_port_cached = xmemdup(tx_port, sizeof *tx_port_cached); | |
5323 | hmap_insert(&pmd->tnl_port_cache, &tx_port_cached->node, | |
5324 | hash_port_no(tx_port_cached->port->port_no)); | |
5325 | } | |
5326 | ||
5327 | if (netdev_n_txq(tx_port->port->netdev)) { | |
5328 | tx_port_cached = xmemdup(tx_port, sizeof *tx_port_cached); | |
5329 | hmap_insert(&pmd->send_port_cache, &tx_port_cached->node, | |
5330 | hash_port_no(tx_port_cached->port->port_no)); | |
5331 | } | |
d0cca6c3 DDP |
5332 | } |
5333 | } | |
5334 | ||
140dd699 IM |
5335 | static void |
5336 | pmd_alloc_static_tx_qid(struct dp_netdev_pmd_thread *pmd) | |
5337 | { | |
5338 | ovs_mutex_lock(&pmd->dp->tx_qid_pool_mutex); | |
5339 | if (!id_pool_alloc_id(pmd->dp->tx_qid_pool, &pmd->static_tx_qid)) { | |
5340 | VLOG_ABORT("static_tx_qid allocation failed for PMD on core %2d" | |
5341 | ", numa_id %d.", pmd->core_id, pmd->numa_id); | |
5342 | } | |
5343 | ovs_mutex_unlock(&pmd->dp->tx_qid_pool_mutex); | |
5344 | ||
5345 | VLOG_DBG("static_tx_qid = %d allocated for PMD thread on core %2d" | |
5346 | ", numa_id %d.", pmd->static_tx_qid, pmd->core_id, pmd->numa_id); | |
5347 | } | |
5348 | ||
5349 | static void | |
5350 | pmd_free_static_tx_qid(struct dp_netdev_pmd_thread *pmd) | |
5351 | { | |
5352 | ovs_mutex_lock(&pmd->dp->tx_qid_pool_mutex); | |
5353 | id_pool_free_id(pmd->dp->tx_qid_pool, pmd->static_tx_qid); | |
5354 | ovs_mutex_unlock(&pmd->dp->tx_qid_pool_mutex); | |
5355 | } | |
5356 | ||
e4cfed38 | 5357 | static int |
d0cca6c3 | 5358 | pmd_load_queues_and_ports(struct dp_netdev_pmd_thread *pmd, |
947dc567 | 5359 | struct polled_queue **ppoll_list) |
e4cfed38 | 5360 | { |
947dc567 | 5361 | struct polled_queue *poll_list = *ppoll_list; |
ae7ad0a1 IM |
5362 | struct rxq_poll *poll; |
5363 | int i; | |
e4cfed38 | 5364 | |
d0cca6c3 | 5365 | ovs_mutex_lock(&pmd->port_mutex); |
947dc567 DDP |
5366 | poll_list = xrealloc(poll_list, hmap_count(&pmd->poll_list) |
5367 | * sizeof *poll_list); | |
a1fdee13 | 5368 | |
ae7ad0a1 | 5369 | i = 0; |
947dc567 | 5370 | HMAP_FOR_EACH (poll, node, &pmd->poll_list) { |
922b28d4 | 5371 | poll_list[i].rxq = poll->rxq; |
947dc567 | 5372 | poll_list[i].port_no = poll->rxq->port->port_no; |
2fbadeb6 | 5373 | poll_list[i].emc_enabled = poll->rxq->port->emc_enabled; |
947dc567 | 5374 | i++; |
e4cfed38 | 5375 | } |
d0cca6c3 DDP |
5376 | |
5377 | pmd_load_cached_ports(pmd); | |
5378 | ||
5379 | ovs_mutex_unlock(&pmd->port_mutex); | |
e4cfed38 | 5380 | |
e4cfed38 | 5381 | *ppoll_list = poll_list; |
d42f9307 | 5382 | return i; |
e4cfed38 PS |
5383 | } |
5384 | ||
6c3eee82 | 5385 | static void * |
e4cfed38 | 5386 | pmd_thread_main(void *f_) |
6c3eee82 | 5387 | { |
65f13b50 | 5388 | struct dp_netdev_pmd_thread *pmd = f_; |
82a48ead | 5389 | struct pmd_perf_stats *s = &pmd->perf_stats; |
e4cfed38 | 5390 | unsigned int lc = 0; |
947dc567 | 5391 | struct polled_queue *poll_list; |
d42f9307 | 5392 | bool exiting; |
e4cfed38 PS |
5393 | int poll_cnt; |
5394 | int i; | |
a2ac666d | 5395 | int process_packets = 0; |
6c3eee82 | 5396 | |
e4cfed38 PS |
5397 | poll_list = NULL; |
5398 | ||
65f13b50 AW |
5399 | /* Stores the pmd thread's 'pmd' to 'per_pmd_key'. */ |
5400 | ovsthread_setspecific(pmd->dp->per_pmd_key, pmd); | |
6930c7e0 DDP |
5401 | ovs_numa_thread_setaffinity_core(pmd->core_id); |
5402 | dpdk_set_lcore_id(pmd->core_id); | |
d0cca6c3 | 5403 | poll_cnt = pmd_load_queues_and_ports(pmd, &poll_list); |
60d8ccae | 5404 | dfc_cache_init(&pmd->flow_cache); |
e4cfed38 | 5405 | reload: |
140dd699 | 5406 | pmd_alloc_static_tx_qid(pmd); |
ae7ad0a1 | 5407 | |
5bf84282 NK |
5408 | atomic_count_init(&pmd->pmd_overloaded, 0); |
5409 | ||
7dd671f0 MK |
5410 | /* List port/core affinity */ |
5411 | for (i = 0; i < poll_cnt; i++) { | |
ce179f11 | 5412 | VLOG_DBG("Core %d processing port \'%s\' with queue-id %d\n", |
922b28d4 KT |
5413 | pmd->core_id, netdev_rxq_get_name(poll_list[i].rxq->rx), |
5414 | netdev_rxq_get_queue_id(poll_list[i].rxq->rx)); | |
4f5d13e2 KT |
5415 | /* Reset the rxq current cycles counter. */ |
5416 | dp_netdev_rxq_set_cycles(poll_list[i].rxq, RXQ_CYCLES_PROC_CURR, 0); | |
7dd671f0 MK |
5417 | } |
5418 | ||
2788a1b1 DDP |
5419 | if (!poll_cnt) { |
5420 | while (seq_read(pmd->reload_seq) == pmd->last_reload_seq) { | |
5421 | seq_wait(pmd->reload_seq, pmd->last_reload_seq); | |
5422 | poll_block(); | |
5423 | } | |
5424 | lc = UINT_MAX; | |
5425 | } | |
5426 | ||
2a2c67b4 KT |
5427 | pmd->intrvl_tsc_prev = 0; |
5428 | atomic_store_relaxed(&pmd->intrvl_cycles, 0); | |
a19896ab | 5429 | cycles_counter_update(s); |
79f36875 JS |
5430 | /* Protect pmd stats from external clearing while polling. */ |
5431 | ovs_mutex_lock(&pmd->perf_stats.stats_mutex); | |
e4cfed38 | 5432 | for (;;) { |
79f36875 | 5433 | uint64_t rx_packets = 0, tx_packets = 0; |
c71ea3c4 | 5434 | |
a19896ab | 5435 | pmd_perf_start_iteration(s); |
79f36875 | 5436 | |
e4cfed38 | 5437 | for (i = 0; i < poll_cnt; i++) { |
2fbadeb6 IM |
5438 | |
5439 | if (poll_list[i].emc_enabled) { | |
5440 | atomic_read_relaxed(&pmd->dp->emc_insert_min, | |
5441 | &pmd->ctx.emc_insert_min); | |
5442 | } else { | |
5443 | pmd->ctx.emc_insert_min = 0; | |
5444 | } | |
5445 | ||
a2ac666d | 5446 | process_packets = |
a19896ab | 5447 | dp_netdev_process_rxq_port(pmd, poll_list[i].rxq, |
a2ac666d | 5448 | poll_list[i].port_no); |
79f36875 | 5449 | rx_packets += process_packets; |
e4cfed38 PS |
5450 | } |
5451 | ||
79f36875 | 5452 | if (!rx_packets) { |
c71ea3c4 IM |
5453 | /* We didn't receive anything in the process loop. |
5454 | * Check if we need to send something. | |
5455 | * There was no time updates on current iteration. */ | |
5456 | pmd_thread_ctx_time_update(pmd); | |
79f36875 | 5457 | tx_packets = dp_netdev_pmd_flush_output_packets(pmd, false); |
c71ea3c4 IM |
5458 | } |
5459 | ||
e4cfed38 | 5460 | if (lc++ > 1024) { |
14e3e12a | 5461 | bool reload; |
6c3eee82 | 5462 | |
e4cfed38 | 5463 | lc = 0; |
84067a4c | 5464 | |
fbe0962b | 5465 | coverage_try_clear(); |
4809891b | 5466 | dp_netdev_pmd_try_optimize(pmd, poll_list, poll_cnt); |
9dede5cf | 5467 | if (!ovsrcu_try_quiesce()) { |
60d8ccae | 5468 | emc_cache_slow_sweep(&((pmd->flow_cache).emc_cache)); |
9dede5cf | 5469 | } |
84067a4c | 5470 | |
14e3e12a DDP |
5471 | atomic_read_relaxed(&pmd->reload, &reload); |
5472 | if (reload) { | |
6c3eee82 BP |
5473 | break; |
5474 | } | |
5475 | } | |
79f36875 JS |
5476 | pmd_perf_end_iteration(s, rx_packets, tx_packets, |
5477 | pmd_perf_metrics_enabled(pmd)); | |
e4cfed38 | 5478 | } |
79f36875 | 5479 | ovs_mutex_unlock(&pmd->perf_stats.stats_mutex); |
6c3eee82 | 5480 | |
d0cca6c3 | 5481 | poll_cnt = pmd_load_queues_and_ports(pmd, &poll_list); |
d42f9307 DDP |
5482 | exiting = latch_is_set(&pmd->exit_latch); |
5483 | /* Signal here to make sure the pmd finishes | |
5484 | * reloading the updated configuration. */ | |
5485 | dp_netdev_pmd_reload_done(pmd); | |
5486 | ||
140dd699 | 5487 | pmd_free_static_tx_qid(pmd); |
9bbf1c3d | 5488 | |
d42f9307 | 5489 | if (!exiting) { |
e4cfed38 PS |
5490 | goto reload; |
5491 | } | |
6c3eee82 | 5492 | |
60d8ccae | 5493 | dfc_cache_uninit(&pmd->flow_cache); |
e4cfed38 | 5494 | free(poll_list); |
d0cca6c3 | 5495 | pmd_free_cached_ports(pmd); |
6c3eee82 BP |
5496 | return NULL; |
5497 | } | |
5498 | ||
6b31e073 RW |
5499 | static void |
5500 | dp_netdev_disable_upcall(struct dp_netdev *dp) | |
5501 | OVS_ACQUIRES(dp->upcall_rwlock) | |
5502 | { | |
5503 | fat_rwlock_wrlock(&dp->upcall_rwlock); | |
5504 | } | |
5505 | ||
5dddf960 JR |
5506 | \f |
5507 | /* Meters */ | |
5508 | static void | |
5509 | dpif_netdev_meter_get_features(const struct dpif * dpif OVS_UNUSED, | |
5510 | struct ofputil_meter_features *features) | |
5511 | { | |
4b27db64 JR |
5512 | features->max_meters = MAX_METERS; |
5513 | features->band_types = DP_SUPPORTED_METER_BAND_TYPES; | |
5514 | features->capabilities = DP_SUPPORTED_METER_FLAGS_MASK; | |
5515 | features->max_bands = MAX_BANDS; | |
5dddf960 JR |
5516 | features->max_color = 0; |
5517 | } | |
5518 | ||
425a7b9e JP |
5519 | /* Applies the meter identified by 'meter_id' to 'packets_'. Packets |
5520 | * that exceed a band are dropped in-place. */ | |
4b27db64 JR |
5521 | static void |
5522 | dp_netdev_run_meter(struct dp_netdev *dp, struct dp_packet_batch *packets_, | |
5523 | uint32_t meter_id, long long int now) | |
5524 | { | |
5525 | struct dp_meter *meter; | |
5526 | struct dp_meter_band *band; | |
79c81260 | 5527 | struct dp_packet *packet; |
4b27db64 JR |
5528 | long long int long_delta_t; /* msec */ |
5529 | uint32_t delta_t; /* msec */ | |
79c81260 | 5530 | const size_t cnt = dp_packet_batch_size(packets_); |
4b27db64 JR |
5531 | uint32_t bytes, volume; |
5532 | int exceeded_band[NETDEV_MAX_BURST]; | |
5533 | uint32_t exceeded_rate[NETDEV_MAX_BURST]; | |
5534 | int exceeded_pkt = cnt; /* First packet that exceeded a band rate. */ | |
5535 | ||
5536 | if (meter_id >= MAX_METERS) { | |
5537 | return; | |
5538 | } | |
5539 | ||
5540 | meter_lock(dp, meter_id); | |
5541 | meter = dp->meters[meter_id]; | |
5542 | if (!meter) { | |
5543 | goto out; | |
5544 | } | |
5545 | ||
5546 | /* Initialize as negative values. */ | |
5547 | memset(exceeded_band, 0xff, cnt * sizeof *exceeded_band); | |
5548 | /* Initialize as zeroes. */ | |
5549 | memset(exceeded_rate, 0, cnt * sizeof *exceeded_rate); | |
5550 | ||
5551 | /* All packets will hit the meter at the same time. */ | |
05f9e707 | 5552 | long_delta_t = (now - meter->used) / 1000; /* msec */ |
4b27db64 JR |
5553 | |
5554 | /* Make sure delta_t will not be too large, so that bucket will not | |
5555 | * wrap around below. */ | |
5556 | delta_t = (long_delta_t > (long long int)meter->max_delta_t) | |
5557 | ? meter->max_delta_t : (uint32_t)long_delta_t; | |
5558 | ||
5559 | /* Update meter stats. */ | |
5560 | meter->used = now; | |
5561 | meter->packet_count += cnt; | |
5562 | bytes = 0; | |
e883448e | 5563 | DP_PACKET_BATCH_FOR_EACH (i, packet, packets_) { |
79c81260 | 5564 | bytes += dp_packet_size(packet); |
4b27db64 JR |
5565 | } |
5566 | meter->byte_count += bytes; | |
5567 | ||
5568 | /* Meters can operate in terms of packets per second or kilobits per | |
5569 | * second. */ | |
5570 | if (meter->flags & OFPMF13_PKTPS) { | |
5571 | /* Rate in packets/second, bucket 1/1000 packets. */ | |
5572 | /* msec * packets/sec = 1/1000 packets. */ | |
5573 | volume = cnt * 1000; /* Take 'cnt' packets from the bucket. */ | |
5574 | } else { | |
5575 | /* Rate in kbps, bucket in bits. */ | |
5576 | /* msec * kbps = bits */ | |
5577 | volume = bytes * 8; | |
5578 | } | |
5579 | ||
5580 | /* Update all bands and find the one hit with the highest rate for each | |
5581 | * packet (if any). */ | |
5582 | for (int m = 0; m < meter->n_bands; ++m) { | |
5583 | band = &meter->bands[m]; | |
5584 | ||
5585 | /* Update band's bucket. */ | |
5586 | band->bucket += delta_t * band->up.rate; | |
5587 | if (band->bucket > band->up.burst_size) { | |
5588 | band->bucket = band->up.burst_size; | |
5589 | } | |
5590 | ||
5591 | /* Drain the bucket for all the packets, if possible. */ | |
5592 | if (band->bucket >= volume) { | |
5593 | band->bucket -= volume; | |
5594 | } else { | |
5595 | int band_exceeded_pkt; | |
5596 | ||
5597 | /* Band limit hit, must process packet-by-packet. */ | |
5598 | if (meter->flags & OFPMF13_PKTPS) { | |
5599 | band_exceeded_pkt = band->bucket / 1000; | |
5600 | band->bucket %= 1000; /* Remainder stays in bucket. */ | |
5601 | ||
5602 | /* Update the exceeding band for each exceeding packet. | |
5603 | * (Only one band will be fired by a packet, and that | |
5604 | * can be different for each packet.) */ | |
e883448e | 5605 | for (int i = band_exceeded_pkt; i < cnt; i++) { |
4b27db64 JR |
5606 | if (band->up.rate > exceeded_rate[i]) { |
5607 | exceeded_rate[i] = band->up.rate; | |
5608 | exceeded_band[i] = m; | |
5609 | } | |
5610 | } | |
5611 | } else { | |
5612 | /* Packet sizes differ, must process one-by-one. */ | |
5613 | band_exceeded_pkt = cnt; | |
e883448e | 5614 | DP_PACKET_BATCH_FOR_EACH (i, packet, packets_) { |
79c81260 | 5615 | uint32_t bits = dp_packet_size(packet) * 8; |
4b27db64 JR |
5616 | |
5617 | if (band->bucket >= bits) { | |
5618 | band->bucket -= bits; | |
5619 | } else { | |
5620 | if (i < band_exceeded_pkt) { | |
5621 | band_exceeded_pkt = i; | |
5622 | } | |
5623 | /* Update the exceeding band for the exceeding packet. | |
5624 | * (Only one band will be fired by a packet, and that | |
5625 | * can be different for each packet.) */ | |
5626 | if (band->up.rate > exceeded_rate[i]) { | |
5627 | exceeded_rate[i] = band->up.rate; | |
5628 | exceeded_band[i] = m; | |
5629 | } | |
5630 | } | |
5631 | } | |
5632 | } | |
5633 | /* Remember the first exceeding packet. */ | |
5634 | if (exceeded_pkt > band_exceeded_pkt) { | |
5635 | exceeded_pkt = band_exceeded_pkt; | |
5636 | } | |
5637 | } | |
5638 | } | |
5639 | ||
425a7b9e JP |
5640 | /* Fire the highest rate band exceeded by each packet, and drop |
5641 | * packets if needed. */ | |
4b27db64 | 5642 | size_t j; |
79c81260 | 5643 | DP_PACKET_BATCH_REFILL_FOR_EACH (j, cnt, packet, packets_) { |
4b27db64 JR |
5644 | if (exceeded_band[j] >= 0) { |
5645 | /* Meter drop packet. */ | |
5646 | band = &meter->bands[exceeded_band[j]]; | |
5647 | band->packet_count += 1; | |
5648 | band->byte_count += dp_packet_size(packet); | |
5649 | ||
5650 | dp_packet_delete(packet); | |
5651 | } else { | |
5652 | /* Meter accepts packet. */ | |
5653 | dp_packet_batch_refill(packets_, packet, j); | |
5654 | } | |
5655 | } | |
5656 | out: | |
5657 | meter_unlock(dp, meter_id); | |
5658 | } | |
5659 | ||
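/* Worked numbers for the bucket update in dp_netdev_run_meter() above
 * (band values hypothetical): a kbps band with rate 1000 and a burst of
 * 64 kbit (stored as 64000 bits at meter-set time) refills
 * delta_t * rate = 10 * 1000 = 10000 bits after a 10 msec gap, capped
 * at 64000.  A batch of two 1500-byte packets needs
 * volume = 2 * 1500 * 8 = 24000 bits; a full bucket admits both and
 * drops to 40000 bits, while a bucket below 24000 bits forces the
 * slower packet-by-packet pass. */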
5660 | /* Meter set/get/del processing is still single-threaded. */ | |
5dddf960 | 5661 | static int |
8101f03f | 5662 | dpif_netdev_meter_set(struct dpif *dpif, ofproto_meter_id meter_id, |
4b27db64 | 5663 | struct ofputil_meter_config *config) |
5dddf960 | 5664 | { |
4b27db64 | 5665 | struct dp_netdev *dp = get_dp_netdev(dpif); |
8101f03f | 5666 | uint32_t mid = meter_id.uint32; |
4b27db64 JR |
5667 | struct dp_meter *meter; |
5668 | int i; | |
5669 | ||
4b27db64 JR |
5670 | if (mid >= MAX_METERS) { |
5671 | return EFBIG; /* Meter_id out of range. */ | |
5672 | } | |
5673 | ||
6508c845 | 5674 | if (config->flags & ~DP_SUPPORTED_METER_FLAGS_MASK) { |
4b27db64 JR |
5675 | return EBADF; /* Unsupported flags set */ |
5676 | } | |
2029ce9a | 5677 | |
6508c845 JP |
5678 | if (config->n_bands > MAX_BANDS) { |
5679 | return EINVAL; | |
2029ce9a AVA |
5680 | } |
5681 | ||
4b27db64 JR |
5682 | for (i = 0; i < config->n_bands; ++i) { |
5683 | switch (config->bands[i].type) { | |
5684 | case OFPMBT13_DROP: | |
5685 | break; | |
5686 | default: | |
5687 | return ENODEV; /* Unsupported band type */ | |
5688 | } | |
5689 | } | |
5690 | ||
5691 | /* Allocate meter */ | |
5692 | meter = xzalloc(sizeof *meter | |
5693 | + config->n_bands * sizeof(struct dp_meter_band)); | |
4b27db64 | 5694 | |
d0db81ea JP |
5695 | meter->flags = config->flags; |
5696 | meter->n_bands = config->n_bands; | |
5697 | meter->max_delta_t = 0; | |
5698 | meter->used = time_usec(); | |
4b27db64 | 5699 | |
d0db81ea JP |
5700 | /* Set up the bands. */ |
5701 | for (i = 0; i < config->n_bands; ++i) { | |
5702 | uint32_t band_max_delta_t; | |
4b27db64 | 5703 | |
d0db81ea JP |
5704 | /* Set burst size to a workable value if none specified. */ |
5705 | if (config->bands[i].burst_size == 0) { | |
5706 | config->bands[i].burst_size = config->bands[i].rate; | |
5707 | } | |
5708 | ||
5709 | meter->bands[i].up = config->bands[i]; | |
5710 | /* Convert burst size to the bucket units: */ | |
5711 | /* pkts => 1/1000 packets, kilobits => bits. */ | |
5712 | meter->bands[i].up.burst_size *= 1000; | |
5713 | /* Initialize bucket to empty. */ | |
5714 | meter->bands[i].bucket = 0; | |
5715 | ||
5716 | /* Figure out max delta_t that is enough to fill any bucket. */ | |
5717 | band_max_delta_t | |
5718 | = meter->bands[i].up.burst_size / meter->bands[i].up.rate; | |
5719 | if (band_max_delta_t > meter->max_delta_t) { | |
5720 | meter->max_delta_t = band_max_delta_t; | |
5721 | } | |
4b27db64 | 5722 | } |
d0db81ea JP |
5723 | |
5724 | meter_lock(dp, mid); | |
5725 | dp_delete_meter(dp, mid); /* Free existing meter, if any */ | |
5726 | dp->meters[mid] = meter; | |
5727 | meter_unlock(dp, mid); | |
5728 | ||
5729 | return 0; | |
5dddf960 JR |
5730 | } |
5731 | ||
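Scaling burst_size by 1000 on both paths is what allows a single refill expression to serve the two meter modes: a delta of t milliseconds times a rate in packets/sec yields 1/1000-packet units, while t milliseconds times kbit/sec yields bits. A hedged sketch of the refill arithmetic this setup implies (hypothetical helper; the actual refill is assumed to live in the meter execution path, and max_delta_t above caps how much idle time is worth crediting):

/* Sketch only: one refill formula covers both meter modes once the bucket
 * and burst are scaled by 1000 as in dpif_netdev_meter_set() above. */
static void
sketch_meter_refill(uint64_t *bucket, uint64_t scaled_burst,
                    uint32_t rate, uint64_t delta_t_ms)
{
    /* pktps: ms * pkt/s == 1/1000 packets;  kbps: ms * kbit/s == bits. */
    *bucket += delta_t_ms * rate;
    if (*bucket > scaled_burst) {
        *bucket = scaled_burst;   /* The burst size caps the bucket. */
    }
}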
5732 | static int | |
4b27db64 JR |
5733 | dpif_netdev_meter_get(const struct dpif *dpif, |
5734 | ofproto_meter_id meter_id_, | |
5735 | struct ofputil_meter_stats *stats, uint16_t n_bands) | |
5dddf960 | 5736 | { |
4b27db64 | 5737 | const struct dp_netdev *dp = get_dp_netdev(dpif); |
4b27db64 | 5738 | uint32_t meter_id = meter_id_.uint32; |
866bc756 | 5739 | int retval = 0; |
4b27db64 JR |
5740 | |
5741 | if (meter_id >= MAX_METERS) { | |
5742 | return EFBIG; | |
5743 | } | |
866bc756 JP |
5744 | |
5745 | meter_lock(dp, meter_id); | |
5746 | const struct dp_meter *meter = dp->meters[meter_id]; | |
4b27db64 | 5747 | if (!meter) { |
866bc756 JP |
5748 | retval = ENOENT; |
5749 | goto done; | |
4b27db64 JR |
5750 | } |
5751 | if (stats) { | |
5752 | int i = 0; | |
5753 | ||
4b27db64 JR |
5754 | stats->packet_in_count = meter->packet_count; |
5755 | stats->byte_in_count = meter->byte_count; | |
5756 | ||
5757 | for (i = 0; i < n_bands && i < meter->n_bands; ++i) { | |
5758 | stats->bands[i].packet_count = meter->bands[i].packet_count; | |
5759 | stats->bands[i].byte_count = meter->bands[i].byte_count; | |
5760 | } | |
4b27db64 JR |
5761 | |
5762 | stats->n_bands = i; | |
5763 | } | |
866bc756 JP |
5764 | |
5765 | done: | |
5766 | meter_unlock(dp, meter_id); | |
5767 | return retval; | |
5dddf960 JR |
5768 | } |
5769 | ||
5770 | static int | |
4b27db64 JR |
5771 | dpif_netdev_meter_del(struct dpif *dpif, |
5772 | ofproto_meter_id meter_id_, | |
5773 | struct ofputil_meter_stats *stats, uint16_t n_bands) | |
5dddf960 | 5774 | { |
4b27db64 JR |
5775 | struct dp_netdev *dp = get_dp_netdev(dpif); |
5776 | int error; | |
5777 | ||
5778 | error = dpif_netdev_meter_get(dpif, meter_id_, stats, n_bands); | |
5779 | if (!error) { | |
5780 | uint32_t meter_id = meter_id_.uint32; | |
5781 | ||
5782 | meter_lock(dp, meter_id); | |
5783 | dp_delete_meter(dp, meter_id); | |
5784 | meter_unlock(dp, meter_id); | |
4b27db64 JR |
5785 | } |
5786 | return error; | |
5dddf960 JR |
5787 | } |
5788 | ||
5789 | \f | |
6b31e073 RW |
5790 | static void |
5791 | dpif_netdev_disable_upcall(struct dpif *dpif) | |
5792 | OVS_NO_THREAD_SAFETY_ANALYSIS | |
5793 | { | |
5794 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
5795 | dp_netdev_disable_upcall(dp); | |
5796 | } | |
5797 | ||
5798 | static void | |
5799 | dp_netdev_enable_upcall(struct dp_netdev *dp) | |
5800 | OVS_RELEASES(dp->upcall_rwlock) | |
5801 | { | |
5802 | fat_rwlock_unlock(&dp->upcall_rwlock); | |
5803 | } | |
5804 | ||
5805 | static void | |
5806 | dpif_netdev_enable_upcall(struct dpif *dpif) | |
5807 | OVS_NO_THREAD_SAFETY_ANALYSIS | |
5808 | { | |
5809 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
5810 | dp_netdev_enable_upcall(dp); | |
5811 | } | |
5812 | ||
ae7ad0a1 | 5813 | static void |
accf8626 AW |
5814 | dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd) |
5815 | { | |
5816 | ovs_mutex_lock(&pmd->cond_mutex); | |
14e3e12a | 5817 | atomic_store_relaxed(&pmd->reload, false); |
2788a1b1 | 5818 | pmd->last_reload_seq = seq_read(pmd->reload_seq); |
accf8626 AW |
5819 | xpthread_cond_signal(&pmd->cond); |
5820 | ovs_mutex_unlock(&pmd->cond_mutex); | |
5821 | } | |
5822 | ||
1c1e46ed | 5823 | /* Finds and refs the dp_netdev_pmd_thread on core 'core_id'. Returns |
546e57d4 DDP |
5824 | * the pointer if it succeeds, otherwise NULL (it can return NULL even if |
5825 | * 'core_id' is NON_PMD_CORE_ID). | |
1c1e46ed AW |
5826 | * |
5827 | * Caller must unref the returned reference. */ |
65f13b50 | 5828 | static struct dp_netdev_pmd_thread * |
bd5131ba | 5829 | dp_netdev_get_pmd(struct dp_netdev *dp, unsigned core_id) |
65f13b50 AW |
5830 | { |
5831 | struct dp_netdev_pmd_thread *pmd; | |
55847abe | 5832 | const struct cmap_node *pnode; |
65f13b50 | 5833 | |
b19befae | 5834 | pnode = cmap_find(&dp->poll_threads, hash_int(core_id, 0)); |
1c1e46ed AW |
5835 | if (!pnode) { |
5836 | return NULL; | |
5837 | } | |
65f13b50 AW |
5838 | pmd = CONTAINER_OF(pnode, struct dp_netdev_pmd_thread, node); |
5839 | ||
1c1e46ed | 5840 | return dp_netdev_pmd_try_ref(pmd) ? pmd : NULL; |
65f13b50 AW |
5841 | } |
5842 | ||
f2eee189 AW |
5843 | /* Sets the 'struct dp_netdev_pmd_thread' for non-pmd threads. */ |
5844 | static void | |
5845 | dp_netdev_set_nonpmd(struct dp_netdev *dp) | |
e9985d6a | 5846 | OVS_REQUIRES(dp->port_mutex) |
f2eee189 AW |
5847 | { |
5848 | struct dp_netdev_pmd_thread *non_pmd; | |
5849 | ||
5850 | non_pmd = xzalloc(sizeof *non_pmd); | |
00873463 | 5851 | dp_netdev_configure_pmd(non_pmd, dp, NON_PMD_CORE_ID, OVS_NUMA_UNSPEC); |
f2eee189 AW |
5852 | } |
5853 | ||
1c1e46ed AW |
5854 | /* Caller must have valid pointer to 'pmd'. */ |
5855 | static bool | |
5856 | dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd) | |
5857 | { | |
5858 | return ovs_refcount_try_ref_rcu(&pmd->ref_cnt); | |
5859 | } | |
5860 | ||
5861 | static void | |
5862 | dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd) | |
5863 | { | |
5864 | if (pmd && ovs_refcount_unref(&pmd->ref_cnt) == 1) { | |
5865 | ovsrcu_postpone(dp_netdev_destroy_pmd, pmd); | |
5866 | } | |
5867 | } | |
5868 | ||
5869 | /* Given cmap position 'pos', tries to ref the next node. If try_ref() | |
5870 | * fails, keeps checking for the next node until reaching the end of cmap. | |
5871 | * | |
5872 | * Caller must unref the returned reference. */ | |
5873 | static struct dp_netdev_pmd_thread * | |
5874 | dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos) | |
5875 | { | |
5876 | struct dp_netdev_pmd_thread *next; | |
5877 | ||
5878 | do { | |
5879 | struct cmap_node *node; | |
5880 | ||
5881 | node = cmap_next_position(&dp->poll_threads, pos); | |
5882 | next = node ? CONTAINER_OF(node, struct dp_netdev_pmd_thread, node) | |
5883 | : NULL; | |
5884 | } while (next && !dp_netdev_pmd_try_ref(next)); | |
5885 | ||
5886 | return next; | |
5887 | } | |
5888 | ||
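dp_netdev_pmd_try_ref() and the loop above form a common RCU idiom: a node whose refcount has already reached zero is being torn down, so iteration must skip it rather than hand it out. A hedged, deliberately single-threaded sketch of the shape (hypothetical types; the real code uses atomic ovs_refcount operations):

struct sketch_node {
    int ref_cnt;                       /* Stand-in for ovs_refcount. */
    struct sketch_node *next;
};

/* Sketch only: return the next node that can still be referenced,
 * skipping nodes already drained to zero.  Caller must drop the ref. */
static struct sketch_node *
sketch_next_referenced(struct sketch_node *n)
{
    for (; n; n = n->next) {
        if (n->ref_cnt > 0) {          /* Real code: atomic try-ref. */
            n->ref_cnt++;
            return n;
        }
    }
    return NULL;
}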
65f13b50 | 5889 | /* Configures the 'pmd' based on the input argument. */ |
6c3eee82 | 5890 | static void |
65f13b50 | 5891 | dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp, |
00873463 | 5892 | unsigned core_id, int numa_id) |
65f13b50 AW |
5893 | { |
5894 | pmd->dp = dp; | |
65f13b50 AW |
5895 | pmd->core_id = core_id; |
5896 | pmd->numa_id = numa_id; | |
e32971b8 | 5897 | pmd->need_reload = false; |
c71ea3c4 | 5898 | pmd->n_output_batches = 0; |
1c1e46ed AW |
5899 | |
5900 | ovs_refcount_init(&pmd->ref_cnt); | |
65f13b50 | 5901 | latch_init(&pmd->exit_latch); |
2788a1b1 DDP |
5902 | pmd->reload_seq = seq_create(); |
5903 | pmd->last_reload_seq = seq_read(pmd->reload_seq); | |
14e3e12a | 5904 | atomic_init(&pmd->reload, false); |
accf8626 AW |
5905 | xpthread_cond_init(&pmd->cond, NULL); |
5906 | ovs_mutex_init(&pmd->cond_mutex); | |
1c1e46ed | 5907 | ovs_mutex_init(&pmd->flow_mutex); |
d0cca6c3 | 5908 | ovs_mutex_init(&pmd->port_mutex); |
1c1e46ed | 5909 | cmap_init(&pmd->flow_table); |
3453b4d6 | 5910 | cmap_init(&pmd->classifiers); |
58ed6df0 | 5911 | pmd->ctx.last_rxq = NULL; |
b010be17 IM |
5912 | pmd_thread_ctx_time_update(pmd); |
5913 | pmd->next_optimization = pmd->ctx.now + DPCLS_OPTIMIZATION_INTERVAL; | |
5914 | pmd->rxq_next_cycle_store = pmd->ctx.now + PMD_RXQ_INTERVAL_LEN; | |
947dc567 | 5915 | hmap_init(&pmd->poll_list); |
d0cca6c3 | 5916 | hmap_init(&pmd->tx_ports); |
57eebbb4 DDP |
5917 | hmap_init(&pmd->tnl_port_cache); |
5918 | hmap_init(&pmd->send_port_cache); | |
65f13b50 AW |
5919 | /* init the 'flow_cache' since there is no |
5920 | * actual thread created for NON_PMD_CORE_ID. */ | |
5921 | if (core_id == NON_PMD_CORE_ID) { | |
60d8ccae | 5922 | dfc_cache_init(&pmd->flow_cache); |
140dd699 | 5923 | pmd_alloc_static_tx_qid(pmd); |
65f13b50 | 5924 | } |
82a48ead | 5925 | pmd_perf_stats_init(&pmd->perf_stats); |
65f13b50 AW |
5926 | cmap_insert(&dp->poll_threads, CONST_CAST(struct cmap_node *, &pmd->node), |
5927 | hash_int(core_id, 0)); | |
5928 | } | |
5929 | ||
1c1e46ed AW |
5930 | static void |
5931 | dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd) | |
5932 | { | |
3453b4d6 JS |
5933 | struct dpcls *cls; |
5934 | ||
1c1e46ed | 5935 | dp_netdev_pmd_flow_flush(pmd); |
57eebbb4 DDP |
5936 | hmap_destroy(&pmd->send_port_cache); |
5937 | hmap_destroy(&pmd->tnl_port_cache); | |
d0cca6c3 | 5938 | hmap_destroy(&pmd->tx_ports); |
947dc567 | 5939 | hmap_destroy(&pmd->poll_list); |
3453b4d6 JS |
5940 | /* All flows (including their dpcls_rules) have been deleted already */ |
5941 | CMAP_FOR_EACH (cls, node, &pmd->classifiers) { | |
5942 | dpcls_destroy(cls); | |
7c269972 | 5943 | ovsrcu_postpone(free, cls); |
3453b4d6 JS |
5944 | } |
5945 | cmap_destroy(&pmd->classifiers); | |
1c1e46ed AW |
5946 | cmap_destroy(&pmd->flow_table); |
5947 | ovs_mutex_destroy(&pmd->flow_mutex); | |
5948 | latch_destroy(&pmd->exit_latch); | |
2788a1b1 | 5949 | seq_destroy(pmd->reload_seq); |
1c1e46ed AW |
5950 | xpthread_cond_destroy(&pmd->cond); |
5951 | ovs_mutex_destroy(&pmd->cond_mutex); | |
d0cca6c3 | 5952 | ovs_mutex_destroy(&pmd->port_mutex); |
1c1e46ed AW |
5953 | free(pmd); |
5954 | } | |
5955 | ||
5956 | /* Stops the pmd thread, removes it from the 'dp->poll_threads', | |
5957 | * and unrefs the struct. */ | |
65f13b50 | 5958 | static void |
e4e74c3a | 5959 | dp_netdev_del_pmd(struct dp_netdev *dp, struct dp_netdev_pmd_thread *pmd) |
6c3eee82 | 5960 | { |
d0cca6c3 DDP |
5961 | /* NON_PMD_CORE_ID doesn't have a thread, so we don't have to synchronize, |
5962 | * but extra cleanup is necessary */ | |
65f13b50 | 5963 | if (pmd->core_id == NON_PMD_CORE_ID) { |
febf4a7a | 5964 | ovs_mutex_lock(&dp->non_pmd_mutex); |
60d8ccae | 5965 | dfc_cache_uninit(&pmd->flow_cache); |
d0cca6c3 | 5966 | pmd_free_cached_ports(pmd); |
140dd699 | 5967 | pmd_free_static_tx_qid(pmd); |
febf4a7a | 5968 | ovs_mutex_unlock(&dp->non_pmd_mutex); |
65f13b50 AW |
5969 | } else { |
5970 | latch_set(&pmd->exit_latch); | |
5971 | dp_netdev_reload_pmd__(pmd); | |
65f13b50 AW |
5972 | xpthread_join(pmd->thread, NULL); |
5973 | } | |
ae7ad0a1 | 5974 | |
d0cca6c3 | 5975 | dp_netdev_pmd_clear_ports(pmd); |
ae7ad0a1 | 5976 | |
e4e74c3a AW |
5977 | /* Purges the 'pmd''s flows after stopping the thread, but before |
5978 | * destroying the flows, so that the flow stats can be collected. */ | |
5979 | if (dp->dp_purge_cb) { | |
5980 | dp->dp_purge_cb(dp->dp_purge_aux, pmd->core_id); | |
5981 | } | |
65f13b50 | 5982 | cmap_remove(&pmd->dp->poll_threads, &pmd->node, hash_int(pmd->core_id, 0)); |
1c1e46ed | 5983 | dp_netdev_pmd_unref(pmd); |
65f13b50 | 5984 | } |
6c3eee82 | 5985 | |
e32971b8 DDP |
5986 | /* Destroys all pmd threads. If 'non_pmd' is true it also destroys the non pmd |
5987 | * thread. */ | |
65f13b50 | 5988 | static void |
e32971b8 | 5989 | dp_netdev_destroy_all_pmds(struct dp_netdev *dp, bool non_pmd) |
65f13b50 AW |
5990 | { |
5991 | struct dp_netdev_pmd_thread *pmd; | |
d916785c DDP |
5992 | struct dp_netdev_pmd_thread **pmd_list; |
5993 | size_t k = 0, n_pmds; | |
5994 | ||
e32971b8 | 5995 | n_pmds = cmap_count(&dp->poll_threads); |
d916785c | 5996 | pmd_list = xcalloc(n_pmds, sizeof *pmd_list); |
65f13b50 AW |
5997 | |
5998 | CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { | |
e32971b8 | 5999 | if (!non_pmd && pmd->core_id == NON_PMD_CORE_ID) { |
b9584f21 DDP |
6000 | continue; |
6001 | } | |
d916785c DDP |
6002 | /* We cannot call dp_netdev_del_pmd(), since it alters |
6003 | * 'dp->poll_threads' (while we're iterating it) and it | |
6004 | * might quiesce. */ | |
6005 | ovs_assert(k < n_pmds); | |
6006 | pmd_list[k++] = pmd; | |
6c3eee82 | 6007 | } |
d916785c DDP |
6008 | |
6009 | for (size_t i = 0; i < k; i++) { | |
6010 | dp_netdev_del_pmd(dp, pmd_list[i]); | |
6011 | } | |
6012 | free(pmd_list); | |
65f13b50 | 6013 | } |
6c3eee82 | 6014 | |
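The in-loop comment names the constraint; the fix is a general two-pass idiom: stage doomed entries during a read-only scan, then mutate in a second pass once no iterator is live. A hedged generic sketch (hypothetical callbacks; xcalloc is the OVS allocator already used above):

/* Sketch only: two-pass delete so the container is never mutated while
 * it is being iterated. */
static void
sketch_two_pass_delete(void **set, size_t n,
                       bool (*doomed)(void *), void (*destroy)(void *))
{
    void **staged = xcalloc(n, sizeof *staged);
    size_t k = 0;

    for (size_t i = 0; i < n; i++) {    /* Pass 1: read-only scan. */
        if (doomed(set[i])) {
            staged[k++] = set[i];
        }
    }
    for (size_t i = 0; i < k; i++) {    /* Pass 2: free to mutate/quiesce. */
        destroy(staged[i]);
    }
    free(staged);
}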
d0cca6c3 DDP |
6015 | /* Deletes all rx queues from pmd->poll_list and all the ports from |
6016 | * pmd->tx_ports. */ | |
cc245ce8 | 6017 | static void |
d0cca6c3 | 6018 | dp_netdev_pmd_clear_ports(struct dp_netdev_pmd_thread *pmd) |
cc245ce8 IM |
6019 | { |
6020 | struct rxq_poll *poll; | |
d0cca6c3 | 6021 | struct tx_port *port; |
cc245ce8 | 6022 | |
d0cca6c3 | 6023 | ovs_mutex_lock(&pmd->port_mutex); |
947dc567 | 6024 | HMAP_FOR_EACH_POP (poll, node, &pmd->poll_list) { |
cc245ce8 IM |
6025 | free(poll); |
6026 | } | |
d0cca6c3 DDP |
6027 | HMAP_FOR_EACH_POP (port, node, &pmd->tx_ports) { |
6028 | free(port); | |
6029 | } | |
6030 | ovs_mutex_unlock(&pmd->port_mutex); | |
cc245ce8 IM |
6031 | } |
6032 | ||
e32971b8 | 6033 | /* Adds rx queue to poll_list of PMD thread, if it's not there already. */ |
b68872d8 | 6034 | static void |
e32971b8 DDP |
6035 | dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd, |
6036 | struct dp_netdev_rxq *rxq) | |
6037 | OVS_REQUIRES(pmd->port_mutex) | |
b68872d8 | 6038 | { |
e32971b8 DDP |
6039 | int qid = netdev_rxq_get_queue_id(rxq->rx); |
6040 | uint32_t hash = hash_2words(odp_to_u32(rxq->port->port_no), qid); | |
6041 | struct rxq_poll *poll; | |
b68872d8 | 6042 | |
e32971b8 DDP |
6043 | HMAP_FOR_EACH_WITH_HASH (poll, node, hash, &pmd->poll_list) { |
6044 | if (poll->rxq == rxq) { | |
6045 | /* 'rxq' is already polled by this thread. Do nothing. */ | |
6046 | return; | |
d0cca6c3 | 6047 | } |
cc245ce8 | 6048 | } |
cc245ce8 | 6049 | |
e32971b8 DDP |
6050 | poll = xmalloc(sizeof *poll); |
6051 | poll->rxq = rxq; | |
6052 | hmap_insert(&pmd->poll_list, &poll->node, hash); | |
b68872d8 | 6053 | |
e32971b8 | 6054 | pmd->need_reload = true; |
ae7ad0a1 IM |
6055 | } |
6056 | ||
e32971b8 | 6057 | /* Delete 'poll' from poll_list of PMD thread. */ |
ae7ad0a1 | 6058 | static void |
e32971b8 DDP |
6059 | dp_netdev_del_rxq_from_pmd(struct dp_netdev_pmd_thread *pmd, |
6060 | struct rxq_poll *poll) | |
d0cca6c3 | 6061 | OVS_REQUIRES(pmd->port_mutex) |
ae7ad0a1 | 6062 | { |
e32971b8 DDP |
6063 | hmap_remove(&pmd->poll_list, &poll->node); |
6064 | free(poll); | |
ae7ad0a1 | 6065 | |
e32971b8 | 6066 | pmd->need_reload = true; |
ae7ad0a1 IM |
6067 | } |
6068 | ||
d0cca6c3 DDP |
6069 | /* Add 'port' to the tx port cache of 'pmd', which must be reloaded for the |
6070 | * changes to take effect. */ | |
cc245ce8 | 6071 | static void |
d0cca6c3 DDP |
6072 | dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread *pmd, |
6073 | struct dp_netdev_port *port) | |
e32971b8 | 6074 | OVS_REQUIRES(pmd->port_mutex) |
d0cca6c3 | 6075 | { |
57eebbb4 DDP |
6076 | struct tx_port *tx; |
6077 | ||
e32971b8 DDP |
6078 | tx = tx_port_lookup(&pmd->tx_ports, port->port_no); |
6079 | if (tx) { | |
6080 | /* 'port' is already in this thread's tx cache. Do nothing. */ | |
6081 | return; | |
6082 | } | |
6083 | ||
57eebbb4 | 6084 | tx = xzalloc(sizeof *tx); |
d0cca6c3 | 6085 | |
324c8374 IM |
6086 | tx->port = port; |
6087 | tx->qid = -1; | |
c71ea3c4 | 6088 | tx->flush_time = 0LL; |
009e0033 | 6089 | dp_packet_batch_init(&tx->output_pkts); |
d0cca6c3 | 6090 | |
324c8374 | 6091 | hmap_insert(&pmd->tx_ports, &tx->node, hash_port_no(tx->port->port_no)); |
e32971b8 | 6092 | pmd->need_reload = true; |
d0cca6c3 DDP |
6093 | } |
6094 | ||
e32971b8 DDP |
6095 | /* Deletes 'tx' from the tx port cache of 'pmd', which must be reloaded for the |
6096 | * changes to take effect. */ | |
b9584f21 | 6097 | static void |
e32971b8 DDP |
6098 | dp_netdev_del_port_tx_from_pmd(struct dp_netdev_pmd_thread *pmd, |
6099 | struct tx_port *tx) | |
6100 | OVS_REQUIRES(pmd->port_mutex) | |
b9584f21 | 6101 | { |
e32971b8 DDP |
6102 | hmap_remove(&pmd->tx_ports, &tx->node); |
6103 | free(tx); | |
6104 | pmd->need_reload = true; | |
6c3eee82 BP |
6105 | } |
6106 | \f | |
b5cbbcf6 AZ |
6107 | static char * |
6108 | dpif_netdev_get_datapath_version(void) | |
6109 | { | |
6110 | return xstrdup("<built-in>"); | |
6111 | } | |
6112 | ||
72865317 | 6113 | static void |
1c1e46ed | 6114 | dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow, int cnt, int size, |
11bfdadd | 6115 | uint16_t tcp_flags, long long now) |
72865317 | 6116 | { |
eb94da30 | 6117 | uint16_t flags; |
72865317 | 6118 | |
eb94da30 DDP |
6119 | atomic_store_relaxed(&netdev_flow->stats.used, now); |
6120 | non_atomic_ullong_add(&netdev_flow->stats.packet_count, cnt); | |
6121 | non_atomic_ullong_add(&netdev_flow->stats.byte_count, size); | |
6122 | atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags); | |
6123 | flags |= tcp_flags; | |
6124 | atomic_store_relaxed(&netdev_flow->stats.tcp_flags, flags); | |
51852a57 BP |
6125 | } |
6126 | ||
623540e4 | 6127 | static int |
e14deea0 | 6128 | dp_netdev_upcall(struct dp_netdev_pmd_thread *pmd, struct dp_packet *packet_, |
7af12bd7 | 6129 | struct flow *flow, struct flow_wildcards *wc, ovs_u128 *ufid, |
623540e4 EJ |
6130 | enum dpif_upcall_type type, const struct nlattr *userdata, |
6131 | struct ofpbuf *actions, struct ofpbuf *put_actions) | |
6132 | { | |
1c1e46ed | 6133 | struct dp_netdev *dp = pmd->dp; |
623540e4 | 6134 | |
623540e4 EJ |
6135 | if (OVS_UNLIKELY(!dp->upcall_cb)) { |
6136 | return ENODEV; | |
6137 | } | |
6138 | ||
6139 | if (OVS_UNLIKELY(!VLOG_DROP_DBG(&upcall_rl))) { | |
6140 | struct ds ds = DS_EMPTY_INITIALIZER; | |
623540e4 | 6141 | char *packet_str; |
cf62fa4c | 6142 | struct ofpbuf key; |
5262eea1 JG |
6143 | struct odp_flow_key_parms odp_parms = { |
6144 | .flow = flow, | |
1dea1435 | 6145 | .mask = wc ? &wc->masks : NULL, |
2494ccd7 | 6146 | .support = dp_netdev_support, |
5262eea1 | 6147 | }; |
623540e4 EJ |
6148 | |
6149 | ofpbuf_init(&key, 0); | |
5262eea1 | 6150 | odp_flow_key_from_flow(&odp_parms, &key); |
2482b0b0 | 6151 | packet_str = ofp_dp_packet_to_string(packet_); |
623540e4 | 6152 | |
6fd6ed71 | 6153 | odp_flow_key_format(key.data, key.size, &ds); |
623540e4 EJ |
6154 | |
6155 | VLOG_DBG("%s: %s upcall:\n%s\n%s", dp->name, | |
6156 | dpif_upcall_type_to_string(type), ds_cstr(&ds), packet_str); | |
6157 | ||
6158 | ofpbuf_uninit(&key); | |
6159 | free(packet_str); | |
6fd6ed71 | 6160 | |
623540e4 EJ |
6161 | ds_destroy(&ds); |
6162 | } | |
6163 | ||
8d8ab6c2 JG |
6164 | return dp->upcall_cb(packet_, flow, ufid, pmd->core_id, type, userdata, |
6165 | actions, wc, put_actions, dp->upcall_aux); | |
623540e4 EJ |
6166 | } |
6167 | ||
bde94613 FA |
6168 | static inline uint32_t |
6169 | dpif_netdev_packet_get_rss_hash_orig_pkt(struct dp_packet *packet, | |
6170 | const struct miniflow *mf) | |
6171 | { | |
6172 | uint32_t hash; | |
6173 | ||
6174 | if (OVS_LIKELY(dp_packet_rss_valid(packet))) { | |
6175 | hash = dp_packet_get_rss_hash(packet); | |
6176 | } else { | |
6177 | hash = miniflow_hash_5tuple(mf, 0); | |
6178 | dp_packet_set_rss_hash(packet, hash); | |
6179 | } | |
6180 | ||
6181 | return hash; | |
6182 | } | |
6183 | ||
9bbf1c3d | 6184 | static inline uint32_t |
048963aa DDP |
6185 | dpif_netdev_packet_get_rss_hash(struct dp_packet *packet, |
6186 | const struct miniflow *mf) | |
9bbf1c3d | 6187 | { |
048963aa | 6188 | uint32_t hash, recirc_depth; |
9bbf1c3d | 6189 | |
f2f44f5d DDP |
6190 | if (OVS_LIKELY(dp_packet_rss_valid(packet))) { |
6191 | hash = dp_packet_get_rss_hash(packet); | |
6192 | } else { | |
9bbf1c3d | 6193 | hash = miniflow_hash_5tuple(mf, 0); |
2bc1bbd2 | 6194 | dp_packet_set_rss_hash(packet, hash); |
9bbf1c3d | 6195 | } |
048963aa DDP |
6196 | |
6197 | /* The RSS hash must account for the recirculation depth to avoid | |
6198 | * collisions in the exact match cache. */ |
6199 | recirc_depth = *recirc_depth_get_unsafe(); | |
6200 | if (OVS_UNLIKELY(recirc_depth)) { | |
6201 | hash = hash_finish(hash, recirc_depth); | |
6202 | dp_packet_set_rss_hash(packet, hash); | |
6203 | } | |
9bbf1c3d DDP |
6204 | return hash; |
6205 | } | |
6206 | ||
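Folding the recirculation depth into the hash matters because a recirculated packet keeps its 5-tuple: without the extra mix, the original and recirculated passes would map to the same exact match cache entry. A hedged one-line wrapper showing the intent (hypothetical name around the same hash_finish() call used above):

/* Sketch only: same 5-tuple, different recirc depth => different EMC key. */
static inline uint32_t
sketch_emc_hash(uint32_t rss_hash, uint32_t recirc_depth)
{
    return recirc_depth ? hash_finish(rss_hash, recirc_depth) : rss_hash;
}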
f7ce4811 | 6207 | struct packet_batch_per_flow { |
8cbf4f47 DDP |
6208 | unsigned int byte_count; |
6209 | uint16_t tcp_flags; | |
8cbf4f47 DDP |
6210 | struct dp_netdev_flow *flow; |
6211 | ||
1895cc8d | 6212 | struct dp_packet_batch array; |
8cbf4f47 DDP |
6213 | }; |
6214 | ||
6215 | static inline void | |
f7ce4811 PS |
6216 | packet_batch_per_flow_update(struct packet_batch_per_flow *batch, |
6217 | struct dp_packet *packet, | |
aab96ec4 | 6218 | uint16_t tcp_flags) |
8cbf4f47 | 6219 | { |
cf62fa4c | 6220 | batch->byte_count += dp_packet_size(packet); |
aab96ec4 | 6221 | batch->tcp_flags |= tcp_flags; |
1895cc8d | 6222 | batch->array.packets[batch->array.count++] = packet; |
8cbf4f47 DDP |
6223 | } |
6224 | ||
6225 | static inline void | |
f7ce4811 PS |
6226 | packet_batch_per_flow_init(struct packet_batch_per_flow *batch, |
6227 | struct dp_netdev_flow *flow) | |
8cbf4f47 | 6228 | { |
11e5cf1f | 6229 | flow->batch = batch; |
8cbf4f47 | 6230 | |
11e5cf1f | 6231 | batch->flow = flow; |
1895cc8d | 6232 | dp_packet_batch_init(&batch->array); |
8cbf4f47 DDP |
6233 | batch->byte_count = 0; |
6234 | batch->tcp_flags = 0; | |
8cbf4f47 DDP |
6235 | } |
6236 | ||
6237 | static inline void | |
f7ce4811 | 6238 | packet_batch_per_flow_execute(struct packet_batch_per_flow *batch, |
b010be17 | 6239 | struct dp_netdev_pmd_thread *pmd) |
8cbf4f47 DDP |
6240 | { |
6241 | struct dp_netdev_actions *actions; | |
6242 | struct dp_netdev_flow *flow = batch->flow; | |
6243 | ||
1895cc8d | 6244 | dp_netdev_flow_used(flow, batch->array.count, batch->byte_count, |
05f9e707 | 6245 | batch->tcp_flags, pmd->ctx.now / 1000); |
8cbf4f47 DDP |
6246 | |
6247 | actions = dp_netdev_flow_get_actions(flow); | |
6248 | ||
66e4ad8a | 6249 | dp_netdev_execute_actions(pmd, &batch->array, true, &flow->flow, |
b010be17 | 6250 | actions->actions, actions->size); |
8cbf4f47 DDP |
6251 | } |
6252 | ||
8aaa125d | 6253 | static inline void |
e14deea0 | 6254 | dp_netdev_queue_batches(struct dp_packet *pkt, |
aab96ec4 | 6255 | struct dp_netdev_flow *flow, uint16_t tcp_flags, |
47a45d86 KT |
6256 | struct packet_batch_per_flow *batches, |
6257 | size_t *n_batches) | |
9bbf1c3d | 6258 | { |
f7ce4811 | 6259 | struct packet_batch_per_flow *batch = flow->batch; |
11e5cf1f | 6260 | |
f9fe365b AZ |
6261 | if (OVS_UNLIKELY(!batch)) { |
6262 | batch = &batches[(*n_batches)++]; | |
f7ce4811 | 6263 | packet_batch_per_flow_init(batch, flow); |
9bbf1c3d DDP |
6264 | } |
6265 | ||
aab96ec4 | 6266 | packet_batch_per_flow_update(batch, pkt, tcp_flags); |
9bbf1c3d DDP |
6267 | } |
6268 | ||
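dp_netdev_queue_batches() above gets O(1) grouping by caching a back-pointer in flow->batch; stripped of that optimization, the underlying idea is just per-flow binning so a flow's actions run once per group rather than per packet. A hedged fixed-size sketch using a linear scan instead of the back-pointer (hypothetical types):

struct sketch_flow_batch {
    const void *flow;                   /* Stand-in for dp_netdev_flow. */
    void *pkts[NETDEV_MAX_BURST];
    size_t n;
};

/* Sketch only: append 'pkt' to the batch for 'flow', creating one on the
 * first packet of that flow in this input batch. */
static void
sketch_batch_add(struct sketch_flow_batch *batches, size_t *n_batches,
                 const void *flow, void *pkt)
{
    for (size_t i = 0; i < *n_batches; i++) {
        if (batches[i].flow == flow) {
            batches[i].pkts[batches[i].n++] = pkt;
            return;
        }
    }

    struct sketch_flow_batch *b = &batches[(*n_batches)++];
    b->flow = flow;
    b->n = 0;
    b->pkts[b->n++] = pkt;
}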
9b4f08cd VDA |
6269 | static inline void |
6270 | packet_enqueue_to_flow_map(struct dp_packet *packet, | |
6271 | struct dp_netdev_flow *flow, | |
6272 | uint16_t tcp_flags, | |
6273 | struct dp_packet_flow_map *flow_map, | |
6274 | size_t index) | |
6275 | { | |
6276 | struct dp_packet_flow_map *map = &flow_map[index]; | |
6277 | map->flow = flow; | |
6278 | map->packet = packet; | |
6279 | map->tcp_flags = tcp_flags; | |
6280 | } | |
6281 | ||
60d8ccae YW |
6282 | /* SMC lookup function for a batch of packets. |
6283 | * By batching SMC lookups, we can use prefetch | |
6284 | * to hide memory access latency. | |
6285 | */ | |
6286 | static inline void | |
6287 | smc_lookup_batch(struct dp_netdev_pmd_thread *pmd, | |
6288 | struct netdev_flow_key *keys, | |
6289 | struct netdev_flow_key **missed_keys, | |
6290 | struct dp_packet_batch *packets_, | |
9b4f08cd VDA |
6291 | const int cnt, |
6292 | struct dp_packet_flow_map *flow_map, | |
6293 | uint8_t *index_map) | |
60d8ccae YW |
6294 | { |
6295 | int i; | |
6296 | struct dp_packet *packet; | |
6297 | size_t n_smc_hit = 0, n_missed = 0; | |
6298 | struct dfc_cache *cache = &pmd->flow_cache; | |
6299 | struct smc_cache *smc_cache = &cache->smc_cache; | |
6300 | const struct cmap_node *flow_node; | |
9b4f08cd VDA |
6301 | int recv_idx; |
6302 | uint16_t tcp_flags; | |
60d8ccae YW |
6303 | |
6304 | /* Prefetch buckets for all packets */ | |
6305 | for (i = 0; i < cnt; i++) { | |
6306 | OVS_PREFETCH(&smc_cache->buckets[keys[i].hash & SMC_MASK]); | |
6307 | } | |
6308 | ||
6309 | DP_PACKET_BATCH_REFILL_FOR_EACH (i, cnt, packet, packets_) { | |
6310 | struct dp_netdev_flow *flow = NULL; | |
6311 | flow_node = smc_entry_get(pmd, keys[i].hash); | |
6312 | bool hit = false; | |
9b4f08cd VDA |
6313 | /* Get the original order of this packet in received batch. */ |
6314 | recv_idx = index_map[i]; | |
60d8ccae YW |
6315 | |
6316 | if (OVS_LIKELY(flow_node != NULL)) { | |
6317 | CMAP_NODE_FOR_EACH (flow, node, flow_node) { | |
6318 | /* Since we don't have a per-port megaflow to check the port | |
6319 | * number, we need to verify that the input ports match. */ | |
6320 | if (OVS_LIKELY(dpcls_rule_matches_key(&flow->cr, &keys[i]) && | |
6321 | flow->flow.in_port.odp_port == packet->md.in_port.odp_port)) { | |
9b4f08cd VDA |
6322 | tcp_flags = miniflow_get_tcp_flags(&keys[i].mf); |
6323 | ||
60d8ccae | 6324 | /* SMC hit and emc miss, we insert into EMC */ |
60d8ccae YW |
6325 | keys[i].len = |
6326 | netdev_flow_key_size(miniflow_n_values(&keys[i].mf)); | |
18e08953 | 6327 | emc_probabilistic_insert(pmd, &keys[i], flow); |
9b4f08cd VDA |
6328 | /* Add these packets into the flow map in the same order |
6329 | * as received. | |
6330 | */ | |
6331 | packet_enqueue_to_flow_map(packet, flow, tcp_flags, | |
6332 | flow_map, recv_idx); | |
60d8ccae YW |
6333 | n_smc_hit++; |
6334 | hit = true; | |
6335 | break; | |
6336 | } | |
6337 | } | |
6338 | if (hit) { | |
6339 | continue; | |
6340 | } | |
6341 | } | |
6342 | ||
6343 | /* SMC missed. Group missed packets together at | |
6344 | * the beginning of the 'packets' array. */ | |
6345 | dp_packet_batch_refill(packets_, packet, i); | |
9b4f08cd VDA |
6346 | |
6347 | /* Preserve the order of packet for flow batching. */ | |
6348 | index_map[n_missed] = recv_idx; | |
6349 | ||
60d8ccae YW |
6350 | /* Put missed keys into the pointer array returned to the caller. */ |
6351 | missed_keys[n_missed++] = &keys[i]; | |
6352 | } | |
6353 | ||
6354 | pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_SMC_HIT, n_smc_hit); | |
6355 | } | |
6356 | ||
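The two loops in smc_lookup_batch() are a textbook software-prefetch split: issue all bucket prefetches first so the cache-line loads overlap, then probe while the lines are likely warm. A hedged standalone sketch of that shape (hypothetical table and names):

/* Sketch only: a prefetch pass followed by a probe pass. */
static void
sketch_batched_probe(const uint32_t *hashes, int cnt,
                     const uint32_t *table, uint32_t mask, uint32_t *out)
{
    for (int i = 0; i < cnt; i++) {
        OVS_PREFETCH(&table[hashes[i] & mask]);   /* Start loads early. */
    }
    for (int i = 0; i < cnt; i++) {
        out[i] = table[hashes[i] & mask];         /* Mostly warm lines. */
    }
}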
6357 | /* Try to process all ('cnt') the 'packets' using only the datapath flow cache | |
a90ed026 | 6358 | * 'pmd->flow_cache'. If a flow is not found for a packet 'packets[i]', the |
8aaa125d | 6359 | * miniflow is copied into 'keys' and the packet pointer is moved to the |
60d8ccae YW |
6360 | * beginning of the 'packets' array. The pointers of missed keys are put in the |
6361 | * missed_keys pointer array for future processing. | |
9bbf1c3d DDP |
6362 | * |
6363 | * The function returns the number of packets that need to be processed in the |
6364 | * 'packets' array (they have been moved to the beginning of the vector). | |
a90ed026 | 6365 | * |
02305520 FA |
6366 | * For performance reasons a caller may choose not to initialize the metadata |
6367 | * in 'packets_'. If 'md_is_valid' is false, the metadata in 'packets' | |
6368 | * is not valid and must be initialized by this function using 'port_no'. | |
6369 | * If 'md_is_valid' is true, the metadata is already valid and 'port_no' | |
6370 | * will be ignored. | |
9bbf1c3d DDP |
6371 | */ |
6372 | static inline size_t | |
60d8ccae | 6373 | dfc_processing(struct dp_netdev_pmd_thread *pmd, |
72c84bc2 | 6374 | struct dp_packet_batch *packets_, |
1895cc8d | 6375 | struct netdev_flow_key *keys, |
60d8ccae | 6376 | struct netdev_flow_key **missed_keys, |
f7ce4811 | 6377 | struct packet_batch_per_flow batches[], size_t *n_batches, |
9b4f08cd VDA |
6378 | struct dp_packet_flow_map *flow_map, |
6379 | size_t *n_flows, uint8_t *index_map, | |
a90ed026 | 6380 | bool md_is_valid, odp_port_t port_no) |
72865317 | 6381 | { |
b89c678b | 6382 | struct netdev_flow_key *key = &keys[0]; |
60d8ccae YW |
6383 | size_t n_missed = 0, n_emc_hit = 0; |
6384 | struct dfc_cache *cache = &pmd->flow_cache; | |
72c84bc2 | 6385 | struct dp_packet *packet; |
45df9fef | 6386 | const size_t cnt = dp_packet_batch_size(packets_); |
2fbadeb6 | 6387 | uint32_t cur_min = pmd->ctx.emc_insert_min; |
72c84bc2 | 6388 | int i; |
aab96ec4 | 6389 | uint16_t tcp_flags; |
60d8ccae | 6390 | bool smc_enable_db; |
9b4f08cd VDA |
6391 | size_t map_cnt = 0; |
6392 | bool batch_enable = true; | |
8cbf4f47 | 6393 | |
60d8ccae | 6394 | atomic_read_relaxed(&pmd->dp->smc_enable_db, &smc_enable_db); |
82a48ead JS |
6395 | pmd_perf_update_counter(&pmd->perf_stats, |
6396 | md_is_valid ? PMD_STAT_RECIRC : PMD_STAT_RECV, | |
6397 | cnt); | |
f79b1ddb | 6398 | |
45df9fef | 6399 | DP_PACKET_BATCH_REFILL_FOR_EACH (i, cnt, packet, packets_) { |
9bbf1c3d | 6400 | struct dp_netdev_flow *flow; |
aab96ec4 | 6401 | uint32_t mark; |
9bbf1c3d | 6402 | |
5a2fed48 AZ |
6403 | if (OVS_UNLIKELY(dp_packet_size(packet) < ETH_HEADER_LEN)) { |
6404 | dp_packet_delete(packet); | |
84d6d5eb EJ |
6405 | continue; |
6406 | } | |
8cbf4f47 | 6407 | |
45df9fef | 6408 | if (i != cnt - 1) { |
72c84bc2 | 6409 | struct dp_packet **packets = packets_->packets; |
a90ed026 | 6410 | /* Prefetch next packet data and metadata. */ |
72a5e2b8 | 6411 | OVS_PREFETCH(dp_packet_data(packets[i+1])); |
a90ed026 | 6412 | pkt_metadata_prefetch_init(&packets[i+1]->md); |
72a5e2b8 DDP |
6413 | } |
6414 | ||
a90ed026 DDP |
6415 | if (!md_is_valid) { |
6416 | pkt_metadata_init(&packet->md, port_no); | |
6417 | } | |
aab96ec4 YL |
6418 | |
6419 | if ((*recirc_depth_get() == 0) && | |
6420 | dp_packet_has_flow_mark(packet, &mark)) { | |
6421 | flow = mark_to_flow_find(pmd, mark); | |
9b4f08cd | 6422 | if (OVS_LIKELY(flow)) { |
aab96ec4 | 6423 | tcp_flags = parse_tcp_flags(packet); |
9b4f08cd VDA |
6424 | if (OVS_LIKELY(batch_enable)) { |
6425 | dp_netdev_queue_batches(packet, flow, tcp_flags, batches, | |
6426 | n_batches); | |
6427 | } else { | |
6428 | /* Flow batching should be performed only after fast-path | |
6429 | * processing is also completed for packets with an EMC miss, | |
6430 | * or else it will result in reordering of packets with the | |
6431 | * same datapath flow. */ | |
6432 | packet_enqueue_to_flow_map(packet, flow, tcp_flags, | |
6433 | flow_map, map_cnt++); | |
6434 | } | |
aab96ec4 YL |
6435 | continue; |
6436 | } | |
6437 | } | |
6438 | ||
5a2fed48 | 6439 | miniflow_extract(packet, &key->mf); |
d262ac2c | 6440 | key->len = 0; /* Not computed yet. */ |
60d8ccae YW |
6441 | /* If both EMC and SMC are disabled, skip the hash computation. */ |
6442 | if (smc_enable_db == true || cur_min != 0) { | |
bde94613 FA |
6443 | if (!md_is_valid) { |
6444 | key->hash = dpif_netdev_packet_get_rss_hash_orig_pkt(packet, | |
6445 | &key->mf); | |
6446 | } else { | |
6447 | key->hash = dpif_netdev_packet_get_rss_hash(packet, &key->mf); | |
6448 | } | |
60d8ccae YW |
6449 | } |
6450 | if (cur_min) { | |
6451 | flow = emc_lookup(&cache->emc_cache, key); | |
bde94613 FA |
6452 | } else { |
6453 | flow = NULL; | |
6454 | } | |
8aaa125d | 6455 | if (OVS_LIKELY(flow)) { |
aab96ec4 | 6456 | tcp_flags = miniflow_get_tcp_flags(&key->mf); |
60d8ccae | 6457 | n_emc_hit++; |
9b4f08cd VDA |
6458 | if (OVS_LIKELY(batch_enable)) { |
6459 | dp_netdev_queue_batches(packet, flow, tcp_flags, batches, | |
6460 | n_batches); | |
6461 | } else { | |
6462 | /* Flow batching should be performed only after fast-path | |
6463 | * processing is also completed for packets with an EMC miss, | |
6464 | * or else it will result in reordering of packets with the | |
6465 | * same datapath flow. */ | |
6466 | packet_enqueue_to_flow_map(packet, flow, tcp_flags, | |
6467 | flow_map, map_cnt++); | |
6468 | } | |
8aaa125d | 6469 | } else { |
d1aa0b94 | 6470 | /* Exact match cache missed. Group missed packets together at |
72c84bc2 AZ |
6471 | * the beginning of the 'packets' array. */ |
6472 | dp_packet_batch_refill(packets_, packet, i); | |
9b4f08cd VDA |
6473 | |
6474 | /* Preserve the order of packet for flow batching. */ | |
6475 | index_map[n_missed] = map_cnt; | |
6476 | flow_map[map_cnt++].flow = NULL; | |
6477 | ||
400486f7 | 6478 | /* 'key[n_missed]' contains the key of the current packet and it |
60d8ccae YW |
6479 | * will be passed to SMC lookup. The next key should be extracted |
6480 | * to 'keys[n_missed + 1]'. | |
6481 | * We also maintain a pointer array to keys that missed both SMC and EMC |
6482 | * which will be returned to the caller for future processing. */ | |
6483 | missed_keys[n_missed] = key; | |
400486f7 | 6484 | key = &keys[++n_missed]; |
9b4f08cd VDA |
6485 | |
6486 | /* Skip batching for subsequent packets to avoid reordering. */ | |
6487 | batch_enable = false; | |
9bbf1c3d DDP |
6488 | } |
6489 | } | |
9b4f08cd VDA |
6490 | /* Count of packets which are not flow batched. */ |
6491 | *n_flows = map_cnt; | |
9bbf1c3d | 6492 | |
60d8ccae YW |
6493 | pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_EXACT_HIT, n_emc_hit); |
6494 | ||
6495 | if (!smc_enable_db) { | |
6496 | return dp_packet_batch_size(packets_); | |
6497 | } | |
6498 | ||
6499 | /* Packets that missed the EMC do a batch lookup in the SMC, if enabled. */ |
9b4f08cd VDA |
6500 | smc_lookup_batch(pmd, keys, missed_keys, packets_, |
6501 | n_missed, flow_map, index_map); | |
4f150744 | 6502 | |
72c84bc2 | 6503 | return dp_packet_batch_size(packets_); |
9bbf1c3d DDP |
6504 | } |
6505 | ||
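The REFILL loop above doubles as a stable partition: the batch is drained and only missing packets are pushed back, so misses end up compacted at the front in their original relative order while index_map records where each one sat in the received batch. A hedged array-based sketch of the same move (hypothetical names; counts stay under NETDEV_MAX_BURST, hence uint8_t is enough):

/* Sketch only: compact misses to the front, recording original slots. */
static size_t
sketch_compact_misses(void **pkts, const bool *hit,
                      uint8_t *index_map, size_t cnt)
{
    size_t n_missed = 0;

    for (size_t i = 0; i < cnt; i++) {
        if (!hit[i]) {
            index_map[n_missed] = i;        /* Position in received batch. */
            pkts[n_missed++] = pkts[i];     /* Stable, in-place compaction. */
        }
    }
    return n_missed;                        /* Misses occupy [0, n_missed). */
}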
82a48ead | 6506 | static inline int |
47a45d86 KT |
6507 | handle_packet_upcall(struct dp_netdev_pmd_thread *pmd, |
6508 | struct dp_packet *packet, | |
a260d966 | 6509 | const struct netdev_flow_key *key, |
82a48ead | 6510 | struct ofpbuf *actions, struct ofpbuf *put_actions) |
a260d966 PS |
6511 | { |
6512 | struct ofpbuf *add_actions; | |
6513 | struct dp_packet_batch b; | |
6514 | struct match match; | |
6515 | ovs_u128 ufid; | |
6516 | int error; | |
79f36875 | 6517 | uint64_t cycles = cycles_counter_update(&pmd->perf_stats); |
a260d966 PS |
6518 | |
6519 | match.tun_md.valid = false; | |
6520 | miniflow_expand(&key->mf, &match.flow); | |
6521 | ||
6522 | ofpbuf_clear(actions); | |
6523 | ofpbuf_clear(put_actions); | |
6524 | ||
6525 | dpif_flow_hash(pmd->dp->dpif, &match.flow, sizeof match.flow, &ufid); | |
6526 | error = dp_netdev_upcall(pmd, packet, &match.flow, &match.wc, | |
6527 | &ufid, DPIF_UC_MISS, NULL, actions, | |
6528 | put_actions); | |
6529 | if (OVS_UNLIKELY(error && error != ENOSPC)) { | |
6530 | dp_packet_delete(packet); | |
82a48ead | 6531 | return error; |
a260d966 PS |
6532 | } |
6533 | ||
6534 | /* The Netlink encoding of datapath flow keys cannot express | |
6535 | * wildcarding the presence of a VLAN tag. Instead, a missing VLAN | |
6536 | * tag is interpreted as exact match on the fact that there is no | |
6537 | * VLAN. Unless we refactor a lot of code that translates between | |
6538 | * Netlink and struct flow representations, we have to do the same | |
35fe9efb | 6539 | * here. This must be in sync with 'match' in dpif_netdev_flow_put(). */ |
f0fb825a EG |
6540 | if (!match.wc.masks.vlans[0].tci) { |
6541 | match.wc.masks.vlans[0].tci = htons(0xffff); | |
a260d966 PS |
6542 | } |
6543 | ||
6544 | /* We can't allow the packet batching in the next loop to execute | |
6545 | * the actions. Otherwise, if there are any slow path actions, | |
6546 | * we'll send the packet up twice. */ | |
72c84bc2 | 6547 | dp_packet_batch_init_packet(&b, packet); |
66e4ad8a | 6548 | dp_netdev_execute_actions(pmd, &b, true, &match.flow, |
b010be17 | 6549 | actions->data, actions->size); |
a260d966 PS |
6550 | |
6551 | add_actions = put_actions->size ? put_actions : actions; | |
6552 | if (OVS_LIKELY(error != ENOSPC)) { | |
6553 | struct dp_netdev_flow *netdev_flow; | |
6554 | ||
6555 | /* XXX: There's a race window where a flow covering this packet | |
6556 | * could have already been installed since we last did the flow | |
6557 | * lookup before upcall. This could be solved by moving the | |
6558 | * mutex lock outside the loop, but that's an awful long time | |
6559 | * to be locking everyone out of making flow installs. If we | |
6560 | * move to a per-core classifier, it would be reasonable. */ | |
6561 | ovs_mutex_lock(&pmd->flow_mutex); | |
3453b4d6 | 6562 | netdev_flow = dp_netdev_pmd_lookup_flow(pmd, key, NULL); |
a260d966 PS |
6563 | if (OVS_LIKELY(!netdev_flow)) { |
6564 | netdev_flow = dp_netdev_flow_add(pmd, &match, &ufid, | |
6565 | add_actions->data, | |
6566 | add_actions->size); | |
6567 | } | |
6568 | ovs_mutex_unlock(&pmd->flow_mutex); | |
60d8ccae YW |
6569 | uint32_t hash = dp_netdev_flow_hash(&netdev_flow->ufid); |
6570 | smc_insert(pmd, key, hash); | |
4c30b246 | 6571 | emc_probabilistic_insert(pmd, key, netdev_flow); |
a260d966 | 6572 | } |
79f36875 JS |
6573 | if (pmd_perf_metrics_enabled(pmd)) { |
6574 | /* Update upcall stats. */ | |
6575 | cycles = cycles_counter_update(&pmd->perf_stats) - cycles; | |
6576 | struct pmd_perf_stats *s = &pmd->perf_stats; | |
6577 | s->current.upcalls++; | |
6578 | s->current.upcall_cycles += cycles; | |
6579 | histogram_add_sample(&s->cycles_per_upcall, cycles); | |
6580 | } | |
82a48ead | 6581 | return error; |
a260d966 PS |
6582 | } |
6583 | ||
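The flow-install path above is a lookup/lock/recheck/insert sequence: another thread may have installed the same flow between the lockless miss and acquiring flow_mutex, so the lookup is repeated under the lock before adding. A hedged sketch of just that dance (hypothetical callbacks):

/* Sketch only: double-checked insert under a mutex. */
static void *
sketch_install_once(struct ovs_mutex *mutex,
                    void *(*lookup)(void), void *(*add)(void))
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    void *flow;

    ovs_mutex_lock(mutex);
    flow = lookup();          /* Re-check: may exist by now. */
    if (!flow) {
        flow = add();         /* Truly absent; install it. */
    }
    ovs_mutex_unlock(mutex);
    return flow;
}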
9bbf1c3d | 6584 | static inline void |
65f13b50 | 6585 | fast_path_processing(struct dp_netdev_pmd_thread *pmd, |
1895cc8d | 6586 | struct dp_packet_batch *packets_, |
60d8ccae | 6587 | struct netdev_flow_key **keys, |
9b4f08cd VDA |
6588 | struct dp_packet_flow_map *flow_map, |
6589 | uint8_t *index_map, | |
b010be17 | 6590 | odp_port_t in_port) |
9bbf1c3d | 6591 | { |
31c82130 | 6592 | const size_t cnt = dp_packet_batch_size(packets_); |
1a0d5831 | 6593 | #if !defined(__CHECKER__) && !defined(_WIN32) |
9bbf1c3d DDP |
6594 | const size_t PKT_ARRAY_SIZE = cnt; |
6595 | #else | |
1a0d5831 | 6596 | /* Sparse or MSVC doesn't like variable length array. */ |
cd159f1a | 6597 | enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST }; |
9bbf1c3d | 6598 | #endif |
31c82130 | 6599 | struct dp_packet *packet; |
3453b4d6 | 6600 | struct dpcls *cls; |
0de8783a | 6601 | struct dpcls_rule *rules[PKT_ARRAY_SIZE]; |
65f13b50 | 6602 | struct dp_netdev *dp = pmd->dp; |
82a48ead | 6603 | int upcall_ok_cnt = 0, upcall_fail_cnt = 0; |
3453b4d6 | 6604 | int lookup_cnt = 0, add_lookup_cnt; |
9bbf1c3d DDP |
6605 | bool any_miss; |
6606 | ||
e883448e | 6607 | for (size_t i = 0; i < cnt; i++) { |
0de8783a | 6608 | /* Key length is needed in all the cases, hash computed on demand. */ |
60d8ccae | 6609 | keys[i]->len = netdev_flow_key_size(miniflow_n_values(&keys[i]->mf)); |
9bbf1c3d | 6610 | } |
3453b4d6 JS |
6611 | /* Get the classifier for the in_port */ |
6612 | cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port); | |
6613 | if (OVS_LIKELY(cls)) { | |
60d8ccae YW |
6614 | any_miss = !dpcls_lookup(cls, (const struct netdev_flow_key **)keys, |
6615 | rules, cnt, &lookup_cnt); | |
3453b4d6 JS |
6616 | } else { |
6617 | any_miss = true; | |
6618 | memset(rules, 0, sizeof(rules)); | |
6619 | } | |
623540e4 EJ |
6620 | if (OVS_UNLIKELY(any_miss) && !fat_rwlock_tryrdlock(&dp->upcall_rwlock)) { |
6621 | uint64_t actions_stub[512 / 8], slow_stub[512 / 8]; | |
6622 | struct ofpbuf actions, put_actions; | |
623540e4 EJ |
6623 | |
6624 | ofpbuf_use_stub(&actions, actions_stub, sizeof actions_stub); | |
6625 | ofpbuf_use_stub(&put_actions, slow_stub, sizeof slow_stub); | |
6626 | ||
e883448e | 6627 | DP_PACKET_BATCH_FOR_EACH (i, packet, packets_) { |
0de8783a | 6628 | struct dp_netdev_flow *netdev_flow; |
623540e4 | 6629 | |
0de8783a | 6630 | if (OVS_LIKELY(rules[i])) { |
623540e4 EJ |
6631 | continue; |
6632 | } | |
6633 | ||
6634 | /* It's possible that an earlier slow path execution installed | |
0de8783a | 6635 | * a rule covering this flow. In this case, it's a lot cheaper |
623540e4 | 6636 | * to catch it here than execute a miss. */ |
60d8ccae | 6637 | netdev_flow = dp_netdev_pmd_lookup_flow(pmd, keys[i], |
3453b4d6 | 6638 | &add_lookup_cnt); |
623540e4 | 6639 | if (netdev_flow) { |
3453b4d6 | 6640 | lookup_cnt += add_lookup_cnt; |
0de8783a | 6641 | rules[i] = &netdev_flow->cr; |
623540e4 EJ |
6642 | continue; |
6643 | } | |
6644 | ||
60d8ccae | 6645 | int error = handle_packet_upcall(pmd, packet, keys[i], |
82a48ead JS |
6646 | &actions, &put_actions); |
6647 | ||
6648 | if (OVS_UNLIKELY(error)) { | |
6649 | upcall_fail_cnt++; | |
6650 | } else { | |
6651 | upcall_ok_cnt++; | |
6652 | } | |
623540e4 EJ |
6653 | } |
6654 | ||
6655 | ofpbuf_uninit(&actions); | |
6656 | ofpbuf_uninit(&put_actions); | |
6657 | fat_rwlock_unlock(&dp->upcall_rwlock); | |
ac8c2081 | 6658 | } else if (OVS_UNLIKELY(any_miss)) { |
e883448e | 6659 | DP_PACKET_BATCH_FOR_EACH (i, packet, packets_) { |
0de8783a | 6660 | if (OVS_UNLIKELY(!rules[i])) { |
31c82130 | 6661 | dp_packet_delete(packet); |
82a48ead | 6662 | upcall_fail_cnt++; |
ac8c2081 DDP |
6663 | } |
6664 | } | |
623540e4 | 6665 | } |
84d6d5eb | 6666 | |
e883448e | 6667 | DP_PACKET_BATCH_FOR_EACH (i, packet, packets_) { |
84d6d5eb | 6668 | struct dp_netdev_flow *flow; |
9b4f08cd VDA |
6669 | /* Get the original order of this packet in received batch. */ |
6670 | int recv_idx = index_map[i]; | |
6671 | uint16_t tcp_flags; | |
8cbf4f47 | 6672 | |
0de8783a | 6673 | if (OVS_UNLIKELY(!rules[i])) { |
84d6d5eb EJ |
6674 | continue; |
6675 | } | |
6676 | ||
84d6d5eb | 6677 | flow = dp_netdev_flow_cast(rules[i]); |
60d8ccae YW |
6678 | uint32_t hash = dp_netdev_flow_hash(&flow->ufid); |
6679 | smc_insert(pmd, keys[i], hash); | |
0de8783a | 6680 | |
60d8ccae | 6681 | emc_probabilistic_insert(pmd, keys[i], flow); |
9b4f08cd VDA |
6682 | /* Add these packets into the flow map in the same order |
6683 | * as received. | |
6684 | */ | |
6685 | tcp_flags = miniflow_get_tcp_flags(&keys[i]->mf); | |
6686 | packet_enqueue_to_flow_map(packet, flow, tcp_flags, | |
6687 | flow_map, recv_idx); | |
8cbf4f47 DDP |
6688 | } |
6689 | ||
82a48ead JS |
6690 | pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_MASKED_HIT, |
6691 | cnt - upcall_ok_cnt - upcall_fail_cnt); | |
6692 | pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_MASKED_LOOKUP, | |
6693 | lookup_cnt); | |
6694 | pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_MISS, | |
6695 | upcall_ok_cnt); | |
6696 | pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_LOST, | |
6697 | upcall_fail_cnt); | |
72865317 BP |
6698 | } |
6699 | ||
a90ed026 DDP |
6700 | /* Packets enter the datapath from a port (or from recirculation) here. |
6701 | * | |
02305520 FA |
6702 | * When 'md_is_valid' is true the metadata in 'packets' are already valid. |
6703 | * When false the metadata in 'packets' need to be initialized. */ | |
adcf00ba | 6704 | static void |
a90ed026 | 6705 | dp_netdev_input__(struct dp_netdev_pmd_thread *pmd, |
1895cc8d | 6706 | struct dp_packet_batch *packets, |
a90ed026 | 6707 | bool md_is_valid, odp_port_t port_no) |
9bbf1c3d | 6708 | { |
1a0d5831 | 6709 | #if !defined(__CHECKER__) && !defined(_WIN32) |
37eabc70 | 6710 | const size_t PKT_ARRAY_SIZE = dp_packet_batch_size(packets); |
9bbf1c3d | 6711 | #else |
1a0d5831 | 6712 | /* Sparse or MSVC doesn't like variable length array. */ |
cd159f1a | 6713 | enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST }; |
9bbf1c3d | 6714 | #endif |
47a45d86 KT |
6715 | OVS_ALIGNED_VAR(CACHE_LINE_SIZE) |
6716 | struct netdev_flow_key keys[PKT_ARRAY_SIZE]; | |
60d8ccae | 6717 | struct netdev_flow_key *missed_keys[PKT_ARRAY_SIZE]; |
f7ce4811 | 6718 | struct packet_batch_per_flow batches[PKT_ARRAY_SIZE]; |
72c84bc2 | 6719 | size_t n_batches; |
9b4f08cd VDA |
6720 | struct dp_packet_flow_map flow_map[PKT_ARRAY_SIZE]; |
6721 | uint8_t index_map[PKT_ARRAY_SIZE]; | |
6722 | size_t n_flows, i; | |
6723 | ||
3453b4d6 | 6724 | odp_port_t in_port; |
9bbf1c3d | 6725 | |
8aaa125d | 6726 | n_batches = 0; |
60d8ccae | 6727 | dfc_processing(pmd, packets, keys, missed_keys, batches, &n_batches, |
9b4f08cd VDA |
6728 | flow_map, &n_flows, index_map, md_is_valid, port_no); |
6729 | ||
72c84bc2 | 6730 | if (!dp_packet_batch_is_empty(packets)) { |
3453b4d6 JS |
6731 | /* Get ingress port from first packet's metadata. */ |
6732 | in_port = packets->packets[0]->md.in_port.odp_port; | |
60d8ccae | 6733 | fast_path_processing(pmd, packets, missed_keys, |
9b4f08cd | 6734 | flow_map, index_map, in_port); |
8aaa125d DDP |
6735 | } |
6736 | ||
9b4f08cd VDA |
6737 | /* Batch rest of packets which are in flow map. */ |
6738 | for (i = 0; i < n_flows; i++) { | |
6739 | struct dp_packet_flow_map *map = &flow_map[i]; | |
6740 | ||
6741 | if (OVS_UNLIKELY(!map->flow)) { | |
6742 | continue; | |
6743 | } | |
6744 | dp_netdev_queue_batches(map->packet, map->flow, map->tcp_flags, | |
6745 | batches, &n_batches); | |
6746 | } | |
6747 | ||
ad9f0581 BB |
6748 | /* All the flow batches need to be reset before any call to |
6749 | * packet_batch_per_flow_execute() as it could potentially trigger | |
6750 | * recirculation. When a packet matching flow ‘j’ happens to be | |
6751 | * recirculated, the nested call to dp_netdev_input__() could potentially | |
6752 | * classify the packet as matching another flow - say 'k'. It could happen | |
6753 | * that in the previous call to dp_netdev_input__() that same flow 'k' had | |
6754 | * already its own batches[k] still waiting to be served. So if its | |
6755 | * ‘batch’ member is not reset, the recirculated packet would be wrongly | |
6756 | * appended to batches[k] of the 1st call to dp_netdev_input__(). */ | |
603f2ce0 EJ |
6757 | for (i = 0; i < n_batches; i++) { |
6758 | batches[i].flow->batch = NULL; | |
6759 | } | |
6760 | ||
8aaa125d | 6761 | for (i = 0; i < n_batches; i++) { |
b010be17 | 6762 | packet_batch_per_flow_execute(&batches[i], pmd); |
9bbf1c3d DDP |
6763 | } |
6764 | } | |
6765 | ||
a90ed026 DDP |
6766 | static void |
6767 | dp_netdev_input(struct dp_netdev_pmd_thread *pmd, | |
1895cc8d | 6768 | struct dp_packet_batch *packets, |
a90ed026 DDP |
6769 | odp_port_t port_no) |
6770 | { | |
3453b4d6 | 6771 | dp_netdev_input__(pmd, packets, false, port_no); |
a90ed026 DDP |
6772 | } |
6773 | ||
6774 | static void | |
6775 | dp_netdev_recirculate(struct dp_netdev_pmd_thread *pmd, | |
1895cc8d | 6776 | struct dp_packet_batch *packets) |
a90ed026 | 6777 | { |
3453b4d6 | 6778 | dp_netdev_input__(pmd, packets, true, 0); |
a90ed026 DDP |
6779 | } |
6780 | ||
9080a111 | 6781 | struct dp_netdev_execute_aux { |
65f13b50 | 6782 | struct dp_netdev_pmd_thread *pmd; |
66e4ad8a | 6783 | const struct flow *flow; |
9080a111 JR |
6784 | }; |
6785 | ||
e4e74c3a AW |
6786 | static void |
6787 | dpif_netdev_register_dp_purge_cb(struct dpif *dpif, dp_purge_callback *cb, | |
6788 | void *aux) | |
6789 | { | |
6790 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
6791 | dp->dp_purge_aux = aux; | |
6792 | dp->dp_purge_cb = cb; | |
6793 | } | |
6794 | ||
6b31e073 | 6795 | static void |
623540e4 EJ |
6796 | dpif_netdev_register_upcall_cb(struct dpif *dpif, upcall_callback *cb, |
6797 | void *aux) | |
6b31e073 RW |
6798 | { |
6799 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
623540e4 | 6800 | dp->upcall_aux = aux; |
6b31e073 RW |
6801 | dp->upcall_cb = cb; |
6802 | } | |
6803 | ||
324c8374 IM |
6804 | static void |
6805 | dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd, | |
b010be17 | 6806 | bool purge) |
324c8374 IM |
6807 | { |
6808 | struct tx_port *tx; | |
6809 | struct dp_netdev_port *port; | |
6810 | long long interval; | |
6811 | ||
57eebbb4 | 6812 | HMAP_FOR_EACH (tx, node, &pmd->send_port_cache) { |
9f7a3035 | 6813 | if (!tx->port->dynamic_txqs) { |
324c8374 IM |
6814 | continue; |
6815 | } | |
b010be17 | 6816 | interval = pmd->ctx.now - tx->last_used; |
05f9e707 | 6817 | if (tx->qid >= 0 && (purge || interval >= XPS_TIMEOUT)) { |
324c8374 IM |
6818 | port = tx->port; |
6819 | ovs_mutex_lock(&port->txq_used_mutex); | |
6820 | port->txq_used[tx->qid]--; | |
6821 | ovs_mutex_unlock(&port->txq_used_mutex); | |
6822 | tx->qid = -1; | |
6823 | } | |
6824 | } | |
6825 | } | |
6826 | ||
6827 | static int | |
6828 | dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread *pmd, | |
b010be17 | 6829 | struct tx_port *tx) |
324c8374 IM |
6830 | { |
6831 | struct dp_netdev_port *port; | |
6832 | long long interval; | |
6833 | int i, min_cnt, min_qid; | |
6834 | ||
b010be17 IM |
6835 | interval = pmd->ctx.now - tx->last_used; |
6836 | tx->last_used = pmd->ctx.now; | |
324c8374 | 6837 | |
05f9e707 | 6838 | if (OVS_LIKELY(tx->qid >= 0 && interval < XPS_TIMEOUT)) { |
324c8374 IM |
6839 | return tx->qid; |
6840 | } | |
6841 | ||
6842 | port = tx->port; | |
6843 | ||
6844 | ovs_mutex_lock(&port->txq_used_mutex); | |
6845 | if (tx->qid >= 0) { | |
6846 | port->txq_used[tx->qid]--; | |
6847 | tx->qid = -1; | |
6848 | } | |
6849 | ||
6850 | min_cnt = -1; | |
6851 | min_qid = 0; | |
6852 | for (i = 0; i < netdev_n_txq(port->netdev); i++) { | |
6853 | if (port->txq_used[i] < min_cnt || min_cnt == -1) { | |
6854 | min_cnt = port->txq_used[i]; | |
6855 | min_qid = i; | |
6856 | } | |
6857 | } | |
6858 | ||
6859 | port->txq_used[min_qid]++; | |
6860 | tx->qid = min_qid; | |
6861 | ||
6862 | ovs_mutex_unlock(&port->txq_used_mutex); | |
6863 | ||
b010be17 | 6864 | dpif_netdev_xps_revalidate_pmd(pmd, false); |
324c8374 IM |
6865 | |
6866 | VLOG_DBG("Core %d: New TX queue ID %d for port \'%s\'.", | |
6867 | pmd->core_id, tx->qid, netdev_get_name(tx->port->netdev)); | |
6868 | return min_qid; | |
6869 | } | |
6870 | ||
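Stripped of the locking and revalidation, the queue choice above is a linear scan for the least-referenced tx queue, which is cheap because it only runs when the cached qid has expired. A hedged standalone form (hypothetical name):

/* Sketch only: pick the tx queue with the fewest current users. */
static int
sketch_pick_min_txq(const int *txq_used, int n_txq)
{
    int min_cnt = -1, min_qid = 0;

    for (int i = 0; i < n_txq; i++) {
        if (min_cnt == -1 || txq_used[i] < min_cnt) {
            min_cnt = txq_used[i];
            min_qid = i;
        }
    }
    return min_qid;           /* Caller bumps txq_used[min_qid]. */
}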
d0cca6c3 | 6871 | static struct tx_port * |
57eebbb4 DDP |
6872 | pmd_tnl_port_cache_lookup(const struct dp_netdev_pmd_thread *pmd, |
6873 | odp_port_t port_no) | |
6874 | { | |
6875 | return tx_port_lookup(&pmd->tnl_port_cache, port_no); | |
6876 | } | |
6877 | ||
6878 | static struct tx_port * | |
6879 | pmd_send_port_cache_lookup(const struct dp_netdev_pmd_thread *pmd, | |
6880 | odp_port_t port_no) | |
d0cca6c3 | 6881 | { |
57eebbb4 | 6882 | return tx_port_lookup(&pmd->send_port_cache, port_no); |
d0cca6c3 DDP |
6883 | } |
6884 | ||
a36de779 | 6885 | static int |
d0cca6c3 | 6886 | push_tnl_action(const struct dp_netdev_pmd_thread *pmd, |
1895cc8d PS |
6887 | const struct nlattr *attr, |
6888 | struct dp_packet_batch *batch) | |
a36de779 | 6889 | { |
d0cca6c3 | 6890 | struct tx_port *tun_port; |
a36de779 | 6891 | const struct ovs_action_push_tnl *data; |
4c742796 | 6892 | int err; |
a36de779 PS |
6893 | |
6894 | data = nl_attr_get(attr); | |
6895 | ||
81765c00 | 6896 | tun_port = pmd_tnl_port_cache_lookup(pmd, data->tnl_port); |
a36de779 | 6897 | if (!tun_port) { |
4c742796 PS |
6898 | err = -EINVAL; |
6899 | goto error; | |
a36de779 | 6900 | } |
324c8374 | 6901 | err = netdev_push_header(tun_port->port->netdev, batch, data); |
4c742796 PS |
6902 | if (!err) { |
6903 | return 0; | |
6904 | } | |
6905 | error: | |
6906 | dp_packet_delete_batch(batch, true); | |
6907 | return err; | |
a36de779 PS |
6908 | } |
6909 | ||
66525ef3 PS |
6910 | static void |
6911 | dp_execute_userspace_action(struct dp_netdev_pmd_thread *pmd, | |
7d7ded7a | 6912 | struct dp_packet *packet, bool should_steal, |
66525ef3 PS |
6913 | struct flow *flow, ovs_u128 *ufid, |
6914 | struct ofpbuf *actions, | |
b010be17 | 6915 | const struct nlattr *userdata) |
66525ef3 PS |
6916 | { |
6917 | struct dp_packet_batch b; | |
6918 | int error; | |
6919 | ||
6920 | ofpbuf_clear(actions); | |
6921 | ||
6922 | error = dp_netdev_upcall(pmd, packet, flow, NULL, ufid, | |
6923 | DPIF_UC_ACTION, userdata, actions, | |
6924 | NULL); | |
6925 | if (!error || error == ENOSPC) { | |
72c84bc2 | 6926 | dp_packet_batch_init_packet(&b, packet); |
7d7ded7a | 6927 | dp_netdev_execute_actions(pmd, &b, should_steal, flow, |
b010be17 | 6928 | actions->data, actions->size); |
7d7ded7a | 6929 | } else if (should_steal) { |
66525ef3 PS |
6930 | dp_packet_delete(packet); |
6931 | } | |
6932 | } | |
6933 | ||
a36de779 | 6934 | static void |
1895cc8d | 6935 | dp_execute_cb(void *aux_, struct dp_packet_batch *packets_, |
7d7ded7a | 6936 | const struct nlattr *a, bool should_steal) |
4b27db64 | 6937 | OVS_NO_THREAD_SAFETY_ANALYSIS |
9080a111 JR |
6938 | { |
6939 | struct dp_netdev_execute_aux *aux = aux_; | |
623540e4 | 6940 | uint32_t *depth = recirc_depth_get(); |
28e2fa02 DDP |
6941 | struct dp_netdev_pmd_thread *pmd = aux->pmd; |
6942 | struct dp_netdev *dp = pmd->dp; | |
09f9da0b | 6943 | int type = nl_attr_type(a); |
d0cca6c3 | 6944 | struct tx_port *p; |
9080a111 | 6945 | |
09f9da0b JR |
6946 | switch ((enum ovs_action_attr)type) { |
6947 | case OVS_ACTION_ATTR_OUTPUT: | |
57eebbb4 | 6948 | p = pmd_send_port_cache_lookup(pmd, nl_attr_get_odp_port(a)); |
26a5075b | 6949 | if (OVS_LIKELY(p)) { |
009e0033 IM |
6950 | struct dp_packet *packet; |
6951 | struct dp_packet_batch out; | |
347ba9bb | 6952 | |
7d7ded7a | 6953 | if (!should_steal) { |
009e0033 IM |
6954 | dp_packet_batch_clone(&out, packets_); |
6955 | dp_packet_batch_reset_cutlen(packets_); | |
6956 | packets_ = &out; | |
324c8374 | 6957 | } |
009e0033 | 6958 | dp_packet_batch_apply_cutlen(packets_); |
347ba9bb | 6959 | |
009e0033 IM |
6960 | #ifdef DPDK_NETDEV |
6961 | if (OVS_UNLIKELY(!dp_packet_batch_is_empty(&p->output_pkts) | |
6962 | && packets_->packets[0]->source | |
6963 | != p->output_pkts.packets[0]->source)) { | |
6964 | /* XXX: netdev-dpdk assumes that all packets in a single | |
6965 | * output batch have the same source. Flush here to |
6966 | * avoid memory access issues. */ | |
6967 | dp_netdev_pmd_flush_output_on_port(pmd, p); | |
6968 | } | |
6969 | #endif | |
c71ea3c4 IM |
6970 | if (dp_packet_batch_size(&p->output_pkts) |
6971 | + dp_packet_batch_size(packets_) > NETDEV_MAX_BURST) { | |
6972 | /* Flush here to avoid overflow. */ | |
009e0033 IM |
6973 | dp_netdev_pmd_flush_output_on_port(pmd, p); |
6974 | } | |
c71ea3c4 IM |
6975 | |
6976 | if (dp_packet_batch_is_empty(&p->output_pkts)) { | |
6977 | pmd->n_output_batches++; | |
6978 | } | |
6979 | ||
e883448e | 6980 | DP_PACKET_BATCH_FOR_EACH (i, packet, packets_) { |
58ed6df0 IM |
6981 | p->output_pkts_rxqs[dp_packet_batch_size(&p->output_pkts)] = |
6982 | pmd->ctx.last_rxq; | |
009e0033 IM |
6983 | dp_packet_batch_add(&p->output_pkts, packet); |
6984 | } | |
ac8c2081 | 6985 | return; |
8a4e3a85 | 6986 | } |
09f9da0b JR |
6987 | break; |
6988 | ||
a36de779 | 6989 | case OVS_ACTION_ATTR_TUNNEL_PUSH: |
47e1b3b6 IM |
6990 | if (should_steal) { |
6991 | /* We're requested to push the tunnel header, but we also need to | |
6992 | * take ownership of these packets. Thus, we can avoid performing | |
6993 | * the action, because the caller will not use the result anyway. | |
6994 | * Just break to free the batch. */ | |
6995 | break; | |
a36de779 | 6996 | } |
47e1b3b6 IM |
6997 | dp_packet_batch_apply_cutlen(packets_); |
6998 | push_tnl_action(pmd, a, packets_); | |
6999 | return; | |
a36de779 PS |
7000 | |
7001 | case OVS_ACTION_ATTR_TUNNEL_POP: | |
7002 | if (*depth < MAX_RECIRC_DEPTH) { | |
aaca4fe0 | 7003 | struct dp_packet_batch *orig_packets_ = packets_; |
8611f9a4 | 7004 | odp_port_t portno = nl_attr_get_odp_port(a); |
a36de779 | 7005 | |
57eebbb4 | 7006 | p = pmd_tnl_port_cache_lookup(pmd, portno); |
a36de779 | 7007 | if (p) { |
1895cc8d | 7008 | struct dp_packet_batch tnl_pkt; |
a36de779 | 7009 | |
7d7ded7a | 7010 | if (!should_steal) { |
aaca4fe0 WT |
7011 | dp_packet_batch_clone(&tnl_pkt, packets_); |
7012 | packets_ = &tnl_pkt; | |
7013 | dp_packet_batch_reset_cutlen(orig_packets_); | |
a36de779 PS |
7014 | } |
7015 | ||
aaca4fe0 WT |
7016 | dp_packet_batch_apply_cutlen(packets_); |
7017 | ||
324c8374 | 7018 | netdev_pop_header(p->port->netdev, packets_); |
72c84bc2 | 7019 | if (dp_packet_batch_is_empty(packets_)) { |
1c8f98d9 PS |
7020 | return; |
7021 | } | |
9235b479 | 7022 | |
72c84bc2 | 7023 | struct dp_packet *packet; |
e883448e | 7024 | DP_PACKET_BATCH_FOR_EACH (i, packet, packets_) { |
72c84bc2 | 7025 | packet->md.in_port.odp_port = portno; |
a36de779 | 7026 | } |
9235b479 PS |
7027 | |
7028 | (*depth)++; | |
7029 | dp_netdev_recirculate(pmd, packets_); | |
7030 | (*depth)--; | |
a36de779 PS |
7031 | return; |
7032 | } | |
7033 | } | |
7034 | break; | |
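/* Worked example of the depth guard above (illustrative): every nested
 * dp_netdev_recirculate() call re-enters the datapath with '*depth'
 * incremented, so with MAX_RECIRC_DEPTH == 6 a packet can take at most
 * six pop-and-recirculate rounds:
 *
 *     depth 0 -> pop -> recirculate -> depth 1 -> ... -> depth 6
 *
 * At depth 6 the 'if' fails, control falls through to 'break', and the
 * batch is freed by dp_packet_delete_batch() at the end of this function. */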
7035 | ||
623540e4 EJ |
7036 | case OVS_ACTION_ATTR_USERSPACE: |
7037 | if (!fat_rwlock_tryrdlock(&dp->upcall_rwlock)) { | |
aaca4fe0 | 7038 | struct dp_packet_batch *orig_packets_ = packets_; |
623540e4 | 7039 | const struct nlattr *userdata; |
aaca4fe0 | 7040 | struct dp_packet_batch usr_pkt; |
623540e4 EJ |
7041 | struct ofpbuf actions; |
7042 | struct flow flow; | |
7af12bd7 | 7043 | ovs_u128 ufid; |
aaca4fe0 | 7044 | bool clone = false; |
4fc65926 | 7045 | |
623540e4 EJ |
7046 | userdata = nl_attr_find_nested(a, OVS_USERSPACE_ATTR_USERDATA); |
7047 | ofpbuf_init(&actions, 0); | |
8cbf4f47 | 7048 | |
aaca4fe0 | 7049 | if (packets_->trunc) { |
7d7ded7a | 7050 | if (!should_steal) { |
aaca4fe0 WT |
7051 | dp_packet_batch_clone(&usr_pkt, packets_); |
7052 | packets_ = &usr_pkt; | |
aaca4fe0 WT |
7053 | clone = true; |
7054 | dp_packet_batch_reset_cutlen(orig_packets_); | |
7055 | } | |
7056 | ||
7057 | dp_packet_batch_apply_cutlen(packets_); | |
7058 | } | |
7059 | ||
72c84bc2 | 7060 | struct dp_packet *packet; |
e883448e | 7061 | DP_PACKET_BATCH_FOR_EACH (i, packet, packets_) { |
72c84bc2 | 7062 | flow_extract(packet, &flow); |
7af12bd7 | 7063 | dpif_flow_hash(dp->dpif, &flow, sizeof flow, &ufid); |
7d7ded7a | 7064 | dp_execute_userspace_action(pmd, packet, should_steal, &flow, |
b010be17 | 7065 | &ufid, &actions, userdata); |
db73f716 | 7066 | } |
aaca4fe0 WT |
7067 | |
7068 | if (clone) { | |
7069 | dp_packet_delete_batch(packets_, true); | |
7070 | } | |
7071 | ||
623540e4 EJ |
7072 | ofpbuf_uninit(&actions); |
7073 | fat_rwlock_unlock(&dp->upcall_rwlock); | |
6b31e073 | 7074 | |
ac8c2081 DDP |
7075 | return; |
7076 | } | |
09f9da0b | 7077 | break; |
572f732a | 7078 | |
adcf00ba AZ |
7079 | case OVS_ACTION_ATTR_RECIRC: |
7080 | if (*depth < MAX_RECIRC_DEPTH) { | |
1895cc8d | 7081 | struct dp_packet_batch recirc_pkts; |
572f732a | 7082 | |
7d7ded7a | 7083 | if (!should_steal) { |
1895cc8d PS |
7084 | dp_packet_batch_clone(&recirc_pkts, packets_); |
7085 | packets_ = &recirc_pkts; | |
28e2fa02 | 7086 | } |
8cbf4f47 | 7087 | |
72c84bc2 | 7088 | struct dp_packet *packet; |
e883448e | 7089 | DP_PACKET_BATCH_FOR_EACH (i, packet, packets_) { |
72c84bc2 | 7090 | packet->md.recirc_id = nl_attr_get_u32(a); |
8cbf4f47 | 7091 | } |
28e2fa02 DDP |
7092 | |
7093 | (*depth)++; | |
1895cc8d | 7094 | dp_netdev_recirculate(pmd, packets_); |
adcf00ba AZ |
7095 | (*depth)--; |
7096 | ||
ac8c2081 | 7097 | return; |
adcf00ba | 7098 | } |
ac8c2081 DDP |
7099 | |
7100 | VLOG_WARN("Packet dropped. Max recirculation depth exceeded."); | |
572f732a | 7101 | break; |
572f732a | 7102 | |
5cf3edb3 DDP |
7103 | case OVS_ACTION_ATTR_CT: { |
7104 | const struct nlattr *b; | |
a76a37ef | 7105 | bool force = false; |
5cf3edb3 DDP |
7106 | bool commit = false; |
7107 | unsigned int left; | |
7108 | uint16_t zone = 0; | |
7109 | const char *helper = NULL; | |
7110 | const uint32_t *setmark = NULL; | |
7111 | const struct ovs_key_ct_labels *setlabel = NULL; | |
4cddb1f0 DB |
7112 | struct nat_action_info_t nat_action_info; |
7113 | struct nat_action_info_t *nat_action_info_ref = NULL; | |
7114 | bool nat_config = false; | |
5cf3edb3 DDP |
7115 | |
7116 | NL_ATTR_FOR_EACH_UNSAFE (b, left, nl_attr_get(a), | |
7117 | nl_attr_get_size(a)) { | |
7118 | enum ovs_ct_attr sub_type = nl_attr_type(b); | |
7119 | ||
7120 | switch (sub_type) { | |
b80e259f | 7121 | case OVS_CT_ATTR_FORCE_COMMIT: |
a76a37ef JR |
7122 | force = true; |
7123 | /* fall through. */ | |
5cf3edb3 DDP |
7124 | case OVS_CT_ATTR_COMMIT: |
7125 | commit = true; | |
7126 | break; | |
7127 | case OVS_CT_ATTR_ZONE: | |
7128 | zone = nl_attr_get_u16(b); | |
7129 | break; | |
7130 | case OVS_CT_ATTR_HELPER: | |
7131 | helper = nl_attr_get_string(b); | |
7132 | break; | |
7133 | case OVS_CT_ATTR_MARK: | |
7134 | setmark = nl_attr_get(b); | |
7135 | break; | |
7136 | case OVS_CT_ATTR_LABELS: | |
7137 | setlabel = nl_attr_get(b); | |
7138 | break; | |
8e83854c JR |
7139 | case OVS_CT_ATTR_EVENTMASK: |
7140 | /* Silently ignored, as the userspace datapath does not generate | |
7141 | * netlink events. */ | |
7142 | break; | |
4cddb1f0 DB |
7143 | case OVS_CT_ATTR_NAT: { |
7144 | const struct nlattr *b_nest; | |
7145 | unsigned int left_nest; | |
7146 | bool ip_min_specified = false; | |
7147 | bool proto_num_min_specified = false; | |
7148 | bool ip_max_specified = false; | |
7149 | bool proto_num_max_specified = false; | |
7150 | memset(&nat_action_info, 0, sizeof nat_action_info); | |
7151 | nat_action_info_ref = &nat_action_info; | |
7152 | ||
7153 | NL_NESTED_FOR_EACH_UNSAFE (b_nest, left_nest, b) { | |
7154 | enum ovs_nat_attr sub_type_nest = nl_attr_type(b_nest); | |
7155 | ||
7156 | switch (sub_type_nest) { | |
7157 | case OVS_NAT_ATTR_SRC: | |
7158 | case OVS_NAT_ATTR_DST: | |
7159 | nat_config = true; | |
7160 | nat_action_info.nat_action |= | |
7161 | ((sub_type_nest == OVS_NAT_ATTR_SRC) | |
7162 | ? NAT_ACTION_SRC : NAT_ACTION_DST); | |
7163 | break; | |
7164 | case OVS_NAT_ATTR_IP_MIN: | |
7165 | memcpy(&nat_action_info.min_addr, | |
7166 | nl_attr_get(b_nest), | |
7167 | nl_attr_get_size(b_nest)); | |
7168 | ip_min_specified = true; | |
7169 | break; | |
7170 | case OVS_NAT_ATTR_IP_MAX: | |
7171 | memcpy(&nat_action_info.max_addr, | |
7172 | nl_attr_get(b_nest), | |
7173 | nl_attr_get_size(b_nest)); | |
7174 | ip_max_specified = true; | |
7175 | break; | |
7176 | case OVS_NAT_ATTR_PROTO_MIN: | |
7177 | nat_action_info.min_port = | |
7178 | nl_attr_get_u16(b_nest); | |
7179 | proto_num_min_specified = true; | |
7180 | break; | |
7181 | case OVS_NAT_ATTR_PROTO_MAX: | |
7182 | nat_action_info.max_port = | |
7183 | nl_attr_get_u16(b_nest); | |
7184 | proto_num_max_specified = true; | |
7185 | break; | |
7186 | case OVS_NAT_ATTR_PERSISTENT: | |
7187 | case OVS_NAT_ATTR_PROTO_HASH: | |
7188 | case OVS_NAT_ATTR_PROTO_RANDOM: | |
7189 | break; | |
7190 | case OVS_NAT_ATTR_UNSPEC: | |
7191 | case __OVS_NAT_ATTR_MAX: | |
7192 | OVS_NOT_REACHED(); | |
7193 | } | |
7194 | } | |
7195 | ||
7196 | if (ip_min_specified && !ip_max_specified) { | |
7197 | nat_action_info.max_addr = nat_action_info.min_addr; | |
7198 | } | |
7199 | if (proto_num_min_specified && !proto_num_max_specified) { | |
7200 | nat_action_info.max_port = nat_action_info.min_port; | |
7201 | } | |
7202 | if (proto_num_min_specified || proto_num_max_specified) { | |
7203 | if (nat_action_info.nat_action & NAT_ACTION_SRC) { | |
7204 | nat_action_info.nat_action |= NAT_ACTION_SRC_PORT; | |
7205 | } else if (nat_action_info.nat_action & NAT_ACTION_DST) { | |
7206 | nat_action_info.nat_action |= NAT_ACTION_DST_PORT; | |
7207 | } | |
7208 | } | |
7209 | break; | |
7210 | } | |
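/* Example of the NAT range defaulting above (illustrative): an action
 * such as ct(commit,nat(src=10.0.0.1)) carries only OVS_NAT_ATTR_IP_MIN,
 * so 'max_addr' is copied from 'min_addr' and the range collapses to one
 * address.  A lone OVS_NAT_ATTR_PROTO_MIN of 4000 likewise yields the
 * port range [4000, 4000], and any port attribute additionally sets
 * NAT_ACTION_SRC_PORT or NAT_ACTION_DST_PORT to match the direction. */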
5cf3edb3 DDP |
7211 | case OVS_CT_ATTR_UNSPEC: |
7212 | case __OVS_CT_ATTR_MAX: | |
7213 | OVS_NOT_REACHED(); | |
7214 | } | |
7215 | } | |
7216 | ||
4cddb1f0 DB |
7217 | /* We won't be able to function properly in this case, hence |
7218 | * complain loudly. */ | |
7219 | if (nat_config && !commit) { | |
7220 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5); | |
7221 | VLOG_WARN_RL(&rl, "NAT specified without commit."); | |
7222 | } | |
7223 | ||
a76a37ef | 7224 | conntrack_execute(&dp->conntrack, packets_, aux->flow->dl_type, force, |
bd7d93f8 | 7225 | commit, zone, setmark, setlabel, aux->flow->tp_src, |
b010be17 | 7226 | aux->flow->tp_dst, helper, nat_action_info_ref, |
05f9e707 | 7227 | pmd->ctx.now / 1000); |
07659514 | 7228 | break; |
5cf3edb3 | 7229 | } |
07659514 | 7230 | |
5dddf960 | 7231 | case OVS_ACTION_ATTR_METER: |
4b27db64 | 7232 | dp_netdev_run_meter(pmd->dp, packets_, nl_attr_get_u32(a), |
b010be17 | 7233 | pmd->ctx.now); |
4b27db64 JR |
7234 | break; |
7235 | ||
09f9da0b JR |
7236 | case OVS_ACTION_ATTR_PUSH_VLAN: |
7237 | case OVS_ACTION_ATTR_POP_VLAN: | |
7238 | case OVS_ACTION_ATTR_PUSH_MPLS: | |
7239 | case OVS_ACTION_ATTR_POP_MPLS: | |
7240 | case OVS_ACTION_ATTR_SET: | |
6d670e7f | 7241 | case OVS_ACTION_ATTR_SET_MASKED: |
09f9da0b | 7242 | case OVS_ACTION_ATTR_SAMPLE: |
53e1d6f1 | 7243 | case OVS_ACTION_ATTR_HASH: |
09f9da0b | 7244 | case OVS_ACTION_ATTR_UNSPEC: |
aaca4fe0 | 7245 | case OVS_ACTION_ATTR_TRUNC: |
6fcecb85 YY |
7246 | case OVS_ACTION_ATTR_PUSH_ETH: |
7247 | case OVS_ACTION_ATTR_POP_ETH: | |
535e3acf | 7248 | case OVS_ACTION_ATTR_CLONE: |
f59cb331 YY |
7249 | case OVS_ACTION_ATTR_PUSH_NSH: |
7250 | case OVS_ACTION_ATTR_POP_NSH: | |
1fe178d2 | 7251 | case OVS_ACTION_ATTR_CT_CLEAR: |
09f9da0b JR |
7252 | case __OVS_ACTION_ATTR_MAX: |
7253 | OVS_NOT_REACHED(); | |
da546e07 | 7254 | } |
ac8c2081 | 7255 | |
7d7ded7a | 7256 | dp_packet_delete_batch(packets_, should_steal); |
98403001 BP |
7257 | } |
7258 | ||
4edb9ae9 | 7259 | static void |
65f13b50 | 7260 | dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd, |
1895cc8d | 7261 | struct dp_packet_batch *packets, |
7d7ded7a | 7262 | bool should_steal, const struct flow *flow, |
b010be17 | 7263 | const struct nlattr *actions, size_t actions_len) |
72865317 | 7264 | { |
b010be17 | 7265 | struct dp_netdev_execute_aux aux = { pmd, flow }; |
9080a111 | 7266 | |
7d7ded7a | 7267 | odp_execute_actions(&aux, packets, should_steal, actions, |
8cbf4f47 | 7268 | actions_len, dp_execute_cb); |
72865317 BP |
7269 | } |
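/* Usage sketch (illustrative): odp_execute_actions() interprets the
 * generic actions itself and calls back into dp_execute_cb() for the
 * datapath-specific ones (output, tunnel push/pop, userspace, recirc,
 * ct, meter).  A typical caller looks like:
 *
 *     struct dp_packet_batch batch;
 *     dp_packet_batch_init_packet(&batch, packet);
 *     dp_netdev_execute_actions(pmd, &batch, true, &flow,
 *                               actions, actions_len);
 *
 * With 'should_steal' true the batch is consumed and must not be touched
 * by the caller afterwards. */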
7270 | ||
4d4e68ed DDP |
7271 | struct dp_netdev_ct_dump { |
7272 | struct ct_dpif_dump_state up; | |
7273 | struct conntrack_dump dump; | |
7274 | struct conntrack *ct; | |
7275 | struct dp_netdev *dp; | |
7276 | }; | |
7277 | ||
7278 | static int | |
7279 | dpif_netdev_ct_dump_start(struct dpif *dpif, struct ct_dpif_dump_state **dump_, | |
ded30c74 | 7280 | const uint16_t *pzone, int *ptot_bkts) |
4d4e68ed DDP |
7281 | { |
7282 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
7283 | struct dp_netdev_ct_dump *dump; | |
7284 | ||
7285 | dump = xzalloc(sizeof *dump); | |
7286 | dump->dp = dp; | |
7287 | dump->ct = &dp->conntrack; | |
7288 | ||
ded30c74 | 7289 | conntrack_dump_start(&dp->conntrack, &dump->dump, pzone, ptot_bkts); |
4d4e68ed DDP |
7290 | |
7291 | *dump_ = &dump->up; | |
7292 | ||
7293 | return 0; | |
7294 | } | |
7295 | ||
7296 | static int | |
7297 | dpif_netdev_ct_dump_next(struct dpif *dpif OVS_UNUSED, | |
7298 | struct ct_dpif_dump_state *dump_, | |
7299 | struct ct_dpif_entry *entry) | |
7300 | { | |
7301 | struct dp_netdev_ct_dump *dump; | |
7302 | ||
7303 | INIT_CONTAINER(dump, dump_, up); | |
7304 | ||
7305 | return conntrack_dump_next(&dump->dump, entry); | |
7306 | } | |
7307 | ||
7308 | static int | |
7309 | dpif_netdev_ct_dump_done(struct dpif *dpif OVS_UNUSED, | |
7310 | struct ct_dpif_dump_state *dump_) | |
7311 | { | |
7312 | struct dp_netdev_ct_dump *dump; | |
7313 | int err; | |
7314 | ||
7315 | INIT_CONTAINER(dump, dump_, up); | |
7316 | ||
7317 | err = conntrack_dump_done(&dump->dump); | |
7318 | ||
7319 | free(dump); | |
7320 | ||
7321 | return err; | |
7322 | } | |
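/* The three functions above implement the ct_dpif dump contract
 * (start / next / done).  A consumer loop looks roughly like this
 * (sketch, error handling elided):
 *
 *     struct ct_dpif_dump_state *dump;
 *     struct ct_dpif_entry entry;
 *     int tot_bkts;
 *
 *     dpif_netdev_ct_dump_start(dpif, &dump, NULL, &tot_bkts);
 *     while (!dpif_netdev_ct_dump_next(dpif, dump, &entry)) {
 *         ...consume 'entry'...
 *     }
 *     dpif_netdev_ct_dump_done(dpif, dump);
 *
 * conntrack_dump_next() returns EOF once the table is exhausted, which
 * ends the loop. */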
7323 | ||
5d9cbb4c | 7324 | static int |
817a7657 YHW |
7325 | dpif_netdev_ct_flush(struct dpif *dpif, const uint16_t *zone, |
7326 | const struct ct_dpif_tuple *tuple) | |
5d9cbb4c DDP |
7327 | { |
7328 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
7329 | ||
817a7657 | 7330 | if (tuple) { |
271e48a0 | 7331 | return conntrack_flush_tuple(&dp->conntrack, tuple, zone ? *zone : 0); |
817a7657 | 7332 | } |
5d9cbb4c DDP |
7333 | return conntrack_flush(&dp->conntrack, zone); |
7334 | } | |
7335 | ||
c92339ad DB |
7336 | static int |
7337 | dpif_netdev_ct_set_maxconns(struct dpif *dpif, uint32_t maxconns) | |
7338 | { | |
7339 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
7340 | ||
7341 | return conntrack_set_maxconns(&dp->conntrack, maxconns); | |
7342 | } | |
7343 | ||
7344 | static int | |
7345 | dpif_netdev_ct_get_maxconns(struct dpif *dpif, uint32_t *maxconns) | |
7346 | { | |
7347 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
7348 | ||
7349 | return conntrack_get_maxconns(&dp->conntrack, maxconns); | |
7350 | } | |
7351 | ||
875075b3 DB |
7352 | static int |
7353 | dpif_netdev_ct_get_nconns(struct dpif *dpif, uint32_t *nconns) | |
7354 | { | |
7355 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
7356 | ||
7357 | return conntrack_get_nconns(&dp->conntrack, nconns); | |
7358 | } | |
7359 | ||
4ea96698 DB |
7360 | static int |
7361 | dpif_netdev_ipf_set_enabled(struct dpif *dpif, bool v6, bool enable) | |
7362 | { | |
7363 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
7364 | return ipf_set_enabled(conntrack_ipf_ctx(&dp->conntrack), v6, enable); | |
7365 | } | |
7366 | ||
7367 | static int | |
7368 | dpif_netdev_ipf_set_min_frag(struct dpif *dpif, bool v6, uint32_t min_frag) | |
7369 | { | |
7370 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
7371 | return ipf_set_min_frag(conntrack_ipf_ctx(&dp->conntrack), v6, min_frag); | |
7372 | } | |
7373 | ||
7374 | static int | |
7375 | dpif_netdev_ipf_set_max_nfrags(struct dpif *dpif, uint32_t max_frags) | |
7376 | { | |
7377 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
7378 | return ipf_set_max_nfrags(conntrack_ipf_ctx(&dp->conntrack), max_frags); | |
7379 | } | |
7380 | ||
7381 | /* Adjust this function if 'dpif_ipf_status' and 'ipf_status' were to | |
7382 | * diverge. */ | |
7383 | static int | |
7384 | dpif_netdev_ipf_get_status(struct dpif *dpif, | |
7385 | struct dpif_ipf_status *dpif_ipf_status) | |
7386 | { | |
7387 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
7388 | ipf_get_status(conntrack_ipf_ctx(&dp->conntrack), | |
7389 | (struct ipf_status *) dpif_ipf_status); | |
7390 | return 0; | |
7391 | } | |
7392 | ||
7393 | static int | |
7394 | dpif_netdev_ipf_dump_start(struct dpif *dpif OVS_UNUSED, | |
7395 | struct ipf_dump_ctx **ipf_dump_ctx) | |
7396 | { | |
7397 | return ipf_dump_start(ipf_dump_ctx); | |
7398 | } | |
7399 | ||
7400 | static int | |
7401 | dpif_netdev_ipf_dump_next(struct dpif *dpif, void *ipf_dump_ctx, char **dump) | |
7402 | { | |
7403 | struct dp_netdev *dp = get_dp_netdev(dpif); | |
7404 | return ipf_dump_next(conntrack_ipf_ctx(&dp->conntrack), ipf_dump_ctx, | |
7405 | dump); | |
7406 | } | |
7407 | ||
7408 | static int | |
7409 | dpif_netdev_ipf_dump_done(struct dpif *dpif OVS_UNUSED, void *ipf_dump_ctx) | |
7410 | { | |
7411 | return ipf_dump_done(ipf_dump_ctx); | |
7413 | } | |
7414 | ||
72865317 | 7415 | const struct dpif_class dpif_netdev_class = { |
72865317 | 7416 | "netdev", |
6553d06b | 7417 | dpif_netdev_init, |
2197d7ab | 7418 | dpif_netdev_enumerate, |
0aeaabc8 | 7419 | dpif_netdev_port_open_type, |
72865317 BP |
7420 | dpif_netdev_open, |
7421 | dpif_netdev_close, | |
7dab847a | 7422 | dpif_netdev_destroy, |
e4cfed38 PS |
7423 | dpif_netdev_run, |
7424 | dpif_netdev_wait, | |
72865317 | 7425 | dpif_netdev_get_stats, |
72865317 BP |
7426 | dpif_netdev_port_add, |
7427 | dpif_netdev_port_del, | |
3eb67853 | 7428 | dpif_netdev_port_set_config, |
72865317 BP |
7429 | dpif_netdev_port_query_by_number, |
7430 | dpif_netdev_port_query_by_name, | |
98403001 | 7431 | NULL, /* port_get_pid */ |
b0ec0f27 BP |
7432 | dpif_netdev_port_dump_start, |
7433 | dpif_netdev_port_dump_next, | |
7434 | dpif_netdev_port_dump_done, | |
72865317 BP |
7435 | dpif_netdev_port_poll, |
7436 | dpif_netdev_port_poll_wait, | |
72865317 | 7437 | dpif_netdev_flow_flush, |
ac64794a BP |
7438 | dpif_netdev_flow_dump_create, |
7439 | dpif_netdev_flow_dump_destroy, | |
7440 | dpif_netdev_flow_dump_thread_create, | |
7441 | dpif_netdev_flow_dump_thread_destroy, | |
704a1e09 | 7442 | dpif_netdev_flow_dump_next, |
1a0c894a | 7443 | dpif_netdev_operate, |
6b31e073 RW |
7444 | NULL, /* recv_set */ |
7445 | NULL, /* handlers_set */ | |
d4f6865c | 7446 | dpif_netdev_set_config, |
5bf93d67 | 7447 | dpif_netdev_queue_to_priority, |
6b31e073 RW |
7448 | NULL, /* recv */ |
7449 | NULL, /* recv_wait */ | |
7450 | NULL, /* recv_purge */ | |
e4e74c3a | 7451 | dpif_netdev_register_dp_purge_cb, |
6b31e073 RW |
7452 | dpif_netdev_register_upcall_cb, |
7453 | dpif_netdev_enable_upcall, | |
7454 | dpif_netdev_disable_upcall, | |
b5cbbcf6 | 7455 | dpif_netdev_get_datapath_version, |
4d4e68ed DDP |
7456 | dpif_netdev_ct_dump_start, |
7457 | dpif_netdev_ct_dump_next, | |
7458 | dpif_netdev_ct_dump_done, | |
5d9cbb4c | 7459 | dpif_netdev_ct_flush, |
c92339ad DB |
7460 | dpif_netdev_ct_set_maxconns, |
7461 | dpif_netdev_ct_get_maxconns, | |
875075b3 | 7462 | dpif_netdev_ct_get_nconns, |
cd015a11 YHW |
7463 | NULL, /* ct_set_limits */ |
7464 | NULL, /* ct_get_limits */ | |
7465 | NULL, /* ct_del_limits */ | |
4ea96698 DB |
7466 | dpif_netdev_ipf_set_enabled, |
7467 | dpif_netdev_ipf_set_min_frag, | |
7468 | dpif_netdev_ipf_set_max_nfrags, | |
7469 | dpif_netdev_ipf_get_status, | |
7470 | dpif_netdev_ipf_dump_start, | |
7471 | dpif_netdev_ipf_dump_next, | |
7472 | dpif_netdev_ipf_dump_done, | |
5dddf960 JR |
7473 | dpif_netdev_meter_get_features, |
7474 | dpif_netdev_meter_set, | |
7475 | dpif_netdev_meter_get, | |
7476 | dpif_netdev_meter_del, | |
72865317 | 7477 | }; |
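/* Sketch of how this ops table is consumed (illustrative): dpif_open()
 * resolves a datapath type to a registered 'dpif_class' and dispatches
 * through these pointers; a NULL slot marks the operation as unsupported
 * for the userspace datapath, e.g. in the dpif layer:
 *
 *     error = class->recv_set ? class->recv_set(dpif, enable)
 *                             : EOPNOTSUPP;
 *
 * The dummy classes registered below reuse this exact table with only
 * the 'type' string swapped out. */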
614c4892 | 7478 | |
74cc3969 BP |
7479 | static void |
7480 | dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED, | |
7481 | const char *argv[], void *aux OVS_UNUSED) | |
7482 | { | |
e9985d6a | 7483 | struct dp_netdev_port *port; |
74cc3969 | 7484 | struct dp_netdev *dp; |
ff073a71 | 7485 | odp_port_t port_no; |
74cc3969 | 7486 | |
8a4e3a85 | 7487 | ovs_mutex_lock(&dp_netdev_mutex); |
74cc3969 BP |
7488 | dp = shash_find_data(&dp_netdevs, argv[1]); |
7489 | if (!dp || !dpif_netdev_class_is_dummy(dp->class)) { | |
8a4e3a85 | 7490 | ovs_mutex_unlock(&dp_netdev_mutex); |
74cc3969 BP |
7491 | unixctl_command_reply_error(conn, "unknown datapath or not a dummy"); |
7492 | return; | |
7493 | } | |
8a4e3a85 BP |
7494 | ovs_refcount_ref(&dp->ref_cnt); |
7495 | ovs_mutex_unlock(&dp_netdev_mutex); | |
74cc3969 | 7496 | |
59e6d833 | 7497 | ovs_mutex_lock(&dp->port_mutex); |
e9985d6a | 7498 | if (get_port_by_name(dp, argv[2], &port)) { |
74cc3969 | 7499 | unixctl_command_reply_error(conn, "unknown port"); |
8a4e3a85 | 7500 | goto exit; |
74cc3969 BP |
7501 | } |
7502 | ||
ff073a71 BP |
7503 | port_no = u32_to_odp(atoi(argv[3])); |
7504 | if (!port_no || port_no == ODPP_NONE) { | |
74cc3969 | 7505 | unixctl_command_reply_error(conn, "bad port number"); |
8a4e3a85 | 7506 | goto exit; |
74cc3969 | 7507 | } |
ff073a71 | 7508 | if (dp_netdev_lookup_port(dp, port_no)) { |
74cc3969 | 7509 | unixctl_command_reply_error(conn, "port number already in use"); |
8a4e3a85 | 7510 | goto exit; |
74cc3969 | 7511 | } |
59e6d833 | 7512 | |
e9985d6a DDP |
7513 | /* Remove port. */ |
7514 | hmap_remove(&dp->ports, &port->node); | |
e32971b8 | 7515 | reconfigure_datapath(dp); |
59e6d833 | 7516 | |
e9985d6a DDP |
7517 | /* Reinsert with new port number. */ |
7518 | port->port_no = port_no; | |
7519 | hmap_insert(&dp->ports, &port->node, hash_port_no(port_no)); | |
e32971b8 | 7520 | reconfigure_datapath(dp); |
59e6d833 | 7521 | |
d33ed218 | 7522 | seq_change(dp->port_seq); |
74cc3969 | 7523 | unixctl_command_reply(conn, NULL); |
8a4e3a85 BP |
7524 | |
7525 | exit: | |
59e6d833 | 7526 | ovs_mutex_unlock(&dp->port_mutex); |
8a4e3a85 | 7527 | dp_netdev_unref(dp); |
74cc3969 BP |
7528 | } |
7529 | ||
0cbfe35d BP |
7530 | static void |
7531 | dpif_dummy_register__(const char *type) | |
7532 | { | |
7533 | struct dpif_class *class; | |
7534 | ||
7535 | class = xmalloc(sizeof *class); | |
7536 | *class = dpif_netdev_class; | |
7537 | class->type = xstrdup(type); | |
7538 | dp_register_provider(class); | |
7539 | } | |
7540 | ||
8420c7ad BP |
7541 | static void |
7542 | dpif_dummy_override(const char *type) | |
7543 | { | |
65d43fdc YT |
7544 | int error; |
7545 | ||
7546 | /* | |
7547 | * Ignore EAFNOSUPPORT to allow --enable-dummy=system with | |
7548 | * a userland-only build. It's useful for the testsuite. | |
7549 | */ | |
7550 | error = dp_unregister_provider(type); | |
7551 | if (error == 0 || error == EAFNOSUPPORT) { | |
8420c7ad BP |
7552 | dpif_dummy_register__(type); |
7553 | } | |
7554 | } | |
7555 | ||
614c4892 | 7556 | void |
8420c7ad | 7557 | dpif_dummy_register(enum dummy_level level) |
614c4892 | 7558 | { |
8420c7ad | 7559 | if (level == DUMMY_OVERRIDE_ALL) { |
0cbfe35d BP |
7560 | struct sset types; |
7561 | const char *type; | |
7562 | ||
7563 | sset_init(&types); | |
7564 | dp_enumerate_types(&types); | |
7565 | SSET_FOR_EACH (type, &types) { | |
8420c7ad | 7566 | dpif_dummy_override(type); |
0cbfe35d BP |
7567 | } |
7568 | sset_destroy(&types); | |
8420c7ad BP |
7569 | } else if (level == DUMMY_OVERRIDE_SYSTEM) { |
7570 | dpif_dummy_override("system"); | |
614c4892 | 7571 | } |
0cbfe35d BP |
7572 | |
7573 | dpif_dummy_register__("dummy"); | |
74cc3969 BP |
7574 | |
7575 | unixctl_command_register("dpif-dummy/change-port-number", | |
74467d5c | 7576 | "dp port new-number", |
74cc3969 | 7577 | 3, 3, dpif_dummy_change_port_number, NULL); |
614c4892 | 7578 | } |
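/* The command registered above is driven through ovs-appctl; with a
 * hypothetical dummy datapath 'dp0' and port 'p1' (illustrative):
 *
 *     ovs-appctl dpif-dummy/change-port-number dp0 p1 42
 *
 * renumbers 'p1' to odp port 42 by removing the port from 'dp->ports'
 * and reinserting it, reconfiguring the datapath around each step. */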
0de8783a JR |
7579 | \f |
7580 | /* Datapath Classifier. */ | |
7581 | ||
7582 | /* A set of rules that all have the same fields wildcarded. */ | |
7583 | struct dpcls_subtable { | |
7584 | /* The fields are only used by writers. */ | |
7585 | struct cmap_node cmap_node OVS_GUARDED; /* Within dpcls 'subtables_map'. */ | |
7586 | ||
7587 | /* These fields are accessed by readers. */ | |
7588 | struct cmap rules; /* Contains "struct dpcls_rule"s. */ | |
3453b4d6 JS |
7589 | uint32_t hit_cnt; /* Number of match hits in subtable in current |
7590 | optimization interval. */ | |
0de8783a JR |
7591 | struct netdev_flow_key mask; /* Wildcards for fields (const). */ |
7592 | /* 'mask' must be the last field, additional space is allocated here. */ | |
7593 | }; | |
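/* Allocation note (sketch): 'mask' embeds a variable-length miniflow, so
 * each subtable is allocated with just enough trailing space for the
 * actual mask, as done in dpcls_create_subtable() below:
 *
 *     subtable = xmalloc(sizeof *subtable
 *                        - sizeof subtable->mask.mf + mask->len);
 *
 * This is why 'mask' must remain the last field of the struct. */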
7594 | ||
7595 | /* Initializes 'cls' as a classifier that initially contains no classification | |
7596 | * rules. */ | |
7597 | static void | |
7598 | dpcls_init(struct dpcls *cls) | |
7599 | { | |
7600 | cmap_init(&cls->subtables_map); | |
da9cfca6 | 7601 | pvector_init(&cls->subtables); |
0de8783a JR |
7602 | } |
7603 | ||
7604 | static void | |
7605 | dpcls_destroy_subtable(struct dpcls *cls, struct dpcls_subtable *subtable) | |
7606 | { | |
3453b4d6 | 7607 | VLOG_DBG("Destroying subtable %p for in_port %d", subtable, cls->in_port); |
da9cfca6 | 7608 | pvector_remove(&cls->subtables, subtable); |
0de8783a JR |
7609 | cmap_remove(&cls->subtables_map, &subtable->cmap_node, |
7610 | subtable->mask.hash); | |
7611 | cmap_destroy(&subtable->rules); | |
7612 | ovsrcu_postpone(free, subtable); | |
7613 | } | |
7614 | ||
7615 | /* Destroys 'cls'. Rules within 'cls', if any, are not freed; this is the | |
7616 | * caller's responsibility. | |
7617 | * May only be called after all the readers have been terminated. */ | |
7618 | static void | |
7619 | dpcls_destroy(struct dpcls *cls) | |
7620 | { | |
7621 | if (cls) { | |
7622 | struct dpcls_subtable *subtable; | |
7623 | ||
7624 | CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) { | |
361d808d | 7625 | ovs_assert(cmap_count(&subtable->rules) == 0); |
0de8783a JR |
7626 | dpcls_destroy_subtable(cls, subtable); |
7627 | } | |
7628 | cmap_destroy(&cls->subtables_map); | |
da9cfca6 | 7629 | pvector_destroy(&cls->subtables); |
0de8783a JR |
7630 | } |
7631 | } | |
7632 | ||
7633 | static struct dpcls_subtable * | |
7634 | dpcls_create_subtable(struct dpcls *cls, const struct netdev_flow_key *mask) | |
7635 | { | |
7636 | struct dpcls_subtable *subtable; | |
7637 | ||
7638 | /* Need to add one. */ | |
caeb4906 JR |
7639 | subtable = xmalloc(sizeof *subtable |
7640 | - sizeof subtable->mask.mf + mask->len); | |
0de8783a | 7641 | cmap_init(&subtable->rules); |
3453b4d6 | 7642 | subtable->hit_cnt = 0; |
0de8783a JR |
7643 | netdev_flow_key_clone(&subtable->mask, mask); |
7644 | cmap_insert(&cls->subtables_map, &subtable->cmap_node, mask->hash); | |
3453b4d6 | 7645 | /* Add the new subtable at the end of the pvector (with no hits yet). */
da9cfca6 | 7646 | pvector_insert(&cls->subtables, subtable, 0); |
84dbfb2b | 7647 | VLOG_DBG("Creating %"PRIuSIZE". subtable %p for in_port %d", |
3453b4d6 | 7648 | cmap_count(&cls->subtables_map), subtable, cls->in_port); |
da9cfca6 | 7649 | pvector_publish(&cls->subtables); |
0de8783a JR |
7650 | |
7651 | return subtable; | |
7652 | } | |
7653 | ||
7654 | static inline struct dpcls_subtable * | |
7655 | dpcls_find_subtable(struct dpcls *cls, const struct netdev_flow_key *mask) | |
7656 | { | |
7657 | struct dpcls_subtable *subtable; | |
7658 | ||
7659 | CMAP_FOR_EACH_WITH_HASH (subtable, cmap_node, mask->hash, | |
7660 | &cls->subtables_map) { | |
7661 | if (netdev_flow_key_equal(&subtable->mask, mask)) { | |
7662 | return subtable; | |
7663 | } | |
7664 | } | |
7665 | return dpcls_create_subtable(cls, mask); | |
7666 | } | |
7667 | ||
3453b4d6 JS |
7668 | |
7669 | /* Periodically sort the dpcls subtable vectors according to hit counts. */
7670 | static void | |
7671 | dpcls_sort_subtable_vector(struct dpcls *cls) | |
7672 | { | |
7673 | struct pvector *pvec = &cls->subtables; | |
7674 | struct dpcls_subtable *subtable; | |
7675 | ||
7676 | PVECTOR_FOR_EACH (subtable, pvec) { | |
7677 | pvector_change_priority(pvec, subtable, subtable->hit_cnt); | |
7678 | subtable->hit_cnt = 0; | |
7679 | } | |
7680 | pvector_publish(pvec); | |
7681 | } | |
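/* Worked example (illustrative): if three subtables scored hit_cnt = 5,
 * 120 and 40 during the last interval, the loop above installs those
 * counts as pvector priorities, so after pvector_publish() lookups probe
 * the subtables in the order 120, 40, 5.  The counters are zeroed so the
 * next interval is measured from scratch, keeping the hottest subtable
 * first and the average number of subtables visited per hit low. */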
7682 | ||
7683 | static inline void | |
4809891b KT |
7684 | dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd, |
7685 | struct polled_queue *poll_list, int poll_cnt) | |
3453b4d6 JS |
7686 | { |
7687 | struct dpcls *cls; | |
5bf84282 NK |
7688 | uint64_t tot_idle = 0, tot_proc = 0; |
7689 | unsigned int pmd_load = 0; | |
3453b4d6 | 7690 | |
b010be17 | 7691 | if (pmd->ctx.now > pmd->rxq_next_cycle_store) { |
2a2c67b4 | 7692 | uint64_t curr_tsc; |
5bf84282 NK |
7693 | struct pmd_auto_lb *pmd_alb = &pmd->dp->pmd_alb; |
7694 | if (pmd_alb->is_enabled && !pmd->isolated | |
7695 | && (pmd->perf_stats.counters.n[PMD_CYCLES_ITER_IDLE] >= | |
7696 | pmd->prev_stats[PMD_CYCLES_ITER_IDLE]) | |
7697 | && (pmd->perf_stats.counters.n[PMD_CYCLES_ITER_BUSY] >= | |
7698 | pmd->prev_stats[PMD_CYCLES_ITER_BUSY])) | |
7699 | { | |
7700 | tot_idle = pmd->perf_stats.counters.n[PMD_CYCLES_ITER_IDLE] - | |
7701 | pmd->prev_stats[PMD_CYCLES_ITER_IDLE]; | |
7702 | tot_proc = pmd->perf_stats.counters.n[PMD_CYCLES_ITER_BUSY] - | |
7703 | pmd->prev_stats[PMD_CYCLES_ITER_BUSY]; | |
7704 | ||
7705 | if (tot_proc) { | |
7706 | pmd_load = ((tot_proc * 100) / (tot_idle + tot_proc)); | |
7707 | } | |
7708 | ||
7709 | if (pmd_load >= ALB_PMD_LOAD_THRESHOLD) { | |
7710 | atomic_count_inc(&pmd->pmd_overloaded); | |
7711 | } else { | |
7712 | atomic_count_set(&pmd->pmd_overloaded, 0); | |
7713 | } | |
7714 | } | |
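/* Worked example of the load calculation above (illustrative): with
 * tot_proc = 9500 and tot_idle = 500 cycle counts since the previous
 * interval,
 *
 *     pmd_load = (9500 * 100) / (500 + 9500) = 95
 *
 * which reaches ALB_PMD_LOAD_THRESHOLD, so 'pmd_overloaded' is
 * incremented.  Any interval below the threshold resets the counter to
 * zero, i.e. only consecutive overloaded intervals accumulate. */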
7715 | ||
7716 | pmd->prev_stats[PMD_CYCLES_ITER_IDLE] = | |
7717 | pmd->perf_stats.counters.n[PMD_CYCLES_ITER_IDLE]; | |
7718 | pmd->prev_stats[PMD_CYCLES_ITER_BUSY] = | |
7719 | pmd->perf_stats.counters.n[PMD_CYCLES_ITER_BUSY]; | |
7720 | ||
4809891b KT |
7721 | /* Get the cycles that were used to process each queue and store. */ |
7722 | for (unsigned i = 0; i < poll_cnt; i++) { | |
7723 | uint64_t rxq_cyc_curr = dp_netdev_rxq_get_cycles(poll_list[i].rxq, | |
7724 | RXQ_CYCLES_PROC_CURR); | |
7725 | dp_netdev_rxq_set_intrvl_cycles(poll_list[i].rxq, rxq_cyc_curr); | |
7726 | dp_netdev_rxq_set_cycles(poll_list[i].rxq, RXQ_CYCLES_PROC_CURR, | |
7727 | 0); | |
7728 | } | |
2a2c67b4 KT |
7729 | curr_tsc = cycles_counter_update(&pmd->perf_stats); |
7730 | if (pmd->intrvl_tsc_prev) { | |
7731 | /* There is a prev timestamp, store a new intrvl cycle count. */ | |
7732 | atomic_store_relaxed(&pmd->intrvl_cycles, | |
7733 | curr_tsc - pmd->intrvl_tsc_prev); | |
7734 | } | |
7735 | pmd->intrvl_tsc_prev = curr_tsc; | |
4809891b | 7736 | /* Start new measuring interval */ |
b010be17 | 7737 | pmd->rxq_next_cycle_store = pmd->ctx.now + PMD_RXQ_INTERVAL_LEN; |
4809891b KT |
7738 | } |
7739 | ||
b010be17 | 7740 | if (pmd->ctx.now > pmd->next_optimization) { |
3453b4d6 JS |
7741 | /* Try to obtain the flow lock to block out revalidator threads. |
7742 | * If not possible, just try next time. */ | |
7743 | if (!ovs_mutex_trylock(&pmd->flow_mutex)) { | |
7744 | /* Optimize each classifier */ | |
7745 | CMAP_FOR_EACH (cls, node, &pmd->classifiers) { | |
7746 | dpcls_sort_subtable_vector(cls); | |
7747 | } | |
7748 | ovs_mutex_unlock(&pmd->flow_mutex); | |
7749 | /* Start new measuring interval */ | |
b010be17 IM |
7750 | pmd->next_optimization = pmd->ctx.now |
7751 | + DPCLS_OPTIMIZATION_INTERVAL; | |
3453b4d6 JS |
7752 | } |
7753 | } | |
7754 | } | |
7755 | ||
0de8783a JR |
7756 | /* Insert 'rule' into 'cls'. */ |
7757 | static void | |
7758 | dpcls_insert(struct dpcls *cls, struct dpcls_rule *rule, | |
7759 | const struct netdev_flow_key *mask) | |
7760 | { | |
7761 | struct dpcls_subtable *subtable = dpcls_find_subtable(cls, mask); | |
7762 | ||
3453b4d6 | 7763 | /* Refer to subtable's mask, also for later removal. */ |
0de8783a JR |
7764 | rule->mask = &subtable->mask; |
7765 | cmap_insert(&subtable->rules, &rule->cmap_node, rule->flow.hash); | |
7766 | } | |
7767 | ||
7768 | /* Removes 'rule' from 'cls', also destroying 'rule'. */ | |
7769 | static void | |
7770 | dpcls_remove(struct dpcls *cls, struct dpcls_rule *rule) | |
7771 | { | |
7772 | struct dpcls_subtable *subtable; | |
7773 | ||
7774 | ovs_assert(rule->mask); | |
7775 | ||
3453b4d6 | 7776 | /* Get subtable from reference in rule->mask. */ |
0de8783a | 7777 | INIT_CONTAINER(subtable, rule->mask, mask); |
0de8783a JR |
7778 | if (cmap_remove(&subtable->rules, &rule->cmap_node, rule->flow.hash) |
7779 | == 0) { | |
3453b4d6 | 7780 | /* Delete empty subtable. */ |
0de8783a | 7781 | dpcls_destroy_subtable(cls, subtable); |
da9cfca6 | 7782 | pvector_publish(&cls->subtables); |
0de8783a JR |
7783 | } |
7784 | } | |
7785 | ||
361d808d JR |
7786 | /* Returns true if 'target' satisfies 'key' in 'mask', that is, if each 1-bit |
7787 | * in 'mask' the values in 'key' and 'target' are the same. */ | |
60d8ccae | 7788 | static bool |
0de8783a JR |
7789 | dpcls_rule_matches_key(const struct dpcls_rule *rule, |
7790 | const struct netdev_flow_key *target) | |
7791 | { | |
09b0fa9c JR |
7792 | const uint64_t *keyp = miniflow_get_values(&rule->flow.mf); |
7793 | const uint64_t *maskp = miniflow_get_values(&rule->mask->mf); | |
5fcff47b | 7794 | uint64_t value; |
0de8783a | 7795 | |
5fcff47b JR |
7796 | NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, target, rule->flow.mf.map) { |
7797 | if (OVS_UNLIKELY((value & *maskp++) != *keyp++)) { | |
0de8783a JR |
7798 | return false; |
7799 | } | |
7800 | } | |
7801 | return true; | |
7802 | } | |
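/* Worked example (illustrative, one 64-bit unit wide): with
 *
 *     mask   = 0xffff0000
 *     key    = 0xabcd0000   (rule keys are stored pre-masked)
 *     target = 0xabcd1234
 *
 * the loop checks (target & mask) == key, which holds here, so the rule
 * matches regardless of the wildcarded low 16 bits of 'target'. */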
7803 | ||
5b1c9c78 FA |
7804 | /* For each miniflow in 'keys', performs a classifier lookup, writing the result
7805 | * into the corresponding slot in 'rules'. If a particular entry in 'keys' is | |
0de8783a JR |
7806 | * NULL it is skipped. |
7807 | * | |
7808 | * This function is optimized for use in the userspace datapath and therefore | |
7809 | * does not implement a lot of features available in the standard | |
7810 | * classifier_lookup() function. Specifically, it does not implement | |
7811 | * priorities, instead returning any rule which matches the flow. | |
7812 | * | |
5b1c9c78 | 7813 | * Returns true if all miniflows found a corresponding rule. */ |
0de8783a | 7814 | static bool |
60d8ccae | 7815 | dpcls_lookup(struct dpcls *cls, const struct netdev_flow_key *keys[], |
3453b4d6 JS |
7816 | struct dpcls_rule **rules, const size_t cnt, |
7817 | int *num_lookups_p) | |
0de8783a | 7818 | { |
5b1c9c78 | 7819 | /* The received 'cnt' miniflows are the search-keys that will be processed |
63906f18 BB |
7820 | * to find a matching entry into the available subtables. |
7821 | * The number of bits in map_type is equal to NETDEV_MAX_BURST. */ | |
7822 | typedef uint32_t map_type; | |
0de8783a | 7823 | #define MAP_BITS (sizeof(map_type) * CHAR_BIT) |
63906f18 | 7824 | BUILD_ASSERT_DECL(MAP_BITS >= NETDEV_MAX_BURST); |
0de8783a | 7825 | |
0de8783a JR |
7826 | struct dpcls_subtable *subtable; |
7827 | ||
63906f18 BB |
7828 | map_type keys_map = TYPE_MAXIMUM(map_type); /* Set all bits. */ |
7829 | map_type found_map; | |
7830 | uint32_t hashes[MAP_BITS]; | |
7831 | const struct cmap_node *nodes[MAP_BITS]; | |
7832 | ||
7833 | if (cnt != MAP_BITS) { | |
7834 | keys_map >>= MAP_BITS - cnt; /* Clear extra bits. */ | |
0de8783a JR |
7835 | } |
7836 | memset(rules, 0, cnt * sizeof *rules); | |
7837 | ||
3453b4d6 JS |
7838 | int lookups_match = 0, subtable_pos = 1; |
7839 | ||
5b1c9c78 FA |
7840 | /* The Datapath classifier - aka dpcls - is composed of subtables. |
7841 | * Subtables are dynamically created as needed when new rules are inserted. | |
7842 | * Each subtable collects rules with matches on a specific subset of packet | |
7843 | * fields as defined by the subtable's mask. We proceed to process every | |
7844 | * search-key against each subtable, but when a match is found for a | |
7845 | * search-key, the search for that key can stop because the rules are | |
7846 | * non-overlapping. */ | |
da9cfca6 | 7847 | PVECTOR_FOR_EACH (subtable, &cls->subtables) { |
63906f18 BB |
7848 | int i; |
7849 | ||
7850 | /* Compute hashes for the remaining keys. Each search-key is | |
7851 | * masked with the subtable's mask to avoid hashing the wildcarded | |
7852 | * bits. */ | |
7853 | ULLONG_FOR_EACH_1(i, keys_map) { | |
60d8ccae | 7854 | hashes[i] = netdev_flow_key_hash_in_mask(keys[i], |
63906f18 BB |
7855 | &subtable->mask); |
7856 | } | |
7857 | /* Lookup. */ | |
7858 | found_map = cmap_find_batch(&subtable->rules, keys_map, hashes, nodes); | |
7859 | /* Check results. When the i-th bit of found_map is set, it means | |
7860 | * that a set of nodes with a matching hash value was found for the | |
7861 | * i-th search-key. Due to possible hash collisions we need to check | |
7862 | * which of the found rules, if any, really matches our masked | |
7863 | * search-key. */ | |
7864 | ULLONG_FOR_EACH_1(i, found_map) { | |
7865 | struct dpcls_rule *rule; | |
7866 | ||
7867 | CMAP_NODE_FOR_EACH (rule, cmap_node, nodes[i]) { | |
60d8ccae | 7868 | if (OVS_LIKELY(dpcls_rule_matches_key(rule, keys[i]))) { |
63906f18 BB |
7869 | rules[i] = rule; |
7870 | /* Even at 20 Mpps the 32-bit hit_cnt cannot wrap | |
7871 | * within the one-second optimization interval. */ | |
7872 | subtable->hit_cnt++; | |
7873 | lookups_match += subtable_pos; | |
7874 | goto next; | |
0de8783a | 7875 | } |
0de8783a | 7876 | } |
63906f18 BB |
7877 | /* None of the found rules was a match. Reset the i-th bit to |
7878 | * keep searching this key in the next subtable. */ | |
7879 | ULLONG_SET0(found_map, i); /* Did not match. */ | |
7880 | next: | |
7881 | ; /* Keep Sparse happy. */ | |
0de8783a | 7882 | } |
63906f18 BB |
7883 | keys_map &= ~found_map; /* Clear the found rules. */ |
7884 | if (!keys_map) { | |
3453b4d6 JS |
7885 | if (num_lookups_p) { |
7886 | *num_lookups_p = lookups_match; | |
7887 | } | |
0de8783a JR |
7888 | return true; /* All found. */ |
7889 | } | |
3453b4d6 JS |
7890 | subtable_pos++; |
7891 | } | |
7892 | if (num_lookups_p) { | |
7893 | *num_lookups_p = lookups_match; | |
0de8783a JR |
7894 | } |
7895 | return false; /* Some misses. */ | |
7896 | } |
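/* Batch walkthrough (illustrative): for cnt == 3, 'keys_map' starts as
 * 0b111.  If the first subtable matches keys 0 and 2, found_map == 0b101
 * and keys_map shrinks to 0b010, so only key 1 is hashed against the
 * second subtable.  If that matches too, keys_map reaches 0 and the
 * function returns true with lookups_match == 4: keys 0 and 2 each add
 * subtable_pos 1 and key 1 adds subtable_pos 2. */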