1 /* Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
2 *
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License. */
14
15 #include <config.h>
16 #include "ofproto-dpif-upcall.h"
17
18 #include <errno.h>
19 #include <stdbool.h>
20 #include <inttypes.h>
21
22 #include "coverage.h"
23 #include "dynamic-string.h"
24 #include "dpif.h"
25 #include "fail-open.h"
26 #include "latch.h"
27 #include "seq.h"
28 #include "list.h"
29 #include "netlink.h"
30 #include "ofpbuf.h"
31 #include "ofproto-dpif.h"
32 #include "packets.h"
33 #include "poll-loop.h"
34 #include "vlog.h"
35
36 #define MAX_QUEUE_LENGTH 512
37
38 VLOG_DEFINE_THIS_MODULE(ofproto_dpif_upcall);
39
40 COVERAGE_DEFINE(upcall_queue_overflow);
41 COVERAGE_DEFINE(drop_queue_overflow);
42 COVERAGE_DEFINE(miss_queue_overflow);
43 COVERAGE_DEFINE(fmb_queue_overflow);
44
45 /* A thread that processes each upcall handed to it by the dispatcher thread,
46 * forwards the upcall's packet, and then queues it to the main ofproto_dpif
47 * to possibly set up a kernel flow as a cache. */
48 struct handler {
49 struct udpif *udpif; /* Parent udpif. */
50 pthread_t thread; /* Thread ID. */
51
52 struct ovs_mutex mutex; /* Mutex guarding the following. */
53
54 /* Atomic queue of unprocessed miss upcalls. */
55 struct list upcalls OVS_GUARDED;
56 size_t n_upcalls OVS_GUARDED;
57
58 size_t n_new_upcalls; /* Only changed by the dispatcher. */
59
60 pthread_cond_t wake_cond; /* Wakes 'thread' while holding
61 'mutex'. */
62 };
63
64 /* An upcall handler for ofproto_dpif.
65 *
66 * udpif is implemented as a "dispatcher" thread that reads upcalls from the
67 * kernel. It processes each upcall just enough to figure out its next
68 * destination. For a "miss" upcall (MISS_UPCALL), this is one of several
69 * "handler" threads (see struct handler). Other upcalls are queued to the
70 * main ofproto_dpif. */
71 struct udpif {
72 struct dpif *dpif; /* Datapath handle. */
73 struct dpif_backer *backer; /* Opaque dpif_backer pointer. */
74
75 uint32_t secret; /* Random seed for upcall hash. */
76
77 pthread_t dispatcher; /* Dispatcher thread ID. */
78
79 struct handler *handlers; /* Miss handlers. */
80 size_t n_handlers;
81
82 /* Atomic queue of unprocessed drop keys. */
83 struct ovs_mutex drop_key_mutex;
84 struct list drop_keys OVS_GUARDED;
85 size_t n_drop_keys OVS_GUARDED;
86
87 /* Atomic queue of special upcalls for ofproto-dpif to process. */
88 struct ovs_mutex upcall_mutex;
89 struct list upcalls OVS_GUARDED;
90 size_t n_upcalls OVS_GUARDED;
91
92 /* Atomic queue of flow_miss_batches. */
93 struct ovs_mutex fmb_mutex;
94 struct list fmbs OVS_GUARDED;
95 size_t n_fmbs OVS_GUARDED;
96
97 /* Number of times udpif_revalidate() has been called. */
98 atomic_uint reval_seq;
99
100 struct seq *wait_seq;
101 uint64_t last_seq;
102
103 struct latch exit_latch; /* Tells child threads to exit. */
104 };
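
/* A rough sketch of how a caller (in practice, ofproto-dpif) is expected to
 * drive a udpif. The loop below is illustrative only, not code from this
 * file:
 *
 *     udpif = udpif_create(backer, dpif);
 *     udpif_recv_set(udpif, n_handlers, true);    (starts the threads)
 *     for (;;) {
 *         udpif_run(udpif);
 *         ... drain upcall_next(), flow_miss_batch_next(), drop_key_next() ...
 *         udpif_wait(udpif);
 *         poll_block();
 *     }
 *     udpif_destroy(udpif);                       (also stops the threads)
 */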
105
106 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
107
108 static void recv_upcalls(struct udpif *);
109 static void handle_miss_upcalls(struct udpif *, struct list *upcalls);
110 static void miss_destroy(struct flow_miss *);
111 static void *udpif_dispatcher(void *);
112 static void *udpif_miss_handler(void *);
113
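/* Creates and returns a new udpif bound to 'backer' and 'dpif'. The udpif
 * does not start any threads until udpif_recv_set() is called; the caller
 * eventually frees it with udpif_destroy(). */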
114 struct udpif *
115 udpif_create(struct dpif_backer *backer, struct dpif *dpif)
116 {
117 struct udpif *udpif = xzalloc(sizeof *udpif);
118
119 udpif->dpif = dpif;
120 udpif->backer = backer;
121 udpif->secret = random_uint32();
122 udpif->wait_seq = seq_create();
123 latch_init(&udpif->exit_latch);
124 list_init(&udpif->drop_keys);
125 list_init(&udpif->upcalls);
126 list_init(&udpif->fmbs);
127 atomic_init(&udpif->reval_seq, 0);
128 ovs_mutex_init(&udpif->drop_key_mutex);
129 ovs_mutex_init(&udpif->upcall_mutex);
130 ovs_mutex_init(&udpif->fmb_mutex);
131
132 return udpif;
133 }
134
135 void
136 udpif_destroy(struct udpif *udpif)
137 {
138 struct flow_miss_batch *fmb;
139 struct drop_key *drop_key;
140 struct upcall *upcall;
141
142 udpif_recv_set(udpif, 0, false);
143
144 while ((drop_key = drop_key_next(udpif))) {
145 drop_key_destroy(drop_key);
146 }
147
148 while ((upcall = upcall_next(udpif))) {
149 upcall_destroy(upcall);
150 }
151
152 while ((fmb = flow_miss_batch_next(udpif))) {
153 flow_miss_batch_destroy(fmb);
154 }
155
156 ovs_mutex_destroy(&udpif->drop_key_mutex);
157 ovs_mutex_destroy(&udpif->upcall_mutex);
158 ovs_mutex_destroy(&udpif->fmb_mutex);
159 latch_destroy(&udpif->exit_latch);
160 seq_destroy(udpif->wait_seq);
161 free(udpif);
162 }
163
164 /* Tells 'udpif' to begin or stop handling flow misses depending on the value
165 * of 'enable'. 'n_handlers' is the number of miss_handler threads to create.
166 * Passing 'n_handlers' as zero is equivalent to passing 'enable' as false. */
167 void
168 udpif_recv_set(struct udpif *udpif, size_t n_handlers, bool enable)
169 {
170 n_handlers = enable ? n_handlers : 0;
171 n_handlers = MIN(n_handlers, 64);
172
173 /* Stop the old threads (if any). */
174 if (udpif->handlers && udpif->n_handlers != n_handlers) {
175 size_t i;
176
177 latch_set(&udpif->exit_latch);
178
179 /* Wake the handlers so they can exit. */
180 for (i = 0; i < udpif->n_handlers; i++) {
181 struct handler *handler = &udpif->handlers[i];
182
183 ovs_mutex_lock(&handler->mutex);
184 xpthread_cond_signal(&handler->wake_cond);
185 ovs_mutex_unlock(&handler->mutex);
186 }
187
188 xpthread_join(udpif->dispatcher, NULL);
189 for (i = 0; i < udpif->n_handlers; i++) {
190 struct handler *handler = &udpif->handlers[i];
191 struct upcall *miss, *next;
192
193 xpthread_join(handler->thread, NULL);
194
195 ovs_mutex_lock(&handler->mutex);
196 LIST_FOR_EACH_SAFE (miss, next, list_node, &handler->upcalls) {
197 list_remove(&miss->list_node);
198 upcall_destroy(miss);
199 }
200 ovs_mutex_unlock(&handler->mutex);
201 ovs_mutex_destroy(&handler->mutex);
202
203 xpthread_cond_destroy(&handler->wake_cond);
204 }
205 latch_poll(&udpif->exit_latch);
206
207 free(udpif->handlers);
208 udpif->handlers = NULL;
209 udpif->n_handlers = 0;
210 }
211
212 /* Start new threads (if necessary). */
213 if (!udpif->handlers && n_handlers) {
214 size_t i;
215
216 udpif->n_handlers = n_handlers;
217 udpif->handlers = xzalloc(udpif->n_handlers * sizeof *udpif->handlers);
218 for (i = 0; i < udpif->n_handlers; i++) {
219 struct handler *handler = &udpif->handlers[i];
220
221 handler->udpif = udpif;
222 list_init(&handler->upcalls);
223 xpthread_cond_init(&handler->wake_cond, NULL);
224 ovs_mutex_init(&handler->mutex);
225 xpthread_create(&handler->thread, NULL, udpif_miss_handler, handler);
226 }
227 xpthread_create(&udpif->dispatcher, NULL, udpif_dispatcher, udpif);
228 }
229 }
230
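/* Records the current value of 'udpif->wait_seq' so that a later call to
 * udpif_wait() blocks until the udpif threads publish new work. Intended to
 * be called from the caller's main loop before udpif_wait(). */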
231 void
232 udpif_run(struct udpif *udpif)
233 {
234 udpif->last_seq = seq_read(udpif->wait_seq);
235 }
236
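/* Arranges for the poll loop to wake up immediately if 'udpif' already has
 * work queued for the main thread, and otherwise to wake up when 'wait_seq'
 * changes, i.e. when new work is queued. */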
237 void
238 udpif_wait(struct udpif *udpif)
239 {
240 ovs_mutex_lock(&udpif->drop_key_mutex);
241 if (udpif->n_drop_keys) {
242 poll_immediate_wake();
243 }
244 ovs_mutex_unlock(&udpif->drop_key_mutex);
245
246 ovs_mutex_lock(&udpif->upcall_mutex);
247 if (udpif->n_upcalls) {
248 poll_immediate_wake();
249 }
250 ovs_mutex_unlock(&udpif->upcall_mutex);
251
252 ovs_mutex_lock(&udpif->fmb_mutex);
253 if (udpif->n_fmbs) {
254 poll_immediate_wake();
255 }
256 ovs_mutex_unlock(&udpif->fmb_mutex);
257
258 seq_wait(udpif->wait_seq, udpif->last_seq);
259 }
260
261 /* Notifies 'udpif' that something changed which may render previous
262 * xlate_actions() results invalid. */
263 void
264 udpif_revalidate(struct udpif *udpif)
265 {
266 struct flow_miss_batch *fmb, *next_fmb;
267 unsigned int junk;
268
269 /* Since we remove the misses on revalidation, their statistics won't be
270 * accounted to the appropriate 'facet's in the upper layer. In most
271 * cases, this is alright because we've already pushed the stats to the
272 * relevant rules. However, NetFlow requires absolute packet counts on
273 * 'facet's which could now be incorrect. */
274 ovs_mutex_lock(&udpif->fmb_mutex);
275 atomic_add(&udpif->reval_seq, 1, &junk);
276 LIST_FOR_EACH_SAFE (fmb, next_fmb, list_node, &udpif->fmbs) {
277 list_remove(&fmb->list_node);
278 flow_miss_batch_destroy(fmb);
279 udpif->n_fmbs--;
280 }
281 ovs_mutex_unlock(&udpif->fmb_mutex);
282 udpif_drop_key_clear(udpif);
283 }
284
285 /* Retrieves the next upcall which ofproto-dpif is responsible for handling.
286 * The caller is responsible for destroying the returned upcall with
287 * upcall_destroy(). */
288 struct upcall *
289 upcall_next(struct udpif *udpif)
290 {
291 struct upcall *next = NULL;
292
293 ovs_mutex_lock(&udpif->upcall_mutex);
294 if (udpif->n_upcalls) {
295 udpif->n_upcalls--;
296 next = CONTAINER_OF(list_pop_front(&udpif->upcalls), struct upcall,
297 list_node);
298 }
299 ovs_mutex_unlock(&udpif->upcall_mutex);
300 return next;
301 }
302
303 /* Destroys and deallocates 'upcall'. */
304 void
305 upcall_destroy(struct upcall *upcall)
306 {
307 if (upcall) {
308 ofpbuf_uninit(&upcall->upcall_buf);
309 free(upcall);
310 }
311 }
312
313 /* Retrieves the next batch of processed flow misses for 'udpif' to install.
314 * The caller is responsible for destroying it with flow_miss_batch_destroy().
315 */
316 struct flow_miss_batch *
317 flow_miss_batch_next(struct udpif *udpif)
318 {
319 struct flow_miss_batch *next = NULL;
320
321 ovs_mutex_lock(&udpif->fmb_mutex);
322 if (udpif->n_fmbs) {
323 udpif->n_fmbs--;
324 next = CONTAINER_OF(list_pop_front(&udpif->fmbs),
325 struct flow_miss_batch, list_node);
326 }
327 ovs_mutex_unlock(&udpif->fmb_mutex);
328 return next;
329 }
330
331 /* Destroys and deallocates 'fmb'. */
332 void
333 flow_miss_batch_destroy(struct flow_miss_batch *fmb)
334 {
335 struct flow_miss *miss, *next;
336
337 if (!fmb) {
338 return;
339 }
340
341 HMAP_FOR_EACH_SAFE (miss, next, hmap_node, &fmb->misses) {
342 hmap_remove(&fmb->misses, &miss->hmap_node);
343 miss_destroy(miss);
344 }
345
346 hmap_destroy(&fmb->misses);
347 free(fmb);
348 }
349
350 /* Discards any flow miss batches queued up in 'udpif' for 'ofproto' (because
351 * 'ofproto' is being destroyed).
352 *
353 * 'ofproto''s xports must already have been removed, otherwise new flow miss
354 * batches could still end up getting queued. */
355 void
356 flow_miss_batch_ofproto_destroyed(struct udpif *udpif,
357 const struct ofproto_dpif *ofproto)
358 {
359 struct flow_miss_batch *fmb, *next_fmb;
360
361 ovs_mutex_lock(&udpif->fmb_mutex);
362 LIST_FOR_EACH_SAFE (fmb, next_fmb, list_node, &udpif->fmbs) {
363 struct flow_miss *miss, *next_miss;
364
365 HMAP_FOR_EACH_SAFE (miss, next_miss, hmap_node, &fmb->misses) {
366 if (miss->ofproto == ofproto) {
367 hmap_remove(&fmb->misses, &miss->hmap_node);
368 miss_destroy(miss);
369 }
370 }
371
372 if (hmap_is_empty(&fmb->misses)) {
373 list_remove(&fmb->list_node);
374 flow_miss_batch_destroy(fmb);
375 udpif->n_fmbs--;
376 }
377 }
378 ovs_mutex_unlock(&udpif->fmb_mutex);
379 }
380
381 /* Retrieves the next drop key which ofproto-dpif needs to process. The caller
382 * is responsible for destroying it with drop_key_destroy(). */
383 struct drop_key *
384 drop_key_next(struct udpif *udpif)
385 {
386 struct drop_key *next = NULL;
387
388 ovs_mutex_lock(&udpif->drop_key_mutex);
389 if (udpif->n_drop_keys) {
390 udpif->n_drop_keys--;
391 next = CONTAINER_OF(list_pop_front(&udpif->drop_keys), struct drop_key,
392 list_node);
393 }
394 ovs_mutex_unlock(&udpif->drop_key_mutex);
395 return next;
396 }
397
398 /* Destroys and deallocates 'drop_key'. */
399 void
400 drop_key_destroy(struct drop_key *drop_key)
401 {
402 if (drop_key) {
403 free(drop_key->key);
404 free(drop_key);
405 }
406 }
407
408 /* Clears all drop keys waiting to be processed by drop_key_next(). */
409 void
410 udpif_drop_key_clear(struct udpif *udpif)
411 {
412 struct drop_key *drop_key, *next;
413
414 ovs_mutex_lock(&udpif->drop_key_mutex);
415 LIST_FOR_EACH_SAFE (drop_key, next, list_node, &udpif->drop_keys) {
416 list_remove(&drop_key->list_node);
417 drop_key_destroy(drop_key);
418 udpif->n_drop_keys--;
419 }
420 ovs_mutex_unlock(&udpif->drop_key_mutex);
421 }
422 \f
423 /* The dispatcher thread is responsible for receiving upcalls from the kernel,
424 * assigning the miss upcalls to a miss_handler thread, and assigning the more
425 * complex ones to ofproto-dpif directly. */
426 static void *
427 udpif_dispatcher(void *arg)
428 {
429 struct udpif *udpif = arg;
430
431 set_subprogram_name("dispatcher");
432 while (!latch_is_set(&udpif->exit_latch)) {
433 recv_upcalls(udpif);
434 dpif_recv_wait(udpif->dpif);
435 latch_wait(&udpif->exit_latch);
436 poll_block();
437 }
438
439 return NULL;
440 }
441
442 /* The miss handler thread is responsible for processing miss upcalls retrieved
443 * by the dispatcher thread. Once finished, it passes the processed miss
444 * upcalls to ofproto-dpif where they're installed in the datapath. */
445 static void *
446 udpif_miss_handler(void *arg)
447 {
448 struct list misses = LIST_INITIALIZER(&misses);
449 struct handler *handler = arg;
450
451 set_subprogram_name("miss_handler");
452 for (;;) {
453 size_t i;
454
455 ovs_mutex_lock(&handler->mutex);
456
457 if (latch_is_set(&handler->udpif->exit_latch)) {
458 ovs_mutex_unlock(&handler->mutex);
459 return NULL;
460 }
461
462 if (!handler->n_upcalls) {
463 ovs_mutex_cond_wait(&handler->wake_cond, &handler->mutex);
464 }
465
466 for (i = 0; i < FLOW_MISS_MAX_BATCH; i++) {
467 if (handler->n_upcalls) {
468 handler->n_upcalls--;
469 list_push_back(&misses, list_pop_front(&handler->upcalls));
470 } else {
471 break;
472 }
473 }
474 ovs_mutex_unlock(&handler->mutex);
475
476 handle_miss_upcalls(handler->udpif, &misses);
477 }
478 }
479 \f
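/* Releases the upcalls and xlate_out state owned by 'miss'. Does not free
 * 'miss' itself, which lives inside its flow_miss_batch's 'miss_buf'. */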
480 static void
481 miss_destroy(struct flow_miss *miss)
482 {
483 struct upcall *upcall, *next;
484
485 LIST_FOR_EACH_SAFE (upcall, next, list_node, &miss->upcalls) {
486 list_remove(&upcall->list_node);
487 upcall_destroy(upcall);
488 }
489 xlate_out_uninit(&miss->xout);
490 }
491
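/* Examines 'upcall' and returns the kind of upcall it represents: MISS_UPCALL
 * for datapath misses and slow-path actions, one of the sFlow, IPFIX, or flow
 * sample types for the corresponding "action" cookies, or BAD_UPCALL if the
 * upcall is malformed. */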
492 static enum upcall_type
493 classify_upcall(const struct upcall *upcall)
494 {
495 const struct dpif_upcall *dpif_upcall = &upcall->dpif_upcall;
496 union user_action_cookie cookie;
497 size_t userdata_len;
498
499 /* First look at the upcall type. */
500 switch (dpif_upcall->type) {
501 case DPIF_UC_ACTION:
502 break;
503
504 case DPIF_UC_MISS:
505 return MISS_UPCALL;
506
507 case DPIF_N_UC_TYPES:
508 default:
509 VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32,
510 dpif_upcall->type);
511 return BAD_UPCALL;
512 }
513
514 /* "action" upcalls need a closer look. */
515 if (!dpif_upcall->userdata) {
516 VLOG_WARN_RL(&rl, "action upcall missing cookie");
517 return BAD_UPCALL;
518 }
519 userdata_len = nl_attr_get_size(dpif_upcall->userdata);
520 if (userdata_len < sizeof cookie.type
521 || userdata_len > sizeof cookie) {
522 VLOG_WARN_RL(&rl, "action upcall cookie has unexpected size %zu",
523 userdata_len);
524 return BAD_UPCALL;
525 }
526 memset(&cookie, 0, sizeof cookie);
527 memcpy(&cookie, nl_attr_get(dpif_upcall->userdata), userdata_len);
528 if (userdata_len == sizeof cookie.sflow
529 && cookie.type == USER_ACTION_COOKIE_SFLOW) {
530 return SFLOW_UPCALL;
531 } else if (userdata_len == sizeof cookie.slow_path
532 && cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
533 return MISS_UPCALL;
534 } else if (userdata_len == sizeof cookie.flow_sample
535 && cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
536 return FLOW_SAMPLE_UPCALL;
537 } else if (userdata_len == sizeof cookie.ipfix
538 && cookie.type == USER_ACTION_COOKIE_IPFIX) {
539 return IPFIX_UPCALL;
540 } else {
541 VLOG_WARN_RL(&rl, "invalid user cookie of type %"PRIu16
542 " and size %zu", cookie.type, userdata_len);
543 return BAD_UPCALL;
544 }
545 }
546
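/* Reads upcalls from 'udpif->dpif' until none are left. Miss upcalls are
 * hashed on the input port and TCP/UDP port attributes of their flow key and
 * queued to the corresponding handler thread; all other valid upcalls are
 * queued for the main ofproto-dpif thread. Upcalls that would grow a queue
 * past MAX_QUEUE_LENGTH are dropped. */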
547 static void
548 recv_upcalls(struct udpif *udpif)
549 {
550 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(60, 60);
551 size_t n_udpif_new_upcalls = 0;
552 struct handler *handler;
553 int n;
554
555 for (;;) {
556 struct upcall *upcall;
557 int error;
558
559 upcall = xmalloc(sizeof *upcall);
560 ofpbuf_use_stub(&upcall->upcall_buf, upcall->upcall_stub,
561 sizeof upcall->upcall_stub);
562 error = dpif_recv(udpif->dpif, &upcall->dpif_upcall,
563 &upcall->upcall_buf);
564 if (error) {
565 upcall_destroy(upcall);
566 break;
567 }
568
569 upcall->type = classify_upcall(upcall);
570 if (upcall->type == BAD_UPCALL) {
571 upcall_destroy(upcall);
572 } else if (upcall->type == MISS_UPCALL) {
573 struct dpif_upcall *dupcall = &upcall->dpif_upcall;
574 uint32_t hash = udpif->secret;
575 struct nlattr *nla;
576 size_t n_bytes, left;
577
578 n_bytes = 0;
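/* Hash the input port and TCP/UDP port attributes of the flow key so that
 * every upcall for a given flow lands on the same handler thread. */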
579 NL_ATTR_FOR_EACH (nla, left, dupcall->key, dupcall->key_len) {
580 enum ovs_key_attr type = nl_attr_type(nla);
581 if (type == OVS_KEY_ATTR_IN_PORT
582 || type == OVS_KEY_ATTR_TCP
583 || type == OVS_KEY_ATTR_UDP) {
584 if (nl_attr_get_size(nla) == 4) {
585 ovs_be32 attr = nl_attr_get_be32(nla);
586 hash = mhash_add(hash, (OVS_FORCE uint32_t) attr);
587 n_bytes += 4;
588 } else {
589 VLOG_WARN("Netlink attribute with incorrect size.");
590 }
591 }
592 }
593 hash = mhash_finish(hash, n_bytes);
594
595 handler = &udpif->handlers[hash % udpif->n_handlers];
596
597 ovs_mutex_lock(&handler->mutex);
598 if (handler->n_upcalls < MAX_QUEUE_LENGTH) {
599 list_push_back(&handler->upcalls, &upcall->list_node);
600 handler->n_new_upcalls = ++handler->n_upcalls;
601
602 if (handler->n_new_upcalls >= FLOW_MISS_MAX_BATCH) {
603 xpthread_cond_signal(&handler->wake_cond);
604 }
605 ovs_mutex_unlock(&handler->mutex);
606 if (!VLOG_DROP_DBG(&rl)) {
607 struct ds ds = DS_EMPTY_INITIALIZER;
608
609 odp_flow_key_format(upcall->dpif_upcall.key,
610 upcall->dpif_upcall.key_len,
611 &ds);
612 VLOG_DBG("dispatcher: miss enqueue (%s)", ds_cstr(&ds));
613 ds_destroy(&ds);
614 }
615 } else {
616 ovs_mutex_unlock(&handler->mutex);
617 COVERAGE_INC(miss_queue_overflow);
618 upcall_destroy(upcall);
619 }
620 } else {
621 ovs_mutex_lock(&udpif->upcall_mutex);
622 if (udpif->n_upcalls < MAX_QUEUE_LENGTH) {
623 n_udpif_new_upcalls = ++udpif->n_upcalls;
624 list_push_back(&udpif->upcalls, &upcall->list_node);
625 ovs_mutex_unlock(&udpif->upcall_mutex);
626
627 if (n_udpif_new_upcalls >= FLOW_MISS_MAX_BATCH) {
628 seq_change(udpif->wait_seq);
629 }
630 } else {
631 ovs_mutex_unlock(&udpif->upcall_mutex);
632 COVERAGE_INC(upcall_queue_overflow);
633 upcall_destroy(upcall);
634 }
635 }
636 }
637 for (n = 0; n < udpif->n_handlers; ++n) {
638 handler = &udpif->handlers[n];
639 if (handler->n_new_upcalls) {
640 handler->n_new_upcalls = 0;
641 ovs_mutex_lock(&handler->mutex);
642 xpthread_cond_signal(&handler->wake_cond);
643 ovs_mutex_unlock(&handler->mutex);
644 }
645 }
646 if (n_udpif_new_upcalls) {
647 seq_change(udpif->wait_seq);
648 }
649 }
650
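/* Searches 'todo' for a flow_miss whose ofproto is 'ofproto' and whose flow
 * equals 'flow', using 'hash' as the hash of 'flow'. Returns the match, or
 * NULL if there is none. */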
651 static struct flow_miss *
652 flow_miss_find(struct hmap *todo, const struct ofproto_dpif *ofproto,
653 const struct flow *flow, uint32_t hash)
654 {
655 struct flow_miss *miss;
656
657 HMAP_FOR_EACH_WITH_HASH (miss, hmap_node, hash, todo) {
658 if (miss->ofproto == ofproto && flow_equal(&miss->flow, flow)) {
659 return miss;
660 }
661 }
662
663 return NULL;
664 }
665
666 /* Executes flow miss 'miss'. May add any required datapath operations
667 * to 'ops', incrementing '*n_ops' for each new op. */
668 static void
669 execute_flow_miss(struct flow_miss *miss, struct dpif_op *ops, size_t *n_ops)
670 {
671 struct ofproto_dpif *ofproto = miss->ofproto;
672 struct flow_wildcards wc;
673 struct rule_dpif *rule;
674 struct ofpbuf *packet;
675 struct xlate_in xin;
676
677 memset(&miss->stats, 0, sizeof miss->stats);
678 miss->stats.used = time_msec();
679 LIST_FOR_EACH (packet, list_node, &miss->packets) {
680 miss->stats.tcp_flags |= packet_get_tcp_flags(packet, &miss->flow);
681 miss->stats.n_bytes += packet->size;
682 miss->stats.n_packets++;
683 }
684
685 flow_wildcards_init_catchall(&wc);
686 rule_dpif_lookup(ofproto, &miss->flow, &wc, &rule);
687 rule_dpif_credit_stats(rule, &miss->stats);
688 xlate_in_init(&xin, ofproto, &miss->flow, rule, miss->stats.tcp_flags,
689 NULL);
690 xin.may_learn = true;
691 xin.resubmit_stats = &miss->stats;
692 xlate_actions(&xin, &miss->xout);
693 flow_wildcards_or(&miss->xout.wc, &miss->xout.wc, &wc);
694
695 if (rule_dpif_fail_open(rule)) {
696 LIST_FOR_EACH (packet, list_node, &miss->packets) {
697 struct ofputil_packet_in *pin;
698
699 /* Extra-special case for fail-open mode.
700 *
701 * We are in fail-open mode and the packet matched the fail-open
702 * rule, but we are connected to a controller too. We should send
703 * the packet up to the controller in the hope that it will try to
704 * set up a flow and thereby allow us to exit fail-open.
705 *
706 * See the top-level comment in fail-open.c for more information. */
707 pin = xmalloc(sizeof(*pin));
708 pin->packet = xmemdup(packet->data, packet->size);
709 pin->packet_len = packet->size;
710 pin->reason = OFPR_NO_MATCH;
711 pin->controller_id = 0;
712 pin->table_id = 0;
713 pin->cookie = 0;
714 pin->send_len = 0; /* Not used for flow table misses. */
715 flow_get_metadata(&miss->flow, &pin->fmd);
716 ofproto_dpif_send_packet_in(ofproto, pin);
717 }
718 }
719
720 if (miss->xout.slow) {
721 LIST_FOR_EACH (packet, list_node, &miss->packets) {
722 struct xlate_in xin;
723
724 xlate_in_init(&xin, miss->ofproto, &miss->flow, rule, 0, packet);
725 xlate_actions_for_side_effects(&xin);
726 }
727 }
728 rule_dpif_release(rule);
729
730 if (miss->xout.odp_actions.size) {
731 LIST_FOR_EACH (packet, list_node, &miss->packets) {
732 struct dpif_op *op = &ops[*n_ops];
733 struct dpif_execute *execute = &op->u.execute;
734
735 if (miss->flow.in_port.ofp_port
736 != vsp_realdev_to_vlandev(miss->ofproto,
737 miss->flow.in_port.ofp_port,
738 miss->flow.vlan_tci)) {
739 /* This packet was received on a VLAN splinter port. We
740 * added a VLAN to the packet to make the packet resemble
741 * the flow, but the actions were composed assuming that
742 * the packet contained no VLAN. So, we must remove the
743 * VLAN header from the packet before trying to execute the
744 * actions. */
745 eth_pop_vlan(packet);
746 }
747
748 op->type = DPIF_OP_EXECUTE;
749 execute->key = miss->key;
750 execute->key_len = miss->key_len;
751 execute->packet = packet;
752 execute->actions = miss->xout.odp_actions.data;
753 execute->actions_len = miss->xout.odp_actions.size;
754
755 (*n_ops)++;
756 }
757 }
758 }
759
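/* Processes the miss upcalls in 'upcalls': groups them by flow into a new
 * flow_miss_batch, translates actions for each distinct flow, executes the
 * resulting datapath operations, and queues the batch on 'udpif->fmbs' for
 * the main ofproto-dpif thread, unless a revalidation raced with translation
 * or the queue is full, in which case the batch is destroyed. */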
760 static void
761 handle_miss_upcalls(struct udpif *udpif, struct list *upcalls)
762 {
763 struct dpif_op *opsp[FLOW_MISS_MAX_BATCH];
764 struct dpif_op ops[FLOW_MISS_MAX_BATCH];
765 unsigned int old_reval_seq, new_reval_seq;
766 struct upcall *upcall, *next;
767 struct flow_miss_batch *fmb;
768 size_t n_upcalls, n_ops, i;
769 struct flow_miss *miss;
770
771 atomic_read(&udpif->reval_seq, &old_reval_seq);
772
773 /* Construct the to-do list.
774 *
775 * This just amounts to extracting the flow from each packet and sticking
776 * the packets that have the same flow in the same "flow_miss" structure so
777 * that we can process them together. */
778 fmb = xmalloc(sizeof *fmb);
779 hmap_init(&fmb->misses);
780 n_upcalls = 0;
781 LIST_FOR_EACH_SAFE (upcall, next, list_node, upcalls) {
782 struct dpif_upcall *dupcall = &upcall->dpif_upcall;
783 struct flow_miss *miss = &fmb->miss_buf[n_upcalls];
784 struct flow_miss *existing_miss;
785 struct ofproto_dpif *ofproto;
786 odp_port_t odp_in_port;
787 struct flow flow;
788 uint32_t hash;
789 int error;
790
791 error = xlate_receive(udpif->backer, dupcall->packet, dupcall->key,
792 dupcall->key_len, &flow, &miss->key_fitness,
793 &ofproto, &odp_in_port);
794
795 if (error == ENODEV) {
796 struct drop_key *drop_key;
797
798 /* Received packet on datapath port for which we couldn't
799 * associate an ofproto. This can happen if a port is removed
800 * while traffic is being received. Print a rate-limited message
801 * in case it happens frequently. Install a drop flow so
802 * that future packets of the flow are inexpensively dropped
803 * in the kernel. */
804 VLOG_INFO_RL(&rl, "received packet on unassociated datapath port "
805 "%"PRIu32, odp_in_port);
806
807 drop_key = xmalloc(sizeof *drop_key);
808 drop_key->key = xmemdup(dupcall->key, dupcall->key_len);
809 drop_key->key_len = dupcall->key_len;
810
811 ovs_mutex_lock(&udpif->drop_key_mutex);
812 if (udpif->n_drop_keys < MAX_QUEUE_LENGTH) {
813 udpif->n_drop_keys++;
814 list_push_back(&udpif->drop_keys, &drop_key->list_node);
815 ovs_mutex_unlock(&udpif->drop_key_mutex);
816 seq_change(udpif->wait_seq);
817 } else {
818 ovs_mutex_unlock(&udpif->drop_key_mutex);
819 COVERAGE_INC(drop_queue_overflow);
820 drop_key_destroy(drop_key);
821 }
822 continue;
823 } else if (error) {
824 continue;
825 }
826
827 flow_extract(dupcall->packet, flow.skb_priority, flow.pkt_mark,
828 &flow.tunnel, &flow.in_port, &miss->flow);
829
830 /* Add other packets to a to-do list. */
831 hash = flow_hash(&miss->flow, 0);
832 existing_miss = flow_miss_find(&fmb->misses, ofproto, &miss->flow, hash);
833 if (!existing_miss) {
834 hmap_insert(&fmb->misses, &miss->hmap_node, hash);
835 miss->ofproto = ofproto;
836 miss->key = dupcall->key;
837 miss->key_len = dupcall->key_len;
838 miss->upcall_type = dupcall->type;
839 list_init(&miss->packets);
840 list_init(&miss->upcalls);
841
842 n_upcalls++;
843 } else {
844 miss = existing_miss;
845 }
846 list_push_back(&miss->packets, &dupcall->packet->list_node);
847
848 list_remove(&upcall->list_node);
849 list_push_back(&miss->upcalls, &upcall->list_node);
850 }
851
852 LIST_FOR_EACH_SAFE (upcall, next, list_node, upcalls) {
853 list_remove(&upcall->list_node);
854 upcall_destroy(upcall);
855 }
856
857 /* Process each element in the to-do list, constructing the set of
858 * operations to batch. */
859 n_ops = 0;
860 HMAP_FOR_EACH (miss, hmap_node, &fmb->misses) {
861 execute_flow_miss(miss, ops, &n_ops);
862 }
863 ovs_assert(n_ops <= ARRAY_SIZE(ops));
864
865 /* Execute batch. */
866 for (i = 0; i < n_ops; i++) {
867 opsp[i] = &ops[i];
868 }
869 dpif_operate(udpif->dpif, opsp, n_ops);
870
871 ovs_mutex_lock(&udpif->fmb_mutex);
872 atomic_read(&udpif->reval_seq, &new_reval_seq);
873 if (old_reval_seq != new_reval_seq) {
874 /* udpif_revalidate() was called as we were calculating the actions.
875 * To be safe, we need to assume all the misses need revalidation. */
876 ovs_mutex_unlock(&udpif->fmb_mutex);
877 flow_miss_batch_destroy(fmb);
878 } else if (udpif->n_fmbs < MAX_QUEUE_LENGTH) {
879 udpif->n_fmbs++;
880 list_push_back(&udpif->fmbs, &fmb->list_node);
881 ovs_mutex_unlock(&udpif->fmb_mutex);
882 seq_change(udpif->wait_seq);
883 } else {
884 COVERAGE_INC(fmb_queue_overflow);
885 ovs_mutex_unlock(&udpif->fmb_mutex);
886 flow_miss_batch_destroy(fmb);
887 }
888 }