/* Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */

#include <config.h>
#include "ofproto-dpif-upcall.h"

#include <errno.h>
#include <stdbool.h>
#include <inttypes.h>

#include "coverage.h"
#include "dynamic-string.h"
#include "dpif.h"
#include "fail-open.h"
#include "latch.h"
#include "seq.h"
#include "list.h"
#include "netlink.h"
#include "ofpbuf.h"
#include "ofproto-dpif.h"
#include "packets.h"
#include "poll-loop.h"
#include "vlog.h"

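/* Upper bound on the number of entries in each of the queues used to pass
 * work between threads (handler miss upcalls, special upcalls, drop keys,
 * and flow_miss_batches).  When a queue is full, further entries are dropped
 * and the corresponding *_queue_overflow coverage counter is bumped. */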
#define MAX_QUEUE_LENGTH 512

VLOG_DEFINE_THIS_MODULE(ofproto_dpif_upcall);

COVERAGE_DEFINE(upcall_queue_overflow);
COVERAGE_DEFINE(drop_queue_overflow);
COVERAGE_DEFINE(miss_queue_overflow);
COVERAGE_DEFINE(fmb_queue_overflow);

/* A thread that processes each upcall handed to it by the dispatcher thread,
 * forwards the upcall's packet, and then queues it to the main ofproto_dpif
 * to possibly set up a kernel flow as a cache. */
struct handler {
    struct udpif *udpif;               /* Parent udpif. */
    pthread_t thread;                  /* Thread ID. */

    struct ovs_mutex mutex;            /* Mutex guarding the following. */

    /* Atomic queue of unprocessed miss upcalls. */
    struct list upcalls OVS_GUARDED;
    size_t n_upcalls OVS_GUARDED;

    pthread_cond_t wake_cond;          /* Wakes 'thread' while holding
                                          'mutex'. */
};

/* An upcall handler for ofproto_dpif.
 *
 * udpif is implemented as a "dispatcher" thread that reads upcalls from the
 * kernel.  It processes each upcall just enough to figure out its next
 * destination.  For a "miss" upcall (MISS_UPCALL), this is one of several
 * "handler" threads (see struct handler).  Other upcalls are queued to the
 * main ofproto_dpif. */
struct udpif {
    struct dpif *dpif;                 /* Datapath handle. */
    struct dpif_backer *backer;        /* Opaque dpif_backer pointer. */

    uint32_t secret;                   /* Random seed for upcall hash. */

    pthread_t dispatcher;              /* Dispatcher thread ID. */

    struct handler *handlers;          /* Miss handlers. */
    size_t n_handlers;

    /* Atomic queue of unprocessed drop keys. */
    struct ovs_mutex drop_key_mutex;
    struct list drop_keys OVS_GUARDED;
    size_t n_drop_keys OVS_GUARDED;

    /* Atomic queue of special upcalls for ofproto-dpif to process. */
    struct ovs_mutex upcall_mutex;
    struct list upcalls OVS_GUARDED;
    size_t n_upcalls OVS_GUARDED;

    /* Atomic queue of flow_miss_batches. */
    struct ovs_mutex fmb_mutex;
    struct list fmbs OVS_GUARDED;
    size_t n_fmbs OVS_GUARDED;

    /* Number of times udpif_revalidate() has been called. */
    atomic_uint reval_seq;

    struct seq *wait_seq;
    uint64_t last_seq;

    struct latch exit_latch;           /* Tells child threads to exit. */
};

static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

static void recv_upcalls(struct udpif *);
static void handle_miss_upcalls(struct udpif *, struct list *upcalls);
static void miss_destroy(struct flow_miss *);
static void *udpif_dispatcher(void *);
static void *udpif_miss_handler(void *);

struct udpif *
udpif_create(struct dpif_backer *backer, struct dpif *dpif)
{
    struct udpif *udpif = xzalloc(sizeof *udpif);

    udpif->dpif = dpif;
    udpif->backer = backer;
    udpif->secret = random_uint32();
    udpif->wait_seq = seq_create();
    latch_init(&udpif->exit_latch);
    list_init(&udpif->drop_keys);
    list_init(&udpif->upcalls);
    list_init(&udpif->fmbs);
    atomic_init(&udpif->reval_seq, 0);
    ovs_mutex_init(&udpif->drop_key_mutex);
    ovs_mutex_init(&udpif->upcall_mutex);
    ovs_mutex_init(&udpif->fmb_mutex);

    return udpif;
}

void
udpif_destroy(struct udpif *udpif)
{
    struct flow_miss_batch *fmb;
    struct drop_key *drop_key;
    struct upcall *upcall;

    udpif_recv_set(udpif, 0, false);

    while ((drop_key = drop_key_next(udpif))) {
        drop_key_destroy(drop_key);
    }

    while ((upcall = upcall_next(udpif))) {
        upcall_destroy(upcall);
    }

    while ((fmb = flow_miss_batch_next(udpif))) {
        flow_miss_batch_destroy(fmb);
    }

    ovs_mutex_destroy(&udpif->drop_key_mutex);
    ovs_mutex_destroy(&udpif->upcall_mutex);
    ovs_mutex_destroy(&udpif->fmb_mutex);
    latch_destroy(&udpif->exit_latch);
    seq_destroy(udpif->wait_seq);
    free(udpif);
}

/* Tells 'udpif' to begin or stop handling flow misses depending on the value
 * of 'enable'.  'n_handlers' is the number of miss_handler threads to create.
 * Passing 'n_handlers' as zero is equivalent to passing 'enable' as false. */
void
udpif_recv_set(struct udpif *udpif, size_t n_handlers, bool enable)
{
    n_handlers = enable ? n_handlers : 0;
    n_handlers = MIN(n_handlers, 64);

    /* Stop the old threads (if any). */
    if (udpif->handlers && udpif->n_handlers != n_handlers) {
        size_t i;

        latch_set(&udpif->exit_latch);

        /* Wake the handlers so they can exit. */
        for (i = 0; i < udpif->n_handlers; i++) {
            struct handler *handler = &udpif->handlers[i];

            ovs_mutex_lock(&handler->mutex);
            xpthread_cond_signal(&handler->wake_cond);
            ovs_mutex_unlock(&handler->mutex);
        }

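        /* Join the dispatcher first: once it exits, no new upcalls can be
         * queued to the handlers, so the queues drained below stay empty. */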
        xpthread_join(udpif->dispatcher, NULL);
        for (i = 0; i < udpif->n_handlers; i++) {
            struct handler *handler = &udpif->handlers[i];
            struct upcall *miss, *next;

            xpthread_join(handler->thread, NULL);

            ovs_mutex_lock(&handler->mutex);
            LIST_FOR_EACH_SAFE (miss, next, list_node, &handler->upcalls) {
                list_remove(&miss->list_node);
                upcall_destroy(miss);
            }
            ovs_mutex_unlock(&handler->mutex);
            ovs_mutex_destroy(&handler->mutex);

            xpthread_cond_destroy(&handler->wake_cond);
        }
        latch_poll(&udpif->exit_latch);

        free(udpif->handlers);
        udpif->handlers = NULL;
        udpif->n_handlers = 0;
    }

    /* Start new threads (if necessary). */
    if (!udpif->handlers && n_handlers) {
        size_t i;

        udpif->n_handlers = n_handlers;
        udpif->handlers = xzalloc(udpif->n_handlers * sizeof *udpif->handlers);
        for (i = 0; i < udpif->n_handlers; i++) {
            struct handler *handler = &udpif->handlers[i];

            handler->udpif = udpif;
            list_init(&handler->upcalls);
            xpthread_cond_init(&handler->wake_cond, NULL);
            ovs_mutex_init(&handler->mutex);
            xpthread_create(&handler->thread, NULL, udpif_miss_handler, handler);
        }
        xpthread_create(&udpif->dispatcher, NULL, udpif_dispatcher, udpif);
    }
}

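/* Records the current value of 'wait_seq' so that a subsequent udpif_wait()
 * wakes up only for changes made after this call. */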
void
udpif_run(struct udpif *udpif)
{
    udpif->last_seq = seq_read(udpif->wait_seq);
}

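/* Arranges for the poll loop to wake up immediately if any of the work
 * queues is non-empty, and otherwise whenever 'wait_seq' changes. */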
void
udpif_wait(struct udpif *udpif)
{
    ovs_mutex_lock(&udpif->drop_key_mutex);
    if (udpif->n_drop_keys) {
        poll_immediate_wake();
    }
    ovs_mutex_unlock(&udpif->drop_key_mutex);

    ovs_mutex_lock(&udpif->upcall_mutex);
    if (udpif->n_upcalls) {
        poll_immediate_wake();
    }
    ovs_mutex_unlock(&udpif->upcall_mutex);

    ovs_mutex_lock(&udpif->fmb_mutex);
    if (udpif->n_fmbs) {
        poll_immediate_wake();
    }
    ovs_mutex_unlock(&udpif->fmb_mutex);

    seq_wait(udpif->wait_seq, udpif->last_seq);
}

/* Notifies 'udpif' that something changed which may render previous
 * xlate_actions() results invalid. */
void
udpif_revalidate(struct udpif *udpif)
{
    struct flow_miss_batch *fmb, *next_fmb;
    unsigned int junk;

    /* Since we remove each miss on revalidation, their statistics won't be
     * accounted to the appropriate 'facet's in the upper layer.  In most
     * cases, this is alright because we've already pushed the stats to the
     * relevant rules.  However, NetFlow requires absolute packet counts on
     * 'facet's which could now be incorrect. */
    ovs_mutex_lock(&udpif->fmb_mutex);
    atomic_add(&udpif->reval_seq, 1, &junk);
    LIST_FOR_EACH_SAFE (fmb, next_fmb, list_node, &udpif->fmbs) {
        list_remove(&fmb->list_node);
        flow_miss_batch_destroy(fmb);
        udpif->n_fmbs--;
    }
    ovs_mutex_unlock(&udpif->fmb_mutex);
    udpif_drop_key_clear(udpif);
}

/* Retrieves the next upcall which ofproto-dpif is responsible for handling.
 * The caller is responsible for destroying the returned upcall with
 * upcall_destroy(). */
struct upcall *
upcall_next(struct udpif *udpif)
{
    struct upcall *next = NULL;

    ovs_mutex_lock(&udpif->upcall_mutex);
    if (udpif->n_upcalls) {
        udpif->n_upcalls--;
        next = CONTAINER_OF(list_pop_front(&udpif->upcalls), struct upcall,
                            list_node);
    }
    ovs_mutex_unlock(&udpif->upcall_mutex);
    return next;
}

/* Destroys and deallocates 'upcall'. */
void
upcall_destroy(struct upcall *upcall)
{
    if (upcall) {
        ofpbuf_uninit(&upcall->upcall_buf);
        free(upcall);
    }
}

/* Retrieves the next batch of processed flow misses for 'udpif' to install.
 * The caller is responsible for destroying it with flow_miss_batch_destroy().
 */
struct flow_miss_batch *
flow_miss_batch_next(struct udpif *udpif)
{
    struct flow_miss_batch *next = NULL;

    ovs_mutex_lock(&udpif->fmb_mutex);
    if (udpif->n_fmbs) {
        udpif->n_fmbs--;
        next = CONTAINER_OF(list_pop_front(&udpif->fmbs),
                            struct flow_miss_batch, list_node);
    }
    ovs_mutex_unlock(&udpif->fmb_mutex);
    return next;
}

/* Destroys and deallocates 'fmb'. */
void
flow_miss_batch_destroy(struct flow_miss_batch *fmb)
{
    struct flow_miss *miss, *next;

    if (!fmb) {
        return;
    }

    HMAP_FOR_EACH_SAFE (miss, next, hmap_node, &fmb->misses) {
        hmap_remove(&fmb->misses, &miss->hmap_node);
        miss_destroy(miss);
    }

    hmap_destroy(&fmb->misses);
    free(fmb);
}

/* Retrieves the next drop key which ofproto-dpif needs to process.  The
 * caller is responsible for destroying it with drop_key_destroy(). */
struct drop_key *
drop_key_next(struct udpif *udpif)
{
    struct drop_key *next = NULL;

    ovs_mutex_lock(&udpif->drop_key_mutex);
    if (udpif->n_drop_keys) {
        udpif->n_drop_keys--;
        next = CONTAINER_OF(list_pop_front(&udpif->drop_keys), struct drop_key,
                            list_node);
    }
    ovs_mutex_unlock(&udpif->drop_key_mutex);
    return next;
}

/* Destroys and deallocates 'drop_key'. */
void
drop_key_destroy(struct drop_key *drop_key)
{
    if (drop_key) {
        free(drop_key->key);
        free(drop_key);
    }
}

/* Clears all drop keys waiting to be processed by drop_key_next(). */
void
udpif_drop_key_clear(struct udpif *udpif)
{
    struct drop_key *drop_key, *next;

    ovs_mutex_lock(&udpif->drop_key_mutex);
    LIST_FOR_EACH_SAFE (drop_key, next, list_node, &udpif->drop_keys) {
        list_remove(&drop_key->list_node);
        drop_key_destroy(drop_key);
        udpif->n_drop_keys--;
    }
    ovs_mutex_unlock(&udpif->drop_key_mutex);
}
\f
/* The dispatcher thread is responsible for receiving upcalls from the kernel,
 * assigning the miss upcalls to a miss_handler thread, and assigning the more
 * complex ones to ofproto-dpif directly. */
static void *
udpif_dispatcher(void *arg)
{
    struct udpif *udpif = arg;

    set_subprogram_name("dispatcher");
    while (!latch_is_set(&udpif->exit_latch)) {
        recv_upcalls(udpif);
        dpif_recv_wait(udpif->dpif);
        latch_wait(&udpif->exit_latch);
        poll_block();
    }

    return NULL;
}

/* The miss handler thread is responsible for processing miss upcalls
 * retrieved by the dispatcher thread.  Once finished, it passes the processed
 * miss upcalls to ofproto-dpif where they're installed in the datapath. */
static void *
udpif_miss_handler(void *arg)
{
    struct list misses = LIST_INITIALIZER(&misses);
    struct handler *handler = arg;

    set_subprogram_name("miss_handler");
    for (;;) {
        size_t i;

        ovs_mutex_lock(&handler->mutex);

        if (latch_is_set(&handler->udpif->exit_latch)) {
            ovs_mutex_unlock(&handler->mutex);
            return NULL;
        }

        if (!handler->n_upcalls) {
            ovs_mutex_cond_wait(&handler->wake_cond, &handler->mutex);
        }

        for (i = 0; i < FLOW_MISS_MAX_BATCH; i++) {
            if (handler->n_upcalls) {
                handler->n_upcalls--;
                list_push_back(&misses, list_pop_front(&handler->upcalls));
            } else {
                break;
            }
        }
        ovs_mutex_unlock(&handler->mutex);

        handle_miss_upcalls(handler->udpif, &misses);
    }
}
\f
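/* Destroys 'miss' along with any upcalls still queued on it. */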
static void
miss_destroy(struct flow_miss *miss)
{
    struct upcall *upcall, *next;

    LIST_FOR_EACH_SAFE (upcall, next, list_node, &miss->upcalls) {
        list_remove(&upcall->list_node);
        upcall_destroy(upcall);
    }
    xlate_out_uninit(&miss->xout);
}

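/* Determines what kind of upcall 'upcall' is, based on its datapath upcall
 * type and, for "action" upcalls, the user-action cookie embedded in its
 * userdata attribute. */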
static enum upcall_type
classify_upcall(const struct upcall *upcall)
{
    const struct dpif_upcall *dpif_upcall = &upcall->dpif_upcall;
    union user_action_cookie cookie;
    size_t userdata_len;

    /* First look at the upcall type. */
    switch (dpif_upcall->type) {
    case DPIF_UC_ACTION:
        break;

    case DPIF_UC_MISS:
        return MISS_UPCALL;

    case DPIF_N_UC_TYPES:
    default:
        VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32,
                     dpif_upcall->type);
        return BAD_UPCALL;
    }

    /* "action" upcalls need a closer look. */
    if (!dpif_upcall->userdata) {
        VLOG_WARN_RL(&rl, "action upcall missing cookie");
        return BAD_UPCALL;
    }
    userdata_len = nl_attr_get_size(dpif_upcall->userdata);
    if (userdata_len < sizeof cookie.type
        || userdata_len > sizeof cookie) {
        VLOG_WARN_RL(&rl, "action upcall cookie has unexpected size %zu",
                     userdata_len);
        return BAD_UPCALL;
    }
    memset(&cookie, 0, sizeof cookie);
    memcpy(&cookie, nl_attr_get(dpif_upcall->userdata), userdata_len);
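    /* The cookie is a union, so each variant is identified by matching both
     * the exact attribute length and the 'type' field of the zero-padded
     * copy made above. */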
    if (userdata_len == sizeof cookie.sflow
        && cookie.type == USER_ACTION_COOKIE_SFLOW) {
        return SFLOW_UPCALL;
    } else if (userdata_len == sizeof cookie.slow_path
               && cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
        return MISS_UPCALL;
    } else if (userdata_len == sizeof cookie.flow_sample
               && cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
        return FLOW_SAMPLE_UPCALL;
    } else if (userdata_len == sizeof cookie.ipfix
               && cookie.type == USER_ACTION_COOKIE_IPFIX) {
        return IPFIX_UPCALL;
    } else {
        VLOG_WARN_RL(&rl, "invalid user cookie of type %"PRIu16
                     " and size %zu", cookie.type, userdata_len);
        return BAD_UPCALL;
    }
}

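/* Reads upcalls from the datapath until none remain, dispatching each miss
 * upcall to a handler thread chosen by flow hash and queuing other upcalls
 * for the main ofproto-dpif thread.  An upcall is dropped, and a coverage
 * counter bumped, if its destination queue is full. */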
static void
recv_upcalls(struct udpif *udpif)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(60, 60);
    for (;;) {
        struct upcall *upcall;
        int error;

        upcall = xmalloc(sizeof *upcall);
        ofpbuf_use_stub(&upcall->upcall_buf, upcall->upcall_stub,
                        sizeof upcall->upcall_stub);
        error = dpif_recv(udpif->dpif, &upcall->dpif_upcall,
                          &upcall->upcall_buf);
        if (error) {
            upcall_destroy(upcall);
            break;
        }

        upcall->type = classify_upcall(upcall);
        if (upcall->type == BAD_UPCALL) {
            upcall_destroy(upcall);
        } else if (upcall->type == MISS_UPCALL) {
            struct dpif_upcall *dupcall = &upcall->dpif_upcall;
            uint32_t hash = udpif->secret;
            struct handler *handler;
            struct nlattr *nla;
            size_t n_bytes, left;

            n_bytes = 0;
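            /* Hash the 32-bit flow key attributes that identify a microflow
             * (input port and TCP/UDP ports) so that all packets of a flow
             * land on the same handler thread; seeding with the random
             * 'secret' keeps the flow-to-handler mapping unpredictable. */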
            NL_ATTR_FOR_EACH (nla, left, dupcall->key, dupcall->key_len) {
                enum ovs_key_attr type = nl_attr_type(nla);
                if (type == OVS_KEY_ATTR_IN_PORT
                    || type == OVS_KEY_ATTR_TCP
                    || type == OVS_KEY_ATTR_UDP) {
                    if (nl_attr_get_size(nla) == 4) {
                        ovs_be32 attr = nl_attr_get_be32(nla);
                        hash = mhash_add(hash, (OVS_FORCE uint32_t) attr);
                        n_bytes += 4;
                    } else {
                        VLOG_WARN("Netlink attribute with incorrect size.");
                    }
                }
            }
            hash = mhash_finish(hash, n_bytes);

            handler = &udpif->handlers[hash % udpif->n_handlers];

            ovs_mutex_lock(&handler->mutex);
            if (handler->n_upcalls < MAX_QUEUE_LENGTH) {
                list_push_back(&handler->upcalls, &upcall->list_node);
                handler->n_upcalls++;
                xpthread_cond_signal(&handler->wake_cond);
                ovs_mutex_unlock(&handler->mutex);
                if (!VLOG_DROP_DBG(&rl)) {
                    struct ds ds = DS_EMPTY_INITIALIZER;

                    odp_flow_key_format(upcall->dpif_upcall.key,
                                        upcall->dpif_upcall.key_len,
                                        &ds);
                    VLOG_DBG("dispatcher: miss enqueue (%s)", ds_cstr(&ds));
                    ds_destroy(&ds);
                }
            } else {
                ovs_mutex_unlock(&handler->mutex);
                COVERAGE_INC(miss_queue_overflow);
                upcall_destroy(upcall);
            }
        } else {
            ovs_mutex_lock(&udpif->upcall_mutex);
            if (udpif->n_upcalls < MAX_QUEUE_LENGTH) {
                udpif->n_upcalls++;
                list_push_back(&udpif->upcalls, &upcall->list_node);
                ovs_mutex_unlock(&udpif->upcall_mutex);
                seq_change(udpif->wait_seq);
            } else {
                ovs_mutex_unlock(&udpif->upcall_mutex);
                COVERAGE_INC(upcall_queue_overflow);
                upcall_destroy(upcall);
            }
        }
    }
}

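/* Searches 'todo' for a flow_miss with the given 'ofproto', 'flow', and
 * 'hash'.  Returns it if found, otherwise NULL. */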
static struct flow_miss *
flow_miss_find(struct hmap *todo, const struct ofproto_dpif *ofproto,
               const struct flow *flow, uint32_t hash)
{
    struct flow_miss *miss;

    HMAP_FOR_EACH_WITH_HASH (miss, hmap_node, hash, todo) {
        if (miss->ofproto == ofproto && flow_equal(&miss->flow, flow)) {
            return miss;
        }
    }

    return NULL;
}

/* Executes flow miss 'miss'.  May add any required datapath operations
 * to 'ops', incrementing '*n_ops' for each new op. */
static void
execute_flow_miss(struct flow_miss *miss, struct dpif_op *ops, size_t *n_ops)
{
    struct ofproto_dpif *ofproto = miss->ofproto;
    struct flow_wildcards wc;
    struct rule_dpif *rule;
    struct ofpbuf *packet;
    struct xlate_in xin;

    memset(&miss->stats, 0, sizeof miss->stats);
    miss->stats.used = time_msec();
    LIST_FOR_EACH (packet, list_node, &miss->packets) {
        miss->stats.tcp_flags |= packet_get_tcp_flags(packet, &miss->flow);
        miss->stats.n_bytes += packet->size;
        miss->stats.n_packets++;
    }

    flow_wildcards_init_catchall(&wc);
    rule_dpif_lookup(ofproto, &miss->flow, &wc, &rule);
    rule_credit_stats(rule, &miss->stats);
    xlate_in_init(&xin, ofproto, &miss->flow, rule, miss->stats.tcp_flags,
                  NULL);
    xin.may_learn = true;
    xin.resubmit_stats = &miss->stats;
    xlate_actions(&xin, &miss->xout);
    flow_wildcards_or(&miss->xout.wc, &miss->xout.wc, &wc);

    if (rule->up.cr.priority == FAIL_OPEN_PRIORITY) {
        LIST_FOR_EACH (packet, list_node, &miss->packets) {
            struct ofputil_packet_in *pin;

            /* Extra-special case for fail-open mode.
             *
             * We are in fail-open mode and the packet matched the fail-open
             * rule, but we are connected to a controller too.  We should send
             * the packet up to the controller in the hope that it will try to
             * set up a flow and thereby allow us to exit fail-open.
             *
             * See the top-level comment in fail-open.c for more information. */
            pin = xmalloc(sizeof(*pin));
            pin->packet = xmemdup(packet->data, packet->size);
            pin->packet_len = packet->size;
            pin->reason = OFPR_NO_MATCH;
            pin->controller_id = 0;
            pin->table_id = 0;
            pin->cookie = 0;
            pin->send_len = 0; /* Not used for flow table misses. */
            flow_get_metadata(&miss->flow, &pin->fmd);
            ofproto_dpif_send_packet_in(ofproto, pin);
        }
    }

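    /* Slow-pathed flows are not handled entirely in the datapath, so each
     * packet is translated again purely for its side effects (e.g. learning
     * or sending to the controller). */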
    if (miss->xout.slow) {
        LIST_FOR_EACH (packet, list_node, &miss->packets) {
            struct xlate_in xin;

            xlate_in_init(&xin, miss->ofproto, &miss->flow, rule, 0, packet);
            xlate_actions_for_side_effects(&xin);
        }
    }
    rule_release(rule);

    if (miss->xout.odp_actions.size) {
        LIST_FOR_EACH (packet, list_node, &miss->packets) {
            struct dpif_op *op = &ops[*n_ops];
            struct dpif_execute *execute = &op->u.execute;

            if (miss->flow.in_port.ofp_port
                != vsp_realdev_to_vlandev(miss->ofproto,
                                          miss->flow.in_port.ofp_port,
                                          miss->flow.vlan_tci)) {
                /* This packet was received on a VLAN splinter port.  We
                 * added a VLAN to the packet to make the packet resemble
                 * the flow, but the actions were composed assuming that
                 * the packet contained no VLAN.  So, we must remove the
                 * VLAN header from the packet before trying to execute the
                 * actions. */
                eth_pop_vlan(packet);
            }

            op->type = DPIF_OP_EXECUTE;
            execute->key = miss->key;
            execute->key_len = miss->key_len;
            execute->packet = packet;
            execute->actions = miss->xout.odp_actions.data;
            execute->actions_len = miss->xout.odp_actions.size;

            (*n_ops)++;
        }
    }
}

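/* Processes the miss upcalls in 'upcalls': groups them by flow into a
 * flow_miss_batch, translates and executes each unique flow's actions as a
 * batch of datapath operations, and queues the batch for ofproto-dpif to
 * install, unless a revalidation intervened. */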
static void
handle_miss_upcalls(struct udpif *udpif, struct list *upcalls)
{
    struct dpif_op *opsp[FLOW_MISS_MAX_BATCH];
    struct dpif_op ops[FLOW_MISS_MAX_BATCH];
    unsigned int old_reval_seq, new_reval_seq;
    struct upcall *upcall, *next;
    struct flow_miss_batch *fmb;
    size_t n_upcalls, n_ops, i;
    struct flow_miss *miss;

    atomic_read(&udpif->reval_seq, &old_reval_seq);

    /* Construct the to-do list.
     *
     * This just amounts to extracting the flow from each packet and sticking
     * the packets that have the same flow in the same "flow_miss" structure so
     * that we can process them together. */
    fmb = xmalloc(sizeof *fmb);
    hmap_init(&fmb->misses);
    n_upcalls = 0;
    LIST_FOR_EACH_SAFE (upcall, next, list_node, upcalls) {
        struct dpif_upcall *dupcall = &upcall->dpif_upcall;
        struct flow_miss *miss = &fmb->miss_buf[n_upcalls];
        struct flow_miss *existing_miss;
        struct ofproto_dpif *ofproto;
        odp_port_t odp_in_port;
        struct flow flow;
        uint32_t hash;
        int error;

        error = xlate_receive(udpif->backer, dupcall->packet, dupcall->key,
                              dupcall->key_len, &flow, &miss->key_fitness,
                              &ofproto, &odp_in_port);

        if (error == ENODEV) {
            struct drop_key *drop_key;

            /* Received packet on datapath port for which we couldn't
             * associate an ofproto.  This can happen if a port is removed
             * while traffic is being received.  Print a rate-limited message
             * in case it happens frequently.  Install a drop flow so
             * that future packets of the flow are inexpensively dropped
             * in the kernel. */
            VLOG_INFO_RL(&rl, "received packet on unassociated datapath port "
                         "%"PRIu32, odp_in_port);

            drop_key = xmalloc(sizeof *drop_key);
            drop_key->key = xmemdup(dupcall->key, dupcall->key_len);
            drop_key->key_len = dupcall->key_len;

            ovs_mutex_lock(&udpif->drop_key_mutex);
            if (udpif->n_drop_keys < MAX_QUEUE_LENGTH) {
                udpif->n_drop_keys++;
                list_push_back(&udpif->drop_keys, &drop_key->list_node);
                ovs_mutex_unlock(&udpif->drop_key_mutex);
                seq_change(udpif->wait_seq);
            } else {
                ovs_mutex_unlock(&udpif->drop_key_mutex);
                COVERAGE_INC(drop_queue_overflow);
                drop_key_destroy(drop_key);
            }
            continue;
        } else if (error) {
            continue;
        }

        flow_extract(dupcall->packet, flow.skb_priority, flow.pkt_mark,
                     &flow.tunnel, &flow.in_port, &miss->flow);

        /* Add other packets to a to-do list. */
        hash = flow_hash(&miss->flow, 0);
        existing_miss = flow_miss_find(&fmb->misses, ofproto, &miss->flow,
                                       hash);
        if (!existing_miss) {
            hmap_insert(&fmb->misses, &miss->hmap_node, hash);
            miss->ofproto = ofproto;
            miss->key = dupcall->key;
            miss->key_len = dupcall->key_len;
            miss->upcall_type = dupcall->type;
            list_init(&miss->packets);
            list_init(&miss->upcalls);

            n_upcalls++;
        } else {
            miss = existing_miss;
        }
        list_push_back(&miss->packets, &dupcall->packet->list_node);

        list_remove(&upcall->list_node);
        list_push_back(&miss->upcalls, &upcall->list_node);
    }

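    /* Any upcalls still on the input list failed in xlate_receive() above
     * and never joined a flow_miss, so destroy them here. */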
    LIST_FOR_EACH_SAFE (upcall, next, list_node, upcalls) {
        list_remove(&upcall->list_node);
        upcall_destroy(upcall);
    }

    /* Process each element in the to-do list, constructing the set of
     * operations to batch. */
    n_ops = 0;
    HMAP_FOR_EACH (miss, hmap_node, &fmb->misses) {
        execute_flow_miss(miss, ops, &n_ops);
    }
    ovs_assert(n_ops <= ARRAY_SIZE(ops));

    /* Execute batch. */
    for (i = 0; i < n_ops; i++) {
        opsp[i] = &ops[i];
    }
    dpif_operate(udpif->dpif, opsp, n_ops);

    ovs_mutex_lock(&udpif->fmb_mutex);
    atomic_read(&udpif->reval_seq, &new_reval_seq);
    if (old_reval_seq != new_reval_seq) {
        /* udpif_revalidate() was called as we were calculating the actions.
         * To be safe, we need to assume all the misses need revalidation. */
        ovs_mutex_unlock(&udpif->fmb_mutex);
        flow_miss_batch_destroy(fmb);
    } else if (udpif->n_fmbs < MAX_QUEUE_LENGTH) {
        udpif->n_fmbs++;
        list_push_back(&udpif->fmbs, &fmb->list_node);
        ovs_mutex_unlock(&udpif->fmb_mutex);
        seq_change(udpif->wait_seq);
    } else {
        COVERAGE_INC(fmb_queue_overflow);
        ovs_mutex_unlock(&udpif->fmb_mutex);
        flow_miss_batch_destroy(fmb);
    }
}