1 /*
2 * Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include "dpif.h"
19
20 #include <ctype.h>
21 #include <errno.h>
22 #include <fcntl.h>
23 #include <inttypes.h>
24 #include <netinet/in.h>
25 #include <sys/socket.h>
26 #include <net/if.h>
27 #include <stdint.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <sys/ioctl.h>
31 #include <sys/stat.h>
32 #include <unistd.h>
33
34 #include "classifier.h"
35 #include "csum.h"
36 #include "dpif.h"
37 #include "dpif-provider.h"
38 #include "dummy.h"
39 #include "dynamic-string.h"
40 #include "flow.h"
41 #include "hmap.h"
42 #include "list.h"
43 #include "meta-flow.h"
44 #include "netdev.h"
45 #include "netdev-vport.h"
46 #include "netlink.h"
47 #include "odp-execute.h"
48 #include "odp-util.h"
49 #include "ofp-print.h"
50 #include "ofpbuf.h"
51 #include "packets.h"
52 #include "poll-loop.h"
53 #include "random.h"
54 #include "seq.h"
55 #include "shash.h"
56 #include "sset.h"
57 #include "timeval.h"
58 #include "unixctl.h"
59 #include "util.h"
60 #include "vlog.h"
61
62 VLOG_DEFINE_THIS_MODULE(dpif_netdev);
63
64 /* By default, choose a priority in the middle. */
65 #define NETDEV_RULE_PRIORITY 0x8000
66
67 /* Configuration parameters. */
68 enum { MAX_PORTS = 256 }; /* Maximum number of ports. */
69 enum { MAX_FLOWS = 65536 }; /* Maximum number of flows in flow table. */
70
71 /* Enough headroom to add a VLAN tag, plus an extra 2 bytes so that the IP
72 * header following the 14-byte Ethernet header lands on a 4-byte boundary. */
73 enum { DP_NETDEV_HEADROOM = 2 + VLAN_HEADER_LEN };
74
75 /* Queues. */
76 enum { N_QUEUES = 2 }; /* Queues for dpif_recv(), one per upcall type. */
77 enum { MAX_QUEUE_LEN = 128 }; /* Maximum number of packets per queue. */
78 enum { QUEUE_MASK = MAX_QUEUE_LEN - 1 };
79 BUILD_ASSERT_DECL(IS_POW2(MAX_QUEUE_LEN));
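/* The upcall queues below are ring buffers: 'head' and 'tail' are free-running
 * counters, a slot is addressed as upcalls[counter & QUEUE_MASK], and a queue
 * is full when head - tail == MAX_QUEUE_LEN.  For example (illustrative
 * numbers only), head == 130 and tail == 125 means 5 upcalls are queued and
 * the next one lands in slot 130 & 127 == 2.  This indexing only works
 * because MAX_QUEUE_LEN is a power of 2, which the assertion above checks. */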
80
81 struct dp_netdev_upcall {
82 struct dpif_upcall upcall; /* Queued upcall information. */
83 struct ofpbuf buf; /* ofpbuf instance for upcall.packet. */
84 };
85
86 struct dp_netdev_queue {
87 struct dp_netdev_upcall upcalls[MAX_QUEUE_LEN];
88     unsigned int head, tail;   /* Free-running; index 'upcalls' with & QUEUE_MASK. */
89 };
90
91 /* Datapath based on the network device interface from netdev.h. */
92 struct dp_netdev {
93 const struct dpif_class *class;
94 char *name;
95 int open_cnt;
96 bool destroyed;
97 int max_mtu; /* Maximum MTU of any port added so far. */
98
99 struct dp_netdev_queue queues[N_QUEUES];
100 struct classifier cls; /* Classifier. */
101 struct hmap flow_table; /* Flow table. */
102 struct seq *queue_seq; /* Incremented whenever a packet is queued. */
103
104 /* Statistics. */
105 long long int n_hit; /* Number of flow table matches. */
106 long long int n_missed; /* Number of flow table misses. */
107 long long int n_lost; /* Number of misses not passed to client. */
108
109 /* Ports. */
110 struct dp_netdev_port *ports[MAX_PORTS];
111 struct list port_list;
112 struct seq *port_seq; /* Incremented whenever a port changes. */
113 };
114
115 /* A port in a netdev-based datapath. */
116 struct dp_netdev_port {
117 odp_port_t port_no; /* Index into dp_netdev's 'ports'. */
118 struct list node; /* Element in dp_netdev's 'port_list'. */
119 struct netdev *netdev;
120 struct netdev_saved_flags *sf;
121 struct netdev_rx *rx;
122 char *type; /* Port type as requested by user. */
123 };
124
125 /* A flow in dp_netdev's 'flow_table'. */
126 struct dp_netdev_flow {
127 /* Packet classification. */
128 struct cls_rule cr; /* In owning dp_netdev's 'cls'. */
129
130     /* Hash table node, indexed by the unmasked flow. */
131 struct hmap_node node; /* In owning dp_netdev's 'flow_table'. */
132 struct flow flow; /* The flow that created this entry. */
133
134 /* Statistics. */
135 long long int used; /* Last used time, in monotonic msecs. */
136 long long int packet_count; /* Number of packets matched. */
137 long long int byte_count; /* Number of bytes matched. */
138 uint16_t tcp_flags; /* Bitwise-OR of seen tcp_flags values. */
139
140 /* Actions. */
141 struct nlattr *actions;
142 size_t actions_len;
143 };
144
145 /* Interface to netdev-based datapath. */
146 struct dpif_netdev {
147 struct dpif dpif;
148 struct dp_netdev *dp;
149 uint64_t last_port_seq;
150 };
151
152 /* All netdev-based datapaths. */
153 static struct shash dp_netdevs = SHASH_INITIALIZER(&dp_netdevs);
154
155 /* Global lock for all data. */
156 static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER;
157
158 static int get_port_by_number(struct dp_netdev *, odp_port_t port_no,
159 struct dp_netdev_port **portp);
160 static int get_port_by_name(struct dp_netdev *, const char *devname,
161 struct dp_netdev_port **portp);
162 static void dp_netdev_free(struct dp_netdev *);
163 static void dp_netdev_flow_flush(struct dp_netdev *);
164 static int do_add_port(struct dp_netdev *, const char *devname,
165 const char *type, odp_port_t port_no);
166 static int do_del_port(struct dp_netdev *, odp_port_t port_no);
167 static int dpif_netdev_open(const struct dpif_class *, const char *name,
168 bool create, struct dpif **);
169 static int dp_netdev_output_userspace(struct dp_netdev *, struct ofpbuf *,
170 int queue_no, const struct flow *,
171 const struct nlattr *userdata);
172 static void dp_netdev_execute_actions(struct dp_netdev *, const struct flow *,
173 struct ofpbuf *, struct pkt_metadata *,
174 const struct nlattr *actions,
175 size_t actions_len);
176 static void dp_netdev_port_input(struct dp_netdev *dp, struct ofpbuf *packet,
177 struct pkt_metadata *md);
178
179 static struct dpif_netdev *
180 dpif_netdev_cast(const struct dpif *dpif)
181 {
182 ovs_assert(dpif->dpif_class->open == dpif_netdev_open);
183 return CONTAINER_OF(dpif, struct dpif_netdev, dpif);
184 }
185
186 static struct dp_netdev *
187 get_dp_netdev(const struct dpif *dpif)
188 {
189 return dpif_netdev_cast(dpif)->dp;
190 }
191
192 static int
193 dpif_netdev_enumerate(struct sset *all_dps)
194 {
195 struct shash_node *node;
196
197 ovs_mutex_lock(&dp_netdev_mutex);
198 SHASH_FOR_EACH(node, &dp_netdevs) {
199 sset_add(all_dps, node->name);
200 }
201 ovs_mutex_unlock(&dp_netdev_mutex);
202
203 return 0;
204 }
205
206 static bool
207 dpif_netdev_class_is_dummy(const struct dpif_class *class)
208 {
209 return class != &dpif_netdev_class;
210 }
211
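/* Maps a requested port 'type' to the netdev type that is actually opened:
 * "internal" becomes "tap" on the real netdev datapath and "dummy" on dummy
 * datapath classes; any other type string is passed through unchanged. */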
212 static const char *
213 dpif_netdev_port_open_type(const struct dpif_class *class, const char *type)
214 {
215 return strcmp(type, "internal") ? type
216 : dpif_netdev_class_is_dummy(class) ? "dummy"
217 : "tap";
218 }
219
220 static struct dpif *
221 create_dpif_netdev(struct dp_netdev *dp)
222 {
223 uint16_t netflow_id = hash_string(dp->name, 0);
224 struct dpif_netdev *dpif;
225
226 dp->open_cnt++;
227
228 dpif = xmalloc(sizeof *dpif);
229 dpif_init(&dpif->dpif, dp->class, dp->name, netflow_id >> 8, netflow_id);
230 dpif->dp = dp;
231 dpif->last_port_seq = seq_read(dp->port_seq);
232
233 return &dpif->dpif;
234 }
235
236 /* Choose an unused, non-zero port number and return it on success.
237 * Return ODPP_NONE on failure. */
238 static odp_port_t
239 choose_port(struct dp_netdev *dp, const char *name)
240 {
241 uint32_t port_no;
242
243 if (dp->class != &dpif_netdev_class) {
244 const char *p;
245 int start_no = 0;
246
247 /* If the port name begins with "br", start the number search at
248 * 100 to make writing tests easier. */
249 if (!strncmp(name, "br", 2)) {
250 start_no = 100;
251 }
252
253 /* If the port name contains a number, try to assign that port number.
254 * This can make writing unit tests easier because port numbers are
255 * predictable. */
256 for (p = name; *p != '\0'; p++) {
257 if (isdigit((unsigned char) *p)) {
258 port_no = start_no + strtol(p, NULL, 10);
259 if (port_no > 0 && port_no < MAX_PORTS
260 && !dp->ports[port_no]) {
261 return u32_to_odp(port_no);
262 }
263 break;
264 }
265 }
266 }
267
268 for (port_no = 1; port_no < MAX_PORTS; port_no++) {
269 if (!dp->ports[port_no]) {
270 return u32_to_odp(port_no);
271 }
272 }
273
274 return ODPP_NONE;
275 }
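/* Examples of the heuristic above, assuming a dummy datapath class with the
 * relevant numbers free: a port named "eth2" is assigned port number 2, and a
 * port named "br0" is assigned 100 + 0 == 100.  Names without a usable digit
 * fall back to the first free number starting at 1. */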
276
277 static int
278 create_dp_netdev(const char *name, const struct dpif_class *class,
279 struct dp_netdev **dpp)
280 {
281 struct dp_netdev *dp;
282 int error;
283 int i;
284
285 dp = xzalloc(sizeof *dp);
286 dp->class = class;
287 dp->name = xstrdup(name);
288 dp->open_cnt = 0;
289 dp->max_mtu = ETH_PAYLOAD_MAX;
290 for (i = 0; i < N_QUEUES; i++) {
291 dp->queues[i].head = dp->queues[i].tail = 0;
292 }
293 dp->queue_seq = seq_create();
294 classifier_init(&dp->cls, NULL);
295 hmap_init(&dp->flow_table);
296 list_init(&dp->port_list);
297 dp->port_seq = seq_create();
298
299 error = do_add_port(dp, name, "internal", ODPP_LOCAL);
300 if (error) {
301 dp_netdev_free(dp);
302 return error;
303 }
304
305 shash_add(&dp_netdevs, name, dp);
306
307 *dpp = dp;
308 return 0;
309 }
310
311 static int
312 dpif_netdev_open(const struct dpif_class *class, const char *name,
313 bool create, struct dpif **dpifp)
314 {
315 struct dp_netdev *dp;
316 int error;
317
318 ovs_mutex_lock(&dp_netdev_mutex);
319 dp = shash_find_data(&dp_netdevs, name);
320 if (!dp) {
321 error = create ? create_dp_netdev(name, class, &dp) : ENODEV;
322 } else {
323 error = (dp->class != class ? EINVAL
324 : create ? EEXIST
325 : 0);
326 }
327 if (!error) {
328 *dpifp = create_dpif_netdev(dp);
329 }
330 ovs_mutex_unlock(&dp_netdev_mutex);
331
332 return error;
333 }
334
335 static void
336 dp_netdev_purge_queues(struct dp_netdev *dp)
337 {
338 int i;
339
340 for (i = 0; i < N_QUEUES; i++) {
341 struct dp_netdev_queue *q = &dp->queues[i];
342
343 while (q->tail != q->head) {
344 struct dp_netdev_upcall *u = &q->upcalls[q->tail++ & QUEUE_MASK];
345 ofpbuf_uninit(&u->upcall.packet);
346 ofpbuf_uninit(&u->buf);
347 }
348 }
349 }
350
351 static void
352 dp_netdev_free(struct dp_netdev *dp)
353 {
354 struct dp_netdev_port *port, *next;
355
356 dp_netdev_flow_flush(dp);
357 LIST_FOR_EACH_SAFE (port, next, node, &dp->port_list) {
358 do_del_port(dp, port->port_no);
359 }
360 dp_netdev_purge_queues(dp);
361 seq_destroy(dp->queue_seq);
362 classifier_destroy(&dp->cls);
363 hmap_destroy(&dp->flow_table);
364 seq_destroy(dp->port_seq);
365 free(dp->name);
366 free(dp);
367 }
368
369 static void
370 dpif_netdev_close(struct dpif *dpif)
371 {
372 struct dp_netdev *dp = get_dp_netdev(dpif);
373
374 ovs_mutex_lock(&dp_netdev_mutex);
375
376 ovs_assert(dp->open_cnt > 0);
377 if (--dp->open_cnt == 0 && dp->destroyed) {
378 shash_find_and_delete(&dp_netdevs, dp->name);
379 dp_netdev_free(dp);
380 }
381 free(dpif);
382
383 ovs_mutex_unlock(&dp_netdev_mutex);
384 }
385
386 static int
387 dpif_netdev_destroy(struct dpif *dpif)
388 {
389 struct dp_netdev *dp = get_dp_netdev(dpif);
390
391 ovs_mutex_lock(&dp_netdev_mutex);
392 dp->destroyed = true;
393 ovs_mutex_unlock(&dp_netdev_mutex);
394
395 return 0;
396 }
397
398 static int
399 dpif_netdev_get_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
400 {
401 struct dp_netdev *dp = get_dp_netdev(dpif);
402
403 ovs_mutex_lock(&dp_netdev_mutex);
404 stats->n_flows = hmap_count(&dp->flow_table);
405 stats->n_hit = dp->n_hit;
406 stats->n_missed = dp->n_missed;
407 stats->n_lost = dp->n_lost;
408 stats->n_masks = UINT32_MAX;
409 stats->n_mask_hit = UINT64_MAX;
410 ovs_mutex_unlock(&dp_netdev_mutex);
411
412 return 0;
413 }
414
415 static int
416 do_add_port(struct dp_netdev *dp, const char *devname, const char *type,
417 odp_port_t port_no)
418 {
419 struct netdev_saved_flags *sf;
420 struct dp_netdev_port *port;
421 struct netdev *netdev;
422 struct netdev_rx *rx;
423 enum netdev_flags flags;
424 const char *open_type;
425 int mtu;
426 int error;
427
428 /* XXX reject devices already in some dp_netdev. */
429
430 /* Open and validate network device. */
431 open_type = dpif_netdev_port_open_type(dp->class, type);
432 error = netdev_open(devname, open_type, &netdev);
433 if (error) {
434 return error;
435 }
436 /* XXX reject non-Ethernet devices */
437
438 netdev_get_flags(netdev, &flags);
439 if (flags & NETDEV_LOOPBACK) {
440 VLOG_ERR("%s: cannot add a loopback device", devname);
441 netdev_close(netdev);
442 return EINVAL;
443 }
444
445 error = netdev_rx_open(netdev, &rx);
446 if (error
447 && !(error == EOPNOTSUPP && dpif_netdev_class_is_dummy(dp->class))) {
448 VLOG_ERR("%s: cannot receive packets on this network device (%s)",
449                  devname, ovs_strerror(error));
450 netdev_close(netdev);
451 return error;
452 }
453
454 error = netdev_turn_flags_on(netdev, NETDEV_PROMISC, &sf);
455 if (error) {
456 netdev_rx_close(rx);
457 netdev_close(netdev);
458 return error;
459 }
460
461 port = xmalloc(sizeof *port);
462 port->port_no = port_no;
463 port->netdev = netdev;
464 port->sf = sf;
465 port->rx = rx;
466 port->type = xstrdup(type);
467
468 error = netdev_get_mtu(netdev, &mtu);
469 if (!error && mtu > dp->max_mtu) {
470 dp->max_mtu = mtu;
471 }
472
473 list_push_back(&dp->port_list, &port->node);
474 dp->ports[odp_to_u32(port_no)] = port;
475 seq_change(dp->port_seq);
476
477 return 0;
478 }
479
480 static int
481 dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev,
482 odp_port_t *port_nop)
483 {
484 struct dp_netdev *dp = get_dp_netdev(dpif);
485 char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
486 const char *dpif_port;
487 odp_port_t port_no;
488 int error;
489
490 ovs_mutex_lock(&dp_netdev_mutex);
491 dpif_port = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
492 if (*port_nop != ODPP_NONE) {
493 uint32_t port_idx = odp_to_u32(*port_nop);
494 if (port_idx >= MAX_PORTS) {
495 error = EFBIG;
496 } else if (dp->ports[port_idx]) {
497 error = EBUSY;
498 } else {
499 error = 0;
500 port_no = *port_nop;
501 }
502 } else {
503 port_no = choose_port(dp, dpif_port);
504 error = port_no == ODPP_NONE ? EFBIG : 0;
505 }
506 if (!error) {
507 *port_nop = port_no;
508 error = do_add_port(dp, dpif_port, netdev_get_type(netdev), port_no);
509 }
510 ovs_mutex_unlock(&dp_netdev_mutex);
511
512 return error;
513 }
514
515 static int
516 dpif_netdev_port_del(struct dpif *dpif, odp_port_t port_no)
517 {
518 struct dp_netdev *dp = get_dp_netdev(dpif);
519 int error;
520
521 ovs_mutex_lock(&dp_netdev_mutex);
522 error = port_no == ODPP_LOCAL ? EINVAL : do_del_port(dp, port_no);
523 ovs_mutex_unlock(&dp_netdev_mutex);
524
525 return error;
526 }
527
528 static bool
529 is_valid_port_number(odp_port_t port_no)
530 {
531 return odp_to_u32(port_no) < MAX_PORTS;
532 }
533
534 static int
535 get_port_by_number(struct dp_netdev *dp,
536 odp_port_t port_no, struct dp_netdev_port **portp)
537 {
538 if (!is_valid_port_number(port_no)) {
539 *portp = NULL;
540 return EINVAL;
541 } else {
542 *portp = dp->ports[odp_to_u32(port_no)];
543 return *portp ? 0 : ENOENT;
544 }
545 }
546
547 static int
548 get_port_by_name(struct dp_netdev *dp,
549 const char *devname, struct dp_netdev_port **portp)
550 {
551 struct dp_netdev_port *port;
552
553 LIST_FOR_EACH (port, node, &dp->port_list) {
554 if (!strcmp(netdev_get_name(port->netdev), devname)) {
555 *portp = port;
556 return 0;
557 }
558 }
559 return ENOENT;
560 }
561
562 static int
563 do_del_port(struct dp_netdev *dp, odp_port_t port_no)
564 {
565 struct dp_netdev_port *port;
566 int error;
567
568 error = get_port_by_number(dp, port_no, &port);
569 if (error) {
570 return error;
571 }
572
573 list_remove(&port->node);
574 dp->ports[odp_to_u32(port_no)] = NULL;
575 seq_change(dp->port_seq);
576
577 netdev_close(port->netdev);
578 netdev_restore_flags(port->sf);
579 netdev_rx_close(port->rx);
580 free(port->type);
581 free(port);
582
583 return 0;
584 }
585
586 static void
587 answer_port_query(const struct dp_netdev_port *port,
588 struct dpif_port *dpif_port)
589 {
590 dpif_port->name = xstrdup(netdev_get_name(port->netdev));
591 dpif_port->type = xstrdup(port->type);
592 dpif_port->port_no = port->port_no;
593 }
594
595 static int
596 dpif_netdev_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
597 struct dpif_port *dpif_port)
598 {
599 struct dp_netdev *dp = get_dp_netdev(dpif);
600 struct dp_netdev_port *port;
601 int error;
602
603 ovs_mutex_lock(&dp_netdev_mutex);
604 error = get_port_by_number(dp, port_no, &port);
605 if (!error && dpif_port) {
606 answer_port_query(port, dpif_port);
607 }
608 ovs_mutex_unlock(&dp_netdev_mutex);
609
610 return error;
611 }
612
613 static int
614 dpif_netdev_port_query_by_name(const struct dpif *dpif, const char *devname,
615 struct dpif_port *dpif_port)
616 {
617 struct dp_netdev *dp = get_dp_netdev(dpif);
618 struct dp_netdev_port *port;
619 int error;
620
621 ovs_mutex_lock(&dp_netdev_mutex);
622 error = get_port_by_name(dp, devname, &port);
623 if (!error && dpif_port) {
624 answer_port_query(port, dpif_port);
625 }
626 ovs_mutex_unlock(&dp_netdev_mutex);
627
628 return error;
629 }
630
631 static uint32_t
632 dpif_netdev_get_max_ports(const struct dpif *dpif OVS_UNUSED)
633 {
634 return MAX_PORTS;
635 }
636
637 static void
638 dp_netdev_free_flow(struct dp_netdev *dp, struct dp_netdev_flow *netdev_flow)
639 {
640 ovs_rwlock_wrlock(&dp->cls.rwlock);
641 classifier_remove(&dp->cls, &netdev_flow->cr);
642 ovs_rwlock_unlock(&dp->cls.rwlock);
643 cls_rule_destroy(&netdev_flow->cr);
644
645 hmap_remove(&dp->flow_table, &netdev_flow->node);
646 free(netdev_flow->actions);
647 free(netdev_flow);
648 }
649
650 static void
651 dp_netdev_flow_flush(struct dp_netdev *dp)
652 {
653 struct dp_netdev_flow *netdev_flow, *next;
654
655 HMAP_FOR_EACH_SAFE (netdev_flow, next, node, &dp->flow_table) {
656 dp_netdev_free_flow(dp, netdev_flow);
657 }
658 }
659
660 static int
661 dpif_netdev_flow_flush(struct dpif *dpif)
662 {
663 struct dp_netdev *dp = get_dp_netdev(dpif);
664
665 ovs_mutex_lock(&dp_netdev_mutex);
666 dp_netdev_flow_flush(dp);
667 ovs_mutex_unlock(&dp_netdev_mutex);
668
669 return 0;
670 }
671
672 struct dp_netdev_port_state {
673 odp_port_t port_no;
674 char *name;
675 };
676
677 static int
678 dpif_netdev_port_dump_start(const struct dpif *dpif OVS_UNUSED, void **statep)
679 {
680 *statep = xzalloc(sizeof(struct dp_netdev_port_state));
681 return 0;
682 }
683
684 static int
685 dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,
686 struct dpif_port *dpif_port)
687 {
688 struct dp_netdev_port_state *state = state_;
689 struct dp_netdev *dp = get_dp_netdev(dpif);
690 uint32_t port_idx;
691
692 ovs_mutex_lock(&dp_netdev_mutex);
693 for (port_idx = odp_to_u32(state->port_no);
694 port_idx < MAX_PORTS; port_idx++) {
695 struct dp_netdev_port *port = dp->ports[port_idx];
696 if (port) {
697 free(state->name);
698 state->name = xstrdup(netdev_get_name(port->netdev));
699 dpif_port->name = state->name;
700 dpif_port->type = port->type;
701 dpif_port->port_no = port->port_no;
702 state->port_no = u32_to_odp(port_idx + 1);
703 ovs_mutex_unlock(&dp_netdev_mutex);
704
705 return 0;
706 }
707 }
708 ovs_mutex_unlock(&dp_netdev_mutex);
709
710 return EOF;
711 }
712
713 static int
714 dpif_netdev_port_dump_done(const struct dpif *dpif OVS_UNUSED, void *state_)
715 {
716 struct dp_netdev_port_state *state = state_;
717 free(state->name);
718 free(state);
719 return 0;
720 }
721
722 static int
723 dpif_netdev_port_poll(const struct dpif *dpif_, char **devnamep OVS_UNUSED)
724 {
725 struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);
726 uint64_t new_port_seq;
727 int error;
728
729 ovs_mutex_lock(&dp_netdev_mutex);
730 new_port_seq = seq_read(dpif->dp->port_seq);
731 if (dpif->last_port_seq != new_port_seq) {
732 dpif->last_port_seq = new_port_seq;
733 error = ENOBUFS;
734 } else {
735 error = EAGAIN;
736 }
737 ovs_mutex_unlock(&dp_netdev_mutex);
738
739 return error;
740 }
741
742 static void
743 dpif_netdev_port_poll_wait(const struct dpif *dpif_)
744 {
745 struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);
746
747 ovs_mutex_lock(&dp_netdev_mutex);
748 seq_wait(dpif->dp->port_seq, dpif->last_port_seq);
749 ovs_mutex_unlock(&dp_netdev_mutex);
750 }
751
752 static struct dp_netdev_flow *
753 dp_netdev_lookup_flow(const struct dp_netdev *dp, const struct flow *flow)
754 {
755 struct cls_rule *cr;
756
757 ovs_rwlock_wrlock(&dp->cls.rwlock);
758 cr = classifier_lookup(&dp->cls, flow, NULL);
759 ovs_rwlock_unlock(&dp->cls.rwlock);
760
761 return (cr
762 ? CONTAINER_OF(cr, struct dp_netdev_flow, cr)
763 : NULL);
764 }
765
766 static struct dp_netdev_flow *
767 dp_netdev_find_flow(const struct dp_netdev *dp, const struct flow *flow)
768 {
769 struct dp_netdev_flow *netdev_flow;
770
771 HMAP_FOR_EACH_WITH_HASH (netdev_flow, node, flow_hash(flow, 0),
772 &dp->flow_table) {
773 if (flow_equal(&netdev_flow->flow, flow)) {
774 return netdev_flow;
775 }
776 }
777 return NULL;
778 }
779
780 static void
781 get_dpif_flow_stats(struct dp_netdev_flow *netdev_flow,
782 struct dpif_flow_stats *stats)
783 {
784 stats->n_packets = netdev_flow->packet_count;
785 stats->n_bytes = netdev_flow->byte_count;
786 stats->used = netdev_flow->used;
787 stats->tcp_flags = netdev_flow->tcp_flags;
788 }
789
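/* Builds 'mask' from the netlink attributes in 'mask_key'.  If no mask is
 * given, every field whose prerequisites 'flow' satisfies is exact-matched
 * instead (so, for instance, L4 port fields stay wildcarded unless the flow
 * really carries TCP, UDP, etc.). */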
790 static int
791 dpif_netdev_mask_from_nlattrs(const struct nlattr *key, uint32_t key_len,
792 const struct nlattr *mask_key,
793 uint32_t mask_key_len, const struct flow *flow,
794 struct flow *mask)
795 {
796 if (mask_key_len) {
797 if (odp_flow_key_to_mask(mask_key, mask_key_len, mask, flow)) {
798 /* This should not happen: it indicates that
799 * odp_flow_key_from_mask() and odp_flow_key_to_mask()
800 * disagree on the acceptable form of a mask. Log the problem
801 * as an error, with enough details to enable debugging. */
802 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
803
804 if (!VLOG_DROP_ERR(&rl)) {
805 struct ds s;
806
807 ds_init(&s);
808 odp_flow_format(key, key_len, mask_key, mask_key_len, NULL, &s,
809 true);
810 VLOG_ERR("internal error parsing flow mask %s", ds_cstr(&s));
811 ds_destroy(&s);
812 }
813
814 return EINVAL;
815 }
816         /* Always force an exact match on in_port. */
817 mask->in_port.odp_port = u32_to_odp(UINT32_MAX);
818 } else {
819 enum mf_field_id id;
820         /* No mask key: unwildcard everything except fields whose
821          * prerequisites are not met. */
822 memset(mask, 0x0, sizeof *mask);
823
824 for (id = 0; id < MFF_N_IDS; ++id) {
825 /* Skip registers and metadata. */
826 if (!(id >= MFF_REG0 && id < MFF_REG0 + FLOW_N_REGS)
827 && id != MFF_METADATA) {
828 const struct mf_field *mf = mf_from_id(id);
829 if (mf_are_prereqs_ok(mf, flow)) {
830 mf_mask_field(mf, mask);
831 }
832 }
833 }
834 }
835
836 return 0;
837 }
838
839 static int
840 dpif_netdev_flow_from_nlattrs(const struct nlattr *key, uint32_t key_len,
841 struct flow *flow)
842 {
843 odp_port_t in_port;
844
845 if (odp_flow_key_to_flow(key, key_len, flow)) {
846 /* This should not happen: it indicates that odp_flow_key_from_flow()
847 * and odp_flow_key_to_flow() disagree on the acceptable form of a
848 * flow. Log the problem as an error, with enough details to enable
849 * debugging. */
850 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
851
852 if (!VLOG_DROP_ERR(&rl)) {
853 struct ds s;
854
855 ds_init(&s);
856 odp_flow_format(key, key_len, NULL, 0, NULL, &s, true);
857 VLOG_ERR("internal error parsing flow key %s", ds_cstr(&s));
858 ds_destroy(&s);
859 }
860
861 return EINVAL;
862 }
863
864 in_port = flow->in_port.odp_port;
865 if (!is_valid_port_number(in_port) && in_port != ODPP_NONE) {
866 return EINVAL;
867 }
868
869 return 0;
870 }
871
872 static int
873 dpif_netdev_flow_get(const struct dpif *dpif,
874 const struct nlattr *nl_key, size_t nl_key_len,
875 struct ofpbuf **actionsp, struct dpif_flow_stats *stats)
876 {
877 struct dp_netdev *dp = get_dp_netdev(dpif);
878 struct dp_netdev_flow *netdev_flow;
879 struct flow key;
880 int error;
881
882 error = dpif_netdev_flow_from_nlattrs(nl_key, nl_key_len, &key);
883 if (error) {
884 return error;
885 }
886
887 ovs_mutex_lock(&dp_netdev_mutex);
888 netdev_flow = dp_netdev_find_flow(dp, &key);
889 if (netdev_flow) {
890 if (stats) {
891 get_dpif_flow_stats(netdev_flow, stats);
892 }
893 if (actionsp) {
894 *actionsp = ofpbuf_clone_data(netdev_flow->actions,
895 netdev_flow->actions_len);
896 }
897 } else {
898 error = ENOENT;
899 }
900 ovs_mutex_unlock(&dp_netdev_mutex);
901
902 return error;
903 }
904
905 static int
906 set_flow_actions(struct dp_netdev_flow *netdev_flow,
907 const struct nlattr *actions, size_t actions_len)
908 {
909 netdev_flow->actions = xrealloc(netdev_flow->actions, actions_len);
910 netdev_flow->actions_len = actions_len;
911 memcpy(netdev_flow->actions, actions, actions_len);
912 return 0;
913 }
914
915 static int
916 dp_netdev_flow_add(struct dp_netdev *dp, const struct flow *flow,
917 const struct flow_wildcards *wc,
918 const struct nlattr *actions,
919 size_t actions_len)
920 {
921 struct dp_netdev_flow *netdev_flow;
922 struct match match;
923 int error;
924
925 netdev_flow = xzalloc(sizeof *netdev_flow);
926 netdev_flow->flow = *flow;
927
928 match_init(&match, flow, wc);
929 cls_rule_init(&netdev_flow->cr, &match, NETDEV_RULE_PRIORITY);
930 ovs_rwlock_wrlock(&dp->cls.rwlock);
931 classifier_insert(&dp->cls, &netdev_flow->cr);
932 ovs_rwlock_unlock(&dp->cls.rwlock);
933
934 error = set_flow_actions(netdev_flow, actions, actions_len);
935 if (error) {
936 ovs_rwlock_wrlock(&dp->cls.rwlock);
937 classifier_remove(&dp->cls, &netdev_flow->cr);
938 ovs_rwlock_unlock(&dp->cls.rwlock);
939 cls_rule_destroy(&netdev_flow->cr);
940
941 free(netdev_flow);
942 return error;
943 }
944
945 hmap_insert(&dp->flow_table, &netdev_flow->node, flow_hash(flow, 0));
946 return 0;
947 }
948
949 static void
950 clear_stats(struct dp_netdev_flow *netdev_flow)
951 {
952 netdev_flow->used = 0;
953 netdev_flow->packet_count = 0;
954 netdev_flow->byte_count = 0;
955 netdev_flow->tcp_flags = 0;
956 }
957
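/* Flow table updates: with DPIF_FP_CREATE, a flow not yet in the classifier
 * is added (EFBIG once MAX_FLOWS is reached, EEXIST if it is already there);
 * with DPIF_FP_MODIFY, an existing flow with an identical unmasked key gets
 * its actions replaced, optionally returning and/or zeroing its statistics.
 * A lookup that hits a different (overlapping) flow yields EINVAL. */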
958 static int
959 dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put)
960 {
961 struct dp_netdev *dp = get_dp_netdev(dpif);
962 struct dp_netdev_flow *netdev_flow;
963 struct flow flow;
964 struct flow_wildcards wc;
965 int error;
966
967 error = dpif_netdev_flow_from_nlattrs(put->key, put->key_len, &flow);
968 if (error) {
969 return error;
970 }
971 error = dpif_netdev_mask_from_nlattrs(put->key, put->key_len,
972 put->mask, put->mask_len,
973 &flow, &wc.masks);
974 if (error) {
975 return error;
976 }
977
978 ovs_mutex_lock(&dp_netdev_mutex);
979 netdev_flow = dp_netdev_lookup_flow(dp, &flow);
980 if (!netdev_flow) {
981 if (put->flags & DPIF_FP_CREATE) {
982 if (hmap_count(&dp->flow_table) < MAX_FLOWS) {
983 if (put->stats) {
984 memset(put->stats, 0, sizeof *put->stats);
985 }
986 error = dp_netdev_flow_add(dp, &flow, &wc, put->actions,
987 put->actions_len);
988 } else {
989 error = EFBIG;
990 }
991 } else {
992 error = ENOENT;
993 }
994 } else {
995 if (put->flags & DPIF_FP_MODIFY
996 && flow_equal(&flow, &netdev_flow->flow)) {
997 error = set_flow_actions(netdev_flow, put->actions,
998 put->actions_len);
999 if (!error) {
1000 if (put->stats) {
1001 get_dpif_flow_stats(netdev_flow, put->stats);
1002 }
1003 if (put->flags & DPIF_FP_ZERO_STATS) {
1004 clear_stats(netdev_flow);
1005 }
1006 }
1007 } else if (put->flags & DPIF_FP_CREATE) {
1008 error = EEXIST;
1009 } else {
1010 /* Overlapping flow. */
1011 error = EINVAL;
1012 }
1013 }
1014 ovs_mutex_unlock(&dp_netdev_mutex);
1015
1016 return error;
1017 }
1018
1019 static int
1020 dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del)
1021 {
1022 struct dp_netdev *dp = get_dp_netdev(dpif);
1023 struct dp_netdev_flow *netdev_flow;
1024 struct flow key;
1025 int error;
1026
1027 error = dpif_netdev_flow_from_nlattrs(del->key, del->key_len, &key);
1028 if (error) {
1029 return error;
1030 }
1031
1032 ovs_mutex_lock(&dp_netdev_mutex);
1033 netdev_flow = dp_netdev_find_flow(dp, &key);
1034 if (netdev_flow) {
1035 if (del->stats) {
1036 get_dpif_flow_stats(netdev_flow, del->stats);
1037 }
1038 dp_netdev_free_flow(dp, netdev_flow);
1039 } else {
1040 error = ENOENT;
1041 }
1042 ovs_mutex_unlock(&dp_netdev_mutex);
1043
1044 return error;
1045 }
1046
1047 struct dp_netdev_flow_state {
1048 uint32_t bucket;
1049 uint32_t offset;
1050 struct nlattr *actions;
1051 struct odputil_keybuf keybuf;
1052 struct odputil_keybuf maskbuf;
1053 struct dpif_flow_stats stats;
1054 };
1055
1056 static int
1057 dpif_netdev_flow_dump_start(const struct dpif *dpif OVS_UNUSED, void **statep)
1058 {
1059 struct dp_netdev_flow_state *state;
1060
1061 *statep = state = xmalloc(sizeof *state);
1062 state->bucket = 0;
1063 state->offset = 0;
1064 state->actions = NULL;
1065 return 0;
1066 }
1067
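/* Returns the next flow in the dump.  Each call advances the (bucket, offset)
 * position kept in 'state_' and serializes the flow's key, mask, actions, and
 * stats into buffers owned by that state, so the returned pointers stay valid
 * only until the next call (or until the dump is finished). */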
1068 static int
1069 dpif_netdev_flow_dump_next(const struct dpif *dpif, void *state_,
1070 const struct nlattr **key, size_t *key_len,
1071 const struct nlattr **mask, size_t *mask_len,
1072 const struct nlattr **actions, size_t *actions_len,
1073 const struct dpif_flow_stats **stats)
1074 {
1075 struct dp_netdev_flow_state *state = state_;
1076 struct dp_netdev *dp = get_dp_netdev(dpif);
1077 struct dp_netdev_flow *netdev_flow;
1078 struct hmap_node *node;
1079
1080 ovs_mutex_lock(&dp_netdev_mutex);
1081 node = hmap_at_position(&dp->flow_table, &state->bucket, &state->offset);
1082 if (!node) {
1083 ovs_mutex_unlock(&dp_netdev_mutex);
1084 return EOF;
1085 }
1086
1087 netdev_flow = CONTAINER_OF(node, struct dp_netdev_flow, node);
1088
1089 if (key) {
1090 struct ofpbuf buf;
1091
1092 ofpbuf_use_stack(&buf, &state->keybuf, sizeof state->keybuf);
1093 odp_flow_key_from_flow(&buf, &netdev_flow->flow,
1094 netdev_flow->flow.in_port.odp_port);
1095
1096 *key = buf.data;
1097 *key_len = buf.size;
1098 }
1099
1100 if (key && mask) {
1101 struct ofpbuf buf;
1102 struct flow_wildcards wc;
1103
1104 ofpbuf_use_stack(&buf, &state->maskbuf, sizeof state->maskbuf);
1105 minimask_expand(&netdev_flow->cr.match.mask, &wc);
1106 odp_flow_key_from_mask(&buf, &wc.masks, &netdev_flow->flow,
1107 odp_to_u32(wc.masks.in_port.odp_port));
1108
1109 *mask = buf.data;
1110 *mask_len = buf.size;
1111 }
1112
1113 if (actions) {
1114 free(state->actions);
1115 state->actions = xmemdup(netdev_flow->actions,
1116 netdev_flow->actions_len);
1117
1118 *actions = state->actions;
1119 *actions_len = netdev_flow->actions_len;
1120 }
1121
1122 if (stats) {
1123 get_dpif_flow_stats(netdev_flow, &state->stats);
1124 *stats = &state->stats;
1125 }
1126
1127 ovs_mutex_unlock(&dp_netdev_mutex);
1128 return 0;
1129 }
1130
1131 static int
1132 dpif_netdev_flow_dump_done(const struct dpif *dpif OVS_UNUSED, void *state_)
1133 {
1134 struct dp_netdev_flow_state *state = state_;
1135
1136 free(state->actions);
1137 free(state);
1138 return 0;
1139 }
1140
1141 static int
1142 dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute)
1143 {
1144 struct dp_netdev *dp = get_dp_netdev(dpif);
1145 struct pkt_metadata *md = &execute->md;
1146 struct flow key;
1147
1148 if (execute->packet->size < ETH_HEADER_LEN ||
1149 execute->packet->size > UINT16_MAX) {
1150 return EINVAL;
1151 }
1152
1153 /* Extract flow key. */
1154 flow_extract(execute->packet, md->skb_priority, md->pkt_mark, &md->tunnel,
1155 (union flow_in_port *)&md->in_port, &key);
1156 ovs_mutex_lock(&dp_netdev_mutex);
1157 dp_netdev_execute_actions(dp, &key, execute->packet, md, execute->actions,
1158 execute->actions_len);
1159 ovs_mutex_unlock(&dp_netdev_mutex);
1160 return 0;
1161 }
1162
1163 static int
1164 dpif_netdev_recv_set(struct dpif *dpif OVS_UNUSED, bool enable OVS_UNUSED)
1165 {
1166 return 0;
1167 }
1168
1169 static int
1170 dpif_netdev_queue_to_priority(const struct dpif *dpif OVS_UNUSED,
1171 uint32_t queue_id, uint32_t *priority)
1172 {
1173 *priority = queue_id;
1174 return 0;
1175 }
1176
1177 static struct dp_netdev_queue *
1178 find_nonempty_queue(struct dpif *dpif)
1179 {
1180 struct dp_netdev *dp = get_dp_netdev(dpif);
1181 int i;
1182
1183 for (i = 0; i < N_QUEUES; i++) {
1184 struct dp_netdev_queue *q = &dp->queues[i];
1185 if (q->head != q->tail) {
1186 return q;
1187 }
1188 }
1189 return NULL;
1190 }
1191
1192 static int
1193 dpif_netdev_recv(struct dpif *dpif, struct dpif_upcall *upcall,
1194 struct ofpbuf *buf)
1195 {
1196 struct dp_netdev_queue *q;
1197 int error;
1198
1199 ovs_mutex_lock(&dp_netdev_mutex);
1200 q = find_nonempty_queue(dpif);
1201 if (q) {
1202 struct dp_netdev_upcall *u = &q->upcalls[q->tail++ & QUEUE_MASK];
1203
1204 *upcall = u->upcall;
1205
1206 ofpbuf_uninit(buf);
1207 *buf = u->buf;
1208
1209 error = 0;
1210 } else {
1211 error = EAGAIN;
1212 }
1213 ovs_mutex_unlock(&dp_netdev_mutex);
1214
1215 return error;
1216 }
1217
1218 static void
1219 dpif_netdev_recv_wait(struct dpif *dpif)
1220 {
1221 struct dp_netdev *dp = get_dp_netdev(dpif);
1222 uint64_t seq;
1223
1224 ovs_mutex_lock(&dp_netdev_mutex);
1225 seq = seq_read(dp->queue_seq);
1226 if (find_nonempty_queue(dpif)) {
1227 poll_immediate_wake();
1228 } else {
1229 seq_wait(dp->queue_seq, seq);
1230 }
1231 ovs_mutex_unlock(&dp_netdev_mutex);
1232 }
1233
1234 static void
1235 dpif_netdev_recv_purge(struct dpif *dpif)
1236 {
1237 struct dpif_netdev *dpif_netdev = dpif_netdev_cast(dpif);
1238 ovs_mutex_lock(&dp_netdev_mutex);
1239 dp_netdev_purge_queues(dpif_netdev->dp);
1240 ovs_mutex_unlock(&dp_netdev_mutex);
1241 }
1242 \f
1243 static void
1244 dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow,
1245 const struct ofpbuf *packet)
1246 {
1247 netdev_flow->used = time_msec();
1248 netdev_flow->packet_count++;
1249 netdev_flow->byte_count += packet->size;
1250 netdev_flow->tcp_flags |= packet_get_tcp_flags(packet, &netdev_flow->flow);
1251 }
1252
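/* Entry point for a packet received on a datapath port.  Extracts the flow
 * key, looks it up in the classifier, and on a hit updates the flow's stats
 * and runs its actions; on a miss the packet is queued to userspace as a
 * DPIF_UC_MISS upcall.  Runts shorter than an Ethernet header are dropped. */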
1253 static void
1254 dp_netdev_port_input(struct dp_netdev *dp, struct ofpbuf *packet,
1255 struct pkt_metadata *md)
1256 {
1257 struct dp_netdev_flow *netdev_flow;
1258 struct flow key;
1259
1260 if (packet->size < ETH_HEADER_LEN) {
1261 return;
1262 }
1263 flow_extract(packet, md->skb_priority, md->pkt_mark, &md->tunnel,
1264 (union flow_in_port *)&md->in_port, &key);
1265 netdev_flow = dp_netdev_lookup_flow(dp, &key);
1266 if (netdev_flow) {
1267 dp_netdev_flow_used(netdev_flow, packet);
1268 dp_netdev_execute_actions(dp, &key, packet, md,
1269 netdev_flow->actions,
1270 netdev_flow->actions_len);
1271 dp->n_hit++;
1272 } else {
1273 dp->n_missed++;
1274 dp_netdev_output_userspace(dp, packet, DPIF_UC_MISS, &key, NULL);
1275 }
1276 }
1277
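/* Polls every port once per call.  The receive buffer is sized for
 * DP_NETDEV_HEADROOM + VLAN_ETH_HEADER_LEN + the largest MTU seen so far, and
 * each packet successfully read is handed to dp_netdev_port_input() with
 * metadata naming the ingress port. */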
1278 static void
1279 dpif_netdev_run(struct dpif *dpif)
1280 {
1281 struct dp_netdev_port *port;
1282 struct dp_netdev *dp;
1283 struct ofpbuf packet;
1284 size_t buf_size;
1285
1286 ovs_mutex_lock(&dp_netdev_mutex);
1287 dp = get_dp_netdev(dpif);
1288 ofpbuf_init(&packet, 0);
1289
1290 buf_size = DP_NETDEV_HEADROOM + VLAN_ETH_HEADER_LEN + dp->max_mtu;
1291
1292 LIST_FOR_EACH (port, node, &dp->port_list) {
1293 int error;
1294
1295 /* Reset packet contents. Packet data may have been stolen. */
1296 ofpbuf_clear(&packet);
1297 ofpbuf_reserve_with_tailroom(&packet, DP_NETDEV_HEADROOM, buf_size);
1298
1299 error = port->rx ? netdev_rx_recv(port->rx, &packet) : EOPNOTSUPP;
1300 if (!error) {
1301 struct pkt_metadata md = PKT_METADATA_INITIALIZER(port->port_no);
1302 dp_netdev_port_input(dp, &packet, &md);
1303 } else if (error != EAGAIN && error != EOPNOTSUPP) {
1304 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
1305
1306 VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
1307 netdev_get_name(port->netdev), ovs_strerror(error));
1308 }
1309 }
1310 ofpbuf_uninit(&packet);
1311 ovs_mutex_unlock(&dp_netdev_mutex);
1312 }
1313
1314 static void
1315 dpif_netdev_wait(struct dpif *dpif)
1316 {
1317 struct dp_netdev_port *port;
1318
1319 /* There is a race here, if thread A calls dpif_netdev_wait(dpif) and
1320 * thread B calls dpif_port_add(dpif) or dpif_port_remove(dpif) before
1321 * A makes it to poll_block().
1322 *
1323 * But I think it doesn't matter:
1324 *
1325 * - In the dpif_port_add() case, A will not wake up when a packet
1326 * arrives on the new port, but this would also happen if the
1327 * ordering were reversed.
1328 *
1329 * - In the dpif_port_remove() case, A might wake up spuriously, but
1330 * that is harmless. */
1331
1332 ovs_mutex_lock(&dp_netdev_mutex);
1333 LIST_FOR_EACH (port, node, &get_dp_netdev(dpif)->port_list) {
1334 if (port->rx) {
1335 netdev_rx_wait(port->rx);
1336 }
1337 }
1338 ovs_mutex_unlock(&dp_netdev_mutex);
1339 }
1340
1341 static void
1342 dp_netdev_output_port(struct dp_netdev *dp, struct ofpbuf *packet,
1343 odp_port_t out_port)
1344 {
1345 struct dp_netdev_port *p = dp->ports[odp_to_u32(out_port)];
1346 if (p) {
1347 netdev_send(p->netdev, packet);
1348 }
1349 }
1350
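/* Queues 'packet' on queue 'queue_no' as an upcall for userspace.  The
 * upcall's buffer is filled with the ODP flow key generated from 'flow',
 * followed by a copy of 'userdata' when present; the packet data itself is
 * stolen from 'packet' (which must be malloc()'d), leaving it empty.  Returns
 * 0 on success or ENOBUFS, counting the packet as lost, when the queue is
 * full. */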
1351 static int
1352 dp_netdev_output_userspace(struct dp_netdev *dp, struct ofpbuf *packet,
1353 int queue_no, const struct flow *flow,
1354 const struct nlattr *userdata)
1355 {
1356 struct dp_netdev_queue *q = &dp->queues[queue_no];
1357 if (q->head - q->tail < MAX_QUEUE_LEN) {
1358 struct dp_netdev_upcall *u = &q->upcalls[q->head++ & QUEUE_MASK];
1359 struct dpif_upcall *upcall = &u->upcall;
1360 struct ofpbuf *buf = &u->buf;
1361 size_t buf_size;
1362
1363 upcall->type = queue_no;
1364
1365 /* Allocate buffer big enough for everything. */
1366 buf_size = ODPUTIL_FLOW_KEY_BYTES;
1367 if (userdata) {
1368 buf_size += NLA_ALIGN(userdata->nla_len);
1369 }
1370 ofpbuf_init(buf, buf_size);
1371
1372 /* Put ODP flow. */
1373 odp_flow_key_from_flow(buf, flow, flow->in_port.odp_port);
1374 upcall->key = buf->data;
1375 upcall->key_len = buf->size;
1376
1377 /* Put userdata. */
1378 if (userdata) {
1379 upcall->userdata = ofpbuf_put(buf, userdata,
1380 NLA_ALIGN(userdata->nla_len));
1381 }
1382
1383 /* Steal packet data. */
1384 ovs_assert(packet->source == OFPBUF_MALLOC);
1385 upcall->packet = *packet;
1386 ofpbuf_use(packet, NULL, 0);
1387
1388 seq_change(dp->queue_seq);
1389
1390 return 0;
1391 } else {
1392 dp->n_lost++;
1393 return ENOBUFS;
1394 }
1395 }
1396
1397 struct dp_netdev_execute_aux {
1398 struct dp_netdev *dp;
1399 const struct flow *key;
1400 };
1401
1402 static void
1403 dp_execute_cb(void *aux_, struct ofpbuf *packet,
1404 const struct pkt_metadata *md OVS_UNUSED,
1405 const struct nlattr *a, bool may_steal)
1406 {
1407 struct dp_netdev_execute_aux *aux = aux_;
1408 int type = nl_attr_type(a);
1409
1410 switch ((enum ovs_action_attr)type) {
1411 case OVS_ACTION_ATTR_OUTPUT:
1412 dp_netdev_output_port(aux->dp, packet, u32_to_odp(nl_attr_get_u32(a)));
1413 break;
1414
1415 case OVS_ACTION_ATTR_USERSPACE: {
1416 const struct nlattr *userdata;
1417
1418 userdata = nl_attr_find_nested(a, OVS_USERSPACE_ATTR_USERDATA);
1419
1420 /* Make a copy if we are not allowed to steal the packet's data. */
1421 if (!may_steal) {
1422 packet = ofpbuf_clone_with_headroom(packet, DP_NETDEV_HEADROOM);
1423 }
1424 dp_netdev_output_userspace(aux->dp, packet, DPIF_UC_ACTION, aux->key,
1425 userdata);
1426 if (!may_steal) {
1427 ofpbuf_uninit(packet);
1428 }
1429 break;
1430 }
1431 case OVS_ACTION_ATTR_PUSH_VLAN:
1432 case OVS_ACTION_ATTR_POP_VLAN:
1433 case OVS_ACTION_ATTR_PUSH_MPLS:
1434 case OVS_ACTION_ATTR_POP_MPLS:
1435 case OVS_ACTION_ATTR_SET:
1436 case OVS_ACTION_ATTR_SAMPLE:
1437 case OVS_ACTION_ATTR_UNSPEC:
1438 case __OVS_ACTION_ATTR_MAX:
1439 OVS_NOT_REACHED();
1440 }
1441 }
1442
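/* Runs 'actions' on 'packet'.  odp_execute_actions() interprets the generic
 * actions (VLAN and MPLS push/pop, set, sample) itself and calls back into
 * dp_execute_cb() above only for output and userspace actions, which is why
 * the other cases there are unreachable. */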
1443 static void
1444 dp_netdev_execute_actions(struct dp_netdev *dp, const struct flow *key,
1445 struct ofpbuf *packet, struct pkt_metadata *md,
1446 const struct nlattr *actions, size_t actions_len)
1447 {
1448 struct dp_netdev_execute_aux aux = {dp, key};
1449
1450 odp_execute_actions(&aux, packet, md, actions, actions_len, dp_execute_cb);
1451 }
1452
1453 const struct dpif_class dpif_netdev_class = {
1454 "netdev",
1455 dpif_netdev_enumerate,
1456 dpif_netdev_port_open_type,
1457 dpif_netdev_open,
1458 dpif_netdev_close,
1459 dpif_netdev_destroy,
1460 dpif_netdev_run,
1461 dpif_netdev_wait,
1462 dpif_netdev_get_stats,
1463 dpif_netdev_port_add,
1464 dpif_netdev_port_del,
1465 dpif_netdev_port_query_by_number,
1466 dpif_netdev_port_query_by_name,
1467 dpif_netdev_get_max_ports,
1468 NULL, /* port_get_pid */
1469 dpif_netdev_port_dump_start,
1470 dpif_netdev_port_dump_next,
1471 dpif_netdev_port_dump_done,
1472 dpif_netdev_port_poll,
1473 dpif_netdev_port_poll_wait,
1474 dpif_netdev_flow_get,
1475 dpif_netdev_flow_put,
1476 dpif_netdev_flow_del,
1477 dpif_netdev_flow_flush,
1478 dpif_netdev_flow_dump_start,
1479 dpif_netdev_flow_dump_next,
1480 dpif_netdev_flow_dump_done,
1481 dpif_netdev_execute,
1482 NULL, /* operate */
1483 dpif_netdev_recv_set,
1484 dpif_netdev_queue_to_priority,
1485 dpif_netdev_recv,
1486 dpif_netdev_recv_wait,
1487 dpif_netdev_recv_purge,
1488 };
1489
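/* unixctl handler for "dpif-dummy/change-port-number DP PORT NEW-NUMBER",
 * which renumbers an existing port on a dummy datapath, e.g. (hypothetical
 * names): ovs-appctl dpif-dummy/change-port-number dummy-dp p1 47 */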
1490 static void
1491 dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED,
1492 const char *argv[], void *aux OVS_UNUSED)
1493 {
1494 struct dp_netdev_port *port;
1495 struct dp_netdev *dp;
1496 int port_no;
1497
1498 dp = shash_find_data(&dp_netdevs, argv[1]);
1499 if (!dp || !dpif_netdev_class_is_dummy(dp->class)) {
1500 unixctl_command_reply_error(conn, "unknown datapath or not a dummy");
1501 return;
1502 }
1503
1504 if (get_port_by_name(dp, argv[2], &port)) {
1505 unixctl_command_reply_error(conn, "unknown port");
1506 return;
1507 }
1508
1509 port_no = atoi(argv[3]);
1510 if (port_no <= 0 || port_no >= MAX_PORTS) {
1511 unixctl_command_reply_error(conn, "bad port number");
1512 return;
1513 }
1514 if (dp->ports[port_no]) {
1515 unixctl_command_reply_error(conn, "port number already in use");
1516 return;
1517 }
1518 dp->ports[odp_to_u32(port->port_no)] = NULL;
1519 dp->ports[port_no] = port;
1520 port->port_no = u32_to_odp(port_no);
1521 seq_change(dp->port_seq);
1522 unixctl_command_reply(conn, NULL);
1523 }
1524
1525 static void
1526 dpif_dummy_register__(const char *type)
1527 {
1528 struct dpif_class *class;
1529
1530 class = xmalloc(sizeof *class);
1531 *class = dpif_netdev_class;
1532 class->type = xstrdup(type);
1533 dp_register_provider(class);
1534 }
1535
1536 void
1537 dpif_dummy_register(bool override)
1538 {
1539 if (override) {
1540 struct sset types;
1541 const char *type;
1542
1543 sset_init(&types);
1544 dp_enumerate_types(&types);
1545 SSET_FOR_EACH (type, &types) {
1546 if (!dp_unregister_provider(type)) {
1547 dpif_dummy_register__(type);
1548 }
1549 }
1550 sset_destroy(&types);
1551 }
1552
1553 dpif_dummy_register__("dummy");
1554
1555 unixctl_command_register("dpif-dummy/change-port-number",
1556 "DP PORT NEW-NUMBER",
1557 3, 3, dpif_dummy_change_port_number, NULL);
1558 }