]> git.proxmox.com Git - mirror_ovs.git/blame_incremental - ofproto/ofproto.c
dns-resolve: Improve on handling of system DNS nameserver
[mirror_ovs.git] / ofproto / ofproto.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 2009-2017 Nicira, Inc.
3 * Copyright (c) 2010 Jean Tourrilhes - HP-Labs.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18#include <config.h>
19#include <errno.h>
20#include <inttypes.h>
21#include <stdbool.h>
22#include <stdlib.h>
23#include <unistd.h>
24
25#include "bitmap.h"
26#include "bundles.h"
27#include "byte-order.h"
28#include "classifier.h"
29#include "connectivity.h"
30#include "connmgr.h"
31#include "coverage.h"
32#include "dp-packet.h"
33#include "hash.h"
34#include "openvswitch/hmap.h"
35#include "netdev.h"
36#include "nx-match.h"
37#include "ofproto.h"
38#include "ofproto-provider.h"
39#include "openflow/nicira-ext.h"
40#include "openflow/openflow.h"
41#include "openvswitch/dynamic-string.h"
42#include "openvswitch/meta-flow.h"
43#include "openvswitch/ofp-actions.h"
44#include "openvswitch/ofp-bundle.h"
45#include "openvswitch/ofp-errors.h"
46#include "openvswitch/ofp-match.h"
47#include "openvswitch/ofp-msgs.h"
48#include "openvswitch/ofp-monitor.h"
49#include "openvswitch/ofp-print.h"
50#include "openvswitch/ofp-queue.h"
51#include "openvswitch/ofp-util.h"
52#include "openvswitch/ofpbuf.h"
53#include "openvswitch/vlog.h"
54#include "ovs-rcu.h"
55#include "packets.h"
56#include "pinsched.h"
57#include "openvswitch/poll-loop.h"
58#include "random.h"
59#include "seq.h"
60#include "openvswitch/shash.h"
61#include "simap.h"
62#include "smap.h"
63#include "sset.h"
64#include "timeval.h"
65#include "tun-metadata.h"
66#include "unaligned.h"
67#include "unixctl.h"
68#include "util.h"
69
70VLOG_DEFINE_THIS_MODULE(ofproto);
71
72COVERAGE_DEFINE(ofproto_flush);
73COVERAGE_DEFINE(ofproto_packet_out);
74COVERAGE_DEFINE(ofproto_queue_req);
75COVERAGE_DEFINE(ofproto_recv_openflow);
76COVERAGE_DEFINE(ofproto_reinit_ports);
77COVERAGE_DEFINE(ofproto_update_port);
78
79/* Default fields to use for prefix tries in each flow table, unless something
80 * else is configured. */
81const enum mf_field_id default_prefix_fields[2] =
82 { MFF_IPV4_DST, MFF_IPV4_SRC };
83
84/* oftable. */
85static void oftable_init(struct oftable *);
86static void oftable_destroy(struct oftable *);
87
88static void oftable_set_name(struct oftable *, const char *name);
89
90static enum ofperr evict_rules_from_table(struct oftable *)
91 OVS_REQUIRES(ofproto_mutex);
92static void oftable_configure_eviction(struct oftable *,
93 unsigned int eviction,
94 const struct mf_subfield *fields,
95 size_t n_fields)
96 OVS_REQUIRES(ofproto_mutex);
97
98/* This is the only combination of OpenFlow eviction flags that OVS supports: a
99 * combination of OF1.4+ importance, the remaining lifetime of the flow, and
100 * fairness based on user-specified fields. */
101#define OFPROTO_EVICTION_FLAGS \
102 (OFPTMPEF14_OTHER | OFPTMPEF14_IMPORTANCE | OFPTMPEF14_LIFETIME)
103
104/* A set of rules within a single OpenFlow table (oftable) that have the same
105 * values for the oftable's eviction_fields. A rule to be evicted, when one is
106 * needed, is taken from the eviction group that contains the greatest number
107 * of rules.
108 *
109 * An oftable owns any number of eviction groups, each of which contains any
110 * number of rules.
111 *
112 * Membership in an eviction group is imprecise, based on the hash of the
113 * oftable's eviction_fields (in the eviction_group's id_node.hash member).
114 * That is, if two rules have different eviction_fields, but those
115 * eviction_fields hash to the same value, then they will belong to the same
116 * eviction_group anyway.
117 *
118 * (When eviction is not enabled on an oftable, we don't track any eviction
119 * groups, to save time and space.) */
120struct eviction_group {
121 struct hmap_node id_node; /* In oftable's "eviction_groups_by_id". */
122 struct heap_node size_node; /* In oftable's "eviction_groups_by_size". */
123 struct heap rules; /* Contains "struct rule"s. */
124};
125
126static bool choose_rule_to_evict(struct oftable *table, struct rule **rulep)
127 OVS_REQUIRES(ofproto_mutex);
128static uint64_t rule_eviction_priority(struct ofproto *ofproto, struct rule *)
129 OVS_REQUIRES(ofproto_mutex);
130static void eviction_group_add_rule(struct rule *)
131 OVS_REQUIRES(ofproto_mutex);
132static void eviction_group_remove_rule(struct rule *)
133 OVS_REQUIRES(ofproto_mutex);
134
135static void rule_criteria_init(struct rule_criteria *, uint8_t table_id,
136 const struct minimatch *match, int priority,
137 ovs_version_t version,
138 ovs_be64 cookie, ovs_be64 cookie_mask,
139 ofp_port_t out_port, uint32_t out_group);
140static void rule_criteria_require_rw(struct rule_criteria *,
141 bool can_write_readonly);
142static void rule_criteria_destroy(struct rule_criteria *);
143
144static enum ofperr collect_rules_loose(struct ofproto *,
145 const struct rule_criteria *,
146 struct rule_collection *);
147
148struct learned_cookie {
149 union {
150 /* In struct ofproto's 'learned_cookies' hmap. */
151 struct hmap_node hmap_node OVS_GUARDED_BY(ofproto_mutex);
152
153 /* In 'dead_cookies' list when removed from hmap. */
154 struct ovs_list list_node;
155 };
156
157 /* Key. */
158 ovs_be64 cookie OVS_GUARDED_BY(ofproto_mutex);
159 uint8_t table_id OVS_GUARDED_BY(ofproto_mutex);
160
161 /* Number of references from "learn" actions.
162 *
163 * When this drops to 0, all of the flows in 'table_id' with the specified
164 * 'cookie' are deleted. */
165 int n OVS_GUARDED_BY(ofproto_mutex);
166};
167
168static const struct ofpact_learn *next_learn_with_delete(
169 const struct rule_actions *, const struct ofpact_learn *start);
170
171static void learned_cookies_inc(struct ofproto *, const struct rule_actions *)
172 OVS_REQUIRES(ofproto_mutex);
173static void learned_cookies_dec(struct ofproto *, const struct rule_actions *,
174 struct ovs_list *dead_cookies)
175 OVS_REQUIRES(ofproto_mutex);
176static void learned_cookies_flush(struct ofproto *, struct ovs_list *dead_cookies)
177 OVS_REQUIRES(ofproto_mutex);
178
179/* ofport. */
180static void ofport_destroy__(struct ofport *) OVS_EXCLUDED(ofproto_mutex);
181static void ofport_destroy(struct ofport *, bool del);
182static bool ofport_is_mtu_overridden(const struct ofproto *,
183 const struct ofport *);
184
185static int update_port(struct ofproto *, const char *devname);
186static int init_ports(struct ofproto *);
187static void reinit_ports(struct ofproto *);
188
189static long long int ofport_get_usage(const struct ofproto *,
190 ofp_port_t ofp_port);
191static void ofport_set_usage(struct ofproto *, ofp_port_t ofp_port,
192 long long int last_used);
193static void ofport_remove_usage(struct ofproto *, ofp_port_t ofp_port);
194
195/* Ofport usage.
196 *
197 * Keeps track of the currently used and recently used ofport values and is
198 * used to prevent immediate recycling of ofport values. */
199struct ofport_usage {
200 struct hmap_node hmap_node; /* In struct ofproto's "ofport_usage" hmap. */
201 ofp_port_t ofp_port; /* OpenFlow port number. */
202 long long int last_used; /* Last time the 'ofp_port' was used. LLONG_MAX
203 represents in-use ofports. */
204};
205
206/* rule. */
207static void ofproto_rule_send_removed(struct rule *)
208 OVS_EXCLUDED(ofproto_mutex);
209static bool rule_is_readonly(const struct rule *);
210static void ofproto_rule_insert__(struct ofproto *, struct rule *)
211 OVS_REQUIRES(ofproto_mutex);
212static void ofproto_rule_remove__(struct ofproto *, struct rule *)
213 OVS_REQUIRES(ofproto_mutex);
214
215/* The source of an OpenFlow request.
216 *
217 * A table modification request can be generated externally, via OpenFlow, or
218 * internally through a function call. This structure indicates the source of
219 * an OpenFlow-generated table modification. For an internal flow_mod, it
220 * isn't meaningful and thus supplied as NULL. */
struct openflow_mod_requester {
    struct ofconn *ofconn; /* Connection on which flow_mod arrived. */
    const struct ofp_header *request; /* The OpenFlow request that prompted
                                       * this modification. */
};
225
226/* OpenFlow. */
227static enum ofperr ofproto_rule_create(struct ofproto *, struct cls_rule *,
228 uint8_t table_id, ovs_be64 new_cookie,
229 uint16_t idle_timeout,
230 uint16_t hard_timeout,
231 enum ofputil_flow_mod_flags flags,
232 uint16_t importance,
233 const struct ofpact *ofpacts,
234 size_t ofpacts_len,
235 uint64_t match_tlv_bitmap,
236 uint64_t ofpacts_tlv_bitmap,
237 struct rule **new_rule)
238 OVS_NO_THREAD_SAFETY_ANALYSIS;
239
240static void replace_rule_start(struct ofproto *, struct ofproto_flow_mod *,
241 struct rule *old_rule, struct rule *new_rule)
242 OVS_REQUIRES(ofproto_mutex);
243
244static void replace_rule_revert(struct ofproto *, struct rule *old_rule,
245 struct rule *new_rule)
246 OVS_REQUIRES(ofproto_mutex);
247
248static void replace_rule_finish(struct ofproto *, struct ofproto_flow_mod *,
249 const struct openflow_mod_requester *,
250 struct rule *old_rule, struct rule *new_rule,
251 struct ovs_list *dead_cookies)
252 OVS_REQUIRES(ofproto_mutex);
253static void delete_flows__(struct rule_collection *,
254 enum ofp_flow_removed_reason,
255 const struct openflow_mod_requester *)
256 OVS_REQUIRES(ofproto_mutex);
257
258static void ofproto_group_delete_all__(struct ofproto *)
259 OVS_REQUIRES(ofproto_mutex);
260static bool ofproto_group_exists(const struct ofproto *, uint32_t group_id);
261static void handle_openflow(struct ofconn *, const struct ofpbuf *);
262static enum ofperr ofproto_flow_mod_init(struct ofproto *,
263 struct ofproto_flow_mod *,
264 const struct ofputil_flow_mod *fm,
265 struct rule *)
266 OVS_EXCLUDED(ofproto_mutex);
267static enum ofperr ofproto_flow_mod_start(struct ofproto *,
268 struct ofproto_flow_mod *)
269 OVS_REQUIRES(ofproto_mutex);
270static void ofproto_flow_mod_revert(struct ofproto *,
271 struct ofproto_flow_mod *)
272 OVS_REQUIRES(ofproto_mutex);
273static void ofproto_flow_mod_finish(struct ofproto *,
274 struct ofproto_flow_mod *,
275 const struct openflow_mod_requester *)
276 OVS_REQUIRES(ofproto_mutex);
277static enum ofperr handle_flow_mod__(struct ofproto *,
278 const struct ofputil_flow_mod *,
279 const struct openflow_mod_requester *)
280 OVS_EXCLUDED(ofproto_mutex);
281static void calc_duration(long long int start, long long int now,
282 uint32_t *sec, uint32_t *nsec);
283
284/* ofproto. */
285static uint64_t pick_datapath_id(const struct ofproto *);
286static uint64_t pick_fallback_dpid(void);
287static void ofproto_destroy__(struct ofproto *);
288static void update_mtu(struct ofproto *, struct ofport *);
289static void update_mtu_ofproto(struct ofproto *);
290static void meter_delete(struct ofproto *, uint32_t);
291static void meter_delete_all(struct ofproto *);
292static void meter_insert_rule(struct rule *);
293
294/* unixctl. */
295static void ofproto_unixctl_init(void);
296
297/* All registered ofproto classes, in probe order. */
298static const struct ofproto_class **ofproto_classes;
299static size_t n_ofproto_classes;
300static size_t allocated_ofproto_classes;
301
302/* Global lock that protects all flow table operations. */
303struct ovs_mutex ofproto_mutex = OVS_MUTEX_INITIALIZER;
304
305unsigned ofproto_flow_limit = OFPROTO_FLOW_LIMIT_DEFAULT;
306unsigned ofproto_max_idle = OFPROTO_MAX_IDLE_DEFAULT;
307
308size_t n_handlers, n_revalidators;
309
310/* Map from datapath name to struct ofproto, for use by unixctl commands. */
311static struct hmap all_ofprotos = HMAP_INITIALIZER(&all_ofprotos);
312
313/* Initial mappings of port to OpenFlow number mappings. */
314static struct shash init_ofp_ports = SHASH_INITIALIZER(&init_ofp_ports);
315
316static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
317
318/* The default value of true waits for flow restore. */
319static bool flow_restore_wait = true;
320
321/* Must be called to initialize the ofproto library.
322 *
323 * The caller may pass in 'iface_hints', which contains an shash of
324 * "iface_hint" elements indexed by the interface's name. The provider
325 * may use these hints to describe the startup configuration in order to
326 * reinitialize its state. The caller owns the provided data, so a
327 * provider will make copies of anything required. An ofproto provider
328 * will remove any existing state that is not described by the hint, and
329 * may choose to remove it all. */
330void
331ofproto_init(const struct shash *iface_hints)
332{
333 struct shash_node *node;
334 size_t i;
335
336 ofproto_class_register(&ofproto_dpif_class);
337
338 /* Make a local copy, since we don't own 'iface_hints' elements. */
339 SHASH_FOR_EACH(node, iface_hints) {
340 const struct iface_hint *orig_hint = node->data;
341 struct iface_hint *new_hint = xmalloc(sizeof *new_hint);
342 const char *br_type = ofproto_normalize_type(orig_hint->br_type);
343
344 new_hint->br_name = xstrdup(orig_hint->br_name);
345 new_hint->br_type = xstrdup(br_type);
346 new_hint->ofp_port = orig_hint->ofp_port;
347
348 shash_add(&init_ofp_ports, node->name, new_hint);
349 }
350
351 for (i = 0; i < n_ofproto_classes; i++) {
352 ofproto_classes[i]->init(&init_ofp_ports);
353 }
354
355 ofproto_unixctl_init();
356}
357
358/* 'type' should be a normalized datapath type, as returned by
359 * ofproto_normalize_type(). Returns the corresponding ofproto_class
360 * structure, or a null pointer if there is none registered for 'type'. */
361static const struct ofproto_class *
362ofproto_class_find__(const char *type)
363{
364 size_t i;
365
366 for (i = 0; i < n_ofproto_classes; i++) {
367 const struct ofproto_class *class = ofproto_classes[i];
368 struct sset types;
369 bool found;
370
371 sset_init(&types);
372 class->enumerate_types(&types);
373 found = sset_contains(&types, type);
374 sset_destroy(&types);
375
376 if (found) {
377 return class;
378 }
379 }
380 VLOG_WARN("unknown datapath type %s", type);
381 return NULL;
382}
383
384/* Registers a new ofproto class. After successful registration, new ofprotos
385 * of that type can be created using ofproto_create(). */
386int
387ofproto_class_register(const struct ofproto_class *new_class)
388{
389 size_t i;
390
391 for (i = 0; i < n_ofproto_classes; i++) {
392 if (ofproto_classes[i] == new_class) {
393 return EEXIST;
394 }
395 }
396
397 if (n_ofproto_classes >= allocated_ofproto_classes) {
398 ofproto_classes = x2nrealloc(ofproto_classes,
399 &allocated_ofproto_classes,
400 sizeof *ofproto_classes);
401 }
402 ofproto_classes[n_ofproto_classes++] = new_class;
403 return 0;
404}
405
406/* Unregisters a datapath provider. 'type' must have been previously
407 * registered and not currently be in use by any ofprotos. After
408 * unregistration new datapaths of that type cannot be opened using
409 * ofproto_create(). */
410int
411ofproto_class_unregister(const struct ofproto_class *class)
412{
413 size_t i;
414
415 for (i = 0; i < n_ofproto_classes; i++) {
416 if (ofproto_classes[i] == class) {
417 for (i++; i < n_ofproto_classes; i++) {
418 ofproto_classes[i - 1] = ofproto_classes[i];
419 }
420 n_ofproto_classes--;
421 return 0;
422 }
423 }
424 VLOG_WARN("attempted to unregister an ofproto class that is not "
425 "registered");
426 return EAFNOSUPPORT;
427}
428
429/* Clears 'types' and enumerates all registered ofproto types into it. The
430 * caller must first initialize the sset. */
431void
432ofproto_enumerate_types(struct sset *types)
433{
434 size_t i;
435
436 sset_clear(types);
437 for (i = 0; i < n_ofproto_classes; i++) {
438 ofproto_classes[i]->enumerate_types(types);
439 }
440}
441
442/* Returns the fully spelled out name for the given ofproto 'type'.
443 *
444 * Normalized type string can be compared with strcmp(). Unnormalized type
445 * string might be the same even if they have different spellings. */
/* Returns the fully spelled out name for the given ofproto 'type': a NULL or
 * empty 'type' maps to the default "system" datapath, anything else is
 * returned unchanged.  Normalized type strings can be compared with
 * strcmp(). */
const char *
ofproto_normalize_type(const char *type)
{
    if (type == NULL || type[0] == '\0') {
        return "system";
    }
    return type;
}
451
452/* Clears 'names' and enumerates the names of all known created ofprotos with
453 * the given 'type'. The caller must first initialize the sset. Returns 0 if
454 * successful, otherwise a positive errno value.
455 *
456 * Some kinds of datapaths might not be practically enumerable. This is not
457 * considered an error. */
458int
459ofproto_enumerate_names(const char *type, struct sset *names)
460{
461 const struct ofproto_class *class = ofproto_class_find__(type);
462 return class ? class->enumerate_names(type, names) : EAFNOSUPPORT;
463}
464
465static void
466ofproto_bump_tables_version(struct ofproto *ofproto)
467{
468 ++ofproto->tables_version;
469 ofproto->ofproto_class->set_tables_version(ofproto,
470 ofproto->tables_version);
471}
472
/* Creates and initializes a new datapath named 'datapath_name' of type
 * 'datapath_type' and stores it in '*ofprotop'.  On failure, '*ofprotop' is
 * NULL and the return value is a positive errno value (EAFNOSUPPORT for an
 * unknown type, ENOMEM on allocation failure, or whatever the provider's
 * construct() reports). */
int
ofproto_create(const char *datapath_name, const char *datapath_type,
               struct ofproto **ofprotop)
    OVS_EXCLUDED(ofproto_mutex)
{
    const struct ofproto_class *class;
    struct ofproto *ofproto;
    int error;
    int i;

    *ofprotop = NULL;

    datapath_type = ofproto_normalize_type(datapath_type);
    class = ofproto_class_find__(datapath_type);
    if (!class) {
        VLOG_WARN("could not create datapath %s of unknown type %s",
                  datapath_name, datapath_type);
        return EAFNOSUPPORT;
    }

    /* The provider allocates the (provider-specific, larger) object; the
     * generic layer initializes the embedded struct ofproto below. */
    ofproto = class->alloc();
    if (!ofproto) {
        VLOG_ERR("failed to allocate datapath %s of type %s",
                 datapath_name, datapath_type);
        return ENOMEM;
    }

    /* Initialize.  The mutex is held while the ofproto is visible in
     * 'all_ofprotos' but not yet fully initialized. */
    ovs_mutex_lock(&ofproto_mutex);
    memset(ofproto, 0, sizeof *ofproto);
    ofproto->ofproto_class = class;
    ofproto->name = xstrdup(datapath_name);
    ofproto->type = xstrdup(datapath_type);
    hmap_insert(&all_ofprotos, &ofproto->hmap_node,
                hash_string(ofproto->name, 0));
    ofproto->datapath_id = 0;
    ofproto->forward_bpdu = false;
    ofproto->fallback_dpid = pick_fallback_dpid();
    ofproto->mfr_desc = NULL;
    ofproto->hw_desc = NULL;
    ofproto->sw_desc = NULL;
    ofproto->serial_desc = NULL;
    ofproto->dp_desc = NULL;
    ofproto->frag_handling = OFPUTIL_FRAG_NORMAL;
    hmap_init(&ofproto->ports);
    hmap_init(&ofproto->ofport_usage);
    shash_init(&ofproto->port_by_name);
    simap_init(&ofproto->ofp_requests);
    ofproto->max_ports = ofp_to_u16(OFPP_MAX);
    ofproto->eviction_group_timer = LLONG_MIN;
    ofproto->tables = NULL;
    ofproto->n_tables = 0;
    ofproto->tables_version = OVS_VERSION_MIN;
    hindex_init(&ofproto->cookies);
    hmap_init(&ofproto->learned_cookies);
    ovs_list_init(&ofproto->expirable);
    ofproto->connmgr = connmgr_create(ofproto, datapath_name, datapath_name);
    ofproto->min_mtu = INT_MAX;
    cmap_init(&ofproto->groups);
    ovs_mutex_unlock(&ofproto_mutex);
    /* Advertise all four OpenFlow group types and all actions for each. */
    ofproto->ogf.types = 0xf;
    ofproto->ogf.capabilities = OFPGFC_CHAINING | OFPGFC_SELECT_LIVENESS |
                                OFPGFC_SELECT_WEIGHT;
    for (i = 0; i < 4; i++) {
        ofproto->ogf.max_groups[i] = OFPG_MAX;
        ofproto->ogf.ofpacts[i] = (UINT64_C(1) << N_OFPACTS) - 1;
    }
    ovsrcu_set(&ofproto->metadata_tab, tun_metadata_alloc(NULL));

    ovs_mutex_init(&ofproto->vl_mff_map.mutex);
    cmap_init(&ofproto->vl_mff_map.cmap);

    /* Provider-specific construction; expected to call
     * ofproto_init_tables(). */
    error = ofproto->ofproto_class->construct(ofproto);
    if (error) {
        VLOG_ERR("failed to open datapath %s: %s",
                 datapath_name, ovs_strerror(error));
        ovs_mutex_lock(&ofproto_mutex);
        connmgr_destroy(ofproto->connmgr);
        ofproto->connmgr = NULL;
        ovs_mutex_unlock(&ofproto_mutex);
        ofproto_destroy__(ofproto);
        return error;
    }

    /* Check that hidden tables, if any, are at the end. */
    ovs_assert(ofproto->n_tables);
    for (i = 0; i + 1 < ofproto->n_tables; i++) {
        enum oftable_flags flags = ofproto->tables[i].flags;
        enum oftable_flags next_flags = ofproto->tables[i + 1].flags;

        ovs_assert(!(flags & OFTABLE_HIDDEN) || next_flags & OFTABLE_HIDDEN);
    }

    ofproto->datapath_id = pick_datapath_id(ofproto);
    init_ports(ofproto);

    /* Initialize meters table. */
    if (ofproto->ofproto_class->meter_get_features) {
        ofproto->ofproto_class->meter_get_features(ofproto,
                                                   &ofproto->meter_features);
    } else {
        memset(&ofproto->meter_features, 0, sizeof ofproto->meter_features);
    }
    hmap_init(&ofproto->meters);
    ofproto->slowpath_meter_id = UINT32_MAX;
    ofproto->controller_meter_id = UINT32_MAX;

    /* Set the initial tables version. */
    ofproto_bump_tables_version(ofproto);

    *ofprotop = ofproto;
    return 0;
}
586
587/* Must be called (only) by an ofproto implementation in its constructor
588 * function. See the large comment on 'construct' in struct ofproto_class for
589 * details. */
590void
591ofproto_init_tables(struct ofproto *ofproto, int n_tables)
592{
593 struct oftable *table;
594
595 ovs_assert(!ofproto->n_tables);
596 ovs_assert(n_tables >= 1 && n_tables <= 255);
597
598 ofproto->n_tables = n_tables;
599 ofproto->tables = xmalloc(n_tables * sizeof *ofproto->tables);
600 OFPROTO_FOR_EACH_TABLE (table, ofproto) {
601 oftable_init(table);
602 }
603}
604
605/* To be optionally called (only) by an ofproto implementation in its
606 * constructor function. See the large comment on 'construct' in struct
607 * ofproto_class for details.
608 *
609 * Sets the maximum number of ports to 'max_ports'. The ofproto generic layer
610 * will then ensure that actions passed into the ofproto implementation will
611 * not refer to OpenFlow ports numbered 'max_ports' or higher. If this
612 * function is not called, there will be no such restriction.
613 *
614 * Reserved ports numbered OFPP_MAX and higher are special and not subject to
615 * the 'max_ports' restriction. */
void
ofproto_init_max_ports(struct ofproto *ofproto, uint16_t max_ports)
{
    /* Reserved ports (OFPP_MAX and above) are never subject to 'max_ports'. */
    ovs_assert(max_ports <= ofp_to_u16(OFPP_MAX));
    ofproto->max_ports = max_ports;
}

/* Returns the datapath ID currently in use by 'ofproto'. */
uint64_t
ofproto_get_datapath_id(const struct ofproto *ofproto)
{
    return ofproto->datapath_id;
}

/* Sets 'p''s datapath ID to 'datapath_id', or to a default chosen by
 * pick_datapath_id() if 'datapath_id' is zero. */
void
ofproto_set_datapath_id(struct ofproto *p, uint64_t datapath_id)
{
    uint64_t old_dpid = p->datapath_id;
    p->datapath_id = datapath_id ? datapath_id : pick_datapath_id(p);
    if (p->datapath_id != old_dpid) {
        /* Force all active connections to reconnect, since there is no way to
         * notify a controller that the datapath ID has changed. */
        ofproto_reconnect_controllers(p);
    }
}
640
/* Configures the set of OpenFlow controllers for 'p' from 'controllers',
 * delegating to the connection manager. */
void
ofproto_set_controllers(struct ofproto *p, struct shash *controllers)
{
    connmgr_set_controllers(p->connmgr, controllers);
}

/* Sets the behavior used when 'p' loses contact with all of its controllers
 * to 'fail_mode'. */
void
ofproto_set_fail_mode(struct ofproto *p, enum ofproto_fail_mode fail_mode)
{
    connmgr_set_fail_mode(p->connmgr, fail_mode);
}

/* Drops the connections between 'ofproto' and all of its controllers, forcing
 * them to reconnect. */
void
ofproto_reconnect_controllers(struct ofproto *ofproto)
{
    connmgr_reconnect(ofproto->connmgr);
}

/* Sets the 'n' TCP port addresses in 'extras' as ones to which 'ofproto''s
 * in-band control should guarantee access, in the same way that in-band
 * control guarantees access to OpenFlow controllers. */
void
ofproto_set_extra_in_band_remotes(struct ofproto *ofproto,
                                  const struct sockaddr_in *extras, size_t n)
{
    connmgr_set_extra_in_band_remotes(ofproto->connmgr, extras, n);
}

/* Sets the OpenFlow queue used by flows set up by in-band control on
 * 'ofproto' to 'queue_id'.  If 'queue_id' is negative, then in-band control
 * flows will use the default queue. */
void
ofproto_set_in_band_queue(struct ofproto *ofproto, int queue_id)
{
    connmgr_set_in_band_queue(ofproto->connmgr, queue_id);
}

/* Sets the bundle max idle timeout.  (Units are defined by
 * connmgr_set_bundle_idle_timeout(); not visible here.) */
void
ofproto_set_bundle_idle_timeout(unsigned timeout)
{
    connmgr_set_bundle_idle_timeout(timeout);
}

/* Sets the number of flows at which eviction from the kernel flow table
 * will occur. */
void
ofproto_set_flow_limit(unsigned limit)
{
    ofproto_flow_limit = limit;
}

/* Sets the maximum idle time for flows in the datapath before they are
 * expired. */
void
ofproto_set_max_idle(unsigned max_idle)
{
    ofproto_max_idle = max_idle;
}
702
703/* If forward_bpdu is true, the NORMAL action will forward frames with
704 * reserved (e.g. STP) destination Ethernet addresses. if forward_bpdu is false,
705 * the NORMAL action will drop these frames. */
706void
707ofproto_set_forward_bpdu(struct ofproto *ofproto, bool forward_bpdu)
708{
709 bool old_val = ofproto->forward_bpdu;
710 ofproto->forward_bpdu = forward_bpdu;
711 if (old_val != ofproto->forward_bpdu) {
712 if (ofproto->ofproto_class->forward_bpdu_changed) {
713 ofproto->ofproto_class->forward_bpdu_changed(ofproto);
714 }
715 }
716}
717
718/* Sets the MAC aging timeout for the OFPP_NORMAL action on 'ofproto' to
719 * 'idle_time', in seconds, and the maximum number of MAC table entries to
720 * 'max_entries'. */
721void
722ofproto_set_mac_table_config(struct ofproto *ofproto, unsigned idle_time,
723 size_t max_entries)
724{
725 if (ofproto->ofproto_class->set_mac_table_config) {
726 ofproto->ofproto_class->set_mac_table_config(ofproto, idle_time,
727 max_entries);
728 }
729}
730
731/* Multicast snooping configuration. */
732
733/* Configures multicast snooping on 'ofproto' using the settings
734 * defined in 's'. If 's' is NULL, disables multicast snooping.
735 *
736 * Returns 0 if successful, otherwise a positive errno value. */
737int
738ofproto_set_mcast_snooping(struct ofproto *ofproto,
739 const struct ofproto_mcast_snooping_settings *s)
740{
741 return (ofproto->ofproto_class->set_mcast_snooping
742 ? ofproto->ofproto_class->set_mcast_snooping(ofproto, s)
743 : EOPNOTSUPP);
744}
745
746/* Configures multicast snooping flood settings on 'ofp_port' of 'ofproto'.
747 *
748 * Returns 0 if successful, otherwise a positive errno value.*/
749int
750ofproto_port_set_mcast_snooping(struct ofproto *ofproto, void *aux,
751 const struct ofproto_mcast_snooping_port_settings *s)
752{
753 return (ofproto->ofproto_class->set_mcast_snooping_port
754 ? ofproto->ofproto_class->set_mcast_snooping_port(ofproto, aux, s)
755 : EOPNOTSUPP);
756}
757
758void
759ofproto_type_set_config(const char *datapath_type, const struct smap *cfg)
760{
761 const struct ofproto_class *class;
762
763 datapath_type = ofproto_normalize_type(datapath_type);
764 class = ofproto_class_find__(datapath_type);
765
766 if (class->type_set_config) {
767 class->type_set_config(datapath_type, cfg);
768 }
769}
770
771void
772ofproto_set_threads(int n_handlers_, int n_revalidators_)
773{
774 int threads = MAX(count_cpu_cores(), 2);
775
776 n_revalidators = MAX(n_revalidators_, 0);
777 n_handlers = MAX(n_handlers_, 0);
778
779 if (!n_revalidators) {
780 n_revalidators = n_handlers
781 ? MAX(threads - (int) n_handlers, 1)
782 : threads / 4 + 1;
783 }
784
785 if (!n_handlers) {
786 n_handlers = MAX(threads - (int) n_revalidators, 1);
787 }
788}
789
/* Replaces 'p''s human-readable datapath description (OpenFlow "dp_desc")
 * with a copy of 'dp_desc', which may be NULL to clear it. */
void
ofproto_set_dp_desc(struct ofproto *p, const char *dp_desc)
{
    free(p->dp_desc);
    p->dp_desc = nullable_xstrdup(dp_desc);
}

/* Configures the "snoop" connection targets for 'ofproto' from 'snoops'.
 * Returns connmgr_set_snoops()'s result (presumably 0 on success, following
 * the errno convention used elsewhere in this file -- see connmgr). */
int
ofproto_set_snoops(struct ofproto *ofproto, const struct sset *snoops)
{
    return connmgr_set_snoops(ofproto->connmgr, snoops);
}
802
803int
804ofproto_set_netflow(struct ofproto *ofproto,
805 const struct netflow_options *nf_options)
806{
807 if (nf_options && sset_is_empty(&nf_options->collectors)) {
808 nf_options = NULL;
809 }
810
811 if (ofproto->ofproto_class->set_netflow) {
812 return ofproto->ofproto_class->set_netflow(ofproto, nf_options);
813 } else {
814 return nf_options ? EOPNOTSUPP : 0;
815 }
816}
817
818int
819ofproto_set_sflow(struct ofproto *ofproto,
820 const struct ofproto_sflow_options *oso)
821{
822 if (oso && sset_is_empty(&oso->targets)) {
823 oso = NULL;
824 }
825
826 if (ofproto->ofproto_class->set_sflow) {
827 return ofproto->ofproto_class->set_sflow(ofproto, oso);
828 } else {
829 return oso ? EOPNOTSUPP : 0;
830 }
831}
832
833int
834ofproto_set_ipfix(struct ofproto *ofproto,
835 const struct ofproto_ipfix_bridge_exporter_options *bo,
836 const struct ofproto_ipfix_flow_exporter_options *fo,
837 size_t n_fo)
838{
839 if (ofproto->ofproto_class->set_ipfix) {
840 return ofproto->ofproto_class->set_ipfix(ofproto, bo, fo, n_fo);
841 } else {
842 return (bo || fo) ? EOPNOTSUPP : 0;
843 }
844}
845
846static int
847ofproto_get_ipfix_stats(struct ofproto *ofproto,
848 bool bridge_ipfix,
849 struct ovs_list *replies)
850{
851 int error;
852
853 if (ofproto->ofproto_class->get_ipfix_stats) {
854 error = ofproto->ofproto_class->get_ipfix_stats(ofproto,
855 bridge_ipfix,
856 replies);
857 } else {
858 error = EOPNOTSUPP;
859 }
860
861 return error;
862}
863
864static enum ofperr
865handle_ipfix_bridge_stats_request(struct ofconn *ofconn,
866 const struct ofp_header *request)
867{
868 struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
869 struct ovs_list replies;
870 enum ofperr error;
871
872 ofpmp_init(&replies, request);
873 error = ofproto_get_ipfix_stats(ofproto, true, &replies);
874
875 if (!error) {
876 ofconn_send_replies(ofconn, &replies);
877 } else {
878 ofpbuf_list_delete(&replies);
879 }
880
881 return error;
882}
883
884static enum ofperr
885handle_ipfix_flow_stats_request(struct ofconn *ofconn,
886 const struct ofp_header *request)
887{
888 struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
889 struct ovs_list replies;
890 enum ofperr error;
891
892 ofpmp_init(&replies, request);
893 error = ofproto_get_ipfix_stats(ofproto, false, &replies);
894
895 if (!error) {
896 ofconn_send_replies(ofconn, &replies);
897 } else {
898 ofpbuf_list_delete(&replies);
899 }
900
901 return error;
902}
903
904static enum ofperr
905handle_nxt_ct_flush_zone(struct ofconn *ofconn, const struct ofp_header *oh)
906{
907 struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
908 const struct nx_zone_id *nzi = ofpmsg_body(oh);
909
910 if (!is_all_zeros(nzi->zero, sizeof nzi->zero)) {
911 return OFPERR_NXBRC_MUST_BE_ZERO;
912 }
913
914 uint16_t zone = ntohs(nzi->zone_id);
915 if (ofproto->ofproto_class->ct_flush) {
916 ofproto->ofproto_class->ct_flush(ofproto, &zone);
917 } else {
918 return EOPNOTSUPP;
919 }
920
921 return 0;
922}
923
/* Sets the global flow-restore-wait flag, normally driven by the
 * database's "flow-restore-wait" setting. */
void
ofproto_set_flow_restore_wait(bool flow_restore_wait_db)
{
    flow_restore_wait = flow_restore_wait_db;
}

/* Returns the current flow-restore-wait flag (defaults to true until the
 * configuration has been read). */
bool
ofproto_get_flow_restore_wait(void)
{
    return flow_restore_wait;
}
935
936\f
937/* Spanning Tree Protocol (STP) configuration. */
938
939/* Configures STP on 'ofproto' using the settings defined in 's'. If
940 * 's' is NULL, disables STP.
941 *
942 * Returns 0 if successful, otherwise a positive errno value. */
943int
944ofproto_set_stp(struct ofproto *ofproto,
945 const struct ofproto_stp_settings *s)
946{
947 return (ofproto->ofproto_class->set_stp
948 ? ofproto->ofproto_class->set_stp(ofproto, s)
949 : EOPNOTSUPP);
950}
951
952/* Retrieves STP status of 'ofproto' and stores it in 's'. If the
953 * 'enabled' member of 's' is false, then the other members are not
954 * meaningful.
955 *
956 * Returns 0 if successful, otherwise a positive errno value. */
957int
958ofproto_get_stp_status(struct ofproto *ofproto,
959 struct ofproto_stp_status *s)
960{
961 return (ofproto->ofproto_class->get_stp_status
962 ? ofproto->ofproto_class->get_stp_status(ofproto, s)
963 : EOPNOTSUPP);
964}
965
966/* Configures STP on 'ofp_port' of 'ofproto' using the settings defined
967 * in 's'. The caller is responsible for assigning STP port numbers
968 * (using the 'port_num' member in the range of 1 through 255, inclusive)
969 * and ensuring there are no duplicates. If the 's' is NULL, then STP
970 * is disabled on the port.
971 *
972 * Returns 0 if successful, otherwise a positive errno value.*/
973int
974ofproto_port_set_stp(struct ofproto *ofproto, ofp_port_t ofp_port,
975 const struct ofproto_port_stp_settings *s)
976{
977 struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
978 if (!ofport) {
979 VLOG_WARN("%s: cannot configure STP on nonexistent port %"PRIu32,
980 ofproto->name, ofp_port);
981 return ENODEV;
982 }
983
984 return (ofproto->ofproto_class->set_stp_port
985 ? ofproto->ofproto_class->set_stp_port(ofport, s)
986 : EOPNOTSUPP);
987}
988
989/* Retrieves STP port status of 'ofp_port' on 'ofproto' and stores it in
990 * 's'. If the 'enabled' member in 's' is false, then the other members
991 * are not meaningful.
992 *
993 * Returns 0 if successful, otherwise a positive errno value.*/
994int
995ofproto_port_get_stp_status(struct ofproto *ofproto, ofp_port_t ofp_port,
996 struct ofproto_port_stp_status *s)
997{
998 struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
999 if (!ofport) {
1000 VLOG_WARN_RL(&rl, "%s: cannot get STP status on nonexistent "
1001 "port %"PRIu32, ofproto->name, ofp_port);
1002 return ENODEV;
1003 }
1004
1005 return (ofproto->ofproto_class->get_stp_port_status
1006 ? ofproto->ofproto_class->get_stp_port_status(ofport, s)
1007 : EOPNOTSUPP);
1008}
1009
1010/* Retrieves STP port statistics of 'ofp_port' on 'ofproto' and stores it in
1011 * 's'. If the 'enabled' member in 's' is false, then the other members
1012 * are not meaningful.
1013 *
1014 * Returns 0 if successful, otherwise a positive errno value.*/
1015int
1016ofproto_port_get_stp_stats(struct ofproto *ofproto, ofp_port_t ofp_port,
1017 struct ofproto_port_stp_stats *s)
1018{
1019 struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
1020 if (!ofport) {
1021 VLOG_WARN_RL(&rl, "%s: cannot get STP stats on nonexistent "
1022 "port %"PRIu32, ofproto->name, ofp_port);
1023 return ENODEV;
1024 }
1025
1026 return (ofproto->ofproto_class->get_stp_port_stats
1027 ? ofproto->ofproto_class->get_stp_port_stats(ofport, s)
1028 : EOPNOTSUPP);
1029}
1030
1031/* Rapid Spanning Tree Protocol (RSTP) configuration. */
1032
1033/* Configures RSTP on 'ofproto' using the settings defined in 's'. If
1034 * 's' is NULL, disables RSTP.
1035 *
1036 * Returns 0 if successful, otherwise a positive errno value. */
1037int
1038ofproto_set_rstp(struct ofproto *ofproto,
1039 const struct ofproto_rstp_settings *s)
1040{
1041 if (!ofproto->ofproto_class->set_rstp) {
1042 return EOPNOTSUPP;
1043 }
1044 ofproto->ofproto_class->set_rstp(ofproto, s);
1045 return 0;
1046}
1047
1048/* Retrieves RSTP status of 'ofproto' and stores it in 's'. If the
1049 * 'enabled' member of 's' is false, then the other members are not
1050 * meaningful.
1051 *
1052 * Returns 0 if successful, otherwise a positive errno value. */
1053int
1054ofproto_get_rstp_status(struct ofproto *ofproto,
1055 struct ofproto_rstp_status *s)
1056{
1057 if (!ofproto->ofproto_class->get_rstp_status) {
1058 return EOPNOTSUPP;
1059 }
1060 ofproto->ofproto_class->get_rstp_status(ofproto, s);
1061 return 0;
1062}
1063
1064/* Configures RSTP on 'ofp_port' of 'ofproto' using the settings defined
1065 * in 's'. The caller is responsible for assigning RSTP port numbers
1066 * (using the 'port_num' member in the range of 1 through 255, inclusive)
1067 * and ensuring there are no duplicates. If the 's' is NULL, then RSTP
1068 * is disabled on the port.
1069 *
1070 * Returns 0 if successful, otherwise a positive errno value.*/
1071int
1072ofproto_port_set_rstp(struct ofproto *ofproto, ofp_port_t ofp_port,
1073 const struct ofproto_port_rstp_settings *s)
1074{
1075 struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
1076 if (!ofport) {
1077 VLOG_WARN("%s: cannot configure RSTP on nonexistent port %"PRIu32,
1078 ofproto->name, ofp_port);
1079 return ENODEV;
1080 }
1081
1082 if (!ofproto->ofproto_class->set_rstp_port) {
1083 return EOPNOTSUPP;
1084 }
1085 ofproto->ofproto_class->set_rstp_port(ofport, s);
1086 return 0;
1087}
1088
1089/* Retrieves RSTP port status of 'ofp_port' on 'ofproto' and stores it in
1090 * 's'. If the 'enabled' member in 's' is false, then the other members
1091 * are not meaningful.
1092 *
1093 * Returns 0 if successful, otherwise a positive errno value.*/
1094int
1095ofproto_port_get_rstp_status(struct ofproto *ofproto, ofp_port_t ofp_port,
1096 struct ofproto_port_rstp_status *s)
1097{
1098 struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
1099 if (!ofport) {
1100 VLOG_WARN_RL(&rl, "%s: cannot get RSTP status on nonexistent "
1101 "port %"PRIu32, ofproto->name, ofp_port);
1102 return ENODEV;
1103 }
1104
1105 if (!ofproto->ofproto_class->get_rstp_port_status) {
1106 return EOPNOTSUPP;
1107 }
1108 ofproto->ofproto_class->get_rstp_port_status(ofport, s);
1109 return 0;
1110}
1111\f
1112/* Queue DSCP configuration. */
1113
1114/* Registers meta-data associated with the 'n_qdscp' Qualities of Service
1115 * 'queues' attached to 'ofport'. This data is not intended to be sufficient
1116 * to implement QoS. Instead, it is used to implement features which require
1117 * knowledge of what queues exist on a port, and some basic information about
1118 * them.
1119 *
1120 * Returns 0 if successful, otherwise a positive errno value. */
1121int
1122ofproto_port_set_queues(struct ofproto *ofproto, ofp_port_t ofp_port,
1123 const struct ofproto_port_queue *queues,
1124 size_t n_queues)
1125{
1126 struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
1127
1128 if (!ofport) {
1129 VLOG_WARN("%s: cannot set queues on nonexistent port %"PRIu32,
1130 ofproto->name, ofp_port);
1131 return ENODEV;
1132 }
1133
1134 return (ofproto->ofproto_class->set_queues
1135 ? ofproto->ofproto_class->set_queues(ofport, queues, n_queues)
1136 : EOPNOTSUPP);
1137}
1138\f
1139/* LLDP configuration. */
1140void
1141ofproto_port_set_lldp(struct ofproto *ofproto,
1142 ofp_port_t ofp_port,
1143 const struct smap *cfg)
1144{
1145 struct ofport *ofport;
1146 int error;
1147
1148 ofport = ofproto_get_port(ofproto, ofp_port);
1149 if (!ofport) {
1150 VLOG_WARN("%s: cannot configure LLDP on nonexistent port %"PRIu32,
1151 ofproto->name, ofp_port);
1152 return;
1153 }
1154 error = (ofproto->ofproto_class->set_lldp
1155 ? ofproto->ofproto_class->set_lldp(ofport, cfg)
1156 : EOPNOTSUPP);
1157 if (error) {
1158 VLOG_WARN("%s: lldp configuration on port %"PRIu32" (%s) failed (%s)",
1159 ofproto->name, ofp_port, netdev_get_name(ofport->netdev),
1160 ovs_strerror(error));
1161 }
1162}
1163
1164int
1165ofproto_set_aa(struct ofproto *ofproto, void *aux OVS_UNUSED,
1166 const struct aa_settings *s)
1167{
1168 if (!ofproto->ofproto_class->set_aa) {
1169 return EOPNOTSUPP;
1170 }
1171 ofproto->ofproto_class->set_aa(ofproto, s);
1172 return 0;
1173}
1174
1175int
1176ofproto_aa_mapping_register(struct ofproto *ofproto, void *aux,
1177 const struct aa_mapping_settings *s)
1178{
1179 if (!ofproto->ofproto_class->aa_mapping_set) {
1180 return EOPNOTSUPP;
1181 }
1182 ofproto->ofproto_class->aa_mapping_set(ofproto, aux, s);
1183 return 0;
1184}
1185
1186int
1187ofproto_aa_mapping_unregister(struct ofproto *ofproto, void *aux)
1188{
1189 if (!ofproto->ofproto_class->aa_mapping_unset) {
1190 return EOPNOTSUPP;
1191 }
1192 ofproto->ofproto_class->aa_mapping_unset(ofproto, aux);
1193 return 0;
1194}
1195
1196int
1197ofproto_aa_vlan_get_queued(struct ofproto *ofproto,
1198 struct ovs_list *list)
1199{
1200 if (!ofproto->ofproto_class->aa_vlan_get_queued) {
1201 return EOPNOTSUPP;
1202 }
1203 ofproto->ofproto_class->aa_vlan_get_queued(ofproto, list);
1204 return 0;
1205}
1206
1207unsigned int
1208ofproto_aa_vlan_get_queue_size(struct ofproto *ofproto)
1209{
1210 if (!ofproto->ofproto_class->aa_vlan_get_queue_size) {
1211 return EOPNOTSUPP;
1212 }
1213 return ofproto->ofproto_class->aa_vlan_get_queue_size(ofproto);
1214}
1215
1216/* Connectivity Fault Management configuration. */
1217
1218/* Clears the CFM configuration from 'ofp_port' on 'ofproto'. */
1219void
1220ofproto_port_clear_cfm(struct ofproto *ofproto, ofp_port_t ofp_port)
1221{
1222 struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
1223 if (ofport && ofproto->ofproto_class->set_cfm) {
1224 ofproto->ofproto_class->set_cfm(ofport, NULL);
1225 }
1226}
1227
1228/* Configures connectivity fault management on 'ofp_port' in 'ofproto'. Takes
1229 * basic configuration from the configuration members in 'cfm', and the remote
1230 * maintenance point ID from remote_mpid. Ignores the statistics members of
1231 * 'cfm'.
1232 *
1233 * This function has no effect if 'ofproto' does not have a port 'ofp_port'. */
1234void
1235ofproto_port_set_cfm(struct ofproto *ofproto, ofp_port_t ofp_port,
1236 const struct cfm_settings *s)
1237{
1238 struct ofport *ofport;
1239 int error;
1240
1241 ofport = ofproto_get_port(ofproto, ofp_port);
1242 if (!ofport) {
1243 VLOG_WARN("%s: cannot configure CFM on nonexistent port %"PRIu32,
1244 ofproto->name, ofp_port);
1245 return;
1246 }
1247
1248 /* XXX: For configuration simplicity, we only support one remote_mpid
1249 * outside of the CFM module. It's not clear if this is the correct long
1250 * term solution or not. */
1251 error = (ofproto->ofproto_class->set_cfm
1252 ? ofproto->ofproto_class->set_cfm(ofport, s)
1253 : EOPNOTSUPP);
1254 if (error) {
1255 VLOG_WARN("%s: CFM configuration on port %"PRIu32" (%s) failed (%s)",
1256 ofproto->name, ofp_port, netdev_get_name(ofport->netdev),
1257 ovs_strerror(error));
1258 }
1259}
1260
1261/* Configures BFD on 'ofp_port' in 'ofproto'. This function has no effect if
1262 * 'ofproto' does not have a port 'ofp_port'. */
1263void
1264ofproto_port_set_bfd(struct ofproto *ofproto, ofp_port_t ofp_port,
1265 const struct smap *cfg)
1266{
1267 struct ofport *ofport;
1268 int error;
1269
1270 ofport = ofproto_get_port(ofproto, ofp_port);
1271 if (!ofport) {
1272 VLOG_WARN("%s: cannot configure bfd on nonexistent port %"PRIu32,
1273 ofproto->name, ofp_port);
1274 return;
1275 }
1276
1277 error = (ofproto->ofproto_class->set_bfd
1278 ? ofproto->ofproto_class->set_bfd(ofport, cfg)
1279 : EOPNOTSUPP);
1280 if (error) {
1281 VLOG_WARN("%s: bfd configuration on port %"PRIu32" (%s) failed (%s)",
1282 ofproto->name, ofp_port, netdev_get_name(ofport->netdev),
1283 ovs_strerror(error));
1284 }
1285}
1286
1287/* Checks the status change of BFD on 'ofport'.
1288 *
1289 * Returns true if 'ofproto_class' does not support 'bfd_status_changed'. */
1290bool
1291ofproto_port_bfd_status_changed(struct ofproto *ofproto, ofp_port_t ofp_port)
1292{
1293 struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
1294 return (ofport && ofproto->ofproto_class->bfd_status_changed
1295 ? ofproto->ofproto_class->bfd_status_changed(ofport)
1296 : true);
1297}
1298
1299/* Populates 'status' with the status of BFD on 'ofport'. Returns 0 on
1300 * success. Returns a positive errno otherwise. Has no effect if 'ofp_port'
1301 * is not an OpenFlow port in 'ofproto'.
1302 *
1303 * The caller must provide and own '*status'. */
1304int
1305ofproto_port_get_bfd_status(struct ofproto *ofproto, ofp_port_t ofp_port,
1306 struct smap *status)
1307{
1308 struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
1309 return (ofport && ofproto->ofproto_class->get_bfd_status
1310 ? ofproto->ofproto_class->get_bfd_status(ofport, status)
1311 : EOPNOTSUPP);
1312}
1313
1314/* Checks the status of LACP negotiation for 'ofp_port' within ofproto.
1315 * Returns 1 if LACP partner information for 'ofp_port' is up-to-date,
1316 * 0 if LACP partner information is not current (generally indicating a
1317 * connectivity problem), or -1 if LACP is not enabled on 'ofp_port'. */
1318int
1319ofproto_port_is_lacp_current(struct ofproto *ofproto, ofp_port_t ofp_port)
1320{
1321 struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
1322 return (ofport && ofproto->ofproto_class->port_is_lacp_current
1323 ? ofproto->ofproto_class->port_is_lacp_current(ofport)
1324 : -1);
1325}
1326
1327int
1328ofproto_port_get_lacp_stats(const struct ofport *port, struct lacp_slave_stats *stats)
1329{
1330 struct ofproto *ofproto = port->ofproto;
1331 int error;
1332
1333 if (ofproto->ofproto_class->port_get_lacp_stats) {
1334 error = ofproto->ofproto_class->port_get_lacp_stats(port, stats);
1335 } else {
1336 error = EOPNOTSUPP;
1337 }
1338
1339 return error;
1340}
1341\f
1342/* Bundles. */
1343
1344/* Registers a "bundle" associated with client data pointer 'aux' in 'ofproto'.
1345 * A bundle is the same concept as a Port in OVSDB, that is, it consists of one
1346 * or more "slave" devices (Interfaces, in OVSDB) along with a VLAN
1347 * configuration plus, if there is more than one slave, a bonding
1348 * configuration.
1349 *
1350 * If 'aux' is already registered then this function updates its configuration
1351 * to 's'. Otherwise, this function registers a new bundle.
1352 *
1353 * Bundles only affect the NXAST_AUTOPATH action and output to the OFPP_NORMAL
1354 * port. */
1355int
1356ofproto_bundle_register(struct ofproto *ofproto, void *aux,
1357 const struct ofproto_bundle_settings *s)
1358{
1359 return (ofproto->ofproto_class->bundle_set
1360 ? ofproto->ofproto_class->bundle_set(ofproto, aux, s)
1361 : EOPNOTSUPP);
1362}
1363
/* Unregisters the bundle registered on 'ofproto' with auxiliary data 'aux'.
 * If no such bundle has been registered, this has no effect.
 *
 * Implemented by registering a NULL configuration, which deletes the
 * bundle; returns whatever ofproto_bundle_register() returns. */
int
ofproto_bundle_unregister(struct ofproto *ofproto, void *aux)
{
    return ofproto_bundle_register(ofproto, aux, NULL);
}
1371
1372\f
1373/* Registers a mirror associated with client data pointer 'aux' in 'ofproto'.
1374 * If 'aux' is already registered then this function updates its configuration
1375 * to 's'. Otherwise, this function registers a new mirror. */
1376int
1377ofproto_mirror_register(struct ofproto *ofproto, void *aux,
1378 const struct ofproto_mirror_settings *s)
1379{
1380 return (ofproto->ofproto_class->mirror_set
1381 ? ofproto->ofproto_class->mirror_set(ofproto, aux, s)
1382 : EOPNOTSUPP);
1383}
1384
/* Unregisters the mirror registered on 'ofproto' with auxiliary data 'aux'.
 * If no mirror has been registered, this has no effect.
 *
 * Implemented by registering a NULL configuration, which deletes the
 * mirror; returns whatever ofproto_mirror_register() returns. */
int
ofproto_mirror_unregister(struct ofproto *ofproto, void *aux)
{
    return ofproto_mirror_register(ofproto, aux, NULL);
}
1392
1393/* Retrieves statistics from mirror associated with client data pointer
1394 * 'aux' in 'ofproto'. Stores packet and byte counts in 'packets' and
1395 * 'bytes', respectively. If a particular counters is not supported,
1396 * the appropriate argument is set to UINT64_MAX.
1397 */
1398int
1399ofproto_mirror_get_stats(struct ofproto *ofproto, void *aux,
1400 uint64_t *packets, uint64_t *bytes)
1401{
1402 if (!ofproto->ofproto_class->mirror_get_stats) {
1403 *packets = *bytes = UINT64_MAX;
1404 return EOPNOTSUPP;
1405 }
1406
1407 return ofproto->ofproto_class->mirror_get_stats(ofproto, aux,
1408 packets, bytes);
1409}
1410
1411/* Configures the VLANs whose bits are set to 1 in 'flood_vlans' as VLANs on
1412 * which all packets are flooded, instead of using MAC learning. If
1413 * 'flood_vlans' is NULL, then MAC learning applies to all VLANs.
1414 *
1415 * Flood VLANs affect only the treatment of packets output to the OFPP_NORMAL
1416 * port. */
1417int
1418ofproto_set_flood_vlans(struct ofproto *ofproto, unsigned long *flood_vlans)
1419{
1420 return (ofproto->ofproto_class->set_flood_vlans
1421 ? ofproto->ofproto_class->set_flood_vlans(ofproto, flood_vlans)
1422 : EOPNOTSUPP);
1423}
1424
1425/* Returns true if 'aux' is a registered bundle that is currently in use as the
1426 * output for a mirror. */
1427bool
1428ofproto_is_mirror_output_bundle(const struct ofproto *ofproto, void *aux)
1429{
1430 return (ofproto->ofproto_class->is_mirror_output_bundle
1431 ? ofproto->ofproto_class->is_mirror_output_bundle(ofproto, aux)
1432 : false);
1433}
1434\f
1435/* Configuration of OpenFlow tables. */
1436
/* Returns the number of OpenFlow tables in 'ofproto'.  This count includes
 * hidden tables; see ofproto_get_n_visible_tables() for the
 * controller-visible count. */
int
ofproto_get_n_tables(const struct ofproto *ofproto)
{
    return ofproto->n_tables;
}
1443
1444/* Returns the number of Controller visible OpenFlow tables
1445 * in 'ofproto'. This number will exclude Hidden tables.
1446 * This funtion's return value should be less or equal to that of
1447 * ofproto_get_n_tables() . */
1448uint8_t
1449ofproto_get_n_visible_tables(const struct ofproto *ofproto)
1450{
1451 uint8_t n = ofproto->n_tables;
1452
1453 /* Count only non-hidden tables in the number of tables. (Hidden tables,
1454 * if present, are always at the end.) */
1455 while(n && (ofproto->tables[n - 1].flags & OFTABLE_HIDDEN)) {
1456 n--;
1457 }
1458
1459 return n;
1460}
1461
/* Configures the OpenFlow table in 'ofproto' with id 'table_id' with the
 * settings from 's'.  'table_id' must be in the range 0 through the number of
 * OpenFlow tables in 'ofproto' minus 1, inclusive.
 *
 * For read-only tables, only the name may be configured. */
void
ofproto_configure_table(struct ofproto *ofproto, int table_id,
                        const struct ofproto_table_settings *s)
{
    struct oftable *table;

    ovs_assert(table_id >= 0 && table_id < ofproto->n_tables);
    table = &ofproto->tables[table_id];

    /* Renaming is permitted even on read-only tables. */
    oftable_set_name(table, s->name);

    if (table->flags & OFTABLE_READONLY) {
        return;
    }

    if (classifier_set_prefix_fields(&table->cls,
                                     s->prefix_fields, s->n_prefix_fields)) {
        /* XXX: Trigger revalidation. */
    }

    ovs_mutex_lock(&ofproto_mutex);
    /* Turn client-requested eviction on or off without disturbing other
     * eviction flags, then re-apply the (possibly lowered) flow limit. */
    unsigned int new_eviction = (s->enable_eviction
                                 ? table->eviction | EVICTION_CLIENT
                                 : table->eviction & ~EVICTION_CLIENT);
    oftable_configure_eviction(table, new_eviction, s->groups, s->n_groups);
    table->max_flows = s->max_flows;
    evict_rules_from_table(table);
    ovs_mutex_unlock(&ofproto_mutex);
}
1496\f
/* Returns true if 'ofproto' has any snoop connections configured, per
 * connmgr_has_snoops(). */
bool
ofproto_has_snoops(const struct ofproto *ofproto)
{
    return connmgr_has_snoops(ofproto->connmgr);
}
1502
/* Stores 'ofproto''s snoop connection targets into 'snoops', delegating to
 * connmgr_get_snoops(). */
void
ofproto_get_snoops(const struct ofproto *ofproto, struct sset *snoops)
{
    connmgr_get_snoops(ofproto->connmgr, snoops);
}
1508
/* Deletes 'rule' from 'ofproto'.
 *
 * Within an ofproto implementation, this function allows an ofproto
 * implementation to destroy any rules that remain when its ->destruct()
 * function is called.  This function is not suitable for use elsewhere in an
 * ofproto implementation.
 *
 * This function implements steps 4.4 and 4.5 in the section titled "Rule Life
 * Cycle" in ofproto-provider.h. */
void
ofproto_rule_delete(struct ofproto *ofproto, struct rule *rule)
    OVS_EXCLUDED(ofproto_mutex)
{
    /* This skips the ofmonitor and flow-removed notifications because the
     * switch is being deleted and any OpenFlow channels have been or soon will
     * be killed. */
    ovs_mutex_lock(&ofproto_mutex);

    if (rule->state == RULE_INSERTED) {
        /* Make sure there is no postponed removal of the rule. */
        ovs_assert(cls_rule_visible_in_version(&rule->cr, OVS_VERSION_MAX));

        /* Remove the rule from its table's classifier, then from the
         * ofproto's own data structures. */
        classifier_remove_assert(&rule->ofproto->tables[rule->table_id].cls,
                                 &rule->cr);
        ofproto_rule_remove__(rule->ofproto, rule);
        if (ofproto->ofproto_class->rule_delete) {
            /* Class-specific teardown for the rule, when provided. */
            ofproto->ofproto_class->rule_delete(rule);
        }

        /* This may not be the last reference to the rule. */
        ofproto_rule_unref(rule);
    }
    ovs_mutex_unlock(&ofproto_mutex);
}
1543
/* Deletes every flow from 'ofproto''s non-hidden tables, plus all of its
 * groups and meters.  Used while the whole switch is going away (see
 * ofproto_destroy()). */
static void
ofproto_flush__(struct ofproto *ofproto)
    OVS_EXCLUDED(ofproto_mutex)
{
    struct oftable *table;

    /* This will flush all datapath flows. */
    if (ofproto->ofproto_class->flush) {
        ofproto->ofproto_class->flush(ofproto);
    }

    /* XXX: There is a small race window here, where new datapath flows can be
     * created by upcall handlers based on the existing flow table.  We can not
     * call ofproto class flush while holding 'ofproto_mutex' to prevent this,
     * as then we could deadlock on syncing with the handler threads waiting on
     * the same mutex. */

    ovs_mutex_lock(&ofproto_mutex);
    OFPROTO_FOR_EACH_TABLE (table, ofproto) {
        struct rule_collection rules;
        struct rule *rule;

        if (table->flags & OFTABLE_HIDDEN) {
            continue;
        }

        rule_collection_init(&rules);

        /* Collect the table's rules first, then delete them in one batch. */
        CLS_FOR_EACH (rule, cr, &table->cls) {
            rule_collection_add(&rules, rule);
        }
        delete_flows__(&rules, OFPRR_DELETE, NULL);
    }
    ofproto_group_delete_all__(ofproto);
    meter_delete_all(ofproto);
    /* XXX: Concurrent handler threads may insert new learned flows based on
     * learn actions of the now deleted flows right after we release
     * 'ofproto_mutex'. */
    ovs_mutex_unlock(&ofproto_mutex);
}
1584
/* Final stage of ofproto destruction: removes 'ofproto' from the global
 * list, frees all memory it still owns, and hands the structure back to the
 * class's dealloc() function.  Runs via RCU postponement (see
 * ofproto_destroy_defer__()), after the deferred rule destruction has
 * completed. */
static void
ofproto_destroy__(struct ofproto *ofproto)
    OVS_EXCLUDED(ofproto_mutex)
{
    struct oftable *table;

    cmap_destroy(&ofproto->groups);

    /* 'all_ofprotos' is protected by 'ofproto_mutex'. */
    ovs_mutex_lock(&ofproto_mutex);
    hmap_remove(&all_ofprotos, &ofproto->hmap_node);
    ovs_mutex_unlock(&ofproto_mutex);

    free(ofproto->name);
    free(ofproto->type);
    free(ofproto->mfr_desc);
    free(ofproto->hw_desc);
    free(ofproto->sw_desc);
    free(ofproto->serial_desc);
    free(ofproto->dp_desc);
    hmap_destroy(&ofproto->ports);
    hmap_destroy(&ofproto->ofport_usage);
    shash_destroy(&ofproto->port_by_name);
    simap_destroy(&ofproto->ofp_requests);

    OFPROTO_FOR_EACH_TABLE (table, ofproto) {
        oftable_destroy(table);
    }
    free(ofproto->tables);

    hmap_destroy(&ofproto->meters);

    /* Tear down the variable-length mf_field map and tunnel metadata. */
    ovs_mutex_lock(&ofproto->vl_mff_map.mutex);
    mf_vl_mff_map_clear(&ofproto->vl_mff_map, true);
    ovs_mutex_unlock(&ofproto->vl_mff_map.mutex);
    cmap_destroy(&ofproto->vl_mff_map.cmap);
    ovs_mutex_destroy(&ofproto->vl_mff_map.mutex);
    tun_metadata_free(ovsrcu_get_protected(struct tun_table *,
                                           &ofproto->metadata_tab));

    /* All rules must be gone by now, so the cookie indexes are empty. */
    ovs_assert(hindex_is_empty(&ofproto->cookies));
    hindex_destroy(&ofproto->cookies);

    ovs_assert(hmap_is_empty(&ofproto->learned_cookies));
    hmap_destroy(&ofproto->learned_cookies);

    ofproto->ofproto_class->dealloc(ofproto);
}
1632
/* Destroying rules is doubly deferred, must have 'ofproto' around for them.
 * - 1st we defer the removal of the rules from the classifier
 * - 2nd we defer the actual destruction of the rules.
 * Scheduling ofproto_destroy__() through ovsrcu_postpone() provides the
 * second deferral. */
static void
ofproto_destroy_defer__(struct ofproto *ofproto)
    OVS_EXCLUDED(ofproto_mutex)
{
    ovsrcu_postpone(ofproto_destroy__, ofproto);
}
1642
/* Tears down 'p': flushes its flow tables, destroys its ports and
 * connection manager, runs the class's ->destruct(), and schedules the
 * final memory release through RCU.  A NULL 'p' is a no-op.
 *
 * 'del' is forwarded to ofport_destroy() and ->destruct(); presumably it
 * selects whether underlying datapath resources are removed as well --
 * confirm against ofproto-provider.h. */
void
ofproto_destroy(struct ofproto *p, bool del)
    OVS_EXCLUDED(ofproto_mutex)
{
    struct ofport *ofport, *next_ofport;
    struct ofport_usage *usage;

    if (!p) {
        return;
    }

    /* Remove all flows/groups/meters before taking down the ports. */
    ofproto_flush__(p);
    HMAP_FOR_EACH_SAFE (ofport, next_ofport, hmap_node, &p->ports) {
        ofport_destroy(ofport, del);
    }

    HMAP_FOR_EACH_POP (usage, hmap_node, &p->ofport_usage) {
        free(usage);
    }

    p->ofproto_class->destruct(p, del);

    /* We should not postpone this because it involves deleting a listening
     * socket which we may want to reopen soon. 'connmgr' may be used by other
     * threads only if they take the ofproto_mutex and read a non-NULL
     * 'ofproto->connmgr'. */
    ovs_mutex_lock(&ofproto_mutex);
    connmgr_destroy(p->connmgr);
    p->connmgr = NULL;
    ovs_mutex_unlock(&ofproto_mutex);

    /* Destroying rules is deferred, must have 'ofproto' around for them. */
    ovsrcu_postpone(ofproto_destroy_defer__, p);
}
1677
1678/* Destroys the datapath with the respective 'name' and 'type'. With the Linux
1679 * kernel datapath, for example, this destroys the datapath in the kernel, and
1680 * with the netdev-based datapath, it tears down the data structures that
1681 * represent the datapath.
1682 *
1683 * The datapath should not be currently open as an ofproto. */
1684int
1685ofproto_delete(const char *name, const char *type)
1686{
1687 const struct ofproto_class *class = ofproto_class_find__(type);
1688 return (!class ? EAFNOSUPPORT
1689 : !class->del ? EACCES
1690 : class->del(type, name));
1691}
1692
1693static void
1694process_port_change(struct ofproto *ofproto, int error, char *devname)
1695{
1696 if (error == ENOBUFS) {
1697 reinit_ports(ofproto);
1698 } else if (!error) {
1699 update_port(ofproto, devname);
1700 free(devname);
1701 }
1702}
1703
1704int
1705ofproto_type_run(const char *datapath_type)
1706{
1707 const struct ofproto_class *class;
1708 int error;
1709
1710 datapath_type = ofproto_normalize_type(datapath_type);
1711 class = ofproto_class_find__(datapath_type);
1712
1713 error = class->type_run ? class->type_run(datapath_type) : 0;
1714 if (error && error != EAGAIN) {
1715 VLOG_ERR_RL(&rl, "%s: type_run failed (%s)",
1716 datapath_type, ovs_strerror(error));
1717 }
1718 return error;
1719}
1720
1721void
1722ofproto_type_wait(const char *datapath_type)
1723{
1724 const struct ofproto_class *class;
1725
1726 datapath_type = ofproto_normalize_type(datapath_type);
1727 class = ofproto_class_find__(datapath_type);
1728
1729 if (class->type_wait) {
1730 class->type_wait(datapath_type);
1731 }
1732}
1733
/* Performs one pass of periodic work for 'p': runs the class ->run() hook,
 * occasionally restores the eviction-group heap invariant, processes port
 * additions/removals and netdev changes, and runs the connection manager.
 *
 * Returns the ->run() error (0 or a positive errno value, possibly EAGAIN).
 * Note that when the class provides ->port_poll(), the polling loop below
 * leaves 'error' equal to EAGAIN, so that is what gets returned. */
int
ofproto_run(struct ofproto *p)
{
    int error;
    uint64_t new_seq;

    error = p->ofproto_class->run(p);
    if (error && error != EAGAIN) {
        VLOG_ERR_RL(&rl, "%s: run failed (%s)", p->name, ovs_strerror(error));
    }

    /* Restore the eviction group heap invariant occasionally. */
    if (p->eviction_group_timer < time_msec()) {
        size_t i;

        /* Rate-limit the rebuild to roughly once per second. */
        p->eviction_group_timer = time_msec() + 1000;

        for (i = 0; i < p->n_tables; i++) {
            struct oftable *table = &p->tables[i];
            struct eviction_group *evg;
            struct rule *rule;

            if (!table->eviction) {
                continue;
            }

            if (table->n_flows > 100000) {
                static struct vlog_rate_limit count_rl =
                    VLOG_RATE_LIMIT_INIT(1, 1);
                VLOG_WARN_RL(&count_rl, "Table %"PRIuSIZE" has an excessive"
                             " number of rules: %d", i, table->n_flows);
            }

            /* Re-place every rule with a timeout into its eviction group
             * and restore heap order. */
            ovs_mutex_lock(&ofproto_mutex);
            CLS_FOR_EACH (rule, cr, &table->cls) {
                if (rule->idle_timeout || rule->hard_timeout) {
                    if (!rule->eviction_group) {
                        eviction_group_add_rule(rule);
                    } else {
                        heap_raw_change(&rule->evg_node,
                                        rule_eviction_priority(p, rule));
                    }
                }
            }

            HEAP_FOR_EACH (evg, size_node, &table->eviction_groups_by_size) {
                heap_rebuild(&evg->rules);
            }
            ovs_mutex_unlock(&ofproto_mutex);
        }
    }

    if (p->ofproto_class->port_poll) {
        char *devname;

        /* Drain all pending port changes; the loop ends at EAGAIN. */
        while ((error = p->ofproto_class->port_poll(p, &devname)) != EAGAIN) {
            process_port_change(p, error, devname);
        }
    }

    new_seq = seq_read(connectivity_seq_get());
    if (new_seq != p->change_seq) {
        struct sset devnames;
        const char *devname;
        struct ofport *ofport;

        /* Update OpenFlow port status for any port whose netdev has changed.
         *
         * Refreshing a given 'ofport' can cause an arbitrary ofport to be
         * destroyed, so it's not safe to update ports directly from the
         * HMAP_FOR_EACH loop, or even to use HMAP_FOR_EACH_SAFE.  Instead, we
         * need this two-phase approach. */
        sset_init(&devnames);
        HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
            uint64_t port_change_seq;

            port_change_seq = netdev_get_change_seq(ofport->netdev);
            if (ofport->change_seq != port_change_seq) {
                ofport->change_seq = port_change_seq;
                sset_add(&devnames, netdev_get_name(ofport->netdev));
            }
        }
        SSET_FOR_EACH (devname, &devnames) {
            update_port(p, devname);
        }
        sset_destroy(&devnames);

        p->change_seq = new_seq;
    }

    connmgr_run(p->connmgr, handle_openflow);

    return error;
}
1828
/* Arranges for the poll loop to wake up when ofproto_run() for 'p' has work
 * to do: on class activity, pending port changes, connectivity changes, or
 * connection-manager activity. */
void
ofproto_wait(struct ofproto *p)
{
    p->ofproto_class->wait(p);
    if (p->ofproto_class->port_poll_wait) {
        p->ofproto_class->port_poll_wait(p);
    }
    seq_wait(connectivity_seq_get(), p->change_seq);
    connmgr_wait(p->connmgr);
}
1839
/* Returns true if 'p' has at least one OpenFlow controller, per
 * connmgr_has_controllers(). */
bool
ofproto_is_alive(const struct ofproto *p)
{
    return connmgr_has_controllers(p->connmgr);
}
1845
1846/* Adds some memory usage statistics for 'ofproto' into 'usage', for use with
1847 * memory_report(). */
1848void
1849ofproto_get_memory_usage(const struct ofproto *ofproto, struct simap *usage)
1850{
1851 const struct oftable *table;
1852 unsigned int n_rules;
1853
1854 simap_increase(usage, "ports", hmap_count(&ofproto->ports));
1855
1856 n_rules = 0;
1857 OFPROTO_FOR_EACH_TABLE (table, ofproto) {
1858 n_rules += table->n_flows;
1859 }
1860 simap_increase(usage, "rules", n_rules);
1861
1862 if (ofproto->ofproto_class->get_memory_usage) {
1863 ofproto->ofproto_class->get_memory_usage(ofproto, usage);
1864 }
1865
1866 connmgr_get_memory_usage(ofproto->connmgr, usage);
1867}
1868
1869void
1870ofproto_type_get_memory_usage(const char *datapath_type, struct simap *usage)
1871{
1872 const struct ofproto_class *class;
1873
1874 datapath_type = ofproto_normalize_type(datapath_type);
1875 class = ofproto_class_find__(datapath_type);
1876
1877 if (class && class->type_get_memory_usage) {
1878 class->type_get_memory_usage(datapath_type, usage);
1879 }
1880}
1881
/* Populates 'info' with status information about 'ofproto''s controllers,
 * delegating to connmgr_get_controller_info().  Free the result with
 * ofproto_free_ofproto_controller_info(). */
void
ofproto_get_ofproto_controller_info(const struct ofproto *ofproto,
                                    struct shash *info)
{
    connmgr_get_controller_info(ofproto->connmgr, info);
}
1888
/* Frees the contents of 'info' previously filled in by
 * ofproto_get_ofproto_controller_info(). */
void
ofproto_free_ofproto_controller_info(struct shash *info)
{
    connmgr_free_controller_info(info);
}
1894
/* Makes a deep copy of 'old' into 'port'.  The 'name' and 'type' strings
 * are duplicated with xstrdup(); release them with
 * ofproto_port_destroy(). */
void
ofproto_port_clone(struct ofproto_port *port, const struct ofproto_port *old)
{
    port->name = xstrdup(old->name);
    port->type = xstrdup(old->type);
    port->ofp_port = old->ofp_port;
}
1903
/* Frees memory allocated to members of 'ofproto_port' (e.g. by
 * ofproto_port_clone()).
 *
 * Do not call this function on an ofproto_port obtained from
 * ofproto_port_dump_next(): that function retains ownership of the data in the
 * ofproto_port. */
void
ofproto_port_destroy(struct ofproto_port *ofproto_port)
{
    free(ofproto_port->name);
    free(ofproto_port->type);
}
1915
/* Initializes 'dump' to begin dumping the ports in an ofproto.
 *
 * This function provides no status indication.  An error status for the
 * entire dump operation is provided when it is completed by calling
 * ofproto_port_dump_done().
 */
void
ofproto_port_dump_start(struct ofproto_port_dump *dump,
                        const struct ofproto *ofproto)
{
    dump->ofproto = ofproto;
    /* Any start-up failure is remembered in 'dump->error' and surfaced by
     * the subsequent _next()/_done() calls. */
    dump->error = ofproto->ofproto_class->port_dump_start(ofproto,
                                                          &dump->state);
}
1930
1931/* Attempts to retrieve another port from 'dump', which must have been created
1932 * with ofproto_port_dump_start(). On success, stores a new ofproto_port into
1933 * 'port' and returns true. On failure, returns false.
1934 *
1935 * Failure might indicate an actual error or merely that the last port has been
1936 * dumped. An error status for the entire dump operation is provided when it
1937 * is completed by calling ofproto_port_dump_done().
1938 *
1939 * The ofproto owns the data stored in 'port'. It will remain valid until at
1940 * least the next time 'dump' is passed to ofproto_port_dump_next() or
1941 * ofproto_port_dump_done(). */
1942bool
1943ofproto_port_dump_next(struct ofproto_port_dump *dump,
1944 struct ofproto_port *port)
1945{
1946 const struct ofproto *ofproto = dump->ofproto;
1947
1948 if (dump->error) {
1949 return false;
1950 }
1951
1952 dump->error = ofproto->ofproto_class->port_dump_next(ofproto, dump->state,
1953 port);
1954 if (dump->error) {
1955 ofproto->ofproto_class->port_dump_done(ofproto, dump->state);
1956 return false;
1957 }
1958 return true;
1959}
1960
1961/* Completes port table dump operation 'dump', which must have been created
1962 * with ofproto_port_dump_start(). Returns 0 if the dump operation was
1963 * error-free, otherwise a positive errno value describing the problem. */
1964int
1965ofproto_port_dump_done(struct ofproto_port_dump *dump)
1966{
1967 const struct ofproto *ofproto = dump->ofproto;
1968 if (!dump->error) {
1969 dump->error = ofproto->ofproto_class->port_dump_done(ofproto,
1970 dump->state);
1971 }
1972 return dump->error == EOF ? 0 : dump->error;
1973}
1974
1975/* Returns the type to pass to netdev_open() when 'ofproto' has a port of type
1976 * 'port_type', for a few special cases when a netdev type differs from a port
1977 * type. For example, when using the userspace datapath, a port of type
1978 * "internal" needs to be opened as "tap".
1979 *
1980 * Returns either 'type' itself or a string literal, which must not be
1981 * freed. */
1982const char *
1983ofproto_port_open_type(const struct ofproto *ofproto, const char *port_type)
1984{
1985 return (ofproto->ofproto_class->port_open_type
1986 ? ofproto->ofproto_class->port_open_type(ofproto->type, port_type)
1987 : port_type);
1988}
1989
1990/* Attempts to add 'netdev' as a port on 'ofproto'. If 'ofp_portp' is
1991 * non-null and '*ofp_portp' is not OFPP_NONE, attempts to use that as
1992 * the port's OpenFlow port number.
1993 *
1994 * If successful, returns 0 and sets '*ofp_portp' to the new port's
1995 * OpenFlow port number (if 'ofp_portp' is non-null). On failure,
1996 * returns a positive errno value and sets '*ofp_portp' to OFPP_NONE (if
1997 * 'ofp_portp' is non-null). */
1998int
1999ofproto_port_add(struct ofproto *ofproto, struct netdev *netdev,
2000 ofp_port_t *ofp_portp)
2001{
2002 ofp_port_t ofp_port = ofp_portp ? *ofp_portp : OFPP_NONE;
2003 int error;
2004
2005 error = ofproto->ofproto_class->port_add(ofproto, netdev);
2006 if (!error) {
2007 const char *netdev_name = netdev_get_name(netdev);
2008
2009 simap_put(&ofproto->ofp_requests, netdev_name,
2010 ofp_to_u16(ofp_port));
2011 error = update_port(ofproto, netdev_name);
2012 }
2013 if (ofp_portp) {
2014 *ofp_portp = OFPP_NONE;
2015 if (!error) {
2016 struct ofproto_port ofproto_port;
2017
2018 error = ofproto_port_query_by_name(ofproto,
2019 netdev_get_name(netdev),
2020 &ofproto_port);
2021 if (!error) {
2022 *ofp_portp = ofproto_port.ofp_port;
2023 ofproto_port_destroy(&ofproto_port);
2024 }
2025 }
2026 }
2027 return error;
2028}
2029
2030/* Looks up a port named 'devname' in 'ofproto'. On success, returns 0 and
2031 * initializes '*port' appropriately; on failure, returns a positive errno
2032 * value.
2033 *
2034 * The caller owns the data in 'ofproto_port' and must free it with
2035 * ofproto_port_destroy() when it is no longer needed. */
2036int
2037ofproto_port_query_by_name(const struct ofproto *ofproto, const char *devname,
2038 struct ofproto_port *port)
2039{
2040 int error;
2041
2042 error = ofproto->ofproto_class->port_query_by_name(ofproto, devname, port);
2043 if (error) {
2044 memset(port, 0, sizeof *port);
2045 }
2046 return error;
2047}
2048
2049/* Deletes port number 'ofp_port' from the datapath for 'ofproto'.
2050 * Returns 0 if successful, otherwise a positive errno. */
2051int
2052ofproto_port_del(struct ofproto *ofproto, ofp_port_t ofp_port)
2053{
2054 struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
2055 const char *name = ofport ? netdev_get_name(ofport->netdev) : "<unknown>";
2056 struct simap_node *ofp_request_node;
2057 int error;
2058
2059 ofp_request_node = simap_find(&ofproto->ofp_requests, name);
2060 if (ofp_request_node) {
2061 simap_delete(&ofproto->ofp_requests, ofp_request_node);
2062 }
2063
2064 error = ofproto->ofproto_class->port_del(ofproto, ofp_port);
2065 if (!error && ofport) {
2066 /* 'name' is the netdev's name and update_port() is going to close the
2067 * netdev. Just in case update_port() refers to 'name' after it
2068 * destroys 'ofport', make a copy of it around the update_port()
2069 * call. */
2070 char *devname = xstrdup(name);
2071 update_port(ofproto, devname);
2072 free(devname);
2073 }
2074 return error;
2075}
2076
2077/* Refreshes datapath configuration of port number 'ofp_port' in 'ofproto'.
2078 *
2079 * This function has no effect if 'ofproto' does not have a port 'ofp_port'. */
2080void
2081ofproto_port_set_config(struct ofproto *ofproto, ofp_port_t ofp_port,
2082 const struct smap *cfg)
2083{
2084 struct ofport *ofport;
2085 int error;
2086
2087 ofport = ofproto_get_port(ofproto, ofp_port);
2088 if (!ofport) {
2089 VLOG_WARN("%s: cannot configure datapath on nonexistent port %"PRIu32,
2090 ofproto->name, ofp_port);
2091 return;
2092 }
2093
2094 error = (ofproto->ofproto_class->port_set_config
2095 ? ofproto->ofproto_class->port_set_config(ofport, cfg)
2096 : EOPNOTSUPP);
2097 if (error) {
2098 VLOG_WARN("%s: datapath configuration on port %"PRIu32
2099 " (%s) failed (%s)",
2100 ofproto->name, ofp_port, netdev_get_name(ofport->netdev),
2101 ovs_strerror(error));
2102 }
2103}
2104
2105
2106static void
2107flow_mod_init(struct ofputil_flow_mod *fm,
2108 const struct match *match, int priority,
2109 const struct ofpact *ofpacts, size_t ofpacts_len,
2110 enum ofp_flow_mod_command command)
2111{
2112 *fm = (struct ofputil_flow_mod) {
2113 .priority = priority,
2114 .table_id = 0,
2115 .command = command,
2116 .buffer_id = UINT32_MAX,
2117 .out_port = OFPP_ANY,
2118 .out_group = OFPG_ANY,
2119 .ofpacts = CONST_CAST(struct ofpact *, ofpacts),
2120 .ofpacts_len = ofpacts_len,
2121 };
2122 minimatch_init(&fm->match, match);
2123}
2124
2125static int
2126simple_flow_mod(struct ofproto *ofproto,
2127 const struct match *match, int priority,
2128 const struct ofpact *ofpacts, size_t ofpacts_len,
2129 enum ofp_flow_mod_command command)
2130{
2131 struct ofputil_flow_mod fm;
2132 flow_mod_init(&fm, match, priority, ofpacts, ofpacts_len, command);
2133 enum ofperr error = handle_flow_mod__(ofproto, &fm, NULL);
2134 minimatch_destroy(&fm.match);
2135 return error;
2136}
2137
2138/* Adds a flow to OpenFlow flow table 0 in 'p' that matches 'cls_rule' and
2139 * performs the 'n_actions' actions in 'actions'. The new flow will not
2140 * timeout.
2141 *
2142 * If cls_rule->priority is in the range of priorities supported by OpenFlow
2143 * (0...65535, inclusive) then the flow will be visible to OpenFlow
2144 * controllers; otherwise, it will be hidden.
2145 *
2146 * The caller retains ownership of 'cls_rule' and 'ofpacts'.
2147 *
2148 * This is a helper function for in-band control and fail-open. */
2149void
2150ofproto_add_flow(struct ofproto *ofproto, const struct match *match,
2151 int priority,
2152 const struct ofpact *ofpacts, size_t ofpacts_len)
2153 OVS_EXCLUDED(ofproto_mutex)
2154{
2155 const struct rule *rule;
2156 bool must_add;
2157
2158 /* First do a cheap check whether the rule we're looking for already exists
2159 * with the actions that we want. If it does, then we're done. */
2160 rule = rule_from_cls_rule(classifier_find_match_exactly(
2161 &ofproto->tables[0].cls, match, priority,
2162 OVS_VERSION_MAX));
2163 if (rule) {
2164 const struct rule_actions *actions = rule_get_actions(rule);
2165 must_add = !ofpacts_equal(actions->ofpacts, actions->ofpacts_len,
2166 ofpacts, ofpacts_len);
2167 } else {
2168 must_add = true;
2169 }
2170
2171 /* If there's no such rule or the rule doesn't have the actions we want,
2172 * fall back to a executing a full flow mod. We can't optimize this at
2173 * all because we didn't take enough locks above to ensure that the flow
2174 * table didn't already change beneath us. */
2175 if (must_add) {
2176 simple_flow_mod(ofproto, match, priority, ofpacts, ofpacts_len,
2177 OFPFC_MODIFY_STRICT);
2178 }
2179}
2180
2181/* Executes the flow modification specified in 'fm'. Returns 0 on success, or
2182 * an OFPERR_* OpenFlow error code on failure.
2183 *
2184 * This is a helper function for in-band control and fail-open. */
2185enum ofperr
2186ofproto_flow_mod(struct ofproto *ofproto, const struct ofputil_flow_mod *fm)
2187 OVS_EXCLUDED(ofproto_mutex)
2188{
2189 return handle_flow_mod__(ofproto, fm, NULL);
2190}
2191
2192/* Searches for a rule with matching criteria exactly equal to 'target' in
2193 * ofproto's table 0 and, if it finds one, deletes it.
2194 *
2195 * This is a helper function for in-band control and fail-open. */
2196void
2197ofproto_delete_flow(struct ofproto *ofproto,
2198 const struct match *target, int priority)
2199 OVS_REQUIRES(ofproto_mutex)
2200{
2201 struct classifier *cls = &ofproto->tables[0].cls;
2202 struct rule *rule;
2203
2204 /* First do a cheap check whether the rule we're looking for has already
2205 * been deleted. If so, then we're done. */
2206 rule = rule_from_cls_rule(classifier_find_match_exactly(
2207 cls, target, priority, OVS_VERSION_MAX));
2208 if (!rule) {
2209 return;
2210 }
2211
2212 struct rule_collection rules;
2213
2214 rule_collection_init(&rules);
2215 rule_collection_add(&rules, rule);
2216 delete_flows__(&rules, OFPRR_DELETE, NULL);
2217 rule_collection_destroy(&rules);
2218}
2219
/* Deletes all of the flows from all of ofproto's flow tables, then notifies
 * the connection manager via connmgr_flushed() so that the flows required by
 * in-band control and fail-open can be reintroduced. */
void
ofproto_flush_flows(struct ofproto *ofproto)
{
    COVERAGE_INC(ofproto_flush);
    ofproto_flush__(ofproto);
    connmgr_flushed(ofproto->connmgr);
}
2229\f
2230static void
2231reinit_ports(struct ofproto *p)
2232{
2233 struct ofproto_port_dump dump;
2234 struct sset devnames;
2235 struct ofport *ofport;
2236 struct ofproto_port ofproto_port;
2237 const char *devname;
2238
2239 COVERAGE_INC(ofproto_reinit_ports);
2240
2241 sset_init(&devnames);
2242 HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
2243 sset_add(&devnames, netdev_get_name(ofport->netdev));
2244 }
2245 OFPROTO_PORT_FOR_EACH (&ofproto_port, &dump, p) {
2246 sset_add(&devnames, ofproto_port.name);
2247 }
2248
2249 SSET_FOR_EACH (devname, &devnames) {
2250 update_port(p, devname);
2251 }
2252 sset_destroy(&devnames);
2253}
2254
/* Chooses an OpenFlow port number for the netdev named 'netdev_name' in
 * 'ofproto'.  Honors a previously requested number from
 * 'ofproto->ofp_requests' when possible; otherwise scans for a number that
 * has not been used recently.  Returns OFPP_NONE if no number is
 * available. */
static ofp_port_t
alloc_ofp_port(struct ofproto *ofproto, const char *netdev_name)
{
    uint16_t port_idx;

    /* simap_get() returns 0 when there is no request for this netdev; map
     * that to UINT16_MAX, which is out of range and forces the allocation
     * loop below to run. */
    port_idx = simap_get(&ofproto->ofp_requests, netdev_name);
    port_idx = port_idx ? port_idx : UINT16_MAX;

    /* A usage timestamp of LLONG_MAX marks a number that is currently
     * assigned (see ofport_set_usage() at the bottom of this function). */
    if (port_idx >= ofproto->max_ports
        || ofport_get_usage(ofproto, u16_to_ofp(port_idx)) == LLONG_MAX) {
        /* 'end_port_no' remembers where the scan started so that we can
         * detect when we have wrapped all the way around. */
        uint16_t lru_ofport = 0, end_port_no = ofproto->alloc_port_no;
        long long int last_used_at, lru = LLONG_MAX;

        /* Search for a free OpenFlow port number.  We try not to
         * immediately reuse them to prevent problems due to old
         * flows.
         *
         * We limit the automatically assigned port numbers to the lower half
         * of the port range, to reserve the upper half for assignment by
         * controllers. */
        for (;;) {
            if (++ofproto->alloc_port_no >= MIN(ofproto->max_ports, 32768)) {
                ofproto->alloc_port_no = 1;
            }
            last_used_at = ofport_get_usage(ofproto,
                                u16_to_ofp(ofproto->alloc_port_no));
            if (!last_used_at) {
                /* Never used: take it immediately. */
                port_idx = ofproto->alloc_port_no;
                break;
            } else if ( last_used_at < time_msec() - 60*60*1000) {
                /* If the port with ofport 'ofproto->alloc_port_no' was deleted
                 * more than an hour ago, consider it usable. */
                ofport_remove_usage(ofproto,
                    u16_to_ofp(ofproto->alloc_port_no));
                port_idx = ofproto->alloc_port_no;
                break;
            } else if (last_used_at < lru) {
                /* Track the least-recently-used number as a fallback in
                 * case every number turns out to be recently used. */
                lru = last_used_at;
                lru_ofport = ofproto->alloc_port_no;
            }

            if (ofproto->alloc_port_no == end_port_no) {
                /* Wrapped all the way around: fall back to the LRU number,
                 * or give up if every number is in active use. */
                if (lru_ofport) {
                    port_idx = lru_ofport;
                    break;
                }
                return OFPP_NONE;
            }
        }
    }
    /* Mark the chosen number as currently in use. */
    ofport_set_usage(ofproto, u16_to_ofp(port_idx), LLONG_MAX);
    return u16_to_ofp(port_idx);
}
2308
2309static void
2310dealloc_ofp_port(struct ofproto *ofproto, ofp_port_t ofp_port)
2311{
2312 if (ofp_to_u16(ofp_port) < ofproto->max_ports) {
2313 ofport_set_usage(ofproto, ofp_port, time_msec());
2314 }
2315}
2316
/* Opens and returns a netdev for 'ofproto_port' in 'ofproto', or a null
 * pointer if the netdev cannot be opened.  On success, also fills in
 * '*pp' and, if 'ofproto_port' had no OpenFlow port number yet, assigns
 * one. */
static struct netdev *
ofport_open(struct ofproto *ofproto,
            struct ofproto_port *ofproto_port,
            struct ofputil_phy_port *pp)
{
    enum netdev_flags flags;
    struct netdev *netdev;
    int error;

    error = netdev_open(ofproto_port->name, ofproto_port->type, &netdev);
    if (error) {
        VLOG_WARN_RL(&rl, "%s: ignoring port %s (%"PRIu32") because netdev %s "
                     "cannot be opened (%s)",
                     ofproto->name,
                     ofproto_port->name, ofproto_port->ofp_port,
                     ofproto_port->name, ovs_strerror(error));
        return NULL;
    }

    if (ofproto_port->ofp_port == OFPP_NONE) {
        /* The port named after the bridge itself gets OFPP_LOCAL; any
         * other port gets a number from the allocator. */
        if (!strcmp(ofproto->name, ofproto_port->name)) {
            ofproto_port->ofp_port = OFPP_LOCAL;
        } else {
            ofproto_port->ofp_port = alloc_ofp_port(ofproto,
                                                    ofproto_port->name);
        }
    }
    pp->port_no = ofproto_port->ofp_port;
    netdev_get_etheraddr(netdev, &pp->hw_addr);
    pp->hw_addr64 = eth_addr64_zero;
    ovs_strlcpy(pp->name, ofproto_port->name, sizeof pp->name);
    /* Derive the OpenFlow config and state bits from the netdev's admin
     * (up/down) and carrier status. */
    netdev_get_flags(netdev, &flags);
    pp->config = flags & NETDEV_UP ? 0 : OFPUTIL_PC_PORT_DOWN;
    pp->state = netdev_get_carrier(netdev) ? 0 : OFPUTIL_PS_LINK_DOWN;
    netdev_get_features(netdev, &pp->curr, &pp->advertised,
                        &pp->supported, &pp->peer);
    /* Speeds are reported as bps / 1000, i.e. in kbps. */
    pp->curr_speed = netdev_features_to_bps(pp->curr, 0) / 1000;
    pp->max_speed = netdev_features_to_bps(pp->supported, 0) / 1000;

    return netdev;
}
2361
2362/* Returns true if most fields of 'a' and 'b' are equal. Differences in name
2363 * and port number are disregarded. */
2364static bool
2365ofport_equal(const struct ofputil_phy_port *a,
2366 const struct ofputil_phy_port *b)
2367{
2368 return (eth_addr_equals(a->hw_addr, b->hw_addr)
2369 && eth_addr64_equals(a->hw_addr64, b->hw_addr64)
2370 && a->state == b->state
2371 && a->config == b->config
2372 && a->curr == b->curr
2373 && a->advertised == b->advertised
2374 && a->supported == b->supported
2375 && a->peer == b->peer
2376 && a->curr_speed == b->curr_speed
2377 && a->max_speed == b->max_speed);
2378}
2379
/* Adds an ofport to 'p' initialized based on the given 'netdev' and 'pp'.
 * The caller must ensure that 'p' does not have a conflicting ofport (that is,
 * one with the same name or port number).
 *
 * Takes ownership of 'netdev': on failure it is closed, either directly or
 * via ofport_destroy__().  Returns 0 on success, a positive errno value on
 * failure. */
static int
ofport_install(struct ofproto *p,
               struct netdev *netdev, const struct ofputil_phy_port *pp)
{
    const char *netdev_name = netdev_get_name(netdev);
    struct ofport *ofport;
    int error;

    /* Create ofport. */
    ofport = p->ofproto_class->port_alloc();
    if (!ofport) {
        error = ENOMEM;
        goto error;
    }
    ofport->ofproto = p;
    ofport->netdev = netdev;
    ofport->change_seq = netdev_get_change_seq(netdev);
    ofport->pp = *pp;
    ofport->ofp_port = pp->port_no;
    ofport->created = time_msec();
    ofport->may_enable = false;

    /* Add port to 'p', indexed both by port number and by name. */
    hmap_insert(&p->ports, &ofport->hmap_node,
                hash_ofp_port(ofport->ofp_port));
    shash_add(&p->port_by_name, netdev_name, ofport);

    update_mtu(p, ofport);

    /* Let the ofproto_class initialize its private data. */
    error = p->ofproto_class->port_construct(ofport);
    if (error) {
        goto error;
    }
    /* Announce the new port to the controllers. */
    connmgr_send_port_status(p->connmgr, NULL, NULL, pp, OFPPR_ADD);
    return 0;

error:
    VLOG_WARN_RL(&rl, "%s: could not add port %s (%s)",
                 p->name, netdev_name, ovs_strerror(error));
    if (ofport) {
        /* ofport_destroy__() unlinks 'ofport' from the maps above and
         * closes 'netdev'. */
        ofport_destroy__(ofport);
    } else {
        netdev_close(netdev);
    }
    return error;
}
2430
2431/* Removes 'ofport' from 'p' and destroys it. */
2432static void
2433ofport_remove(struct ofport *ofport)
2434{
2435 struct ofproto *p = ofport->ofproto;
2436 bool is_mtu_overridden = ofport_is_mtu_overridden(p, ofport);
2437
2438 connmgr_send_port_status(ofport->ofproto->connmgr, NULL, NULL, &ofport->pp,
2439 OFPPR_DELETE);
2440 ofport_destroy(ofport, true);
2441 if (!is_mtu_overridden) {
2442 update_mtu_ofproto(p);
2443 }
2444}
2445
2446/* If 'ofproto' contains an ofport named 'name', removes it from 'ofproto' and
2447 * destroys it. */
2448static void
2449ofport_remove_with_name(struct ofproto *ofproto, const char *name)
2450{
2451 struct ofport *port = shash_find_data(&ofproto->port_by_name, name);
2452 if (port) {
2453 ofport_remove(port);
2454 }
2455}
2456
2457static enum ofputil_port_state
2458normalize_state(enum ofputil_port_config config,
2459 enum ofputil_port_state state,
2460 bool may_enable)
2461{
2462 return (config & OFPUTIL_PC_PORT_DOWN
2463 || state & OFPUTIL_PS_LINK_DOWN
2464 || !may_enable
2465 ? state & ~OFPUTIL_PS_LIVE
2466 : state | OFPUTIL_PS_LIVE);
2467}
2468
2469void
2470ofproto_port_set_enable(struct ofport *port, bool enable)
2471{
2472 if (enable != port->may_enable) {
2473 port->may_enable = enable;
2474 ofproto_port_set_state(port, normalize_state(port->pp.config,
2475 port->pp.state,
2476 port->may_enable));
2477 }
2478}
2479
2480/* Update OpenFlow 'state' in 'port' and notify controller. */
2481void
2482ofproto_port_set_state(struct ofport *port, enum ofputil_port_state state)
2483{
2484 state = normalize_state(port->pp.config, state, port->may_enable);
2485 if (port->pp.state != state) {
2486 struct ofputil_phy_port old_pp = port->pp;
2487 port->pp.state = state;
2488 connmgr_send_port_status(port->ofproto->connmgr, NULL, &old_pp,
2489 &port->pp, OFPPR_MODIFY);
2490 }
2491}
2492
2493void
2494ofproto_port_unregister(struct ofproto *ofproto, ofp_port_t ofp_port)
2495{
2496 struct ofport *port = ofproto_get_port(ofproto, ofp_port);
2497 if (port) {
2498 if (port->ofproto->ofproto_class->set_stp_port) {
2499 port->ofproto->ofproto_class->set_stp_port(port, NULL);
2500 }
2501 if (port->ofproto->ofproto_class->set_rstp_port) {
2502 port->ofproto->ofproto_class->set_rstp_port(port, NULL);
2503 }
2504 if (port->ofproto->ofproto_class->set_cfm) {
2505 port->ofproto->ofproto_class->set_cfm(port, NULL);
2506 }
2507 if (port->ofproto->ofproto_class->bundle_remove) {
2508 port->ofproto->ofproto_class->bundle_remove(port);
2509 }
2510 }
2511}
2512
2513static void
2514ofport_destroy__(struct ofport *port)
2515{
2516 struct ofproto *ofproto = port->ofproto;
2517 const char *name = netdev_get_name(port->netdev);
2518
2519 hmap_remove(&ofproto->ports, &port->hmap_node);
2520 shash_find_and_delete(&ofproto->port_by_name, name);
2521
2522 netdev_close(port->netdev);
2523 ofproto->ofproto_class->port_dealloc(port);
2524}
2525
2526static void
2527ofport_destroy(struct ofport *port, bool del)
2528{
2529 if (port) {
2530 dealloc_ofp_port(port->ofproto, port->ofp_port);
2531 port->ofproto->ofproto_class->port_destruct(port, del);
2532 ofport_destroy__(port);
2533 }
2534}
2535
2536struct ofport *
2537ofproto_get_port(const struct ofproto *ofproto, ofp_port_t ofp_port)
2538{
2539 struct ofport *port;
2540
2541 HMAP_FOR_EACH_IN_BUCKET (port, hmap_node, hash_ofp_port(ofp_port),
2542 &ofproto->ports) {
2543 if (port->ofp_port == ofp_port) {
2544 return port;
2545 }
2546 }
2547 return NULL;
2548}
2549
2550static long long int
2551ofport_get_usage(const struct ofproto *ofproto, ofp_port_t ofp_port)
2552{
2553 struct ofport_usage *usage;
2554
2555 HMAP_FOR_EACH_IN_BUCKET (usage, hmap_node, hash_ofp_port(ofp_port),
2556 &ofproto->ofport_usage) {
2557 if (usage->ofp_port == ofp_port) {
2558 return usage->last_used;
2559 }
2560 }
2561 return 0;
2562}
2563
2564static void
2565ofport_set_usage(struct ofproto *ofproto, ofp_port_t ofp_port,
2566 long long int last_used)
2567{
2568 struct ofport_usage *usage;
2569 HMAP_FOR_EACH_IN_BUCKET (usage, hmap_node, hash_ofp_port(ofp_port),
2570 &ofproto->ofport_usage) {
2571 if (usage->ofp_port == ofp_port) {
2572 usage->last_used = last_used;
2573 return;
2574 }
2575 }
2576 ovs_assert(last_used == LLONG_MAX);
2577
2578 usage = xmalloc(sizeof *usage);
2579 usage->ofp_port = ofp_port;
2580 usage->last_used = last_used;
2581 hmap_insert(&ofproto->ofport_usage, &usage->hmap_node,
2582 hash_ofp_port(ofp_port));
2583}
2584
2585static void
2586ofport_remove_usage(struct ofproto *ofproto, ofp_port_t ofp_port)
2587{
2588 struct ofport_usage *usage;
2589 HMAP_FOR_EACH_IN_BUCKET (usage, hmap_node, hash_ofp_port(ofp_port),
2590 &ofproto->ofport_usage) {
2591 if (usage->ofp_port == ofp_port) {
2592 hmap_remove(&ofproto->ofport_usage, &usage->hmap_node);
2593 free(usage);
2594 break;
2595 }
2596 }
2597}
2598
2599int
2600ofproto_port_get_stats(const struct ofport *port, struct netdev_stats *stats)
2601{
2602 struct ofproto *ofproto = port->ofproto;
2603 int error;
2604
2605 if (ofproto->ofproto_class->port_get_stats) {
2606 error = ofproto->ofproto_class->port_get_stats(port, stats);
2607 } else {
2608 error = EOPNOTSUPP;
2609 }
2610
2611 return error;
2612}
2613
/* Reconciles the ofproto-layer view of the port named 'name' in 'ofproto'
 * with the datapath:
 *
 *   - If the datapath has a port named 'name', opens its netdev and either
 *     updates the matching existing ofport in place, or removes any stale
 *     ofport(s) and installs a fresh one.
 *
 *   - Otherwise, removes any ofport named 'name'.
 *
 * Returns 0 if successful, otherwise a positive errno value (only a failed
 * ofport_install() can produce a nonzero value). */
static int
update_port(struct ofproto *ofproto, const char *name)
{
    struct ofproto_port ofproto_port;
    struct ofputil_phy_port pp;
    struct netdev *netdev;
    struct ofport *port;
    int error = 0;

    COVERAGE_INC(ofproto_update_port);

    /* Fetch 'name''s location and properties from the datapath. */
    netdev = (!ofproto_port_query_by_name(ofproto, name, &ofproto_port)
              ? ofport_open(ofproto, &ofproto_port, &pp)
              : NULL);

    if (netdev) {
        port = ofproto_get_port(ofproto, ofproto_port.ofp_port);
        if (port && !strcmp(netdev_get_name(port->netdev), name)) {
            /* Same name, same port number: update the ofport in place. */
            struct netdev *old_netdev = port->netdev;

            /* ofport_open() only sets OFPUTIL_PC_PORT_DOWN and
             * OFPUTIL_PS_LINK_DOWN.  Keep the other config and state bits (but
             * a port that is down cannot be live). */
            pp.config |= port->pp.config & ~OFPUTIL_PC_PORT_DOWN;
            pp.state |= port->pp.state & ~OFPUTIL_PS_LINK_DOWN;
            pp.state = normalize_state(pp.config, pp.state, port->may_enable);

            /* 'name' hasn't changed location.  Any properties changed? */
            if (!ofport_equal(&port->pp, &pp)) {
                connmgr_send_port_status(port->ofproto->connmgr, NULL,
                                         &port->pp, &pp, OFPPR_MODIFY);
                port->pp = pp;
            }

            update_mtu(ofproto, port);

            /* Install the newly opened netdev in case it has changed.
             * Don't close the old netdev yet in case port_modified has to
             * remove a retained reference to it. */
            port->netdev = netdev;
            port->change_seq = netdev_get_change_seq(netdev);

            if (port->ofproto->ofproto_class->port_modified) {
                port->ofproto->ofproto_class->port_modified(port);
            }

            netdev_close(old_netdev);
        } else {
            /* If 'port' is nonnull then its name differs from 'name' and thus
             * we should delete it.  If we think there's a port named 'name'
             * then its port number must be wrong now so delete it too. */
            if (port) {
                ofport_remove(port);
            }
            ofport_remove_with_name(ofproto, name);
            error = ofport_install(ofproto, netdev, &pp);
        }
    } else {
        /* Any port named 'name' is gone now. */
        ofport_remove_with_name(ofproto, name);
    }
    ofproto_port_destroy(&ofproto_port);

    return error;
}
2680
/* Populates 'p''s port set from the ports already present in its datapath,
 * honoring any OpenFlow port-number hints registered in 'init_ofp_ports',
 * then discards the hints that belonged to this bridge.  Always returns 0. */
static int
init_ports(struct ofproto *p)
{
    struct ofproto_port_dump dump;
    struct ofproto_port ofproto_port;
    struct shash_node *node, *next;

    OFPROTO_PORT_FOR_EACH (&ofproto_port, &dump, p) {
        const char *name = ofproto_port.name;

        if (shash_find(&p->port_by_name, name)) {
            VLOG_WARN_RL(&rl, "%s: ignoring duplicate device %s in datapath",
                         p->name, name);
        } else {
            struct ofputil_phy_port pp;
            struct netdev *netdev;

            /* Check if an OpenFlow port number had been requested. */
            node = shash_find(&init_ofp_ports, name);
            if (node) {
                const struct iface_hint *iface_hint = node->data;
                simap_put(&p->ofp_requests, name,
                          ofp_to_u16(iface_hint->ofp_port));
            }

            netdev = ofport_open(p, &ofproto_port, &pp);
            if (netdev) {
                ofport_install(p, netdev, &pp);
                /* Advance the allocator past numbers already in use so
                 * that automatic allocation does not collide with them. */
                if (ofp_to_u16(ofproto_port.ofp_port) < p->max_ports) {
                    p->alloc_port_no = MAX(p->alloc_port_no,
                                           ofp_to_u16(ofproto_port.ofp_port));
                }
            }
        }
    }

    /* The hints for this bridge have been consumed; free them. */
    SHASH_FOR_EACH_SAFE(node, next, &init_ofp_ports) {
        struct iface_hint *iface_hint = node->data;

        if (!strcmp(iface_hint->br_name, p->name)) {
            free(iface_hint->br_name);
            free(iface_hint->br_type);
            free(iface_hint);
            shash_delete(&init_ofp_ports, node);
        }
    }

    return 0;
}
2730
2731static bool
2732ofport_is_internal_or_patch(const struct ofproto *p, const struct ofport *port)
2733{
2734 const char *netdev_type = netdev_get_type(port->netdev);
2735 return !strcmp(netdev_type, ofproto_port_open_type(p, "internal")) ||
2736 !strcmp(netdev_type, ofproto_port_open_type(p, "patch"));
2737}
2738
2739/* If 'port' is internal or patch and if the user didn't explicitly specify an
2740 * mtu through the database, we have to override it. */
2741static bool
2742ofport_is_mtu_overridden(const struct ofproto *p, const struct ofport *port)
2743{
2744 return ofport_is_internal_or_patch(p, port)
2745 && !netdev_mtu_is_user_config(port->netdev);
2746}
2747
2748/* Find the minimum MTU of all non-overridden devices attached to 'p'.
2749 * Returns ETH_PAYLOAD_MAX or the minimum of the ports. */
2750static int
2751find_min_mtu(struct ofproto *p)
2752{
2753 struct ofport *ofport;
2754 int mtu = 0;
2755
2756 HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
2757 struct netdev *netdev = ofport->netdev;
2758 int dev_mtu;
2759
2760 /* Skip any overridden port, since that's what we're trying to set. */
2761 if (ofport_is_mtu_overridden(p, ofport)) {
2762 continue;
2763 }
2764
2765 if (netdev_get_mtu(netdev, &dev_mtu)) {
2766 continue;
2767 }
2768 if (!mtu || dev_mtu < mtu) {
2769 mtu = dev_mtu;
2770 }
2771 }
2772
2773 return mtu ? mtu: ETH_PAYLOAD_MAX;
2774}
2775
2776/* Update MTU of all overridden devices on 'p' to the minimum of the
2777 * non-overridden ports in event of 'port' added or changed. */
2778static void
2779update_mtu(struct ofproto *p, struct ofport *port)
2780{
2781 struct netdev *netdev = port->netdev;
2782 int dev_mtu;
2783
2784 if (netdev_get_mtu(netdev, &dev_mtu)) {
2785 port->mtu = 0;
2786 return;
2787 }
2788 if (ofport_is_mtu_overridden(p, port)) {
2789 if (dev_mtu > p->min_mtu) {
2790 if (!netdev_set_mtu(port->netdev, p->min_mtu)) {
2791 dev_mtu = p->min_mtu;
2792 }
2793 }
2794 port->mtu = dev_mtu;
2795 return;
2796 }
2797
2798 port->mtu = dev_mtu;
2799 /* For non-overridden port find new min mtu. */
2800
2801 update_mtu_ofproto(p);
2802}
2803
2804static void
2805update_mtu_ofproto(struct ofproto *p)
2806{
2807 struct ofport *ofport;
2808 int old_min = p->min_mtu;
2809
2810 p->min_mtu = find_min_mtu(p);
2811 if (p->min_mtu == old_min) {
2812 return;
2813 }
2814
2815 HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
2816 struct netdev *netdev = ofport->netdev;
2817
2818 if (ofport_is_mtu_overridden(p, ofport)) {
2819 if (!netdev_set_mtu(netdev, p->min_mtu)) {
2820 ofport->mtu = p->min_mtu;
2821 }
2822 }
2823 }
2824}
2825\f
2826static void
2827ofproto_rule_destroy__(struct rule *rule)
2828 OVS_NO_THREAD_SAFETY_ANALYSIS
2829{
2830 cls_rule_destroy(CONST_CAST(struct cls_rule *, &rule->cr));
2831 rule_actions_destroy(rule_get_actions(rule));
2832 ovs_mutex_destroy(&rule->mutex);
2833 rule->ofproto->ofproto_class->rule_dealloc(rule);
2834}
2835
2836static void
2837rule_destroy_cb(struct rule *rule)
2838 OVS_NO_THREAD_SAFETY_ANALYSIS
2839{
2840 /* Send rule removed if needed. */
2841 if (rule->flags & OFPUTIL_FF_SEND_FLOW_REM
2842 && rule->removed_reason != OVS_OFPRR_NONE
2843 && !rule_is_hidden(rule)) {
2844 ofproto_rule_send_removed(rule);
2845 }
2846 rule->ofproto->ofproto_class->rule_destruct(rule);
2847 mf_vl_mff_unref(&rule->ofproto->vl_mff_map, rule->match_tlv_bitmap);
2848 mf_vl_mff_unref(&rule->ofproto->vl_mff_map, rule->ofpacts_tlv_bitmap);
2849 ofproto_rule_destroy__(rule);
2850}
2851
2852void
2853ofproto_rule_ref(struct rule *rule)
2854{
2855 if (rule) {
2856 ovs_refcount_ref(&rule->ref_count);
2857 }
2858}
2859
2860bool
2861ofproto_rule_try_ref(struct rule *rule)
2862{
2863 if (rule) {
2864 return ovs_refcount_try_ref_rcu(&rule->ref_count);
2865 }
2866 return false;
2867}
2868
2869/* Decrements 'rule''s ref_count and schedules 'rule' to be destroyed if the
2870 * ref_count reaches 0.
2871 *
2872 * Use of RCU allows short term use (between RCU quiescent periods) without
2873 * keeping a reference. A reference must be taken if the rule needs to
2874 * stay around accross the RCU quiescent periods. */
2875void
2876ofproto_rule_unref(struct rule *rule)
2877{
2878 if (rule && ovs_refcount_unref_relaxed(&rule->ref_count) == 1) {
2879 ovs_assert(rule->state != RULE_INSERTED);
2880 ovsrcu_postpone(rule_destroy_cb, rule);
2881 }
2882}
2883
2884static void
2885remove_rule_rcu__(struct rule *rule)
2886 OVS_REQUIRES(ofproto_mutex)
2887{
2888 struct ofproto *ofproto = rule->ofproto;
2889 struct oftable *table = &ofproto->tables[rule->table_id];
2890
2891 ovs_assert(!cls_rule_visible_in_version(&rule->cr, OVS_VERSION_MAX));
2892 classifier_remove_assert(&table->cls, &rule->cr);
2893 if (ofproto->ofproto_class->rule_delete) {
2894 ofproto->ofproto_class->rule_delete(rule);
2895 }
2896 ofproto_rule_unref(rule);
2897}
2898
/* RCU callback: acquires 'ofproto_mutex' and removes 'rule' via
 * remove_rule_rcu__(). */
static void
remove_rule_rcu(struct rule *rule)
    OVS_EXCLUDED(ofproto_mutex)
{
    ovs_mutex_lock(&ofproto_mutex);
    remove_rule_rcu__(rule);
    ovs_mutex_unlock(&ofproto_mutex);
}
2907
/* Removes and deletes rules from a NULL-terminated array of rule pointers,
 * then frees the array itself.  All rules must belong to the same ofproto.
 *
 * Classifier updates are batched per table (defer/publish) so that removing
 * a large number of flows is fast. */
static void
remove_rules_rcu(struct rule **rules)
    OVS_EXCLUDED(ofproto_mutex)
{
    struct rule **orig_rules = rules;

    if (*rules) {
        struct ofproto *ofproto = rules[0]->ofproto;
        /* One bit per OpenFlow table, marking tables whose classifier has
         * been put into deferred mode. */
        unsigned long tables[BITMAP_N_LONGS(256)];
        struct rule *rule;
        size_t table_id;

        memset(tables, 0, sizeof tables);

        ovs_mutex_lock(&ofproto_mutex);
        while ((rule = *rules++)) {
            /* Defer once for each new table.  This defers the subtable cleanup
             * until later, so that when removing large number of flows the
             * operation is faster. */
            if (!bitmap_is_set(tables, rule->table_id)) {
                struct classifier *cls = &ofproto->tables[rule->table_id].cls;

                bitmap_set1(tables, rule->table_id);
                classifier_defer(cls);
            }
            remove_rule_rcu__(rule);
        }

        /* Publish the deferred classifier changes, table by table. */
        BITMAP_FOR_EACH_1(table_id, 256, tables) {
            struct classifier *cls = &ofproto->tables[table_id].cls;

            classifier_publish(cls);
        }
        ovs_mutex_unlock(&ofproto_mutex);
    }

    free(orig_rules);
}
2947
2948void
2949ofproto_group_ref(struct ofgroup *group)
2950{
2951 if (group) {
2952 ovs_refcount_ref(&group->ref_count);
2953 }
2954}
2955
2956bool
2957ofproto_group_try_ref(struct ofgroup *group)
2958{
2959 if (group) {
2960 return ovs_refcount_try_ref_rcu(&group->ref_count);
2961 }
2962 return false;
2963}
2964
/* RCU callback that finishes destroying 'group' once no thread can still be
 * using it: provider teardown first, then the group's properties and bucket
 * list, then the group memory itself. */
static void
group_destroy_cb(struct ofgroup *group)
{
    group->ofproto->ofproto_class->group_destruct(group);
    ofputil_group_properties_destroy(CONST_CAST(struct ofputil_group_props *,
                                                &group->props));
    ofputil_bucket_list_destroy(CONST_CAST(struct ovs_list *,
                                           &group->buckets));
    group->ofproto->ofproto_class->group_dealloc(group);
}
2975
/* Releases a reference to 'group' (a no-op if 'group' is NULL).  When the
 * last reference is dropped, actual destruction is postponed to after the
 * next RCU quiescent period via group_destroy_cb(), so concurrent RCU
 * readers remain safe.  By then no rules may still reference the group. */
void
ofproto_group_unref(struct ofgroup *group)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    if (group && ovs_refcount_unref_relaxed(&group->ref_count) == 1) {
        ovs_assert(rule_collection_n(&group->rules) == 0);
        ovsrcu_postpone(group_destroy_cb, group);
    }
}
2985
/* Removes 'group' from its ofproto's group map and drops the map's
 * reference to it.  The group must no longer be visible in any version
 * (i.e. it has already been marked removed from all lookup versions). */
static void
remove_group_rcu__(struct ofgroup *group)
    OVS_REQUIRES(ofproto_mutex)
{
    struct ofproto *ofproto = group->ofproto;

    ovs_assert(!versions_visible_in_version(&group->versions, OVS_VERSION_MAX));

    cmap_remove(&ofproto->groups, &group->cmap_node,
                hash_int(group->group_id, 0));
    ofproto_group_unref(group);
}
2998
/* Mutex-acquiring wrapper around remove_group_rcu__(), suitable for use as
 * an RCU postponed callback for removing a single group. */
static void
remove_group_rcu(struct ofgroup *group)
    OVS_EXCLUDED(ofproto_mutex)
{
    ovs_mutex_lock(&ofproto_mutex);
    remove_group_rcu__(group);
    ovs_mutex_unlock(&ofproto_mutex);
}
3007
3008/* Removes and deletes groups from a NULL-terminated array of group
3009 * pointers. */
3010static void
3011remove_groups_rcu(struct ofgroup **groups)
3012 OVS_EXCLUDED(ofproto_mutex)
3013{
3014 ovs_mutex_lock(&ofproto_mutex);
3015 for (struct ofgroup **g = groups; *g; g++) {
3016 remove_group_rcu__(*g);
3017 }
3018 ovs_mutex_unlock(&ofproto_mutex);
3019 free(groups);
3020}
3021
/* Forward declarations; definitions appear later in this file.  Used by
 * ofproto_check_ofpacts() below. */
static bool ofproto_fix_meter_action(const struct ofproto *,
                                     struct ofpact_meter *);

static bool ofproto_fix_controller_action(const struct ofproto *,
                                          struct ofpact_controller *);
3027
/* Creates and returns a new 'struct rule_actions', whose actions are a copy
 * of the 'ofpacts_len' bytes of 'ofpacts'.  Also precomputes the cached
 * 'has_meter', 'has_groups', and 'has_learn_with_delete' flags so later
 * lookups over the action list can be skipped.  The caller owns the result
 * and should release it with rule_actions_destroy(). */
const struct rule_actions *
rule_actions_create(const struct ofpact *ofpacts, size_t ofpacts_len)
{
    struct rule_actions *actions;

    /* Actions are stored inline after the struct (flexible trailing data). */
    actions = xmalloc(sizeof *actions + ofpacts_len);
    actions->ofpacts_len = ofpacts_len;
    memcpy(actions->ofpacts, ofpacts, ofpacts_len);
    actions->has_meter = ofpacts_get_meter(ofpacts, ofpacts_len) != 0;
    actions->has_groups =
        (ofpact_find_type_flattened(ofpacts, OFPACT_GROUP,
                                    ofpact_end(ofpacts, ofpacts_len))
         != NULL);
    actions->has_learn_with_delete = (next_learn_with_delete(actions, NULL)
                                      != NULL);

    return actions;
}
3048
3049/* Free the actions after the RCU quiescent period is reached. */
3050void
3051rule_actions_destroy(const struct rule_actions *actions)
3052{
3053 if (actions) {
3054 ovsrcu_postpone(free, CONST_CAST(struct rule_actions *, actions));
3055 }
3056}
3057
3058/* Returns true if 'rule' has an OpenFlow OFPAT_OUTPUT or OFPAT_ENQUEUE action
3059 * that outputs to 'port' (output to OFPP_FLOOD and OFPP_ALL doesn't count). */
3060bool
3061ofproto_rule_has_out_port(const struct rule *rule, ofp_port_t port)
3062 OVS_REQUIRES(ofproto_mutex)
3063{
3064 if (port == OFPP_ANY) {
3065 return true;
3066 } else {
3067 const struct rule_actions *actions = rule_get_actions(rule);
3068 return ofpacts_output_to_port(actions->ofpacts,
3069 actions->ofpacts_len, port);
3070 }
3071}
3072
3073/* Returns true if 'rule' has group and equals group_id. */
3074static bool
3075ofproto_rule_has_out_group(const struct rule *rule, uint32_t group_id)
3076 OVS_REQUIRES(ofproto_mutex)
3077{
3078 if (group_id == OFPG_ANY) {
3079 return true;
3080 } else {
3081 const struct rule_actions *actions = rule_get_actions(rule);
3082 return ofpacts_output_to_group(actions->ofpacts,
3083 actions->ofpacts_len, group_id);
3084 }
3085}
3086
3087static bool
3088rule_is_readonly(const struct rule *rule)
3089{
3090 const struct oftable *table = &rule->ofproto->tables[rule->table_id];
3091 return (table->flags & OFTABLE_READONLY) != 0;
3092}
3093\f
/* Hashes the (cookie, table_id) key used to index 'learned_cookies'.  The
 * 64-bit cookie is split into two 32-bit words so all its bits contribute. */
static uint32_t
hash_learned_cookie(ovs_be64 cookie_, uint8_t table_id)
{
    uint64_t cookie = (OVS_FORCE uint64_t) cookie_;
    return hash_3words(cookie, cookie >> 32, table_id);
}
3100
/* Adjusts the reference count of the learned-cookie entry matching
 * 'learn' (keyed by cookie and table_id) by 'delta'.  When the count drops
 * to zero the entry is unlinked from 'ofproto->learned_cookies' and moved
 * onto 'dead_cookies' for the caller to flush (see learned_cookies_flush()).
 * If no entry exists, 'delta' must be positive and a new entry is created. */
static void
learned_cookies_update_one__(struct ofproto *ofproto,
                             const struct ofpact_learn *learn,
                             int delta, struct ovs_list *dead_cookies)
    OVS_REQUIRES(ofproto_mutex)
{
    uint32_t hash = hash_learned_cookie(learn->cookie, learn->table_id);
    struct learned_cookie *c;

    HMAP_FOR_EACH_WITH_HASH (c, hmap_node, hash, &ofproto->learned_cookies) {
        if (c->cookie == learn->cookie && c->table_id == learn->table_id) {
            c->n += delta;
            ovs_assert(c->n >= 0);

            if (!c->n) {
                /* Last reference gone: hand the entry to the caller so the
                 * flows it learned can be deleted. */
                hmap_remove(&ofproto->learned_cookies, &c->hmap_node);
                ovs_list_push_back(dead_cookies, &c->list_node);
            }

            return;
        }
    }

    /* Not found: only a positive delta may create a new entry. */
    ovs_assert(delta > 0);
    c = xmalloc(sizeof *c);
    hmap_insert(&ofproto->learned_cookies, &c->hmap_node, hash);
    c->cookie = learn->cookie;
    c->table_id = learn->table_id;
    c->n = delta;
}
3131
/* Iterates over 'actions', returning the first "learn" action after 'start'
 * that has the NX_LEARN_F_DELETE_LEARNED flag set, or NULL if there is none.
 * Pass 'start' == NULL to begin iteration from the first action. */
static const struct ofpact_learn *
next_learn_with_delete(const struct rule_actions *actions,
                       const struct ofpact_learn *start)
{
    const struct ofpact *pos;

    for (pos = start ? ofpact_next(&start->ofpact) : actions->ofpacts;
         pos < ofpact_end(actions->ofpacts, actions->ofpacts_len);
         pos = ofpact_next(pos)) {
        if (pos->type == OFPACT_LEARN) {
            const struct ofpact_learn *learn = ofpact_get_LEARN(pos);
            if (learn->flags & NX_LEARN_F_DELETE_LEARNED) {
                return learn;
            }
        }
    }

    return NULL;
}
3151
3152static void
3153learned_cookies_update__(struct ofproto *ofproto,
3154 const struct rule_actions *actions,
3155 int delta, struct ovs_list *dead_cookies)
3156 OVS_REQUIRES(ofproto_mutex)
3157{
3158 if (actions->has_learn_with_delete) {
3159 const struct ofpact_learn *learn;
3160
3161 for (learn = next_learn_with_delete(actions, NULL); learn;
3162 learn = next_learn_with_delete(actions, learn)) {
3163 learned_cookies_update_one__(ofproto, learn, delta, dead_cookies);
3164 }
3165 }
3166}
3167
/* Increments learned-cookie reference counts for 'actions'.  An increment
 * can never kill an entry, so no dead-cookies list is needed. */
static void
learned_cookies_inc(struct ofproto *ofproto,
                    const struct rule_actions *actions)
    OVS_REQUIRES(ofproto_mutex)
{
    learned_cookies_update__(ofproto, actions, +1, NULL);
}
3175
/* Decrements learned-cookie reference counts for 'actions'.  Entries that
 * drop to zero are appended to 'dead_cookies'; the caller should pass the
 * list to learned_cookies_flush() to delete the learned flows. */
static void
learned_cookies_dec(struct ofproto *ofproto,
                    const struct rule_actions *actions,
                    struct ovs_list *dead_cookies)
    OVS_REQUIRES(ofproto_mutex)
{
    learned_cookies_update__(ofproto, actions, -1, dead_cookies);
}
3184
/* For each dead learned-cookie entry in 'dead_cookies', deletes all flows in
 * the entry's table carrying that exact cookie (catch-all match, loose,
 * read-only rules excluded), then frees the entry.  Empties the list. */
static void
learned_cookies_flush(struct ofproto *ofproto, struct ovs_list *dead_cookies)
    OVS_REQUIRES(ofproto_mutex)
{
    struct learned_cookie *c;

    struct minimatch match;

    minimatch_init_catchall(&match);
    LIST_FOR_EACH_POP (c, list_node, dead_cookies) {
        struct rule_criteria criteria;
        struct rule_collection rules;
        /* Full-cookie match: mask OVS_BE64_MAX selects the exact cookie. */
        rule_criteria_init(&criteria, c->table_id, &match, 0, OVS_VERSION_MAX,
                           c->cookie, OVS_BE64_MAX, OFPP_ANY, OFPG_ANY);
        rule_criteria_require_rw(&criteria, false);
        collect_rules_loose(ofproto, &criteria, &rules);
        rule_criteria_destroy(&criteria);
        delete_flows__(&rules, OFPRR_DELETE, NULL);

        free(c);
    }
    minimatch_destroy(&match);
}
3208\f
/* Handles an OpenFlow OFPT_ECHO_REQUEST by replying with an echo reply that
 * mirrors the request's payload.  Always succeeds. */
static enum ofperr
handle_echo_request(struct ofconn *ofconn, const struct ofp_header *oh)
{
    ofconn_send_reply(ofconn, ofputil_encode_echo_reply(oh));
    return 0;
}
3215
/* Fills in generic per-table features for 'ofproto' in '*featuresp' and, if
 * 'statsp' is nonnull, per-table statistics in '*statsp'.  Both arrays have
 * 'ofproto->n_tables' elements and are xcalloc'ed; the caller must free
 * them.  Defaults are set first, then the provider's query_tables() hook is
 * given a chance to override, and finally per-table name/max_flows
 * configuration is applied on top. */
static void
query_tables(struct ofproto *ofproto,
             struct ofputil_table_features **featuresp,
             struct ofputil_table_stats **statsp)
{
    struct mf_bitmap rw_fields = oxm_writable_fields();
    struct mf_bitmap match = oxm_matchable_fields();
    struct mf_bitmap mask = oxm_maskable_fields();

    struct ofputil_table_features *features;
    struct ofputil_table_stats *stats;
    int i;

    features = *featuresp = xcalloc(ofproto->n_tables, sizeof *features);
    for (i = 0; i < ofproto->n_tables; i++) {
        struct ofputil_table_features *f = &features[i];

        f->table_id = i;
        f->name[0] = '\0';
        f->metadata_match = OVS_BE64_MAX;
        f->metadata_write = OVS_BE64_MAX;
        atomic_read_relaxed(&ofproto->tables[i].miss_config, &f->miss_config);
        f->max_entries = 1000000;

        /* goto-table may only target a later, non-hidden table; record which
         * ones are reachable and whether any exist at all. */
        bool more_tables = false;
        for (int j = i + 1; j < ofproto->n_tables; j++) {
            if (!(ofproto->tables[j].flags & OFTABLE_HIDDEN)) {
                bitmap_set1(f->nonmiss.next, j);
                more_tables = true;
            }
        }
        f->nonmiss.instructions = (1u << N_OVS_INSTRUCTIONS) - 1;
        if (!more_tables) {
            /* No later table to go to, so goto-table is not offered. */
            f->nonmiss.instructions &= ~(1u << OVSINST_OFPIT11_GOTO_TABLE);
        }
        f->nonmiss.write.ofpacts = (UINT64_C(1) << N_OFPACTS) - 1;
        f->nonmiss.write.set_fields = rw_fields;
        f->nonmiss.apply = f->nonmiss.write;
        f->miss = f->nonmiss;

        f->match = match;
        f->mask = mask;
        f->wildcard = match;
    }

    if (statsp) {
        stats = *statsp = xcalloc(ofproto->n_tables, sizeof *stats);
        for (i = 0; i < ofproto->n_tables; i++) {
            struct ofputil_table_stats *s = &stats[i];

            s->table_id = i;
            s->active_count = ofproto->tables[i].n_flows;
            if (i == 0) {
                /* Hidden internal rules live in table 0; don't report them
                 * to controllers. */
                s->active_count -= connmgr_count_hidden_rules(
                    ofproto->connmgr);
            }
        }
    } else {
        stats = NULL;
    }

    /* Let the provider adjust the defaults. */
    ofproto->ofproto_class->query_tables(ofproto, features, stats);

    /* Apply per-table configuration last, so it wins over provider output. */
    for (i = 0; i < ofproto->n_tables; i++) {
        const struct oftable *table = &ofproto->tables[i];
        struct ofputil_table_features *f = &features[i];

        if (table->name) {
            ovs_strzcpy(f->name, table->name, sizeof f->name);
        }

        if (table->max_flows < f->max_entries) {
            f->max_entries = table->max_flows;
        }
    }
}
3292
/* Computes switch-wide feature summaries from per-table features: sets
 * '*arp_match_ip' to whether any table can match on ARP IP addresses and
 * '*ofpacts' to the bitmap of all action types supported by any table. */
static void
query_switch_features(struct ofproto *ofproto,
                      bool *arp_match_ip, uint64_t *ofpacts)
{
    struct ofputil_table_features *features, *f;

    *arp_match_ip = false;
    *ofpacts = 0;

    query_tables(ofproto, &features, NULL);
    for (f = features; f < &features[ofproto->n_tables]; f++) {
        *ofpacts |= f->nonmiss.apply.ofpacts | f->miss.apply.ofpacts;
        if (bitmap_is_set(f->match.bm, MFF_ARP_SPA) ||
            bitmap_is_set(f->match.bm, MFF_ARP_TPA)) {
            *arp_match_ip = true;
        }
    }
    free(features);

    /* Sanity check: every switch must at least support "output". */
    ovs_assert(*ofpacts & (UINT64_C(1) << OFPACT_OUTPUT));
}
3315
/* Handles an OpenFlow OFPT_FEATURES_REQUEST: replies with the datapath ID,
 * visible table count, capability flags, and (protocol permitting) the list
 * of physical ports. */
static enum ofperr
handle_features_request(struct ofconn *ofconn, const struct ofp_header *oh)
{
    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
    struct ofputil_switch_features features;
    struct ofport *port;
    bool arp_match_ip;
    struct ofpbuf *b;

    query_switch_features(ofproto, &arp_match_ip, &features.ofpacts);

    features.datapath_id = ofproto->datapath_id;
    features.n_buffers = 0;     /* Packet buffering is not supported. */
    features.n_tables = ofproto_get_n_visible_tables(ofproto);
    features.capabilities = (OFPUTIL_C_FLOW_STATS | OFPUTIL_C_TABLE_STATS |
                             OFPUTIL_C_PORT_STATS | OFPUTIL_C_QUEUE_STATS |
                             OFPUTIL_C_GROUP_STATS | OFPUTIL_C_BUNDLES);
    if (arp_match_ip) {
        features.capabilities |= OFPUTIL_C_ARP_MATCH_IP;
    }
    /* FIXME: Fill in proper features.auxiliary_id for auxiliary connections */
    features.auxiliary_id = 0;
    b = ofputil_encode_switch_features(&features, ofconn_get_protocol(ofconn),
                                       oh->xid);
    HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) {
        ofputil_put_switch_features_port(&port->pp, b);
    }

    ofconn_send_reply(ofconn, b);
    return 0;
}
3347
3348static enum ofperr
3349handle_get_config_request(struct ofconn *ofconn, const struct ofp_header *oh)
3350{
3351 struct ofputil_switch_config config;
3352 config.frag = ofconn_get_ofproto(ofconn)->frag_handling;
3353 config.invalid_ttl_to_controller
3354 = ofconn_get_invalid_ttl_to_controller(ofconn);
3355 config.miss_send_len = ofconn_get_miss_send_len(ofconn);
3356
3357 ofconn_send_reply(ofconn, ofputil_encode_get_config_reply(oh, &config));
3358
3359 return 0;
3360}
3361
/* Handles an OpenFlow OFPT_SET_CONFIG message.  Slave controllers may adjust
 * only their own per-connection settings (miss_send_len, invalid TTL);
 * switch-wide fragment handling is updated only for non-slave connections. */
static enum ofperr
handle_set_config(struct ofconn *ofconn, const struct ofp_header *oh)
{
    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
    struct ofputil_switch_config config;
    enum ofperr error;

    error = ofputil_decode_set_config(oh, &config);
    if (error) {
        return error;
    }

    /* Only a non-slave connection may change switch-wide fragment
     * handling. */
    if (ofconn_get_type(ofconn) != OFCONN_PRIMARY
        || ofconn_get_role(ofconn) != OFPCR12_ROLE_SLAVE) {
        enum ofputil_frag_handling cur = ofproto->frag_handling;
        enum ofputil_frag_handling next = config.frag;

        if (cur != next) {
            if (ofproto->ofproto_class->set_frag_handling(ofproto, next)) {
                ofproto->frag_handling = next;
            } else {
                /* Provider rejected the mode; keep the old one and warn. */
                VLOG_WARN_RL(&rl, "%s: unsupported fragment handling mode %s",
                             ofproto->name,
                             ofputil_frag_handling_to_string(next));
            }
        }
    }

    /* Negative means "not included in the message". */
    if (config.invalid_ttl_to_controller >= 0) {
        ofconn_set_invalid_ttl_to_controller(ofconn,
                                             config.invalid_ttl_to_controller);
    }

    ofconn_set_miss_send_len(ofconn, config.miss_send_len);

    return 0;
}
3399
3400/* Checks whether 'ofconn' is a slave controller. If so, returns an OpenFlow
3401 * error message code for the caller to propagate upward. Otherwise, returns
3402 * 0.
3403 *
3404 * The log message mentions 'msg_type'. */
3405static enum ofperr
3406reject_slave_controller(struct ofconn *ofconn)
3407{
3408 if (ofconn_get_type(ofconn) == OFCONN_PRIMARY
3409 && ofconn_get_role(ofconn) == OFPCR12_ROLE_SLAVE) {
3410 return OFPERR_OFPBRC_IS_SLAVE;
3411 } else {
3412 return 0;
3413 }
3414}
3415
/* Checks that the 'ofpacts_len' bytes of action in 'ofpacts' are appropriate
 * for 'ofproto':
 *
 *    - If they use a meter, then 'ofproto' has that meter configured.
 *      Updates the meter action with ofproto's datapath's provider_meter_id.
 *
 *    - If they use any groups, then 'ofproto' has that group configured.
 *
 * Returns 0 if successful, otherwise an OpenFlow error.  Caller must hold
 * 'ofproto_mutex' for the result to be valid also after this function
 * returns. */
enum ofperr
ofproto_check_ofpacts(struct ofproto *ofproto,
                      const struct ofpact ofpacts[], size_t ofpacts_len)
    OVS_REQUIRES(ofproto_mutex)
{
    const struct ofpact *a;

    OFPACT_FOR_EACH_FLATTENED (a, ofpacts, ofpacts_len) {
        if (a->type == OFPACT_METER &&
            !ofproto_fix_meter_action(ofproto, ofpact_get_METER(a))) {
            return OFPERR_OFPMMFC_INVALID_METER;
        }

        /* An unknown meter on a controller action is only logged, not
         * rejected: the action is still valid without metering. */
        if (a->type == OFPACT_CONTROLLER) {
            struct ofpact_controller *ca = ofpact_get_CONTROLLER(a);

            if (!ofproto_fix_controller_action(ofproto, ca)) {
                static struct vlog_rate_limit rl2 = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_INFO_RL(&rl2, "%s: controller action specified an "
                             "unknown meter id: %d",
                             ofproto->name, ca->meter_id);
            }
        }

        if (a->type == OFPACT_GROUP
            && !ofproto_group_exists(ofproto, ofpact_get_GROUP(a)->group_id)) {
            return OFPERR_OFPBAC_BAD_OUT_GROUP;
        }
    }

    return 0;
}
3459
/* Releases all resources held by 'opo' and clears its pointers so repeated
 * uninit is safe.  'opo->aux' must already have been released (by finish or
 * revert) before this is called. */
void
ofproto_packet_out_uninit(struct ofproto_packet_out *opo)
{
    dp_packet_delete(opo->packet);
    opo->packet = NULL;
    free(opo->flow);
    opo->flow = NULL;
    free(opo->ofpacts);
    opo->ofpacts = NULL;
    opo->ofpacts_len = 0;
    ovs_assert(!opo->aux);
}
3472
/* Initializes 'opo' from the decoded packet-out request 'po': clones the
 * packet, extracts and stores the flow, and checks the actions for
 * consistency.  Takes ownership of po->ofpacts, which must have been
 * malloc'ed; on success 'opo' owns them, on error the caller must free
 * them.  Returns 0 on success or an OpenFlow error. */
static enum ofperr
ofproto_packet_out_init(struct ofproto *ofproto,
                        struct ofconn *ofconn,
                        struct ofproto_packet_out *opo,
                        const struct ofputil_packet_out *po)
{
    enum ofperr error;
    struct match match;
    struct {
        struct miniflow mf;
        uint64_t buf[FLOW_U64S];
    } m;

    uint16_t in_port = ofp_to_u16(po->flow_metadata.flow.in_port.ofp_port);
    if (in_port >= ofproto->max_ports && in_port < ofp_to_u16(OFPP_MAX)) {
        return OFPERR_OFPBRC_BAD_PORT;
    }

    /* Get payload.  Buffered packet-outs are not supported. */
    if (po->buffer_id != UINT32_MAX) {
        return OFPERR_OFPBRC_BUFFER_UNKNOWN;
    }

    /* Ensure that the L3 header is 32-bit aligned. */
    opo->packet = dp_packet_clone_data_with_headroom(po->packet,
                                                     po->packet_len, 2);
    /* Take the received packet_type as packet_type of the packet. */
    opo->packet->packet_type = po->flow_metadata.flow.packet_type;

    /* Store struct flow: start from the request's metadata and fold in the
     * fields extracted from the packet itself. */
    opo->flow = xmalloc(sizeof *opo->flow);
    *opo->flow = po->flow_metadata.flow;
    miniflow_extract(opo->packet, &m.mf);
    flow_union_with_miniflow(opo->flow, &m.mf);

    /* Check actions like for flow mods.  We pass a 'table_id' of 0 to
     * ofpacts_check_consistency(), which isn't strictly correct because these
     * actions aren't in any table.  This is OK as 'table_id' is only used to
     * check instructions (e.g., goto-table), which can't appear on the action
     * list of a packet-out. */
    match_wc_init(&match, opo->flow);
    struct ofpact_check_params cp = {
        .match = &match,
        .max_ports = u16_to_ofp(ofproto->max_ports),
        .table_id = 0,
        .n_tables = ofproto->n_tables
    };
    error = ofpacts_check_consistency(po->ofpacts, po->ofpacts_len,
                                      ofconn_get_protocol(ofconn), &cp);
    if (error) {
        /* Undo the allocations made above; po->ofpacts stays with caller. */
        dp_packet_delete(opo->packet);
        free(opo->flow);
        return error;
    }

    opo->ofpacts = po->ofpacts;
    opo->ofpacts_len = po->ofpacts_len;

    opo->aux = NULL;
    return 0;
}
3535
3536static enum ofperr
3537ofproto_packet_out_start(struct ofproto *ofproto,
3538 struct ofproto_packet_out *opo)
3539 OVS_REQUIRES(ofproto_mutex)
3540{
3541 enum ofperr error;
3542
3543 error = ofproto_check_ofpacts(ofproto, opo->ofpacts, opo->ofpacts_len);
3544 if (error) {
3545 return error;
3546 }
3547
3548 return ofproto->ofproto_class->packet_xlate(ofproto, opo);
3549}
3550
/* Undoes a successful ofproto_packet_out_start() without executing the
 * packet (used when a later step of a bundle or transaction fails). */
static void
ofproto_packet_out_revert(struct ofproto *ofproto,
                          struct ofproto_packet_out *opo)
    OVS_REQUIRES(ofproto_mutex)
{
    ofproto->ofproto_class->packet_xlate_revert(ofproto, opo);
}
3558
/* Final phase of packet-out execution: hands the translated packet to the
 * provider for actual execution.  Must follow a successful
 * ofproto_packet_out_start(). */
static void
ofproto_packet_out_finish(struct ofproto *ofproto,
                          struct ofproto_packet_out *opo)
    OVS_REQUIRES(ofproto_mutex)
{
    ofproto->ofproto_class->packet_execute(ofproto, opo);
}
3566
/* Handles an OpenFlow OFPT_PACKET_OUT message: decodes it, validates the
 * actions, and executes the packet under 'ofproto_mutex'.  Rejected for
 * slave controllers. */
static enum ofperr
handle_packet_out(struct ofconn *ofconn, const struct ofp_header *oh)
    OVS_EXCLUDED(ofproto_mutex)
{
    struct ofproto *p = ofconn_get_ofproto(ofconn);
    struct ofputil_packet_out po;
    struct ofproto_packet_out opo;
    uint64_t ofpacts_stub[1024 / 8];
    struct ofpbuf ofpacts;
    enum ofperr error;

    COVERAGE_INC(ofproto_packet_out);

    error = reject_slave_controller(ofconn);
    if (error) {
        return error;
    }

    /* Decode message. */
    ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
    error = ofputil_decode_packet_out(&po, oh, ofproto_get_tun_tab(p),
                                      &ofpacts);
    if (error) {
        ofpbuf_uninit(&ofpacts);
        return error;
    }

    po.ofpacts = ofpbuf_steal_data(&ofpacts); /* Move to heap. */
    error = ofproto_packet_out_init(p, ofconn, &opo, &po);
    if (error) {
        /* init failed, so ownership of the actions stayed with us. */
        free(po.ofpacts);
        return error;
    }

    ovs_mutex_lock(&ofproto_mutex);
    opo.version = p->tables_version;
    error = ofproto_packet_out_start(p, &opo);
    if (!error) {
        ofproto_packet_out_finish(p, &opo);
    }
    ovs_mutex_unlock(&ofproto_mutex);

    ofproto_packet_out_uninit(&opo);
    return error;
}
3612
/* Handles a Nicira NXT_RESUME message, which resumes processing of a packet
 * previously suspended by a "pause" continuation.  Returns an error if the
 * provider does not support resuming. */
static enum ofperr
handle_nxt_resume(struct ofconn *ofconn, const struct ofp_header *oh)
{
    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
    struct ofputil_packet_in_private pin;
    enum ofperr error;

    error = ofputil_decode_packet_in_private(oh, false,
                                             ofproto_get_tun_tab(ofproto),
                                             &ofproto->vl_mff_map, &pin, NULL,
                                             NULL);
    if (error) {
        return error;
    }

    error = (ofproto->ofproto_class->nxt_resume
             ? ofproto->ofproto_class->nxt_resume(ofproto, &pin)
             : OFPERR_NXR_NOT_SUPPORTED);

    ofputil_packet_in_private_destroy(&pin);

    return error;
}
3636
/* Applies the port-config bits in 'config' selected by 'mask' to 'port'.
 * Bits that actually change are applied to the netdev (for PORT_DOWN) and
 * reported to the provider; a port-status message is sent to all controllers
 * except 'ofconn' (the originator, which gets it per its own settings). */
static void
update_port_config(struct ofconn *ofconn, struct ofport *port,
                   enum ofputil_port_config config,
                   enum ofputil_port_config mask)
{
    /* Only bits that are both selected by 'mask' and different from the
     * current config need work. */
    enum ofputil_port_config toggle = (config ^ port->pp.config) & mask;

    if (toggle & OFPUTIL_PC_PORT_DOWN
        && (config & OFPUTIL_PC_PORT_DOWN
            ? netdev_turn_flags_off(port->netdev, NETDEV_UP, NULL)
            : netdev_turn_flags_on(port->netdev, NETDEV_UP, NULL))) {
        /* We tried to bring the port up or down, but it failed, so don't
         * update the "down" bit. */
        toggle &= ~OFPUTIL_PC_PORT_DOWN;
    }

    if (toggle) {
        struct ofputil_phy_port old_pp = port->pp;

        port->pp.config ^= toggle;
        port->pp.state = normalize_state(port->pp.config, port->pp.state,
                                         port->may_enable);

        port->ofproto->ofproto_class->port_reconfigured(port, old_pp.config);
        connmgr_send_port_status(port->ofproto->connmgr, ofconn, &old_pp,
                                 &port->pp, OFPPR_MODIFY);
    }
}
3665
/* Validation phase of an OpenFlow port-mod: looks up the target port and
 * stores it in '*port'.  Returns an error if the port does not exist or if
 * the hardware address in 'pm' does not match the port's (a spec-mandated
 * safety check against modifying the wrong port). */
static enum ofperr
port_mod_start(struct ofconn *ofconn, struct ofputil_port_mod *pm,
               struct ofport **port)
{
    struct ofproto *p = ofconn_get_ofproto(ofconn);

    *port = ofproto_get_port(p, pm->port_no);
    if (!*port) {
        return OFPERR_OFPPMFC_BAD_PORT;
    }
    if (!eth_addr_equals((*port)->pp.hw_addr, pm->hw_addr) ||
        !eth_addr64_equals((*port)->pp.hw_addr64, pm->hw_addr64)) {
        return OFPERR_OFPPMFC_BAD_HW_ADDR;
    }
    return 0;
}
3682
/* Execution phase of an OpenFlow port-mod: applies the config changes to
 * 'port' (validated earlier by port_mod_start()) and, if requested, updates
 * the netdev's advertised link features. */
static void
port_mod_finish(struct ofconn *ofconn, struct ofputil_port_mod *pm,
                struct ofport *port)
{
    update_port_config(ofconn, port, pm->config, pm->mask);
    if (pm->advertise) {
        netdev_set_advertisements(port->netdev, pm->advertise);
    }
}
3692
3693static enum ofperr
3694handle_port_mod(struct ofconn *ofconn, const struct ofp_header *oh)
3695{
3696 struct ofputil_port_mod pm;
3697 struct ofport *port;
3698 enum ofperr error;
3699
3700 error = reject_slave_controller(ofconn);
3701 if (error) {
3702 return error;
3703 }
3704
3705 error = ofputil_decode_port_mod(oh, &pm, false);
3706 if (error) {
3707 return error;
3708 }
3709
3710 error = port_mod_start(ofconn, &pm, &port);
3711 if (!error) {
3712 port_mod_finish(ofconn, &pm, port);
3713 }
3714 return error;
3715}
3716
/* Handles an OpenFlow description-stats request: replies with the switch's
 * manufacturer/hardware/software/serial/datapath description strings,
 * falling back to built-in defaults for any not configured. */
static enum ofperr
handle_desc_stats_request(struct ofconn *ofconn,
                          const struct ofp_header *request)
{
    static const char *default_mfr_desc = "Nicira, Inc.";
    static const char *default_hw_desc = "Open vSwitch";
    static const char *default_sw_desc = VERSION;
    static const char *default_serial_desc = "None";
    static const char *default_dp_desc = "None";

    struct ofproto *p = ofconn_get_ofproto(ofconn);
    struct ofp_desc_stats *ods;
    struct ofpbuf *msg;

    msg = ofpraw_alloc_stats_reply(request, 0);
    ods = ofpbuf_put_zeros(msg, sizeof *ods);
    ovs_strlcpy(ods->mfr_desc, p->mfr_desc ? p->mfr_desc : default_mfr_desc,
                sizeof ods->mfr_desc);
    ovs_strlcpy(ods->hw_desc, p->hw_desc ? p->hw_desc : default_hw_desc,
                sizeof ods->hw_desc);
    ovs_strlcpy(ods->sw_desc, p->sw_desc ? p->sw_desc : default_sw_desc,
                sizeof ods->sw_desc);
    ovs_strlcpy(ods->serial_num,
                p->serial_desc ? p->serial_desc : default_serial_desc,
                sizeof ods->serial_num);
    ovs_strlcpy(ods->dp_desc, p->dp_desc ? p->dp_desc : default_dp_desc,
                sizeof ods->dp_desc);
    ofconn_send_reply(ofconn, msg);

    return 0;
}
3748
/* Handles an OpenFlow table-stats request: replies with per-table statistics
 * for every non-hidden table. */
static enum ofperr
handle_table_stats_request(struct ofconn *ofconn,
                           const struct ofp_header *request)
{
    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
    struct ofputil_table_features *features;
    struct ofputil_table_stats *stats;
    struct ofpbuf *reply;
    size_t i;

    query_tables(ofproto, &features, &stats);

    reply = ofputil_encode_table_stats_reply(request);
    for (i = 0; i < ofproto->n_tables; i++) {
        if (!(ofproto->tables[i].flags & OFTABLE_HIDDEN)) {
            ofputil_append_table_stats_reply(reply, &stats[i], &features[i]);
        }
    }
    ofconn_send_reply(ofconn, reply);

    free(features);
    free(stats);

    return 0;
}
3774
/* Handles an OpenFlow table-features request: replies with the features of
 * every non-hidden table.  Requests that carry a body (an attempt to *set*
 * table features) or span multiple parts are rejected, since modifying
 * table features is not supported. */
static enum ofperr
handle_table_features_request(struct ofconn *ofconn,
                              const struct ofp_header *request)
{
    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
    struct ofpbuf msg = ofpbuf_const_initializer(request,
                                                 ntohs(request->length));
    ofpraw_pull_assert(&msg);
    if (msg.size || ofpmp_more(request)) {
        return OFPERR_OFPTFFC_EPERM;
    }

    struct ofputil_table_features *features;
    query_tables(ofproto, &features, NULL);

    struct ovs_list replies;
    ofpmp_init(&replies, request);
    for (size_t i = 0; i < ofproto->n_tables; i++) {
        if (!(ofproto->tables[i].flags & OFTABLE_HIDDEN)) {
            ofputil_append_table_features_reply(&features[i], &replies);
        }
    }
    ofconn_send_replies(ofconn, &replies);

    free(features);

    return 0;
}
3803
3804/* Returns the vacancy of 'oftable', a number that ranges from 0 (if the table
3805 * is full) to 100 (if the table is empty).
3806 *
3807 * A table without a limit on flows is considered to be empty. */
3808static uint8_t
3809oftable_vacancy(const struct oftable *t)
3810{
3811 return (!t->max_flows ? 100
3812 : t->n_flows >= t->max_flows ? 0
3813 : (t->max_flows - t->n_flows) * 100.0 / t->max_flows);
3814}
3815
/* Fills in 'td' with the description (eviction, vacancy configuration and
 * current vacancy) of table 'table_id' in 'ofproto'. */
static void
query_table_desc__(struct ofputil_table_desc *td,
                   struct ofproto *ofproto, uint8_t table_id)
{
    const struct oftable *t = &ofproto->tables[table_id];

    td->table_id = table_id;
    td->eviction = (t->eviction & EVICTION_OPENFLOW
                    ? OFPUTIL_TABLE_EVICTION_ON
                    : OFPUTIL_TABLE_EVICTION_OFF);
    td->eviction_flags = OFPROTO_EVICTION_FLAGS;
    td->vacancy = (t->vacancy_event
                   ? OFPUTIL_TABLE_VACANCY_ON
                   : OFPUTIL_TABLE_VACANCY_OFF);
    td->table_vacancy.vacancy_down = t->vacancy_down;
    td->table_vacancy.vacancy_up = t->vacancy_up;
    td->table_vacancy.vacancy = oftable_vacancy(t);
}
3834
3835/* This function queries the database for dumping table-desc. */
3836static void
3837query_tables_desc(struct ofproto *ofproto, struct ofputil_table_desc **descp)
3838{
3839 struct ofputil_table_desc *table_desc;
3840 size_t i;
3841
3842 table_desc = *descp = xcalloc(ofproto->n_tables, sizeof *table_desc);
3843 for (i = 0; i < ofproto->n_tables; i++) {
3844 struct ofputil_table_desc *td = &table_desc[i];
3845 query_table_desc__(td, ofproto, i);
3846 }
3847}
3848
/* Handles an OpenFlow table-desc request: replies with the description of
 * every non-hidden table. */
static enum ofperr
handle_table_desc_request(struct ofconn *ofconn,
                          const struct ofp_header *request)
{
    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
    struct ofputil_table_desc *table_desc;
    struct ovs_list replies;
    size_t i;

    query_tables_desc(ofproto, &table_desc);
    ofpmp_init(&replies, request);
    for (i = 0; i < ofproto->n_tables; i++) {
        if (!(ofproto->tables[i].flags & OFTABLE_HIDDEN)) {
            ofputil_append_table_desc_reply(&table_desc[i], &replies,
                                            request->version);
        }
    }
    ofconn_send_replies(ofconn, &replies);
    free(table_desc);
    return 0;
}
3871
/* This function determines and sends the vacancy event, based on the value
 * of current vacancy and threshold vacancy. If the current vacancy is less
 * than or equal to vacancy_down, vacancy up events must be enabled, and when
 * the current vacancy is greater or equal to vacancy_up, vacancy down events
 * must be enabled.
 *
 * 't->vacancy_event' holds the *next* event type to report; after sending
 * it the field is flipped to the opposite event so that the same threshold
 * crossing is not reported repeatedly. */
static void
send_table_status(struct ofproto *ofproto, uint8_t table_id)
{
    struct oftable *t = &ofproto->tables[table_id];
    if (!t->vacancy_event) {
        /* Vacancy events are disabled for this table. */
        return;
    }

    uint8_t vacancy = oftable_vacancy(t);
    enum ofp14_table_reason event;
    if (vacancy < t->vacancy_down) {
        event = OFPTR_VACANCY_DOWN;
    } else if (vacancy > t->vacancy_up) {
        event = OFPTR_VACANCY_UP;
    } else {
        /* Between thresholds: nothing to report. */
        return;
    }

    /* Only send if this is the event type we are currently armed for. */
    if (event == t->vacancy_event) {
        struct ofputil_table_desc td;
        query_table_desc__(&td, ofproto, table_id);
        connmgr_send_table_status(ofproto->connmgr, &td, event);

        /* Re-arm for the opposite crossing. */
        t->vacancy_event = (event == OFPTR_VACANCY_DOWN
                            ? OFPTR_VACANCY_UP
                            : OFPTR_VACANCY_DOWN);
    }
}
3905
/* Appends a port-stats reply for 'port' (duration, netdev statistics and
 * custom counters) to 'replies'. */
static void
append_port_stat(struct ofport *port, struct ovs_list *replies)
{
    struct ofputil_port_stats ops = { .port_no = port->pp.port_no };

    calc_duration(port->created, time_msec(),
                  &ops.duration_sec, &ops.duration_nsec);

    /* Intentionally ignore return value, since errors will set
     * 'stats' to all-1s, which is correct for OpenFlow, and
     * netdev_get_stats() will log errors. */
    ofproto_port_get_stats(port, &ops.stats);
    netdev_get_custom_stats(port->netdev, &ops.custom_stats);

    ofputil_append_port_stat(replies, &ops);

    netdev_free_custom_stats_counters(&ops.custom_stats);
}
3924
/* Common helper for per-port stats/desc requests: invokes 'cb' to append a
 * reply for the single port 'port_no', or for every port if 'port_no' is
 * OFPP_ANY, then sends the accumulated replies on 'ofconn'.  An unknown
 * 'port_no' yields an empty (but still valid) reply. */
static void
handle_port_request(struct ofconn *ofconn,
                    const struct ofp_header *request, ofp_port_t port_no,
                    void (*cb)(struct ofport *, struct ovs_list *replies))
{
    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
    struct ofport *port;
    struct ovs_list replies;

    ofpmp_init(&replies, request);
    if (port_no != OFPP_ANY) {
        port = ofproto_get_port(ofproto, port_no);
        if (port) {
            cb(port, &replies);
        }
    } else {
        HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) {
            cb(port, &replies);
        }
    }

    ofconn_send_replies(ofconn, &replies);
}
3948
3949static enum ofperr
3950handle_port_stats_request(struct ofconn *ofconn,
3951 const struct ofp_header *request)
3952{
3953 ofp_port_t port_no;
3954 enum ofperr error;
3955
3956 error = ofputil_decode_port_stats_request(request, &port_no);
3957 if (!error) {
3958 handle_port_request(ofconn, request, port_no, append_port_stat);
3959 }
3960 return error;
3961}
3962
/* Appends a port-description reply for 'port' to 'replies'. */
static void
append_port_desc(struct ofport *port, struct ovs_list *replies)
{
    ofputil_append_port_desc_stats_reply(&port->pp, replies);
}
3968
3969static enum ofperr
3970handle_port_desc_stats_request(struct ofconn *ofconn,
3971 const struct ofp_header *request)
3972{
3973 ofp_port_t port_no;
3974 enum ofperr error;
3975
3976 error = ofputil_decode_port_desc_stats_request(request, &port_no);
3977 if (!error) {
3978 handle_port_request(ofconn, request, port_no, append_port_desc);
3979 }
3980 return error;
3981}
3982
/* Hashes the flow cookie used to index 'ofproto->cookies'. */
static uint32_t
hash_cookie(ovs_be64 cookie)
{
    return hash_uint64((OVS_FORCE uint64_t)cookie);
}
3988
/* Adds 'rule' to 'ofproto''s by-cookie index. */
static void
cookies_insert(struct ofproto *ofproto, struct rule *rule)
    OVS_REQUIRES(ofproto_mutex)
{
    hindex_insert(&ofproto->cookies, &rule->cookie_node,
                  hash_cookie(rule->flow_cookie));
}
3996
/* Removes 'rule' from 'ofproto''s by-cookie index. */
static void
cookies_remove(struct ofproto *ofproto, struct rule *rule)
    OVS_REQUIRES(ofproto_mutex)
{
    hindex_remove(&ofproto->cookies, &rule->cookie_node);
}
4003
/* Converts the span from 'start' to 'now' (both in milliseconds) into the
 * OpenFlow duration representation: whole seconds in '*sec' and the
 * remaining fraction, expressed in nanoseconds, in '*nsec'. */
static void
calc_duration(long long int start, long long int now,
              uint32_t *sec, uint32_t *nsec)
{
    long long int elapsed_ms = now - start;

    *sec = elapsed_ms / 1000;
    *nsec = (elapsed_ms % 1000) * 1000000;
}
4012
4013/* Checks whether 'table_id' is 0xff or a valid table ID in 'ofproto'. Returns
4014 * true if 'table_id' is OK, false otherwise. */
4015static bool
4016check_table_id(const struct ofproto *ofproto, uint8_t table_id)
4017{
4018 return table_id == OFPTT_ALL || table_id < ofproto->n_tables;
4019}
4020
4021static struct oftable *
4022next_visible_table(const struct ofproto *ofproto, uint8_t table_id)
4023{
4024 struct oftable *table;
4025
4026 for (table = &ofproto->tables[table_id];
4027 table < &ofproto->tables[ofproto->n_tables];
4028 table++) {
4029 if (!(table->flags & OFTABLE_HIDDEN)) {
4030 return table;
4031 }
4032 }
4033
4034 return NULL;
4035}
4036
4037static struct oftable *
4038first_matching_table(const struct ofproto *ofproto, uint8_t table_id)
4039{
4040 if (table_id == 0xff) {
4041 return next_visible_table(ofproto, 0);
4042 } else if (table_id < ofproto->n_tables) {
4043 return &ofproto->tables[table_id];
4044 } else {
4045 return NULL;
4046 }
4047}
4048
4049static struct oftable *
4050next_matching_table(const struct ofproto *ofproto,
4051 const struct oftable *table, uint8_t table_id)
4052{
4053 return (table_id == 0xff
4054 ? next_visible_table(ofproto, (table - ofproto->tables) + 1)
4055 : NULL);
4056}
4057
/* Assigns TABLE to each oftable, in turn, that matches TABLE_ID in OFPROTO:
 *
 *    - If TABLE_ID is 0xff, this iterates over every classifier table in
 *      OFPROTO, skipping tables marked OFTABLE_HIDDEN.
 *
 *    - If TABLE_ID is the number of a table in OFPROTO, then the loop iterates
 *      only once, for that table.  (This can be used to access tables marked
 *      OFTABLE_HIDDEN.)
 *
 *    - Otherwise, TABLE_ID isn't valid for OFPROTO, so the loop won't be
 *      entered at all.  (Perhaps you should have validated TABLE_ID with
 *      check_table_id().)
 *
 * All parameters are evaluated multiple times, so avoid passing expressions
 * with side effects.
 */
#define FOR_EACH_MATCHING_TABLE(TABLE, TABLE_ID, OFPROTO)         \
    for ((TABLE) = first_matching_table(OFPROTO, TABLE_ID);       \
         (TABLE) != NULL;                                         \
         (TABLE) = next_matching_table(OFPROTO, TABLE, TABLE_ID))
4077
4078/* Initializes 'criteria' in a straightforward way based on the other
4079 * parameters.
4080 *
4081 * By default, the criteria include flows that are read-only, on the assumption
4082 * that the collected flows won't be modified. Call rule_criteria_require_rw()
4083 * if flows will be modified.
4084 *
4085 * For "loose" matching, the 'priority' parameter is unimportant and may be
4086 * supplied as 0. */
4087static void
4088rule_criteria_init(struct rule_criteria *criteria, uint8_t table_id,
4089 const struct minimatch *match, int priority,
4090 ovs_version_t version, ovs_be64 cookie,
4091 ovs_be64 cookie_mask, ofp_port_t out_port,
4092 uint32_t out_group)
4093{
4094 criteria->table_id = table_id;
4095 cls_rule_init_from_minimatch(&criteria->cr, match, priority);
4096 criteria->version = version;
4097 criteria->cookie = cookie;
4098 criteria->cookie_mask = cookie_mask;
4099 criteria->out_port = out_port;
4100 criteria->out_group = out_group;
4101
4102 /* We ordinarily want to skip hidden rules, but there has to be a way for
4103 * code internal to OVS to modify and delete them, so if the criteria
4104 * specify a priority that can only be for a hidden flow, then allow hidden
4105 * rules to be selected. (This doesn't allow OpenFlow clients to meddle
4106 * with hidden flows because OpenFlow uses only a 16-bit field to specify
4107 * priority.) */
4108 criteria->include_hidden = priority > UINT16_MAX;
4109
4110 /* We assume that the criteria are being used to collect flows for reading
4111 * but not modification. Thus, we should collect read-only flows. */
4112 criteria->include_readonly = true;
4113}
4114
/* By default, criteria initialized by rule_criteria_init() will match flows
 * that are read-only, on the assumption that the collected flows won't be
 * modified.  Call this function to match only flows that are modifiable.
 *
 * Specify 'can_write_readonly' as false in ordinary circumstances, true if the
 * caller has special privileges that allow it to modify even "read-only"
 * flows. */
static void
rule_criteria_require_rw(struct rule_criteria *criteria,
                         bool can_write_readonly)
{
    criteria->include_readonly = can_write_readonly;
}
4128
/* Releases the resources held by 'criteria' (the classifier rule created by
 * rule_criteria_init()). */
static void
rule_criteria_destroy(struct rule_criteria *criteria)
{
    cls_rule_destroy(&criteria->cr);
    criteria->version = OVS_VERSION_NOT_REMOVED; /* Mark as destroyed. */
}
4135
4136/* Schedules postponed removal of rules, destroys 'rules'. */
4137static void
4138remove_rules_postponed(struct rule_collection *rules)
4139 OVS_REQUIRES(ofproto_mutex)
4140{
4141 if (rule_collection_n(rules) > 0) {
4142 if (rule_collection_n(rules) == 1) {
4143 ovsrcu_postpone(remove_rule_rcu, rule_collection_rules(rules)[0]);
4144 rule_collection_init(rules);
4145 } else {
4146 ovsrcu_postpone(remove_rules_rcu, rule_collection_detach(rules));
4147 }
4148 }
4149}
4150
4151/* Schedules postponed removal of groups, destroys 'groups'. */
4152static void
4153remove_groups_postponed(struct group_collection *groups)
4154 OVS_REQUIRES(ofproto_mutex)
4155{
4156 if (group_collection_n(groups) > 0) {
4157 if (group_collection_n(groups) == 1) {
4158 ovsrcu_postpone(remove_group_rcu,
4159 group_collection_groups(groups)[0]);
4160 group_collection_init(groups);
4161 } else {
4162 ovsrcu_postpone(remove_groups_rcu,
4163 group_collection_detach(groups));
4164 }
4165 }
4166}
4167
4168/* Checks whether 'rule' matches 'c' and, if so, adds it to 'rules'. This
4169 * function verifies most of the criteria in 'c' itself, but the caller must
4170 * check 'c->cr' itself.
4171 *
4172 * Rules that have already been marked for removal are not collected.
4173 *
4174 * Increments '*n_readonly' if 'rule' wasn't added because it's read-only (and
4175 * 'c' only includes modifiable rules). */
4176static void
4177collect_rule(struct rule *rule, const struct rule_criteria *c,
4178 struct rule_collection *rules, size_t *n_readonly)
4179 OVS_REQUIRES(ofproto_mutex)
4180{
4181 if ((c->table_id == rule->table_id || c->table_id == 0xff)
4182 && ofproto_rule_has_out_port(rule, c->out_port)
4183 && ofproto_rule_has_out_group(rule, c->out_group)
4184 && !((rule->flow_cookie ^ c->cookie) & c->cookie_mask)
4185 && (!rule_is_hidden(rule) || c->include_hidden)
4186 && cls_rule_visible_in_version(&rule->cr, c->version)) {
4187 /* Rule matches all the criteria... */
4188 if (!rule_is_readonly(rule) || c->include_readonly) {
4189 /* ...add it. */
4190 rule_collection_add(rules, rule);
4191 } else {
4192 /* ...except it's read-only. */
4193 ++*n_readonly;
4194 }
4195 }
4196}
4197
/* Searches 'ofproto' for rules that match the criteria in 'criteria'.  Matches
 * on classifiers rules are done in the "loose" way required for OpenFlow
 * OFPFC_MODIFY and OFPFC_DELETE requests.  Puts the selected rules on list
 * 'rules'.
 *
 * Returns 0 on success, otherwise an OpenFlow error code. */
static enum ofperr
collect_rules_loose(struct ofproto *ofproto,
                    const struct rule_criteria *criteria,
                    struct rule_collection *rules)
    OVS_REQUIRES(ofproto_mutex)
{
    struct oftable *table;
    enum ofperr error = 0;
    size_t n_readonly = 0;

    rule_collection_init(rules);

    if (!check_table_id(ofproto, criteria->table_id)) {
        error = OFPERR_OFPBRC_BAD_TABLE_ID;
        goto exit;
    }

    /* An all-ones cookie mask selects exactly one cookie value, so the
     * by-cookie hash index can be scanned instead of the classifiers. */
    if (criteria->cookie_mask == OVS_BE64_MAX) {
        struct rule *rule;

        HINDEX_FOR_EACH_WITH_HASH (rule, cookie_node,
                                   hash_cookie(criteria->cookie),
                                   &ofproto->cookies) {
            if (cls_rule_is_loose_match(&rule->cr, &criteria->cr.match)) {
                collect_rule(rule, criteria, rules, &n_readonly);
            }
        }
    } else {
        FOR_EACH_MATCHING_TABLE (table, criteria->table_id, ofproto) {
            struct rule *rule;

            CLS_FOR_EACH_TARGET (rule, cr, &table->cls, &criteria->cr,
                                 criteria->version) {
                collect_rule(rule, criteria, rules, &n_readonly);
            }
        }
    }

exit:
    if (!error && !rule_collection_n(rules) && n_readonly) {
        /* We didn't find any rules to modify.  We did find some read-only
         * rules that we're not allowed to modify, so report that. */
        error = OFPERR_OFPBRC_EPERM;
    }
    if (error) {
        rule_collection_destroy(rules);
    }
    return error;
}
4253
/* Searches 'ofproto' for rules that match the criteria in 'criteria'.  Matches
 * on classifiers rules are done in the "strict" way required for OpenFlow
 * OFPFC_MODIFY_STRICT and OFPFC_DELETE_STRICT requests.  Puts the selected
 * rules on list 'rules'.
 *
 * Returns 0 on success, otherwise an OpenFlow error code. */
static enum ofperr
collect_rules_strict(struct ofproto *ofproto,
                     const struct rule_criteria *criteria,
                     struct rule_collection *rules)
    OVS_REQUIRES(ofproto_mutex)
{
    struct oftable *table;
    size_t n_readonly = 0;
    enum ofperr error = 0;

    rule_collection_init(rules);

    if (!check_table_id(ofproto, criteria->table_id)) {
        error = OFPERR_OFPBRC_BAD_TABLE_ID;
        goto exit;
    }

    /* An all-ones cookie mask selects exactly one cookie value, so the
     * by-cookie hash index can be scanned instead of the classifiers. */
    if (criteria->cookie_mask == OVS_BE64_MAX) {
        struct rule *rule;

        HINDEX_FOR_EACH_WITH_HASH (rule, cookie_node,
                                   hash_cookie(criteria->cookie),
                                   &ofproto->cookies) {
            /* Strict matching requires full equality, not just overlap. */
            if (cls_rule_equal(&rule->cr, &criteria->cr)) {
                collect_rule(rule, criteria, rules, &n_readonly);
            }
        }
    } else {
        FOR_EACH_MATCHING_TABLE (table, criteria->table_id, ofproto) {
            struct rule *rule;

            rule = rule_from_cls_rule(classifier_find_rule_exactly(
                                          &table->cls, &criteria->cr,
                                          criteria->version));
            if (rule) {
                collect_rule(rule, criteria, rules, &n_readonly);
            }
        }
    }

exit:
    if (!error && !rule_collection_n(rules) && n_readonly) {
        /* We didn't find any rules to modify.  We did find some read-only
         * rules that we're not allowed to modify, so report that. */
        error = OFPERR_OFPBRC_EPERM;
    }
    if (error) {
        rule_collection_destroy(rules);
    }
    return error;
}
4311
/* Returns 'age_ms' (a duration in milliseconds), converted to seconds and
 * forced into the range of a uint16_t (0 through UINT16_MAX). */
static int
age_secs(long long int age_ms)
{
    if (age_ms < 0) {
        return 0;
    }
    if (age_ms >= UINT16_MAX * 1000) {
        return UINT16_MAX;
    }
    return (unsigned int) age_ms / 1000;
}
4321
/* Handles an OpenFlow flow stats request 'request' received on 'ofconn':
 * collects every rule that matches the request ("loose" matching, as for
 * OFPFC_MODIFY/OFPFC_DELETE) and sends a multipart reply with one entry per
 * rule.  Returns 0 on success, otherwise an OpenFlow error code. */
static enum ofperr
handle_flow_stats_request(struct ofconn *ofconn,
                          const struct ofp_header *request)
    OVS_EXCLUDED(ofproto_mutex)
{
    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
    struct ofputil_flow_stats_request fsr;
    struct rule_criteria criteria;
    struct rule_collection rules;
    struct ovs_list replies;
    enum ofperr error;

    error = ofputil_decode_flow_stats_request(&fsr, request,
                                              ofproto_get_tun_tab(ofproto),
                                              &ofproto->vl_mff_map);
    if (error) {
        return error;
    }

    /* Loose matching ignores priority, so 0 is passed for it. */
    struct minimatch match;
    minimatch_init(&match, &fsr.match);
    rule_criteria_init(&criteria, fsr.table_id, &match, 0, OVS_VERSION_MAX,
                       fsr.cookie, fsr.cookie_mask, fsr.out_port,
                       fsr.out_group);
    minimatch_destroy(&match);

    /* Collect the rules under the mutex, then take a reference on them so
     * they stay alive while the reply is built with the mutex released. */
    ovs_mutex_lock(&ofproto_mutex);
    error = collect_rules_loose(ofproto, &criteria, &rules);
    rule_criteria_destroy(&criteria);
    if (!error) {
        rule_collection_ref(&rules);
    }
    ovs_mutex_unlock(&ofproto_mutex);

    if (error) {
        return error;
    }

    ofpmp_init(&replies, request);
    struct rule *rule;
    RULE_COLLECTION_FOR_EACH (rule, &rules) {
        long long int now = time_msec();
        struct ofputil_flow_stats fs;
        long long int created, used, modified;
        const struct rule_actions *actions;
        enum ofputil_flow_mod_flags flags;

        /* Snapshot the rule's mutable fields under its own mutex. */
        ovs_mutex_lock(&rule->mutex);
        fs.cookie = rule->flow_cookie;
        fs.idle_timeout = rule->idle_timeout;
        fs.hard_timeout = rule->hard_timeout;
        fs.importance = rule->importance;
        created = rule->created;
        modified = rule->modified;
        actions = rule_get_actions(rule);
        flags = rule->flags;
        ovs_mutex_unlock(&rule->mutex);

        ofproto->ofproto_class->rule_get_stats(rule, &fs.packet_count,
                                               &fs.byte_count, &used);

        minimatch_expand(&rule->cr.match, &fs.match);
        fs.table_id = rule->table_id;
        calc_duration(created, now, &fs.duration_sec, &fs.duration_nsec);
        fs.priority = rule->cr.priority;
        fs.idle_age = age_secs(now - used);
        fs.hard_age = age_secs(now - modified);
        fs.ofpacts = actions->ofpacts;
        fs.ofpacts_len = actions->ofpacts_len;

        fs.flags = flags;
        ofputil_append_flow_stats_reply(&fs, &replies,
                                        ofproto_get_tun_tab(ofproto));
    }

    rule_collection_unref(&rules);
    rule_collection_destroy(&rules);

    ofconn_send_replies(ofconn, &replies);

    return 0;
}
4404
/* Appends a single-line, human-readable description of 'rule' -- its table,
 * duration, packet/byte counts, match, and actions -- to 'results'. */
static void
flow_stats_ds(struct ofproto *ofproto, struct rule *rule, struct ds *results)
{
    uint64_t packet_count, byte_count;
    const struct rule_actions *actions;
    long long int created, used;

    rule->ofproto->ofproto_class->rule_get_stats(rule, &packet_count,
                                                 &byte_count, &used);

    /* Snapshot mutable rule state under the rule's mutex. */
    ovs_mutex_lock(&rule->mutex);
    actions = rule_get_actions(rule);
    created = rule->created;
    ovs_mutex_unlock(&rule->mutex);

    /* The table id is omitted for table 0 to keep output short. */
    if (rule->table_id != 0) {
        ds_put_format(results, "table_id=%"PRIu8", ", rule->table_id);
    }
    ds_put_format(results, "duration=%llds, ", (time_msec() - created) / 1000);
    ds_put_format(results, "n_packets=%"PRIu64", ", packet_count);
    ds_put_format(results, "n_bytes=%"PRIu64", ", byte_count);
    cls_rule_format(&rule->cr, ofproto_get_tun_tab(ofproto), NULL, results);
    ds_put_char(results, ',');

    ds_put_cstr(results, "actions=");
    struct ofpact_format_params fp = { .s = results };
    ofpacts_format(actions->ofpacts, actions->ofpacts_len, &fp);

    ds_put_cstr(results, "\n");
}
4435
4436/* Adds a pretty-printed description of all flows to 'results', including
4437 * hidden flows (e.g., set up by in-band control). */
4438void
4439ofproto_get_all_flows(struct ofproto *p, struct ds *results)
4440{
4441 struct oftable *table;
4442
4443 OFPROTO_FOR_EACH_TABLE (table, p) {
4444 struct rule *rule;
4445
4446 CLS_FOR_EACH (rule, cr, &table->cls) {
4447 flow_stats_ds(p, rule, results);
4448 }
4449 }
4450}
4451
/* Obtains the NetFlow engine type and engine ID for 'ofproto' into
 * '*engine_type' and '*engine_id', respectively.  Delegates to the
 * ofproto provider implementation. */
void
ofproto_get_netflow_ids(const struct ofproto *ofproto,
                        uint8_t *engine_type, uint8_t *engine_id)
{
    ofproto->ofproto_class->get_netflow_ids(ofproto, engine_type, engine_id);
}
4460
4461/* Checks the status change of CFM on 'ofport'.
4462 *
4463 * Returns true if 'ofproto_class' does not support 'cfm_status_changed'. */
4464bool
4465ofproto_port_cfm_status_changed(struct ofproto *ofproto, ofp_port_t ofp_port)
4466{
4467 struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
4468 return (ofport && ofproto->ofproto_class->cfm_status_changed
4469 ? ofproto->ofproto_class->cfm_status_changed(ofport)
4470 : true);
4471}
4472
4473/* Checks the status of CFM configured on 'ofp_port' within 'ofproto'.
4474 * Returns 0 if the port's CFM status was successfully stored into
4475 * '*status'. Returns positive errno if the port did not have CFM
4476 * configured.
4477 *
4478 * The caller must provide and own '*status', and must free 'status->rmps'.
4479 * '*status' is indeterminate if the return value is non-zero. */
4480int
4481ofproto_port_get_cfm_status(const struct ofproto *ofproto, ofp_port_t ofp_port,
4482 struct cfm_status *status)
4483{
4484 struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
4485 return (ofport && ofproto->ofproto_class->get_cfm_status
4486 ? ofproto->ofproto_class->get_cfm_status(ofport, status)
4487 : EOPNOTSUPP);
4488}
4489
/* Handles an OpenFlow aggregate stats request 'oh' received on 'ofconn':
 * sums the packet and byte counts of every rule selected by the request
 * ("loose" matching) and replies with the totals plus the number of matched
 * flows.  Returns 0 on success, otherwise an OpenFlow error code. */
static enum ofperr
handle_aggregate_stats_request(struct ofconn *ofconn,
                               const struct ofp_header *oh)
    OVS_EXCLUDED(ofproto_mutex)
{
    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
    struct ofputil_flow_stats_request request;
    struct ofputil_aggregate_stats stats;
    bool unknown_packets, unknown_bytes;
    struct rule_criteria criteria;
    struct rule_collection rules;
    struct ofpbuf *reply;
    enum ofperr error;

    error = ofputil_decode_flow_stats_request(&request, oh,
                                              ofproto_get_tun_tab(ofproto),
                                              &ofproto->vl_mff_map);
    if (error) {
        return error;
    }

    /* Loose matching ignores priority, so 0 is passed for it. */
    struct minimatch match;
    minimatch_init(&match, &request.match);
    rule_criteria_init(&criteria, request.table_id, &match, 0,
                       OVS_VERSION_MAX, request.cookie, request.cookie_mask,
                       request.out_port, request.out_group);
    minimatch_destroy(&match);

    /* Collect the rules under the mutex, then take a reference on them so
     * they stay alive while the totals are computed without the mutex. */
    ovs_mutex_lock(&ofproto_mutex);
    error = collect_rules_loose(ofproto, &criteria, &rules);
    rule_criteria_destroy(&criteria);
    if (!error) {
        rule_collection_ref(&rules);
    }
    ovs_mutex_unlock(&ofproto_mutex);

    if (error) {
        return error;
    }

    memset(&stats, 0, sizeof stats);
    unknown_packets = unknown_bytes = false;

    struct rule *rule;
    RULE_COLLECTION_FOR_EACH (rule, &rules) {
        uint64_t packet_count;
        uint64_t byte_count;
        long long int used;

        ofproto->ofproto_class->rule_get_stats(rule, &packet_count,
                                               &byte_count, &used);

        /* UINT64_MAX means the count is unknown; any single unknown count
         * makes the aggregate unknown. */
        if (packet_count == UINT64_MAX) {
            unknown_packets = true;
        } else {
            stats.packet_count += packet_count;
        }

        if (byte_count == UINT64_MAX) {
            unknown_bytes = true;
        } else {
            stats.byte_count += byte_count;
        }

        stats.flow_count++;
    }
    if (unknown_packets) {
        stats.packet_count = UINT64_MAX;
    }
    if (unknown_bytes) {
        stats.byte_count = UINT64_MAX;
    }

    rule_collection_unref(&rules);
    rule_collection_destroy(&rules);

    reply = ofputil_encode_aggregate_stats_reply(&stats, oh);
    ofconn_send_reply(ofconn, reply);

    return 0;
}
4571
/* State threaded through a queue statistics dump. */
struct queue_stats_cbdata {
    struct ofport *ofport;      /* Port whose queues are being dumped. */
    struct ovs_list replies;    /* Accumulated multipart reply buffers. */
    long long int now;          /* Time the dump started, for durations. */
};
4577
4578static void
4579put_queue_stats(struct queue_stats_cbdata *cbdata, uint32_t queue_id,
4580 const struct netdev_queue_stats *stats)
4581{
4582 struct ofputil_queue_stats oqs;
4583
4584 oqs.port_no = cbdata->ofport->pp.port_no;
4585 oqs.queue_id = queue_id;
4586 oqs.tx_bytes = stats->tx_bytes;
4587 oqs.tx_packets = stats->tx_packets;
4588 oqs.tx_errors = stats->tx_errors;
4589 if (stats->created != LLONG_MIN) {
4590 calc_duration(stats->created, cbdata->now,
4591 &oqs.duration_sec, &oqs.duration_nsec);
4592 } else {
4593 oqs.duration_sec = oqs.duration_nsec = UINT32_MAX;
4594 }
4595 ofputil_append_queue_stat(&cbdata->replies, &oqs);
4596}
4597
4598static void
4599handle_queue_stats_dump_cb(uint32_t queue_id,
4600 struct netdev_queue_stats *stats,
4601 void *cbdata_)
4602{
4603 struct queue_stats_cbdata *cbdata = cbdata_;
4604
4605 put_queue_stats(cbdata, queue_id, stats);
4606}
4607
4608static enum ofperr
4609handle_queue_stats_for_port(struct ofport *port, uint32_t queue_id,
4610 struct queue_stats_cbdata *cbdata)
4611{
4612 cbdata->ofport = port;
4613 if (queue_id == OFPQ_ALL) {
4614 netdev_dump_queue_stats(port->netdev,
4615 handle_queue_stats_dump_cb, cbdata);
4616 } else {
4617 struct netdev_queue_stats stats;
4618
4619 if (!netdev_get_queue_stats(port->netdev, queue_id, &stats)) {
4620 put_queue_stats(cbdata, queue_id, &stats);
4621 } else {
4622 return OFPERR_OFPQOFC_BAD_QUEUE;
4623 }
4624 }
4625 return 0;
4626}
4627
4628static enum ofperr
4629handle_queue_stats_request(struct ofconn *ofconn,
4630 const struct ofp_header *rq)
4631{
4632 struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
4633 struct queue_stats_cbdata cbdata;
4634 struct ofport *port;
4635 enum ofperr error;
4636 struct ofputil_queue_stats_request oqsr;
4637
4638 COVERAGE_INC(ofproto_queue_req);
4639
4640 ofpmp_init(&cbdata.replies, rq);
4641 cbdata.now = time_msec();
4642
4643 error = ofputil_decode_queue_stats_request(rq, &oqsr);
4644 if (error) {
4645 return error;
4646 }
4647
4648 if (oqsr.port_no == OFPP_ANY) {
4649 error = OFPERR_OFPQOFC_BAD_QUEUE;
4650 HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) {
4651 if (!handle_queue_stats_for_port(port, oqsr.queue_id, &cbdata)) {
4652 error = 0;
4653 }
4654 }
4655 } else {
4656 port = ofproto_get_port(ofproto, oqsr.port_no);
4657 error = (port
4658 ? handle_queue_stats_for_port(port, oqsr.queue_id, &cbdata)
4659 : OFPERR_OFPQOFC_BAD_PORT);
4660 }
4661 if (!error) {
4662 ofconn_send_replies(ofconn, &cbdata.replies);
4663 } else {
4664 ofpbuf_list_delete(&cbdata.replies);
4665 }
4666
4667 return error;
4668}
4669
4670static enum ofperr
4671evict_rules_from_table(struct oftable *table)
4672 OVS_REQUIRES(ofproto_mutex)
4673{
4674 enum ofperr error = 0;
4675 struct rule_collection rules;
4676 unsigned int count = table->n_flows;
4677 unsigned int max_flows = table->max_flows;
4678
4679 rule_collection_init(&rules);
4680
4681 while (count-- > max_flows) {
4682 struct rule *rule;
4683
4684 if (!choose_rule_to_evict(table, &rule)) {
4685 error = OFPERR_OFPFMFC_TABLE_FULL;
4686 break;
4687 } else {
4688 eviction_group_remove_rule(rule);
4689 rule_collection_add(&rules, rule);
4690 }
4691 }
4692 delete_flows__(&rules, OFPRR_EVICTION, NULL);
4693
4694 return error;
4695}
4696
/* Extracts the "conjunction" actions from 'fm' into '*conjsp' (an xzalloc'ed
 * array that the caller must free) and their count into '*n_conjsp'.  If
 * 'fm' has no conjunction actions, sets '*conjsp' to NULL and '*n_conjsp'
 * to 0. */
static void
get_conjunctions(const struct ofputil_flow_mod *fm,
                 struct cls_conjunction **conjsp, size_t *n_conjsp)
{
    struct cls_conjunction *conjs = NULL;
    int n_conjs = 0;

    /* First pass: count the conjunction actions. */
    const struct ofpact *ofpact;
    OFPACT_FOR_EACH (ofpact, fm->ofpacts, fm->ofpacts_len) {
        if (ofpact->type == OFPACT_CONJUNCTION) {
            n_conjs++;
        } else if (ofpact->type != OFPACT_NOTE) {
            /* "conjunction" may appear with "note" actions but not with any
             * other type of actions. */
            ovs_assert(!n_conjs);
            break;
        }
    }
    if (n_conjs) {
        /* Second pass: copy each conjunction's clause data. */
        int i = 0;

        conjs = xzalloc(n_conjs * sizeof *conjs);
        OFPACT_FOR_EACH (ofpact, fm->ofpacts, fm->ofpacts_len) {
            if (ofpact->type == OFPACT_CONJUNCTION) {
                struct ofpact_conjunction *oc = ofpact_get_CONJUNCTION(ofpact);
                conjs[i].clause = oc->clause;
                conjs[i].n_clauses = oc->n_clauses;
                conjs[i].id = oc->id;
                i++;
            }
        }
    }

    *conjsp = conjs;
    *n_conjsp = n_conjs;
}
4733
/* add_flow_init(), add_flow_start(), add_flow_revert(), and add_flow_finish()
 * implement OFPFC_ADD and the cases for OFPFC_MODIFY and OFPFC_MODIFY_STRICT
 * in which no matching flow already exists in the flow table.
 *
 * add_flow_init() creates a new flow according to 'fm' and stores it to 'ofm'
 * for later reference.  If the flow replaces other flow, it will be updated to
 * match modify semantics later by add_flow_start() (by calling
 * replace_rule_start()).
 *
 * Returns 0 on success, or an OpenFlow error code on failure.
 *
 * On successful return the caller must complete the operation by calling
 * add_flow_start(), and if that succeeds, then either add_flow_finish(), or
 * add_flow_revert() if the operation needs to be reverted due to a later
 * failure.
 */
static enum ofperr
add_flow_init(struct ofproto *ofproto, struct ofproto_flow_mod *ofm,
              const struct ofputil_flow_mod *fm)
    OVS_EXCLUDED(ofproto_mutex)
{
    struct oftable *table;
    struct cls_rule cr;
    uint8_t table_id;
    enum ofperr error;

    if (!check_table_id(ofproto, fm->table_id)) {
        return OFPERR_OFPBRC_BAD_TABLE_ID;
    }

    /* Pick table: 0xff lets the provider choose (defaulting to table 0);
     * otherwise the id must name an existing table. */
    if (fm->table_id == 0xff) {
        if (ofproto->ofproto_class->rule_choose_table) {
            error = ofproto->ofproto_class->rule_choose_table(ofproto,
                                                              &fm->match,
                                                              &table_id);
            if (error) {
                return error;
            }
            ovs_assert(table_id < ofproto->n_tables);
        } else {
            table_id = 0;
        }
    } else if (fm->table_id < ofproto->n_tables) {
        table_id = fm->table_id;
    } else {
        return OFPERR_OFPBRC_BAD_TABLE_ID;
    }

    table = &ofproto->tables[table_id];
    if (table->flags & OFTABLE_READONLY
        && !(fm->flags & OFPUTIL_FF_NO_READONLY)) {
        return OFPERR_OFPBRC_EPERM;
    }

    /* Only internal flows (flagged OFPUTIL_FF_HIDDEN_FIELDS) may match on
     * hidden fields with non-default values. */
    if (!(fm->flags & OFPUTIL_FF_HIDDEN_FIELDS)
        && !minimatch_has_default_hidden_fields(&fm->match)) {
        VLOG_WARN_RL(&rl, "%s: (add_flow) only internal flows can set "
                     "non-default values to hidden fields", ofproto->name);
        return OFPERR_OFPBRC_EPERM;
    }

    /* Create the rule only if an earlier call has not already done so. */
    if (!ofm->temp_rule) {
        cls_rule_init_from_minimatch(&cr, &fm->match, fm->priority);

        /* Allocate new rule.  Destroys 'cr'. */
        uint64_t map = miniflow_get_tun_metadata_present_map(fm->match.flow);
        error = ofproto_rule_create(ofproto, &cr, table - ofproto->tables,
                                    fm->new_cookie, fm->idle_timeout,
                                    fm->hard_timeout, fm->flags,
                                    fm->importance, fm->ofpacts,
                                    fm->ofpacts_len, map,
                                    fm->ofpacts_tlv_bitmap, &ofm->temp_rule);
        if (error) {
            return error;
        }

        get_conjunctions(fm, &ofm->conjs, &ofm->n_conjs);
    }
    return 0;
}
4815
/* Starts insertion of the rule prepared by add_flow_init() into 'ofproto',
 * handling overlap checking, eviction, and replacement of an identical rule.
 *
 * ofm->temp_rule is consumed only in the successful case. */
static enum ofperr
add_flow_start(struct ofproto *ofproto, struct ofproto_flow_mod *ofm)
    OVS_REQUIRES(ofproto_mutex)
{
    struct rule *old_rule = NULL;
    struct rule *new_rule = ofm->temp_rule;
    const struct rule_actions *actions = rule_get_actions(new_rule);
    struct oftable *table = &ofproto->tables[new_rule->table_id];
    enum ofperr error;

    /* Must check actions while holding ofproto_mutex to avoid a race. */
    error = ofproto_check_ofpacts(ofproto, actions->ofpacts,
                                  actions->ofpacts_len);
    if (error) {
        return error;
    }

    /* Check for the existence of an identical rule.
     * This will not return rules earlier marked for removal. */
    old_rule = rule_from_cls_rule(classifier_find_rule_exactly(&table->cls,
                                                               &new_rule->cr,
                                                               ofm->version));
    if (!old_rule) {
        /* Check for overlap, if requested. */
        if (new_rule->flags & OFPUTIL_FF_CHECK_OVERLAP
            && classifier_rule_overlaps(&table->cls, &new_rule->cr,
                                        ofm->version)) {
            return OFPERR_OFPFMFC_OVERLAP;
        }

        /* If necessary, evict an existing rule to clear out space. */
        if (table->n_flows >= table->max_flows) {
            if (!choose_rule_to_evict(table, &old_rule)) {
                return OFPERR_OFPFMFC_TABLE_FULL;
            }
            eviction_group_remove_rule(old_rule);
            /* Marks 'old_rule' as an evicted rule rather than replaced rule.
             */
            old_rule->removed_reason = OFPRR_EVICTION;
        }
    } else {
        /* An identical rule exists, so this add acts as a modify (see the
         * comment above add_flow_init()). */
        ofm->modify_cookie = true;
    }

    if (old_rule) {
        rule_collection_add(&ofm->old_rules, old_rule);
    }
    /* Take ownership of the temp_rule. */
    rule_collection_add(&ofm->new_rules, new_rule);
    ofm->temp_rule = NULL;

    replace_rule_start(ofproto, ofm, old_rule, new_rule);
    return 0;
}
4871
4872/* Revert the effects of add_flow_start(). */
4873static void
4874add_flow_revert(struct ofproto *ofproto, struct ofproto_flow_mod *ofm)
4875 OVS_REQUIRES(ofproto_mutex)
4876{
4877 struct rule *old_rule = rule_collection_n(&ofm->old_rules)
4878 ? rule_collection_rules(&ofm->old_rules)[0] : NULL;
4879 struct rule *new_rule = rule_collection_rules(&ofm->new_rules)[0];
4880
4881 replace_rule_revert(ofproto, old_rule, new_rule);
4882}
4883
/* To be called after version bump.
 *
 * Completes the flow addition started by add_flow_start(): finishes the
 * replacement, flushes learned cookies that died with the old rule, and
 * either schedules the replaced rule for RCU-deferred destruction or, for a
 * genuinely new rule, notifies flow monitors and sends vacancy events. */
static void
add_flow_finish(struct ofproto *ofproto, struct ofproto_flow_mod *ofm,
                const struct openflow_mod_requester *req)
    OVS_REQUIRES(ofproto_mutex)
{
    struct rule *old_rule = rule_collection_n(&ofm->old_rules)
        ? rule_collection_rules(&ofm->old_rules)[0] : NULL;
    struct rule *new_rule = rule_collection_rules(&ofm->new_rules)[0];
    struct ovs_list dead_cookies = OVS_LIST_INITIALIZER(&dead_cookies);

    replace_rule_finish(ofproto, ofm, req, old_rule, new_rule, &dead_cookies);
    learned_cookies_flush(ofproto, &dead_cookies);

    if (old_rule) {
        /* Defer freeing the replaced rule until RCU quiescence. */
        ovsrcu_postpone(remove_rule_rcu, old_rule);
    } else {
        ofmonitor_report(ofproto->connmgr, new_rule, NXFME_ADDED, 0,
                         req ? req->ofconn : NULL,
                         req ? req->request->xid : 0, NULL);

        /* Send Vacancy Events for OF1.4+. */
        send_table_status(ofproto, new_rule->table_id);
    }
}
4909\f
4910/* OFPFC_MODIFY and OFPFC_MODIFY_STRICT. */
4911
/* Create a new rule.  Note that the rule is NOT inserted into any data
 * structures yet.  Takes ownership of 'cr'.  Only assigns '*new_rule' if
 * successful. */
static enum ofperr
ofproto_rule_create(struct ofproto *ofproto, struct cls_rule *cr,
                    uint8_t table_id, ovs_be64 new_cookie,
                    uint16_t idle_timeout, uint16_t hard_timeout,
                    enum ofputil_flow_mod_flags flags, uint16_t importance,
                    const struct ofpact *ofpacts, size_t ofpacts_len,
                    uint64_t match_tlv_bitmap, uint64_t ofpacts_tlv_bitmap,
                    struct rule **new_rule)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct rule *rule;
    enum ofperr error;

    /* Allocate new rule. */
    rule = ofproto->ofproto_class->rule_alloc();
    if (!rule) {
        cls_rule_destroy(cr);
        VLOG_WARN_RL(&rl, "%s: failed to allocate a rule.", ofproto->name);
        return OFPERR_OFPFMFC_UNKNOWN;
    }

    /* Initialize base state.  CONST_CAST is used to set fields that are
     * immutable once the rule is published. */
    *CONST_CAST(struct ofproto **, &rule->ofproto) = ofproto;
    cls_rule_move(CONST_CAST(struct cls_rule *, &rule->cr), cr);
    ovs_refcount_init(&rule->ref_count);

    ovs_mutex_init(&rule->mutex);
    ovs_mutex_lock(&rule->mutex);
    *CONST_CAST(ovs_be64 *, &rule->flow_cookie) = new_cookie;
    rule->created = rule->modified = time_msec();
    rule->idle_timeout = idle_timeout;
    rule->hard_timeout = hard_timeout;
    *CONST_CAST(uint16_t *, &rule->importance) = importance;
    rule->removed_reason = OVS_OFPRR_NONE;

    *CONST_CAST(uint8_t *, &rule->table_id) = table_id;
    rule->flags = flags & OFPUTIL_FF_STATE;

    *CONST_CAST(const struct rule_actions **, &rule->actions)
        = rule_actions_create(ofpacts, ofpacts_len);

    ovs_list_init(&rule->meter_list_node);
    rule->eviction_group = NULL;
    rule->monitor_flags = 0;
    rule->add_seqno = 0;
    rule->modify_seqno = 0;
    ovs_list_init(&rule->expirable);
    ovs_mutex_unlock(&rule->mutex);

    /* Construct rule, initializing derived state. */
    error = ofproto->ofproto_class->rule_construct(rule);
    if (error) {
        ofproto_rule_destroy__(rule);
        return error;
    }

    rule->state = RULE_INITIALIZED;
    /* Record which variable-length TLV fields the match and actions use and
     * take references on them in the ofproto's mapping. */
    rule->match_tlv_bitmap = match_tlv_bitmap;
    rule->ofpacts_tlv_bitmap = ofpacts_tlv_bitmap;
    mf_vl_mff_ref(&rule->ofproto->vl_mff_map, match_tlv_bitmap);
    mf_vl_mff_ref(&rule->ofproto->vl_mff_map, ofpacts_tlv_bitmap);

    *new_rule = rule;
    return 0;
}
4980
/* Initialize 'ofm' for a learn action.  If the rule already existed, reference
 * to that rule is taken, otherwise a new rule is created.  'ofm' keeps the
 * rule reference in both.  This does not take the global 'ofproto_mutex'. */
enum ofperr
ofproto_flow_mod_init_for_learn(struct ofproto *ofproto,
                                const struct ofputil_flow_mod *fm,
                                struct ofproto_flow_mod *ofm)
    OVS_EXCLUDED(ofproto_mutex)
{
    /* Reject flow mods that do not look like they were generated by a learn
     * action. */
    if (fm->command != OFPFC_MODIFY_STRICT || fm->table_id == OFPTT_ALL
        || fm->flags & OFPUTIL_FF_RESET_COUNTS
        || fm->buffer_id != UINT32_MAX) {
        return OFPERR_OFPFMFC_UNKNOWN;
    }

    /* Check if the rule already exists, and we can get a reference to it. */
    struct oftable *table = &ofproto->tables[fm->table_id];
    struct rule *rule;

    rule = rule_from_cls_rule(classifier_find_minimatch_exactly(
                                  &table->cls, &fm->match, fm->priority,
                                  OVS_VERSION_MAX));
    if (rule) {
        /* Check if the rule's attributes match as well.  All of the fields
         * below must agree before the existing rule can be reused. */
        const struct rule_actions *actions;

        ovs_mutex_lock(&rule->mutex);
        actions = rule_get_actions(rule);
        if (rule->idle_timeout == fm->idle_timeout
            && rule->hard_timeout == fm->hard_timeout
            && rule->importance == fm->importance
            && rule->flags == (fm->flags & OFPUTIL_FF_STATE)
            && (!fm->modify_cookie || (fm->new_cookie == rule->flow_cookie))
            && ofpacts_equal(fm->ofpacts, fm->ofpacts_len,
                             actions->ofpacts, actions->ofpacts_len)) {
            /* Rule already exists and need not change, except for the modified
             * timestamp.  Get a reference to the existing rule. */
            ovs_mutex_unlock(&rule->mutex);
            if (!ofproto_rule_try_ref(rule)) {
                rule = NULL; /* Pretend it did not exist. */
            }
        } else {
            ovs_mutex_unlock(&rule->mutex);
            rule = NULL;
        }
    }

    return ofproto_flow_mod_init(ofproto, ofm, fm, rule);
}
5032
/* Makes sure 'ofm->temp_rule' is a rule that may be (re)inserted into the
 * classifier: if the learned rule was removed, a clone replaces it (RCU
 * forbids reinserting a removed rule); otherwise the existing rule's
 * 'modified' timestamp is refreshed.  Returns 0 on success or an OpenFlow
 * error code. */
enum ofperr
ofproto_flow_mod_learn_refresh(struct ofproto_flow_mod *ofm)
{
    enum ofperr error = 0;

    /* ofm->temp_rule is our reference to the learned rule. We have a
     * reference to an existing rule, if it already was in the classifier,
     * otherwise we may have a fresh rule that we need to insert. */
    struct rule *rule = ofm->temp_rule;
    if (!rule) {
        return OFPERR_OFPFMFC_UNKNOWN;
    }

    /* Create a new rule if the current one has been removed from the
     * classifier. We need to do this since RCU does not allow a current rule
     * to be reinserted before all threads have quiesced.
     *
     * It is possible that the rule is removed asynchronously, e.g., right
     * after we have read the 'rule->state' below. In this case the next time
     * this function is executed the rule will be reinstated. */
    if (rule->state == RULE_REMOVED) {
        struct cls_rule cr;

        /* Clone the removed rule's match and attributes into a brand new
         * rule object that takes its place as 'ofm->temp_rule'. */
        cls_rule_clone(&cr, &rule->cr);
        ovs_mutex_lock(&rule->mutex);
        error = ofproto_rule_create(rule->ofproto, &cr, rule->table_id,
                                    rule->flow_cookie,
                                    rule->idle_timeout,
                                    rule->hard_timeout, rule->flags,
                                    rule->importance,
                                    rule->actions->ofpacts,
                                    rule->actions->ofpacts_len,
                                    rule->match_tlv_bitmap,
                                    rule->ofpacts_tlv_bitmap,
                                    &ofm->temp_rule);
        ovs_mutex_unlock(&rule->mutex);
        if (!error) {
            ofproto_rule_unref(rule); /* Release old reference. */
        }
    } else {
        /* Refresh the existing rule. */
        ovs_mutex_lock(&rule->mutex);
        rule->modified = time_msec();
        ovs_mutex_unlock(&rule->mutex);
    }
    return error;
}
5080
/* Starts the flow mod for a learned rule while keeping 'ofm->temp_rule'
 * valid for the caller: ofproto_flow_mod_start() consumes one rule
 * reference, so an extra one is taken up front. */
enum ofperr
ofproto_flow_mod_learn_start(struct ofproto_flow_mod *ofm)
    OVS_REQUIRES(ofproto_mutex)
{
    struct rule *rule = ofm->temp_rule;

    /* ofproto_flow_mod_start() consumes the reference, so we
     * take a new one. */
    ofproto_rule_ref(rule);
    enum ofperr error = ofproto_flow_mod_start(rule->ofproto, ofm);
    ofm->temp_rule = rule;

    return error;
}
5095
/* Reverts a started learn flow mod.  The new rule created by
 * ofproto_flow_mod_learn_start() is the first entry in 'ofm->new_rules'. */
void
ofproto_flow_mod_learn_revert(struct ofproto_flow_mod *ofm)
    OVS_REQUIRES(ofproto_mutex)
{
    struct rule *rule = rule_collection_rules(&ofm->new_rules)[0];
    ofproto_flow_mod_revert(rule->ofproto, ofm);
}
5103
/* Finishes a started learn flow mod.  'orig_ofproto' is the bridge the
 * triggering packet was processed on, or NULL; when learning happens on a
 * different bridge, that bridge's tables version must be bumped and its
 * monitor updates flushed here, since the original caller only does so for
 * 'orig_ofproto'. */
void
ofproto_flow_mod_learn_finish(struct ofproto_flow_mod *ofm,
                              struct ofproto *orig_ofproto)
    OVS_REQUIRES(ofproto_mutex)
{
    struct rule *rule = rule_collection_rules(&ofm->new_rules)[0];

    /* If learning on a different bridge, must bump its version
     * number and flush connmgr afterwards. */
    if (rule->ofproto != orig_ofproto) {
        ofproto_bump_tables_version(rule->ofproto);
    }
    ofproto_flow_mod_finish(rule->ofproto, ofm, NULL);
    if (rule->ofproto != orig_ofproto) {
        ofmonitor_flush(rule->ofproto->connmgr);
    }
}
5121
/* Refresh 'ofm->temp_rule', for which the caller holds a reference, if already
 * in the classifier, insert it otherwise. If the rule has already been
 * removed from the classifier, a new rule is created using 'ofm->temp_rule' as
 * a template and the reference to the old 'ofm->temp_rule' is freed. If
 * 'keep_ref' is true, then a reference to the current rule is held, otherwise
 * it is released and 'ofm->temp_rule' is set to NULL.
 *
 * If 'limit' != 0, insertion will fail if there are more than 'limit' rules
 * in the same table with the same cookie. If insertion succeeds,
 * '*below_limitp' will be set to true. If insertion fails '*below_limitp'
 * will be set to false.
 *
 * Caller needs to be the exclusive owner of 'ofm' as it is being manipulated
 * during the call. */
enum ofperr
ofproto_flow_mod_learn(struct ofproto_flow_mod *ofm, bool keep_ref,
                       unsigned limit, bool *below_limitp)
    OVS_EXCLUDED(ofproto_mutex)
{
    enum ofperr error = ofproto_flow_mod_learn_refresh(ofm);
    struct rule *rule = ofm->temp_rule;
    bool below_limit = true;

    /* Do we need to insert the rule? */
    if (!error && rule->state == RULE_INITIALIZED) {
        ovs_mutex_lock(&ofproto_mutex);

        if (limit) {
            /* Count the existing rules in the target table that carry the
             * same cookie, using a catchall match with a cookie filter. */
            struct rule_criteria criteria;
            struct rule_collection rules;
            struct minimatch match;

            minimatch_init_catchall(&match);
            rule_criteria_init(&criteria, rule->table_id, &match, 0,
                               OVS_VERSION_MAX, rule->flow_cookie,
                               OVS_BE64_MAX, OFPP_ANY, OFPG_ANY);
            minimatch_destroy(&match);

            rule_criteria_require_rw(&criteria, false);
            collect_rules_loose(rule->ofproto, &criteria, &rules);
            if (rule_collection_n(&rules) >= limit) {
                below_limit = false;
            }
            rule_collection_destroy(&rules);
            rule_criteria_destroy(&criteria);
        }

        if (below_limit) {
            ofm->version = rule->ofproto->tables_version + 1;

            error = ofproto_flow_mod_learn_start(ofm);
            if (!error) {
                ofproto_flow_mod_learn_finish(ofm, NULL);
            }
        } else {
            /* Over the limit: log (rate-limited) and drop the flow mod. */
            static struct vlog_rate_limit rll = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_INFO_RL(&rll, "Learn limit for flow %"PRIu64" reached.",
                         rule->flow_cookie);

            ofproto_flow_mod_uninit(ofm);
        }
        ovs_mutex_unlock(&ofproto_mutex);
    }

    if (!keep_ref && below_limit) {
        ofproto_rule_unref(rule);
        ofm->temp_rule = NULL;
    }
    if (below_limitp) {
        *below_limitp = below_limit;
    }
    return error;
}
5195
/* First (reversible) phase of replacing 'old_rule' with 'new_rule': copies
 * attributes for modify semantics, hides the old rule as of 'ofm->version',
 * and inserts the new rule so that it becomes visible to lookups only from
 * that version onward. */
static void
replace_rule_start(struct ofproto *ofproto, struct ofproto_flow_mod *ofm,
                   struct rule *old_rule, struct rule *new_rule)
{
    struct oftable *table = &ofproto->tables[new_rule->table_id];

    /* 'old_rule' may be either an evicted rule or replaced rule. */
    if (old_rule) {
        /* Copy values from old rule for modify semantics. */
        if (old_rule->removed_reason != OFPRR_EVICTION) {
            bool change_cookie = (ofm->modify_cookie
                                  && new_rule->flow_cookie != OVS_BE64_MAX
                                  && new_rule->flow_cookie != old_rule->flow_cookie);

            ovs_mutex_lock(&new_rule->mutex);
            ovs_mutex_lock(&old_rule->mutex);
            if (ofm->command != OFPFC_ADD) {
                /* A modify keeps the old rule's timeouts, flags and creation
                 * time. */
                new_rule->idle_timeout = old_rule->idle_timeout;
                new_rule->hard_timeout = old_rule->hard_timeout;
                *CONST_CAST(uint16_t *, &new_rule->importance) = old_rule->importance;
                new_rule->flags = old_rule->flags;
                new_rule->created = old_rule->created;
            }
            if (!change_cookie) {
                *CONST_CAST(ovs_be64 *, &new_rule->flow_cookie)
                    = old_rule->flow_cookie;
            }
            ovs_mutex_unlock(&old_rule->mutex);
            ovs_mutex_unlock(&new_rule->mutex);
        }

        /* Mark the old rule for removal in the next version. */
        cls_rule_make_invisible_in_version(&old_rule->cr, ofm->version);

        /* Remove the old rule from data structures. */
        ofproto_rule_remove__(ofproto, old_rule);
    } else {
        table->n_flows++;
    }
    /* Insert flow to ofproto data structures, so that later flow_mods may
     * relate to it. This is reversible, in case later errors require this to
     * be reverted. */
    ofproto_rule_insert__(ofproto, new_rule);
    /* Make the new rule visible for classifier lookups only from the next
     * version. */
    classifier_insert(&table->cls, &new_rule->cr, ofm->version, ofm->conjs,
                      ofm->n_conjs);
}
5244
/* Undoes replace_rule_start(): restores 'old_rule' (including its eviction
 * group membership and visibility) and removes 'new_rule', which was never
 * visible to lookups. */
static void
replace_rule_revert(struct ofproto *ofproto,
                    struct rule *old_rule, struct rule *new_rule)
{
    struct oftable *table = &ofproto->tables[new_rule->table_id];

    if (old_rule) {
        if (old_rule->removed_reason == OFPRR_EVICTION) {
            /* Revert the eviction. */
            eviction_group_add_rule(old_rule);
        }

        /* Restore the old rule to data structures. */
        ofproto_rule_insert__(ofproto, old_rule);

        /* Restore the original visibility of the old rule. */
        cls_rule_restore_visibility(&old_rule->cr);
    } else {
        /* Restore table's rule count. */
        table->n_flows--;
    }

    /* Remove the new rule immediately. It was never visible to lookups. */
    classifier_remove_assert(&table->cls, &new_rule->cr);
    ofproto_rule_remove__(ofproto, new_rule);
    ofproto_rule_unref(new_rule);
}
5272
/* Adds the 'new_rule', replacing the 'old_rule'.  Final phase: hands the new
 * rule to the ofproto provider, maintains learned-cookie counts, and emits
 * flow monitor updates for the change. */
static void
replace_rule_finish(struct ofproto *ofproto, struct ofproto_flow_mod *ofm,
                    const struct openflow_mod_requester *req,
                    struct rule *old_rule, struct rule *new_rule,
                    struct ovs_list *dead_cookies)
    OVS_REQUIRES(ofproto_mutex)
{
    struct rule *replaced_rule;

    /* An evicted old rule is not a "replacement" from the provider's point of
     * view; only a genuine duplicate is. */
    replaced_rule = (old_rule && old_rule->removed_reason != OFPRR_EVICTION)
        ? old_rule : NULL;

    /* Insert the new flow to the ofproto provider. A non-NULL 'replaced_rule'
     * is a duplicate rule the 'new_rule' is replacing. The provider should
     * link the packet and byte counts from the old rule to the new one if
     * 'modify_keep_counts' is 'true'. The 'replaced_rule' will be deleted
     * right after this call. */
    ofproto->ofproto_class->rule_insert(new_rule, replaced_rule,
                                        ofm->modify_keep_counts);
    learned_cookies_inc(ofproto, rule_get_actions(new_rule));

    if (old_rule) {
        const struct rule_actions *old_actions = rule_get_actions(old_rule);
        const struct rule_actions *new_actions = rule_get_actions(new_rule);

        learned_cookies_dec(ofproto, old_actions, dead_cookies);

        if (replaced_rule) {
            enum nx_flow_update_event event = ofm->command == OFPFC_ADD
                ? NXFME_ADDED : NXFME_MODIFIED;

            bool changed_cookie = (new_rule->flow_cookie
                                   != old_rule->flow_cookie);

            bool changed_actions = !ofpacts_equal(new_actions->ofpacts,
                                                  new_actions->ofpacts_len,
                                                  old_actions->ofpacts,
                                                  old_actions->ofpacts_len);

            /* Suppress no-op MODIFIED updates: only report a modify that
             * actually changed actions or cookie. */
            if (event != NXFME_MODIFIED || changed_actions
                || changed_cookie) {
                ofmonitor_report(ofproto->connmgr, new_rule, event, 0,
                                 req ? req->ofconn : NULL,
                                 req ? req->request->xid : 0,
                                 changed_actions ? old_actions : NULL);
            }
        } else {
            /* XXX: This is slight duplication with delete_flows_finish__() */
            ofmonitor_report(ofproto->connmgr, old_rule, NXFME_DELETED,
                             OFPRR_EVICTION,
                             req ? req->ofconn : NULL,
                             req ? req->request->xid : 0, NULL);
        }
    }
}
5329
/* Common start phase for OFPFC_MODIFY and OFPFC_MODIFY_STRICT: creates one
 * new rule per collected old rule (the first new rule reuses 'ofm->temp_rule'
 * as template), or falls back to add / no-op when nothing matched.
 *
 * ofm->temp_rule is consumed only in the successful case. */
static enum ofperr
modify_flows_start__(struct ofproto *ofproto, struct ofproto_flow_mod *ofm)
    OVS_REQUIRES(ofproto_mutex)
{
    struct rule_collection *old_rules = &ofm->old_rules;
    struct rule_collection *new_rules = &ofm->new_rules;
    enum ofperr error;

    if (rule_collection_n(old_rules) > 0) {
        /* Create a new 'modified' rule for each old rule. */
        struct rule *old_rule, *new_rule;
        const struct rule_actions *actions = rule_get_actions(ofm->temp_rule);

        /* Must check actions while holding ofproto_mutex to avoid a race. */
        error = ofproto_check_ofpacts(ofproto, actions->ofpacts,
                                      actions->ofpacts_len);
        if (error) {
            return error;
        }

        /* Use the temp rule as the first new rule, and as the template for
         * the rest. */
        struct rule *temp = ofm->temp_rule;
        ofm->temp_rule = NULL; /* We consume the template. */

        bool first = true;
        RULE_COLLECTION_FOR_EACH (old_rule, old_rules) {
            if (first) {
                /* The template rule's match is possibly a loose one, so it
                 * must be replaced with the old rule's match so that the new
                 * rule actually replaces the old one. */
                cls_rule_destroy(CONST_CAST(struct cls_rule *, &temp->cr));
                cls_rule_clone(CONST_CAST(struct cls_rule *, &temp->cr),
                               &old_rule->cr);
                if (temp->match_tlv_bitmap != old_rule->match_tlv_bitmap) {
                    /* Swap the variable-length mf references to match the
                     * adopted match. */
                    mf_vl_mff_unref(&temp->ofproto->vl_mff_map,
                                    temp->match_tlv_bitmap);
                    temp->match_tlv_bitmap = old_rule->match_tlv_bitmap;
                    mf_vl_mff_ref(&temp->ofproto->vl_mff_map,
                                  temp->match_tlv_bitmap);
                }
                *CONST_CAST(uint8_t *, &temp->table_id) = old_rule->table_id;
                rule_collection_add(new_rules, temp);
                first = false;
            } else {
                struct cls_rule cr;
                cls_rule_clone(&cr, &old_rule->cr);
                error = ofproto_rule_create(ofproto, &cr, old_rule->table_id,
                                            temp->flow_cookie,
                                            temp->idle_timeout,
                                            temp->hard_timeout, temp->flags,
                                            temp->importance,
                                            temp->actions->ofpacts,
                                            temp->actions->ofpacts_len,
                                            old_rule->match_tlv_bitmap,
                                            temp->ofpacts_tlv_bitmap,
                                            &new_rule);
                if (!error) {
                    rule_collection_add(new_rules, new_rule);
                } else {
                    /* Return the template rule in place in the error case. */
                    ofm->temp_rule = temp;
                    rule_collection_rules(new_rules)[0] = NULL;

                    rule_collection_unref(new_rules);
                    rule_collection_destroy(new_rules);
                    return error;
                }
            }
        }
        ovs_assert(rule_collection_n(new_rules)
                   == rule_collection_n(old_rules));

        RULE_COLLECTIONS_FOR_EACH (old_rule, new_rule, old_rules, new_rules) {
            replace_rule_start(ofproto, ofm, old_rule, new_rule);
        }
    } else if (ofm->modify_may_add_flow) {
        /* No match, add a new flow, consumes 'temp'. */
        error = add_flow_start(ofproto, ofm);
    } else {
        /* No flow to modify and may not add a flow. */
        ofproto_rule_unref(ofm->temp_rule);
        ofm->temp_rule = NULL; /* We consume the template. */
        error = 0;
    }

    return error;
}
5419
/* Init phase of OFPFC_MODIFY: sets up loose match criteria and pre-creates a
 * template flow (also used as the new flow when nothing matches). */
static enum ofperr
modify_flows_init_loose(struct ofproto *ofproto,
                        struct ofproto_flow_mod *ofm,
                        const struct ofputil_flow_mod *fm)
    OVS_EXCLUDED(ofproto_mutex)
{
    rule_criteria_init(&ofm->criteria, fm->table_id, &fm->match, 0,
                       OVS_VERSION_MAX, fm->cookie, fm->cookie_mask, OFPP_ANY,
                       OFPG_ANY);
    rule_criteria_require_rw(&ofm->criteria,
                             (fm->flags & OFPUTIL_FF_NO_READONLY) != 0);
    /* Must create a new flow in advance for the case that no matches are
     * found. Also used for template for multiple modified flows. */
    add_flow_init(ofproto, ofm, fm);

    return 0;
}
5437
/* Implements OFPFC_MODIFY. Returns 0 on success or an OpenFlow error code on
 * failure. */
static enum ofperr
modify_flows_start_loose(struct ofproto *ofproto, struct ofproto_flow_mod *ofm)
    OVS_REQUIRES(ofproto_mutex)
{
    struct rule_collection *old_rules = &ofm->old_rules;
    enum ofperr error;

    /* Collect every rule matching the loose criteria, then hand off to the
     * common modify path. */
    error = collect_rules_loose(ofproto, &ofm->criteria, old_rules);

    if (!error) {
        error = modify_flows_start__(ofproto, ofm);
    }

    if (error) {
        rule_collection_destroy(old_rules);
    }

    return error;
}
5459
/* Reverts a started modify: the old rules were not yet changed, so only the
 * speculatively inserted new rules need to be undone. */
static void
modify_flows_revert(struct ofproto *ofproto, struct ofproto_flow_mod *ofm)
    OVS_REQUIRES(ofproto_mutex)
{
    struct rule_collection *old_rules = &ofm->old_rules;
    struct rule_collection *new_rules = &ofm->new_rules;

    /* Old rules were not changed yet, only need to revert new rules. */
    if (rule_collection_n(old_rules) > 0) {
        struct rule *old_rule, *new_rule;
        RULE_COLLECTIONS_FOR_EACH (old_rule, new_rule, old_rules, new_rules) {
            replace_rule_revert(ofproto, old_rule, new_rule);
        }
        rule_collection_destroy(new_rules);
        rule_collection_destroy(old_rules);
    }
}
5477
/* Finishes a started modify: either completes the implicit add (no old rules,
 * exactly one new rule) or finalizes each old/new rule replacement pair. */
static void
modify_flows_finish(struct ofproto *ofproto, struct ofproto_flow_mod *ofm,
                    const struct openflow_mod_requester *req)
    OVS_REQUIRES(ofproto_mutex)
{
    struct rule_collection *old_rules = &ofm->old_rules;
    struct rule_collection *new_rules = &ofm->new_rules;

    if (rule_collection_n(old_rules) == 0
        && rule_collection_n(new_rules) == 1) {
        add_flow_finish(ofproto, ofm, req);
    } else if (rule_collection_n(old_rules) > 0) {
        struct ovs_list dead_cookies = OVS_LIST_INITIALIZER(&dead_cookies);

        ovs_assert(rule_collection_n(new_rules)
                   == rule_collection_n(old_rules));

        struct rule *old_rule, *new_rule;
        RULE_COLLECTIONS_FOR_EACH (old_rule, new_rule, old_rules, new_rules) {
            replace_rule_finish(ofproto, ofm, req, old_rule, new_rule,
                                &dead_cookies);
        }
        learned_cookies_flush(ofproto, &dead_cookies);
        remove_rules_postponed(old_rules);
    }
}
5504
/* Init phase of OFPFC_MODIFY_STRICT: like modify_flows_init_loose() but the
 * criteria also require an exact priority match. */
static enum ofperr
modify_flow_init_strict(struct ofproto *ofproto OVS_UNUSED,
                        struct ofproto_flow_mod *ofm,
                        const struct ofputil_flow_mod *fm)
    OVS_EXCLUDED(ofproto_mutex)
{
    rule_criteria_init(&ofm->criteria, fm->table_id, &fm->match, fm->priority,
                       OVS_VERSION_MAX, fm->cookie, fm->cookie_mask, OFPP_ANY,
                       OFPG_ANY);
    rule_criteria_require_rw(&ofm->criteria,
                             (fm->flags & OFPUTIL_FF_NO_READONLY) != 0);
    /* Must create a new flow in advance for the case that no matches are
     * found. Also used for template for multiple modified flows. */
    add_flow_init(ofproto, ofm, fm);

    return 0;
}
5522
/* Implements OFPFC_MODIFY_STRICT. Returns 0 on success or an OpenFlow error
 * code on failure. */
static enum ofperr
modify_flow_start_strict(struct ofproto *ofproto, struct ofproto_flow_mod *ofm)
    OVS_REQUIRES(ofproto_mutex)
{
    struct rule_collection *old_rules = &ofm->old_rules;
    enum ofperr error;

    error = collect_rules_strict(ofproto, &ofm->criteria, old_rules);

    if (!error) {
        /* collect_rules_strict() can return max 1 rule. */
        error = modify_flows_start__(ofproto, ofm);
    }

    return error;
}
5541\f
5542/* OFPFC_DELETE implementation. */
5543
/* First (reversible) phase of deleting 'rules': hides each rule from lookups
 * as of 'version' and detaches it from the ofproto data structures. */
static void
delete_flows_start__(struct ofproto *ofproto, ovs_version_t version,
                     const struct rule_collection *rules)
    OVS_REQUIRES(ofproto_mutex)
{
    struct rule *rule;

    RULE_COLLECTION_FOR_EACH (rule, rules) {
        struct oftable *table = &ofproto->tables[rule->table_id];

        table->n_flows--;
        cls_rule_make_invisible_in_version(&rule->cr, version);

        /* Remove rule from ofproto data structures. */
        ofproto_rule_remove__(ofproto, rule);
    }
}
5561
/* Undoes delete_flows_start__(): re-attaches each rule, restores table flow
 * counts and the rules' original classifier visibility. */
static void
delete_flows_revert__(struct ofproto *ofproto,
                      const struct rule_collection *rules)
    OVS_REQUIRES(ofproto_mutex)
{
    struct rule *rule;

    RULE_COLLECTION_FOR_EACH (rule, rules) {
        struct oftable *table = &ofproto->tables[rule->table_id];

        /* Add rule back to ofproto data structures. */
        ofproto_rule_insert__(ofproto, rule);

        /* Restore table's rule count. */
        table->n_flows++;

        /* Restore the original visibility of the rule. */
        cls_rule_restore_visibility(&rule->cr);
    }
}
5582
/* Final phase of deleting 'rules' for 'reason': records the removal reason,
 * emits monitor and vacancy notifications, maintains learned-cookie state and
 * schedules the rules for postponed destruction. */
static void
delete_flows_finish__(struct ofproto *ofproto,
                      struct rule_collection *rules,
                      enum ofp_flow_removed_reason reason,
                      const struct openflow_mod_requester *req)
    OVS_REQUIRES(ofproto_mutex)
{
    if (rule_collection_n(rules)) {
        struct ovs_list dead_cookies = OVS_LIST_INITIALIZER(&dead_cookies);
        struct rule *rule;

        RULE_COLLECTION_FOR_EACH (rule, rules) {
            /* This value will be used to send the flow removed message right
             * before the rule is actually destroyed. */
            rule->removed_reason = reason;

            ofmonitor_report(ofproto->connmgr, rule, NXFME_DELETED, reason,
                             req ? req->ofconn : NULL,
                             req ? req->request->xid : 0, NULL);

            /* Send Vacancy Event for OF1.4+. */
            send_table_status(ofproto, rule->table_id);

            learned_cookies_dec(ofproto, rule_get_actions(rule),
                                &dead_cookies);
        }
        remove_rules_postponed(rules);

        learned_cookies_flush(ofproto, &dead_cookies);
    }
}
5614
/* Deletes the rules listed in 'rules'.
 * The deleted rules will become invisible to the lookups in the next version.
 * Destroys 'rules'. */
static void
delete_flows__(struct rule_collection *rules,
               enum ofp_flow_removed_reason reason,
               const struct openflow_mod_requester *req)
    OVS_REQUIRES(ofproto_mutex)
{
    if (rule_collection_n(rules)) {
        /* All rules in one collection belong to the same ofproto. */
        struct ofproto *ofproto = rule_collection_rules(rules)[0]->ofproto;

        delete_flows_start__(ofproto, ofproto->tables_version + 1, rules);
        ofproto_bump_tables_version(ofproto);
        delete_flows_finish__(ofproto, rules, reason, req);
        ofmonitor_flush(ofproto->connmgr);
    }
}
5633
/* Init phase of OFPFC_DELETE: loose criteria, also filtered by output port
 * and group. */
static enum ofperr
delete_flows_init_loose(struct ofproto *ofproto OVS_UNUSED,
                        struct ofproto_flow_mod *ofm,
                        const struct ofputil_flow_mod *fm)
    OVS_EXCLUDED(ofproto_mutex)
{
    rule_criteria_init(&ofm->criteria, fm->table_id, &fm->match, 0,
                       OVS_VERSION_MAX, fm->cookie, fm->cookie_mask,
                       fm->out_port, fm->out_group);
    rule_criteria_require_rw(&ofm->criteria,
                             (fm->flags & OFPUTIL_FF_NO_READONLY) != 0);
    return 0;
}
5647
/* Implements OFPFC_DELETE. */
static enum ofperr
delete_flows_start_loose(struct ofproto *ofproto, struct ofproto_flow_mod *ofm)
    OVS_REQUIRES(ofproto_mutex)
{
    struct rule_collection *rules = &ofm->old_rules;
    enum ofperr error;

    error = collect_rules_loose(ofproto, &ofm->criteria, rules);

    if (!error) {
        delete_flows_start__(ofproto, ofm->version, rules);
    }

    return error;
}
5664
/* Reverts a started delete, restoring the collected rules. */
static void
delete_flows_revert(struct ofproto *ofproto, struct ofproto_flow_mod *ofm)
    OVS_REQUIRES(ofproto_mutex)
{
    delete_flows_revert__(ofproto, &ofm->old_rules);
}
5671
/* Finishes a started delete with reason OFPRR_DELETE (controller request). */
static void
delete_flows_finish(struct ofproto *ofproto, struct ofproto_flow_mod *ofm,
                    const struct openflow_mod_requester *req)
    OVS_REQUIRES(ofproto_mutex)
{
    delete_flows_finish__(ofproto, &ofm->old_rules, OFPRR_DELETE, req);
}
5679
/* Init phase of OFPFC_DELETE_STRICT: like delete_flows_init_loose() but the
 * criteria also require an exact priority match. */
static enum ofperr
delete_flows_init_strict(struct ofproto *ofproto OVS_UNUSED,
                         struct ofproto_flow_mod *ofm,
                         const struct ofputil_flow_mod *fm)
    OVS_EXCLUDED(ofproto_mutex)
{
    rule_criteria_init(&ofm->criteria, fm->table_id, &fm->match, fm->priority,
                       OVS_VERSION_MAX, fm->cookie, fm->cookie_mask,
                       fm->out_port, fm->out_group);
    rule_criteria_require_rw(&ofm->criteria,
                             (fm->flags & OFPUTIL_FF_NO_READONLY) != 0);
    return 0;
}
5693
/* Implements OFPFC_DELETE_STRICT. */
static enum ofperr
delete_flow_start_strict(struct ofproto *ofproto, struct ofproto_flow_mod *ofm)
    OVS_REQUIRES(ofproto_mutex)
{
    struct rule_collection *rules = &ofm->old_rules;
    enum ofperr error;

    error = collect_rules_strict(ofproto, &ofm->criteria, rules);

    if (!error) {
        delete_flows_start__(ofproto, ofm->version, rules);
    }

    return error;
}
5710
/* Sends an OpenFlow "flow removed" message for 'rule', filling in the match,
 * stats and timeout fields from the rule itself.
 *
 * This may only be called by rule_destroy_cb()! */
static void
ofproto_rule_send_removed(struct rule *rule)
    OVS_EXCLUDED(ofproto_mutex)
{
    struct ofputil_flow_removed fr;
    long long int used;

    minimatch_expand(&rule->cr.match, &fr.match);
    fr.priority = rule->cr.priority;

    /* Synchronize with connmgr_destroy() calls to prevent connmgr disappearing
     * while we use it. */
    ovs_mutex_lock(&ofproto_mutex);
    struct connmgr *connmgr = rule->ofproto->connmgr;
    if (!connmgr) {
        /* Bridge is being torn down; nobody left to notify. */
        ovs_mutex_unlock(&ofproto_mutex);
        return;
    }

    fr.cookie = rule->flow_cookie;
    fr.reason = rule->removed_reason;
    fr.table_id = rule->table_id;
    calc_duration(rule->created, time_msec(),
                  &fr.duration_sec, &fr.duration_nsec);
    /* Timeouts are protected by the per-rule mutex. */
    ovs_mutex_lock(&rule->mutex);
    fr.idle_timeout = rule->idle_timeout;
    fr.hard_timeout = rule->hard_timeout;
    ovs_mutex_unlock(&rule->mutex);
    rule->ofproto->ofproto_class->rule_get_stats(rule, &fr.packet_count,
                                                 &fr.byte_count, &used);
    connmgr_send_flow_removed(connmgr, &fr);
    ovs_mutex_unlock(&ofproto_mutex);
}
5745
/* Sends an OpenFlow "flow removed" message with the given 'reason' (either
 * OFPRR_HARD_TIMEOUT or OFPRR_IDLE_TIMEOUT), and then removes 'rule' from its
 * ofproto.
 *
 * ofproto implementation ->run() functions should use this function to expire
 * OpenFlow flows. */
void
ofproto_rule_expire(struct rule *rule, uint8_t reason)
    OVS_REQUIRES(ofproto_mutex)
{
    struct rule_collection rules;

    /* Expiration is just a single-rule delete with a timeout reason. */
    rule_collection_init(&rules);
    rule_collection_add(&rules, rule);
    delete_flows__(&rules, reason, NULL);
}
5762
/* Caps '*timeout' at 'max'.  A value of zero in either variable means
 * "infinite": a zero 'max' imposes no bound, while a zero (infinite)
 * '*timeout' is always tightened to a nonzero 'max'. */
static void
reduce_timeout(uint16_t max, uint16_t *timeout)
{
    if (!max) {
        return;                 /* No upper bound requested. */
    }
    if (*timeout == 0 || *timeout > max) {
        *timeout = max;
    }
}
5772
/* If 'idle_timeout' is nonzero, and 'rule' has no idle timeout or an idle
 * timeout greater than 'idle_timeout', lowers 'rule''s idle timeout to
 * 'idle_timeout' seconds. Similarly for 'hard_timeout'.
 *
 * Variant for callers that already hold 'ofproto_mutex'.
 *
 * Suitable for implementing OFPACT_FIN_TIMEOUT. */
void
ofproto_rule_reduce_timeouts__(struct rule *rule,
                               uint16_t idle_timeout, uint16_t hard_timeout)
    OVS_REQUIRES(ofproto_mutex)
    OVS_EXCLUDED(rule->mutex)
{
    if (!idle_timeout && !hard_timeout) {
        return;
    }

    /* A rule with any timeout must be on the ofproto's expirable list. */
    if (ovs_list_is_empty(&rule->expirable)) {
        ovs_list_insert(&rule->ofproto->expirable, &rule->expirable);
    }

    ovs_mutex_lock(&rule->mutex);
    reduce_timeout(idle_timeout, &rule->idle_timeout);
    reduce_timeout(hard_timeout, &rule->hard_timeout);
    ovs_mutex_unlock(&rule->mutex);
}
5797
/* Like ofproto_rule_reduce_timeouts__(), but takes 'ofproto_mutex' itself to
 * guard the expirable-list insertion. */
void
ofproto_rule_reduce_timeouts(struct rule *rule,
                             uint16_t idle_timeout, uint16_t hard_timeout)
    OVS_EXCLUDED(ofproto_mutex, rule->mutex)
{
    if (!idle_timeout && !hard_timeout) {
        return;
    }

    ovs_mutex_lock(&ofproto_mutex);
    if (ovs_list_is_empty(&rule->expirable)) {
        ovs_list_insert(&rule->ofproto->expirable, &rule->expirable);
    }
    ovs_mutex_unlock(&ofproto_mutex);

    ovs_mutex_lock(&rule->mutex);
    reduce_timeout(idle_timeout, &rule->idle_timeout);
    reduce_timeout(hard_timeout, &rule->hard_timeout);
    ovs_mutex_unlock(&rule->mutex);
}
5818\f
/* Handles an OFPT_FLOW_MOD message 'oh' received on 'ofconn': decodes it and
 * executes it via handle_flow_mod__().  Returns 0 on success or an OpenFlow
 * error code. */
static enum ofperr
handle_flow_mod(struct ofconn *ofconn, const struct ofp_header *oh)
    OVS_EXCLUDED(ofproto_mutex)
{
    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
    struct ofputil_flow_mod fm;
    uint64_t ofpacts_stub[1024 / 8];
    struct ofpbuf ofpacts;
    enum ofperr error;

    /* Flow mods from a slave controller are not allowed. */
    error = reject_slave_controller(ofconn);
    if (error) {
        return error;
    }

    ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
    error = ofputil_decode_flow_mod(&fm, oh, ofconn_get_protocol(ofconn),
                                    ofproto_get_tun_tab(ofproto),
                                    &ofproto->vl_mff_map, &ofpacts,
                                    u16_to_ofp(ofproto->max_ports),
                                    ofproto->n_tables);
    if (!error) {
        struct openflow_mod_requester req = { ofconn, oh };
        error = handle_flow_mod__(ofproto, &fm, &req);
        minimatch_destroy(&fm.match);
    }

    ofpbuf_uninit(&ofpacts);
    return error;
}
5849
/* Executes decoded flow mod 'fm' on 'ofproto' as a single start/finish
 * transaction under 'ofproto_mutex', bumping the tables version and flushing
 * monitor updates on success. */
static enum ofperr
handle_flow_mod__(struct ofproto *ofproto, const struct ofputil_flow_mod *fm,
                  const struct openflow_mod_requester *req)
    OVS_EXCLUDED(ofproto_mutex)
{
    struct ofproto_flow_mod ofm;
    enum ofperr error;

    error = ofproto_flow_mod_init(ofproto, &ofm, fm, NULL);
    if (error) {
        return error;
    }

    ovs_mutex_lock(&ofproto_mutex);
    ofm.version = ofproto->tables_version + 1;
    error = ofproto_flow_mod_start(ofproto, &ofm);
    if (!error) {
        ofproto_bump_tables_version(ofproto);
        ofproto_flow_mod_finish(ofproto, &ofm, req);
        ofmonitor_flush(ofproto->connmgr);
    }
    ovs_mutex_unlock(&ofproto_mutex);

    return error;
}
5875
/* Handles an OpenFlow role request: validates the generation id for
 * master/slave transitions, applies the role change, and replies with the
 * connection's resulting role. */
static enum ofperr
handle_role_request(struct ofconn *ofconn, const struct ofp_header *oh)
{
    struct ofputil_role_request request;
    struct ofputil_role_request reply;
    struct ofpbuf *buf;
    enum ofperr error;

    error = ofputil_decode_role_message(oh, &request);
    if (error) {
        return error;
    }

    if (request.role != OFPCR12_ROLE_NOCHANGE) {
        /* A stale generation id means another controller has since claimed
         * the master role; refuse the change. */
        if (request.role != OFPCR12_ROLE_EQUAL
            && request.have_generation_id
            && !ofconn_set_master_election_id(ofconn, request.generation_id)) {
                return OFPERR_OFPRRFC_STALE;
        }

        ofconn_set_role(ofconn, request.role);
    }

    reply.role = ofconn_get_role(ofconn);
    reply.have_generation_id = ofconn_get_master_election_id(
        ofconn, &reply.generation_id);
    buf = ofputil_encode_role_reply(oh, &reply);
    ofconn_send_reply(ofconn, buf);

    return 0;
}
5907
5908static enum ofperr
5909handle_nxt_flow_mod_table_id(struct ofconn *ofconn,
5910 const struct ofp_header *oh)
5911{
5912 bool enable = ofputil_decode_nx_flow_mod_table_id(oh);
5913 enum ofputil_protocol cur = ofconn_get_protocol(ofconn);
5914 ofconn_set_protocol(ofconn, ofputil_protocol_set_tid(cur, enable));
5915
5916 return 0;
5917}
5918
5919static enum ofperr
5920handle_nxt_set_flow_format(struct ofconn *ofconn, const struct ofp_header *oh)
5921{
5922 enum ofputil_protocol next_base = ofputil_decode_nx_set_flow_format(oh);
5923 if (!next_base) {
5924 return OFPERR_OFPBRC_EPERM;
5925 }
5926
5927 enum ofputil_protocol cur = ofconn_get_protocol(ofconn);
5928 ofconn_set_protocol(ofconn, ofputil_protocol_set_base(cur, next_base));
5929
5930 return 0;
5931}
5932
5933static enum ofperr
5934handle_nxt_set_packet_in_format(struct ofconn *ofconn,
5935 const struct ofp_header *oh)
5936{
5937 enum ofputil_packet_in_format format;
5938 enum ofperr error = ofputil_decode_set_packet_in_format(oh, &format);
5939 if (!error) {
5940 ofconn_set_packet_in_format(ofconn, format);
5941 }
5942 return error;
5943}
5944
/* Handles a "set async config" message, updating which asynchronous messages
 * this connection receives, relative to its current configuration. */
static enum ofperr
handle_nxt_set_async_config(struct ofconn *ofconn, const struct ofp_header *oh)
{
    struct ofputil_async_cfg basis = ofconn_get_async_config(ofconn);
    struct ofputil_async_cfg ac;
    enum ofperr error;

    error = ofputil_decode_set_async_config(oh, false, &basis, &ac);
    if (error) {
        return error;
    }

    ofconn_set_async_config(ofconn, &ac);
    /* Service connections default to a zero miss_send_len; give them a usable
     * default once they opt into async messages. */
    if (ofconn_get_type(ofconn) == OFCONN_SERVICE &&
        !ofconn_get_miss_send_len(ofconn)) {
        ofconn_set_miss_send_len(ofconn, OFP_DEFAULT_MISS_SEND_LEN);
    }

    return 0;
}
5965
/* Handles a "get async config" request by replying with the connection's
 * current asynchronous message configuration. */
static enum ofperr
handle_nxt_get_async_request(struct ofconn *ofconn, const struct ofp_header *oh)
{
    struct ofputil_async_cfg ac = ofconn_get_async_config(ofconn);
    ofconn_send_reply(ofconn, ofputil_encode_get_async_reply(oh, &ac));

    return 0;
}
5974
/* Handles an NXT_SET_CONTROLLER_ID message, assigning the 16-bit controller
 * connection id used to target packet-outs and controller actions. */
static enum ofperr
handle_nxt_set_controller_id(struct ofconn *ofconn,
                             const struct ofp_header *oh)
{
    const struct nx_controller_id *nci = ofpmsg_body(oh);

    /* The message's padding bytes must be zero. */
    if (!is_all_zeros(nci->zero, sizeof nci->zero)) {
        return OFPERR_NXBRC_MUST_BE_ZERO;
    }

    ofconn_set_controller_id(ofconn, ntohs(nci->controller_id));
    return 0;
}
5988
5989static enum ofperr
5990handle_barrier_request(struct ofconn *ofconn, const struct ofp_header *oh)
5991{
5992 struct ofpbuf *buf;
5993
5994 buf = ofpraw_alloc_reply((oh->version == OFP10_VERSION
5995 ? OFPRAW_OFPT10_BARRIER_REPLY
5996 : OFPRAW_OFPT11_BARRIER_REPLY), oh, 0);
5997 ofconn_send_reply(ofconn, buf);
5998 return 0;
5999}
6000
/* Appends to 'msgs' a flow monitor update describing 'rule': an "added"
 * event if 'flags' contains NXFMF_INITIAL or NXFMF_ADD, otherwise a
 * "modified" event.  Actions are included only when 'flags' has
 * NXFMF_ACTIONS set.  Starts a new update message list if 'msgs' is
 * empty. */
static void
ofproto_compose_flow_refresh_update(const struct rule *rule,
                                    enum nx_flow_monitor_flags flags,
                                    struct ovs_list *msgs,
                                    const struct tun_table *tun_table)
    OVS_REQUIRES(ofproto_mutex)
{
    const struct rule_actions *actions;
    struct ofputil_flow_update fu;

    fu.event = (flags & (NXFMF_INITIAL | NXFMF_ADD)
                ? NXFME_ADDED : NXFME_MODIFIED);
    fu.reason = 0;
    /* The rule's timeouts are protected by its own mutex. */
    ovs_mutex_lock(&rule->mutex);
    fu.idle_timeout = rule->idle_timeout;
    fu.hard_timeout = rule->hard_timeout;
    ovs_mutex_unlock(&rule->mutex);
    fu.table_id = rule->table_id;
    fu.cookie = rule->flow_cookie;
    minimatch_expand(&rule->cr.match, &fu.match);
    fu.priority = rule->cr.priority;

    /* Actions are only reported when the monitor asked for them. */
    actions = flags & NXFMF_ACTIONS ? rule_get_actions(rule) : NULL;
    fu.ofpacts = actions ? actions->ofpacts : NULL;
    fu.ofpacts_len = actions ? actions->ofpacts_len : 0;

    if (ovs_list_is_empty(msgs)) {
        ofputil_start_flow_update(msgs);
    }
    ofputil_append_flow_update(&fu, msgs, tun_table);
}
6032
6033void
6034ofmonitor_compose_refresh_updates(struct rule_collection *rules,
6035 struct ovs_list *msgs)
6036 OVS_REQUIRES(ofproto_mutex)
6037{
6038 struct rule *rule;
6039
6040 RULE_COLLECTION_FOR_EACH (rule, rules) {
6041 enum nx_flow_monitor_flags flags = rule->monitor_flags;
6042 rule->monitor_flags = 0;
6043
6044 ofproto_compose_flow_refresh_update(rule, flags, msgs,
6045 ofproto_get_tun_tab(rule->ofproto));
6046 }
6047}
6048
/* Adds 'rule' to 'rules' if monitor 'm' should report an update for it,
 * recording the kind of update in the rule's monitor flags.  A nonzero
 * 'seqno' selects only rules added or modified after that sequence number;
 * a zero 'seqno' requests an initial dump of every matching rule. */
static void
ofproto_collect_ofmonitor_refresh_rule(const struct ofmonitor *m,
                                       struct rule *rule, uint64_t seqno,
                                       struct rule_collection *rules)
    OVS_REQUIRES(ofproto_mutex)
{
    enum nx_flow_monitor_flags update;

    /* Hidden (internal) rules are never reported. */
    if (rule_is_hidden(rule)) {
        return;
    }

    /* Honor the monitor's output-port filter. */
    if (!ofproto_rule_has_out_port(rule, m->out_port)) {
        return;
    }

    if (seqno) {
        if (rule->add_seqno > seqno) {
            /* Rule appeared after 'seqno': report it as an addition (and
             * any later modification). */
            update = NXFMF_ADD | NXFMF_MODIFY;
        } else if (rule->modify_seqno > seqno) {
            update = NXFMF_MODIFY;
        } else {
            return;
        }

        /* Skip events this monitor did not subscribe to. */
        if (!(m->flags & update)) {
            return;
        }
    } else {
        update = NXFMF_INITIAL;
    }

    /* Add the rule to the collection only once, even if several monitors
     * select it; flags accumulate across monitors. */
    if (!rule->monitor_flags) {
        rule_collection_add(rules, rule);
    }
    rule->monitor_flags |= update | (m->flags & NXFMF_ACTIONS);
}
6086
/* Collects into 'rules' every rule matched by monitor 'm' that needs a
 * refresh update relative to 'seqno' (zero requests a full initial dump).
 * Iterates the monitor's table (or all tables, for 0xff) at the most
 * recent classifier version. */
static void
ofproto_collect_ofmonitor_refresh_rules(const struct ofmonitor *m,
                                        uint64_t seqno,
                                        struct rule_collection *rules)
    OVS_REQUIRES(ofproto_mutex)
{
    const struct ofproto *ofproto = ofconn_get_ofproto(m->ofconn);
    const struct oftable *table;
    struct cls_rule target;

    cls_rule_init_from_minimatch(&target, &m->match, 0);
    FOR_EACH_MATCHING_TABLE (table, m->table_id, ofproto) {
        struct rule *rule;

        CLS_FOR_EACH_TARGET (rule, cr, &table->cls, &target, OVS_VERSION_MAX) {
            ofproto_collect_ofmonitor_refresh_rule(m, rule, seqno, rules);
        }
    }
    /* 'target' owns classifier state that must be released. */
    cls_rule_destroy(&target);
}
6107
6108static void
6109ofproto_collect_ofmonitor_initial_rules(struct ofmonitor *m,
6110 struct rule_collection *rules)
6111 OVS_REQUIRES(ofproto_mutex)
6112{
6113 if (m->flags & NXFMF_INITIAL) {
6114 ofproto_collect_ofmonitor_refresh_rules(m, 0, rules);
6115 }
6116}
6117
/* Collects into 'rules' the rules monitored by 'm' that were added or
 * modified after 'seqno', so that the corresponding updates can be
 * composed when the monitor resumes. */
void
ofmonitor_collect_resume_rules(struct ofmonitor *m,
                               uint64_t seqno, struct rule_collection *rules)
    OVS_REQUIRES(ofproto_mutex)
{
    ofproto_collect_ofmonitor_refresh_rules(m, seqno, rules);
}
6125
6126static enum ofperr
6127flow_monitor_delete(struct ofconn *ofconn, uint32_t id)
6128 OVS_REQUIRES(ofproto_mutex)
6129{
6130 struct ofmonitor *m;
6131 enum ofperr error;
6132
6133 m = ofmonitor_lookup(ofconn, id);
6134 if (m) {
6135 ofmonitor_destroy(m);
6136 error = 0;
6137 } else {
6138 error = OFPERR_OFPMOFC_UNKNOWN_MONITOR;
6139 }
6140
6141 return error;
6142}
6143
/* Handles a flow monitor request, which may carry several individual
 * monitor requests in one message.  On success, installs all the new
 * monitors and replies with the initial dump of matching rules; on any
 * failure, destroys every monitor created so far and returns the error. */
static enum ofperr
handle_flow_monitor_request(struct ofconn *ofconn, const struct ofp_header *oh)
    OVS_EXCLUDED(ofproto_mutex)
{
    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);

    struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length));

    /* Monitors created so far, kept so they can be destroyed on error. */
    struct ofmonitor **monitors = NULL;
    size_t allocated_monitors = 0;
    size_t n_monitors = 0;

    enum ofperr error;

    ovs_mutex_lock(&ofproto_mutex);
    for (;;) {
        struct ofputil_flow_monitor_request request;
        struct ofmonitor *m;
        int retval;

        retval = ofputil_decode_flow_monitor_request(&request, &b);
        if (retval == EOF) {
            break;
        } else if (retval) {
            error = retval;
            goto error;
        }

        /* Table id 0xff means "all tables". */
        if (request.table_id != 0xff
            && request.table_id >= ofproto->n_tables) {
            error = OFPERR_OFPBRC_BAD_TABLE_ID;
            goto error;
        }

        error = ofmonitor_create(&request, ofconn, &m);
        if (error) {
            goto error;
        }

        if (n_monitors >= allocated_monitors) {
            monitors = x2nrealloc(monitors, &allocated_monitors,
                                  sizeof *monitors);
        }
        monitors[n_monitors++] = m;
    }

    /* Collect the rules that each new monitor's initial dump selects. */
    struct rule_collection rules;
    rule_collection_init(&rules);
    for (size_t i = 0; i < n_monitors; i++) {
        ofproto_collect_ofmonitor_initial_rules(monitors[i], &rules);
    }

    struct ovs_list replies;
    ofpmp_init(&replies, oh);
    ofmonitor_compose_refresh_updates(&rules, &replies);
    ovs_mutex_unlock(&ofproto_mutex);

    rule_collection_destroy(&rules);

    ofconn_send_replies(ofconn, &replies);
    free(monitors);

    return 0;

error:
    /* Undo the monitors installed before the failure. */
    for (size_t i = 0; i < n_monitors; i++) {
        ofmonitor_destroy(monitors[i]);
    }
    free(monitors);
    ovs_mutex_unlock(&ofproto_mutex);

    return error;
}
6217
6218static enum ofperr
6219handle_flow_monitor_cancel(struct ofconn *ofconn, const struct ofp_header *oh)
6220 OVS_EXCLUDED(ofproto_mutex)
6221{
6222 enum ofperr error;
6223 uint32_t id;
6224
6225 id = ofputil_decode_flow_monitor_cancel(oh);
6226
6227 ovs_mutex_lock(&ofproto_mutex);
6228 error = flow_monitor_delete(ofconn, id);
6229 ovs_mutex_unlock(&ofproto_mutex);
6230
6231 return error;
6232}
6233
/* Meters implementation.
 *
 * Meter table entry, indexed by the OpenFlow meter_id.
 * 'created' is used to compute the duration for meter stats.
 * 'list rules' is needed so that we can delete the dependent rules when the
 * meter table entry is deleted.
 * 'provider_meter_id' is for the provider's private use.
 */
struct meter {
    struct hmap_node node; /* In ofproto->meters. */
    long long int created; /* Time created. */
    struct ovs_list rules; /* List of "struct rule_dpif"s. */
    uint32_t id; /* OpenFlow meter_id. */
    ofproto_meter_id provider_meter_id; /* Datapath meter id (provider's). */
    uint16_t flags; /* Meter flags. */
    uint16_t n_bands; /* Number of meter bands. */
    struct ofputil_meter_band *bands; /* Array of 'n_bands' bands. */
};
6252
6253static struct meter *
6254ofproto_get_meter(const struct ofproto *ofproto, uint32_t meter_id)
6255{
6256 struct meter *meter;
6257 uint32_t hash = hash_int(meter_id, 0);
6258
6259 HMAP_FOR_EACH_WITH_HASH (meter, node, hash, &ofproto->meters) {
6260 if (meter->id == meter_id) {
6261 return meter;
6262 }
6263 }
6264
6265 return NULL;
6266}
6267
6268static uint32_t *
6269ofproto_upcall_meter_ptr(struct ofproto *ofproto, uint32_t meter_id)
6270{
6271 switch(meter_id) {
6272 case OFPM13_SLOWPATH:
6273 return &ofproto->slowpath_meter_id;
6274 break;
6275 case OFPM13_CONTROLLER:
6276 return &ofproto->controller_meter_id;
6277 break;
6278 case OFPM13_ALL:
6279 OVS_NOT_REACHED();
6280 default:
6281 return NULL;
6282 }
6283}
6284
6285static void
6286ofproto_add_meter(struct ofproto *ofproto, struct meter *meter)
6287{
6288 uint32_t *upcall_meter_ptr = ofproto_upcall_meter_ptr(ofproto, meter->id);
6289
6290 /* Cache datapath meter IDs of special meters. */
6291 if (upcall_meter_ptr) {
6292 *upcall_meter_ptr = meter->provider_meter_id.uint32;
6293 }
6294
6295 hmap_insert(&ofproto->meters, &meter->node, hash_int(meter->id, 0));
6296}
6297
6298/*
6299 * This is used in instruction validation at flow set-up time, to map
6300 * the OpenFlow meter ID to the corresponding datapath provider meter
6301 * ID. If either does not exist, returns false. Otherwise updates
6302 * the meter action and returns true.
6303 */
6304static bool
6305ofproto_fix_meter_action(const struct ofproto *ofproto,
6306 struct ofpact_meter *ma)
6307{
6308 if (ma->meter_id) {
6309 const struct meter *meter = ofproto_get_meter(ofproto, ma->meter_id);
6310
6311 if (meter && meter->provider_meter_id.uint32 != UINT32_MAX) {
6312 /* Update the action with the provider's meter ID, so that we
6313 * do not need any synchronization between ofproto_dpif_xlate
6314 * and ofproto for meter table access. */
6315 ma->provider_meter_id = meter->provider_meter_id.uint32;
6316 return true;
6317 }
6318 }
6319 return false;
6320}
6321
6322/* This is used in instruction validation at flow set-up time, to map
6323 * the OpenFlow meter ID in a controller action to the corresponding
6324 * datapath provider meter ID. If either does not exist, sets the
6325 * provider meter id to a value to prevent the provider from using it
6326 * and returns false. Otherwise, updates the meter action and returns
6327 * true. */
6328static bool
6329ofproto_fix_controller_action(const struct ofproto *ofproto,
6330 struct ofpact_controller *ca)
6331{
6332 if (ca->meter_id == NX_CTLR_NO_METER) {
6333 ca->provider_meter_id = UINT32_MAX;
6334 return true;
6335 }
6336
6337 const struct meter *meter = ofproto_get_meter(ofproto, ca->meter_id);
6338
6339 if (meter && meter->provider_meter_id.uint32 != UINT32_MAX) {
6340 /* Update the action with the provider's meter ID, so that we
6341 * do not need any synchronization between ofproto_dpif_xlate
6342 * and ofproto for meter table access. */
6343 ca->provider_meter_id = meter->provider_meter_id.uint32;
6344 return true;
6345 }
6346
6347 /* Prevent the meter from being set by the ofproto provider. */
6348 ca->provider_meter_id = UINT32_MAX;
6349 return false;
6350}
6351
/* Finds the meter invoked by 'rule''s actions and adds 'rule' to the meter's
 * list of rules. */
static void
meter_insert_rule(struct rule *rule)
{
    const struct rule_actions *a = rule_get_actions(rule);
    uint32_t meter_id = ofpacts_get_meter(a->ofpacts, a->ofpacts_len);
    /* NOTE(review): 'meter' is dereferenced without a NULL check --
     * presumably callers only invoke this for rules whose meter action was
     * already validated against the meter table; confirm before reusing
     * this helper elsewhere. */
    struct meter *meter = ofproto_get_meter(rule->ofproto, meter_id);

    ovs_list_insert(&meter->rules, &rule->meter_list_node);
}
6363
6364static void
6365meter_update(struct meter *meter, const struct ofputil_meter_config *config)
6366{
6367 free(meter->bands);
6368
6369 meter->flags = config->flags;
6370 meter->n_bands = config->n_bands;
6371 meter->bands = xmemdup(config->bands,
6372 config->n_bands * sizeof *meter->bands);
6373}
6374
6375static struct meter *
6376meter_create(const struct ofputil_meter_config *config,
6377 ofproto_meter_id provider_meter_id)
6378{
6379 struct meter *meter;
6380
6381 meter = xzalloc(sizeof *meter);
6382 meter->provider_meter_id = provider_meter_id;
6383 meter->created = time_msec();
6384 meter->id = config->meter_id;
6385 ovs_list_init(&meter->rules);
6386
6387 meter_update(meter, config);
6388
6389 return meter;
6390}
6391
/* Releases 'meter' and everything that depends on it: invalidates any
 * cached special meter id, deletes all the rules that still reference the
 * meter, tells the provider to remove its datapath meter, and frees the
 * entry.  The caller is responsible for having removed 'meter' from
 * 'ofproto->meters'. */
static void
meter_destroy(struct ofproto *ofproto, struct meter *meter)
    OVS_REQUIRES(ofproto_mutex)
{
    uint32_t *upcall_meter_ptr;
    upcall_meter_ptr = ofproto_upcall_meter_ptr(ofproto, meter->id);
    if (upcall_meter_ptr) {
        /* Invalidate the cached datapath id for special virtual meters. */
        *upcall_meter_ptr = UINT32_MAX;
    }

    if (!ovs_list_is_empty(&meter->rules)) {
        struct rule_collection rules;
        struct rule *rule;

        /* Delete every rule whose actions reference this meter. */
        rule_collection_init(&rules);
        LIST_FOR_EACH (rule, meter_list_node, &meter->rules) {
            rule_collection_add(&rules, rule);
        }
        delete_flows__(&rules, OFPRR_METER_DELETE, NULL);
    }

    ofproto->ofproto_class->meter_del(ofproto, meter->provider_meter_id);
    free(meter->bands);
    free(meter);
}
6417
6418static void
6419meter_delete(struct ofproto *ofproto, uint32_t meter_id)
6420 OVS_REQUIRES(ofproto_mutex)
6421{
6422 struct meter *meter = ofproto_get_meter(ofproto, meter_id);
6423
6424 if (meter) {
6425 hmap_remove(&ofproto->meters, &meter->node);
6426 meter_destroy(ofproto, meter);
6427 }
6428}
6429
/* Removes and destroys every meter in 'ofproto''s meter table. */
static void
meter_delete_all(struct ofproto *ofproto)
    OVS_REQUIRES(ofproto_mutex)
{
    struct meter *meter, *next;

    /* The _SAFE variant is required because meter_destroy() frees the
     * node being iterated. */
    HMAP_FOR_EACH_SAFE (meter, next, node, &ofproto->meters) {
        hmap_remove(&ofproto->meters, &meter->node);
        meter_destroy(ofproto, meter);
    }
}
6441
6442static enum ofperr
6443handle_add_meter(struct ofproto *ofproto, struct ofputil_meter_mod *mm)
6444{
6445 ofproto_meter_id provider_meter_id = { UINT32_MAX };
6446 struct meter *meter = ofproto_get_meter(ofproto, mm->meter.meter_id);
6447 enum ofperr error;
6448
6449 if (meter) {
6450 return OFPERR_OFPMMFC_METER_EXISTS;
6451 }
6452
6453 error = ofproto->ofproto_class->meter_set(ofproto, &provider_meter_id,
6454 &mm->meter);
6455 if (!error) {
6456 ovs_assert(provider_meter_id.uint32 != UINT32_MAX);
6457 meter = meter_create(&mm->meter, provider_meter_id);
6458 ofproto_add_meter(ofproto, meter);
6459 }
6460 return error;
6461}
6462
6463static enum ofperr
6464handle_modify_meter(struct ofproto *ofproto, struct ofputil_meter_mod *mm)
6465{
6466 struct meter *meter = ofproto_get_meter(ofproto, mm->meter.meter_id);
6467 enum ofperr error;
6468 uint32_t provider_meter_id;
6469
6470 if (!meter) {
6471 return OFPERR_OFPMMFC_UNKNOWN_METER;
6472 }
6473
6474 provider_meter_id = meter->provider_meter_id.uint32;
6475 error = ofproto->ofproto_class->meter_set(ofproto,
6476 &meter->provider_meter_id,
6477 &mm->meter);
6478 ovs_assert(meter->provider_meter_id.uint32 == provider_meter_id);
6479 if (!error) {
6480 meter_update(meter, &mm->meter);
6481 }
6482 return error;
6483}
6484
6485static enum ofperr
6486handle_delete_meter(struct ofconn *ofconn, struct ofputil_meter_mod *mm)
6487 OVS_EXCLUDED(ofproto_mutex)
6488{
6489 struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
6490 uint32_t meter_id = mm->meter.meter_id;
6491
6492 /* OpenFlow does not support Meter ID 0. */
6493 if (meter_id) {
6494 ovs_mutex_lock(&ofproto_mutex);
6495
6496 if (meter_id == OFPM13_ALL) {
6497 meter_delete_all(ofproto);
6498 } else {
6499 meter_delete(ofproto, meter_id);
6500 }
6501
6502 ovs_mutex_unlock(&ofproto_mutex);
6503 }
6504
6505 return 0;
6506}
6507
/* Handles an OFPT_METER_MOD message: validates it, dispatches to the
 * add/modify/delete handler, and on success forwards the request to other
 * controllers that enabled request forwarding. */
static enum ofperr
handle_meter_mod(struct ofconn *ofconn, const struct ofp_header *oh)
{
    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
    struct ofputil_meter_mod mm;
    uint64_t bands_stub[256 / 8];
    struct ofpbuf bands;
    uint32_t meter_id;
    enum ofperr error;

    /* Meter mods are not allowed from a slave controller. */
    error = reject_slave_controller(ofconn);
    if (error) {
        return error;
    }

    ofpbuf_use_stub(&bands, bands_stub, sizeof bands_stub);

    error = ofputil_decode_meter_mod(oh, &mm, &bands);
    if (error) {
        goto exit_free_bands;
    }

    meter_id = mm.meter.meter_id;

    /* Validation below applies only to add/modify; delete of a
     * nonexistent meter is handled permissively. */
    if (mm.command != OFPMC13_DELETE) {
        /* Fails also when meters are not implemented by the provider. */
        if (ofproto->meter_features.max_meters == 0) {
            error = OFPERR_OFPMMFC_INVALID_METER;
            goto exit_free_bands;
        }

        if (meter_id == 0) {
            /* Meter id 0 is not a valid OpenFlow meter. */
            error = OFPERR_OFPMMFC_INVALID_METER;
            goto exit_free_bands;
        } else if (meter_id > OFPM13_MAX) {
            /* Of the virtual meters, only the slowpath and controller
             * meters may be added or modified. */
            switch(meter_id) {
            case OFPM13_SLOWPATH:
            case OFPM13_CONTROLLER:
                break;
            case OFPM13_ALL:
            default:
                error = OFPERR_OFPMMFC_INVALID_METER;
                goto exit_free_bands;
            }
        }
        if (mm.meter.n_bands > ofproto->meter_features.max_bands) {
            error = OFPERR_OFPMMFC_OUT_OF_BANDS;
            goto exit_free_bands;
        }
    }

    switch (mm.command) {
    case OFPMC13_ADD:
        error = handle_add_meter(ofproto, &mm);
        break;

    case OFPMC13_MODIFY:
        error = handle_modify_meter(ofproto, &mm);
        break;

    case OFPMC13_DELETE:
        error = handle_delete_meter(ofconn, &mm);
        break;

    default:
        error = OFPERR_OFPMMFC_BAD_COMMAND;
        break;
    }

    if (!error) {
        /* Mirror the successful meter mod to controllers that requested
         * forwarding of such requests. */
        struct ofputil_requestforward rf;
        rf.xid = oh->xid;
        rf.reason = OFPRFR_METER_MOD;
        rf.meter_mod = &mm;
        connmgr_send_requestforward(ofproto->connmgr, ofconn, &rf);
    }

exit_free_bands:
    ofpbuf_uninit(&bands);
    return error;
}
6589
6590static enum ofperr
6591handle_meter_features_request(struct ofconn *ofconn,
6592 const struct ofp_header *request)
6593{
6594 struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
6595 struct ofputil_meter_features features;
6596 struct ofpbuf *b;
6597
6598 if (ofproto->ofproto_class->meter_get_features) {
6599 ofproto->ofproto_class->meter_get_features(ofproto, &features);
6600 } else {
6601 memset(&features, 0, sizeof features);
6602 }
6603 b = ofputil_encode_meter_features_reply(&features, request);
6604
6605 ofconn_send_reply(ofconn, b);
6606 return 0;
6607}
6608
/* Appends to 'replies' a meter stats or meter config reply (according to
 * 'type') for 'meter'.  For stats requests the provider fills in the
 * packet and byte counts while everything else is computed here; if the
 * provider fails to return stats, no entry is appended for this meter. */
static void
meter_request_reply(struct ofproto *ofproto, struct meter *meter,
                    enum ofptype type, struct ovs_list *replies)
{
    uint64_t bands_stub[256 / 8];
    struct ofpbuf bands;

    ofpbuf_use_stub(&bands, bands_stub, sizeof bands_stub);

    if (type == OFPTYPE_METER_STATS_REQUEST) {
        struct ofputil_meter_stats stats;

        stats.meter_id = meter->id;

        /* Provider sets the packet and byte counts, we do the rest. */
        stats.flow_count = ovs_list_size(&meter->rules);
        calc_duration(meter->created, time_msec(),
                      &stats.duration_sec, &stats.duration_nsec);
        stats.n_bands = meter->n_bands;
        ofpbuf_clear(&bands);
        /* Reserve space for the per-band stats the provider will fill. */
        stats.bands = ofpbuf_put_uninit(&bands, meter->n_bands
                                        * sizeof *stats.bands);

        if (!ofproto->ofproto_class->meter_get(ofproto,
                                               meter->provider_meter_id,
                                               &stats, meter->n_bands)) {
            ofputil_append_meter_stats(replies, &stats);
        }
    } else { /* type == OFPTYPE_METER_CONFIG_REQUEST */
        struct ofputil_meter_config config;

        config.meter_id = meter->id;
        config.flags = meter->flags;
        config.n_bands = meter->n_bands;
        config.bands = meter->bands;
        ofputil_append_meter_config(replies, &config);
    }

    ofpbuf_uninit(&bands);
}
6649
6650static void
6651meter_request_reply_all(struct ofproto *ofproto, enum ofptype type,
6652 struct ovs_list *replies)
6653{
6654 struct meter *meter;
6655
6656 HMAP_FOR_EACH (meter, node, &ofproto->meters) {
6657 meter_request_reply(ofproto, meter, type, replies);
6658 }
6659}
6660
6661static enum ofperr
6662handle_meter_request(struct ofconn *ofconn, const struct ofp_header *request,
6663 enum ofptype type)
6664{
6665 struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
6666 struct ovs_list replies;
6667 uint32_t meter_id;
6668 struct meter *meter;
6669
6670 ofputil_decode_meter_request(request, &meter_id);
6671
6672 if (meter_id != OFPM13_ALL) {
6673 meter = ofproto_get_meter(ofproto, meter_id);
6674 if (!meter) {
6675 /* Meter does not exist. */
6676 return OFPERR_OFPMMFC_UNKNOWN_METER;
6677 }
6678 } else {
6679 /* GCC 4.9.2 complains about 'meter' can potentially be used
6680 * uninitialized. Logically, this is a false alarm since
6681 * meter is only used when meter_id != OFPM13_ALL.
6682 * Set NULL to make compiler happy. */
6683 meter = NULL;
6684 }
6685
6686 ofpmp_init(&replies, request);
6687 if (meter_id == OFPM13_ALL) {
6688 meter_request_reply_all(ofproto, type, &replies);
6689 } else {
6690 meter_request_reply(ofproto, meter, type, &replies);
6691 }
6692 ofconn_send_replies(ofconn, &replies);
6693 return 0;
6694}
6695
6696/* Returned group is RCU protected. */
6697static struct ofgroup *
6698ofproto_group_lookup__(const struct ofproto *ofproto, uint32_t group_id,
6699 ovs_version_t version)
6700{
6701 struct ofgroup *group;
6702
6703 CMAP_FOR_EACH_WITH_HASH (group, cmap_node, hash_int(group_id, 0),
6704 &ofproto->groups) {
6705 if (group->group_id == group_id
6706 && versions_visible_in_version(&group->versions, version)) {
6707 return group;
6708 }
6709 }
6710
6711 return NULL;
6712}
6713
6714/* If the group exists, this function increments the groups's reference count.
6715 *
6716 * Make sure to call ofproto_group_unref() after no longer needing to maintain
6717 * a reference to the group. */
6718struct ofgroup *
6719ofproto_group_lookup(const struct ofproto *ofproto, uint32_t group_id,
6720 ovs_version_t version, bool take_ref)
6721{
6722 struct ofgroup *group;
6723
6724 group = ofproto_group_lookup__(ofproto, group_id, version);
6725 if (group && take_ref) {
6726 /* Not holding a lock, so it is possible that another thread releases
6727 * the last reference just before we manage to get one. */
6728 return ofproto_group_try_ref(group) ? group : NULL;
6729 }
6730 return group;
6731}
6732
6733/* Caller should hold 'ofproto_mutex' if it is important that the
6734 * group is not removed by someone else. */
6735static bool
6736ofproto_group_exists(const struct ofproto *ofproto, uint32_t group_id)
6737{
6738 return ofproto_group_lookup__(ofproto, group_id, OVS_VERSION_MAX) != NULL;
6739}
6740
/* Adds 'rule' to the set of rules that reference 'group'. */
static void
group_add_rule(struct ofgroup *group, struct rule *rule)
{
    rule_collection_add(&group->rules, rule);
}
6746
/* Removes 'rule' from the set of rules that reference 'group'. */
static void
group_remove_rule(struct ofgroup *group, struct rule *rule)
{
    rule_collection_remove(&group->rules, rule);
}
6752
/* Appends group statistics for 'group' to 'replies'.  The provider
 * supplies the packet and byte counts; the reference count, bucket count,
 * and duration are computed here.  If the provider cannot supply stats,
 * every counter is reported as all-ones. */
static void
append_group_stats(struct ofgroup *group, struct ovs_list *replies)
    OVS_REQUIRES(ofproto_mutex)
{
    struct ofputil_group_stats ogs;
    const struct ofproto *ofproto = group->ofproto;
    long long int now = time_msec();
    int error;

    ogs.bucket_stats = xmalloc(group->n_buckets * sizeof *ogs.bucket_stats);

    /* Provider sets the packet and byte counts, we do the rest. */
    ogs.ref_count = rule_collection_n(&group->rules);
    ogs.n_buckets = group->n_buckets;

    /* Providers without stats support leave the callback unset. */
    error = (ofproto->ofproto_class->group_get_stats
             ? ofproto->ofproto_class->group_get_stats(group, &ogs)
             : EOPNOTSUPP);
    if (error) {
        /* Stats unavailable: report every counter as all-ones. */
        ogs.packet_count = UINT64_MAX;
        ogs.byte_count = UINT64_MAX;
        memset(ogs.bucket_stats, 0xff,
               ogs.n_buckets * sizeof *ogs.bucket_stats);
    }

    ogs.group_id = group->group_id;
    calc_duration(group->created, now, &ogs.duration_sec, &ogs.duration_nsec);

    ofputil_append_group_stats(replies, &ogs);

    free(ogs.bucket_stats);
}
6785
/* Common skeleton for group stats and group description requests: invokes
 * 'cb' to append a reply for the group with id 'group_id', or for every
 * group visible at the current version if 'group_id' is OFPG_ALL, then
 * sends the accumulated replies.  A nonexistent 'group_id' produces an
 * empty (but still sent) reply. */
static void
handle_group_request(struct ofconn *ofconn,
                     const struct ofp_header *request, uint32_t group_id,
                     void (*cb)(struct ofgroup *, struct ovs_list *replies))
    OVS_EXCLUDED(ofproto_mutex)
{
    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
    struct ofgroup *group;
    struct ovs_list replies;

    ofpmp_init(&replies, request);
    /* Must exclude modifications to guarantee iterating groups. */
    ovs_mutex_lock(&ofproto_mutex);
    if (group_id == OFPG_ALL) {
        CMAP_FOR_EACH (group, cmap_node, &ofproto->groups) {
            /* Skip groups not visible at the newest version. */
            if (versions_visible_in_version(&group->versions,
                                            OVS_VERSION_MAX)) {
                cb(group, &replies);
            }
        }
    } else {
        group = ofproto_group_lookup__(ofproto, group_id, OVS_VERSION_MAX);
        if (group) {
            cb(group, &replies);
        }
    }
    ovs_mutex_unlock(&ofproto_mutex);
    ofconn_send_replies(ofconn, &replies);
}
6815
6816static enum ofperr
6817handle_group_stats_request(struct ofconn *ofconn,
6818 const struct ofp_header *request)
6819{
6820 uint32_t group_id;
6821 enum ofperr error;
6822
6823 error = ofputil_decode_group_stats_request(request, &group_id);
6824 if (error) {
6825 return error;
6826 }
6827
6828 handle_group_request(ofconn, request, group_id, append_group_stats);
6829 return 0;
6830}
6831
6832static void
6833append_group_desc(struct ofgroup *group, struct ovs_list *replies)
6834{
6835 struct ofputil_group_desc gds;
6836
6837 gds.group_id = group->group_id;
6838 gds.type = group->type;
6839 gds.props = group->props;
6840
6841 ofputil_append_group_desc_reply(&gds, &group->buckets, replies);
6842}
6843
6844static enum ofperr
6845handle_group_desc_stats_request(struct ofconn *ofconn,
6846 const struct ofp_header *request)
6847{
6848 handle_group_request(ofconn, request,
6849 ofputil_decode_group_desc_request(request),
6850 append_group_desc);
6851 return 0;
6852}
6853
6854static enum ofperr
6855handle_group_features_stats_request(struct ofconn *ofconn,
6856 const struct ofp_header *request)
6857{
6858 struct ofproto *p = ofconn_get_ofproto(ofconn);
6859 struct ofpbuf *msg;
6860
6861 msg = ofputil_encode_group_features_reply(&p->ogf, request);
6862 if (msg) {
6863 ofconn_send_reply(ofconn, msg);
6864 }
6865
6866 return 0;
6867}
6868
6869static void
6870put_queue_get_config_reply(struct ofport *port, uint32_t queue,
6871 struct ovs_list *replies)
6872{
6873 struct ofputil_queue_config qc;
6874
6875 /* None of the existing queues have compatible properties, so we hard-code
6876 * omitting min_rate and max_rate. */
6877 qc.port = port->ofp_port;
6878 qc.queue = queue;
6879 qc.min_rate = UINT16_MAX;
6880 qc.max_rate = UINT16_MAX;
6881 ofputil_append_queue_get_config_reply(&qc, replies);
6882}
6883
6884static int
6885handle_queue_get_config_request_for_port(struct ofport *port, uint32_t queue,
6886 struct ovs_list *replies)
6887{
6888 struct smap details = SMAP_INITIALIZER(&details);
6889 if (queue != OFPQ_ALL) {
6890 int error = netdev_get_queue(port->netdev, queue, &details);
6891 switch (error) {
6892 case 0:
6893 put_queue_get_config_reply(port, queue, replies);
6894 break;
6895 case EOPNOTSUPP:
6896 case EINVAL:
6897 return OFPERR_OFPQOFC_BAD_QUEUE;
6898 default:
6899 return OFPERR_NXQOFC_QUEUE_ERROR;
6900 }
6901 } else {
6902 struct netdev_queue_dump queue_dump;
6903 uint32_t queue_id;
6904
6905 NETDEV_QUEUE_FOR_EACH (&queue_id, &details, &queue_dump,
6906 port->netdev) {
6907 put_queue_get_config_reply(port, queue_id, replies);
6908 }
6909 }
6910 smap_destroy(&details);
6911 return 0;
6912}
6913
/* Handles a queue-get-config request for a single port or, with OFPP_ANY,
 * for all ports.  For OFPP_ANY the request succeeds if at least one port
 * yields a reply; on failure the partial replies are discarded and the
 * error is returned. */
static enum ofperr
handle_queue_get_config_request(struct ofconn *ofconn,
                                const struct ofp_header *oh)
{
    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
    struct ovs_list replies;
    struct ofport *port;
    ofp_port_t req_port;
    uint32_t req_queue;
    enum ofperr error;

    error = ofputil_decode_queue_get_config_request(oh, &req_port, &req_queue);
    if (error) {
        return error;
    }

    ofputil_start_queue_get_config_reply(oh, &replies);
    if (req_port == OFPP_ANY) {
        /* Start pessimistic; any port that replies clears the error. */
        error = OFPERR_OFPQOFC_BAD_QUEUE;
        HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) {
            if (!handle_queue_get_config_request_for_port(port, req_queue,
                                                          &replies)) {
                error = 0;
            }
        }
    } else {
        port = ofproto_get_port(ofproto, req_port);
        error = (port
                 ? handle_queue_get_config_request_for_port(port, req_queue,
                                                            &replies)
                 : OFPERR_OFPQOFC_BAD_PORT);
    }
    if (!error) {
        ofconn_send_replies(ofconn, &replies);
    } else {
        /* Drop any partially accumulated replies on error. */
        ofpbuf_list_delete(&replies);
    }

    return error;
}
6954
/* Allocates, initializes, and constructs a new group in 'ofproto', obtaining
 * all the attributes for it from 'gm', and stores a pointer to it in
 * '*ofgroup'. Makes the new group visible from the flow table starting from
 * 'version'.
 *
 * Returns 0 if successful, otherwise an error code. If there is an error then
 * '*ofgroup' is indeterminate upon return. */
static enum ofperr
init_group(struct ofproto *ofproto, const struct ofputil_group_mod *gm,
           ovs_version_t version, struct ofgroup **ofgroup)
{
    enum ofperr error;
    const long long int now = time_msec();

    if (gm->group_id > OFPG_MAX) {
        return OFPERR_OFPGMFC_INVALID_GROUP;
    }
    if (gm->type > OFPGT11_FF) {
        return OFPERR_OFPGMFC_BAD_TYPE;
    }

    *ofgroup = ofproto->ofproto_class->group_alloc();
    if (!*ofgroup) {
        VLOG_WARN_RL(&rl, "%s: failed to allocate group", ofproto->name);
        return OFPERR_OFPGMFC_OUT_OF_GROUPS;
    }

    /* These fields are declared const in struct ofgroup, so CONST_CAST is
     * needed to initialize them here. */
    *CONST_CAST(struct ofproto **, &(*ofgroup)->ofproto) = ofproto;
    *CONST_CAST(uint32_t *, &((*ofgroup)->group_id)) = gm->group_id;
    *CONST_CAST(enum ofp11_group_type *, &(*ofgroup)->type) = gm->type;
    *CONST_CAST(long long int *, &((*ofgroup)->created)) = now;
    *CONST_CAST(long long int *, &((*ofgroup)->modified)) = now;
    ovs_refcount_init(&(*ofgroup)->ref_count);
    (*ofgroup)->being_deleted = false;

    /* Deep-copy the buckets from the group mod. */
    ovs_list_init(CONST_CAST(struct ovs_list *, &(*ofgroup)->buckets));
    ofputil_bucket_clone_list(CONST_CAST(struct ovs_list *,
                                         &(*ofgroup)->buckets),
                              &gm->buckets, NULL);

    *CONST_CAST(uint32_t *, &(*ofgroup)->n_buckets) =
        ovs_list_size(&(*ofgroup)->buckets);

    ofputil_group_properties_copy(CONST_CAST(struct ofputil_group_props *,
                                             &(*ofgroup)->props),
                                  &gm->props);
    rule_collection_init(&(*ofgroup)->rules);

    /* Make group visible from 'version'. */
    (*ofgroup)->versions = VERSIONS_INITIALIZER(version,
                                                OVS_VERSION_NOT_REMOVED);

    /* Construct called BEFORE any locks are held. */
    error = ofproto->ofproto_class->group_construct(*ofgroup);
    if (error) {
        /* Roll back the property and bucket copies made above. */
        ofputil_group_properties_destroy(CONST_CAST(struct ofputil_group_props *,
                                                    &(*ofgroup)->props));
        ofputil_bucket_list_destroy(CONST_CAST(struct ovs_list *,
                                               &(*ofgroup)->buckets));
        ofproto->ofproto_class->group_dealloc(*ofgroup);
    }
    return error;
}
7018
7019/* Implements the OFPGC11_ADD operation specified by 'gm', adding a group to
7020 * 'ofproto''s group table. Returns 0 on success or an OpenFlow error code on
7021 * failure. */
7022static enum ofperr
7023add_group_start(struct ofproto *ofproto, struct ofproto_group_mod *ogm)
7024 OVS_REQUIRES(ofproto_mutex)
7025{
7026 enum ofperr error;
7027
7028 if (ofproto_group_exists(ofproto, ogm->gm.group_id)) {
7029 return OFPERR_OFPGMFC_GROUP_EXISTS;
7030 }
7031
7032 if (ofproto->n_groups[ogm->gm.type]
7033 >= ofproto->ogf.max_groups[ogm->gm.type]) {
7034 return OFPERR_OFPGMFC_OUT_OF_GROUPS;
7035 }
7036
7037 /* Allocate new group and initialize it. */
7038 error = init_group(ofproto, &ogm->gm, ogm->version, &ogm->new_group);
7039 if (!error) {
7040 /* Insert new group. */
7041 cmap_insert(&ofproto->groups, &ogm->new_group->cmap_node,
7042 hash_int(ogm->new_group->group_id, 0));
7043 ofproto->n_groups[ogm->new_group->type]++;
7044 }
7045 return error;
7046}
7047
/* Adds all of the buckets from 'ofgroup' to 'new_ofgroup'. The buckets
 * already in 'new_ofgroup' will be placed just after the (copy of the) bucket
 * in 'ofgroup' with bucket ID 'command_bucket_id'. Special
 * 'command_bucket_id' values OFPG15_BUCKET_FIRST and OFPG15_BUCKET_LAST are
 * also honored. */
static enum ofperr
copy_buckets_for_insert_bucket(const struct ofgroup *ofgroup,
                               struct ofgroup *new_ofgroup,
                               uint32_t command_bucket_id)
{
    struct ofputil_bucket *last = NULL;

    if (command_bucket_id <= OFPG15_BUCKET_MAX) {
        /* Check here to ensure that a bucket corresponding to
         * command_bucket_id exists in the old bucket list.
         *
         * The subsequent search of below of new_ofgroup covers
         * both buckets in the old bucket list and buckets added
         * by the insert buckets group mod message this function processes. */
        if (!ofputil_bucket_find(&ofgroup->buckets, command_bucket_id)) {
            return OFPERR_OFPGMFC_UNKNOWN_BUCKET;
        }

        /* Remember the last bucket that came from the group mod itself;
         * everything appended after it below is copied from 'ofgroup'. */
        if (!ovs_list_is_empty(&new_ofgroup->buckets)) {
            last = ofputil_bucket_list_back(&new_ofgroup->buckets);
        }
    }

    /* Append a copy of the old group's buckets after the new buckets. */
    ofputil_bucket_clone_list(CONST_CAST(struct ovs_list *,
                                         &new_ofgroup->buckets),
                              &ofgroup->buckets, NULL);

    if (ofputil_bucket_check_duplicate_id(&new_ofgroup->buckets)) {
        VLOG_INFO_RL(&rl, "Duplicate bucket id");
        return OFPERR_OFPGMFC_BUCKET_EXISTS;
    }

    /* Rearrange list according to command_bucket_id */
    if (command_bucket_id == OFPG15_BUCKET_LAST) {
        if (!ovs_list_is_empty(&ofgroup->buckets)) {
            struct ofputil_bucket *new_first;
            const struct ofputil_bucket *first;

            /* Move the copies of the old buckets to the front so that the
             * newly inserted buckets end up last. */
            first = ofputil_bucket_list_front(&ofgroup->buckets);
            new_first = ofputil_bucket_find(&new_ofgroup->buckets,
                                            first->bucket_id);

            ovs_list_splice(new_ofgroup->buckets.next, &new_first->list_node,
                            CONST_CAST(struct ovs_list *,
                                       &new_ofgroup->buckets));
        }
    } else if (command_bucket_id <= OFPG15_BUCKET_MAX && last) {
        struct ofputil_bucket *after;

        /* Presence of bucket is checked above so after should never be NULL */
        after = ofputil_bucket_find(&new_ofgroup->buckets, command_bucket_id);

        /* Move the group mod's buckets to just after 'command_bucket_id'. */
        ovs_list_splice(after->list_node.next, new_ofgroup->buckets.next,
                        last->list_node.next);
    }

    return 0;
}
7111
7112/* Appends all of the a copy of all the buckets from 'ofgroup' to 'new_ofgroup'
7113 * with the exception of the bucket whose bucket id is 'command_bucket_id'.
7114 * Special 'command_bucket_id' values OFPG15_BUCKET_FIRST, OFPG15_BUCKET_LAST
7115 * and OFPG15_BUCKET_ALL are also honored. */
7116static enum ofperr
7117copy_buckets_for_remove_bucket(const struct ofgroup *ofgroup,
7118 struct ofgroup *new_ofgroup,
7119 uint32_t command_bucket_id)
7120{
7121 const struct ofputil_bucket *skip = NULL;
7122
7123 if (command_bucket_id == OFPG15_BUCKET_ALL) {
7124 return 0;
7125 }
7126
7127 if (command_bucket_id == OFPG15_BUCKET_FIRST) {
7128 if (!ovs_list_is_empty(&ofgroup->buckets)) {
7129 skip = ofputil_bucket_list_front(&ofgroup->buckets);
7130 }
7131 } else if (command_bucket_id == OFPG15_BUCKET_LAST) {
7132 if (!ovs_list_is_empty(&ofgroup->buckets)) {
7133 skip = ofputil_bucket_list_back(&ofgroup->buckets);
7134 }
7135 } else {
7136 skip = ofputil_bucket_find(&ofgroup->buckets, command_bucket_id);
7137 if (!skip) {
7138 return OFPERR_OFPGMFC_UNKNOWN_BUCKET;
7139 }
7140 }
7141
7142 ofputil_bucket_clone_list(CONST_CAST(struct ovs_list *,
7143 &new_ofgroup->buckets),
7144 &ofgroup->buckets, skip);
7145
7146 return 0;
7147}
7148
7149/* Implements OFPGC11_MODIFY, OFPGC15_INSERT_BUCKET and
7150 * OFPGC15_REMOVE_BUCKET. Returns 0 on success or an OpenFlow error code
7151 * on failure.
7152 *
7153 * Note that the group is re-created and then replaces the old group in
7154 * ofproto's ofgroup hash map. Thus, the group is never altered while users of
7155 * the xlate module hold a pointer to the group. */
static enum ofperr
modify_group_start(struct ofproto *ofproto, struct ofproto_group_mod *ogm)
    OVS_REQUIRES(ofproto_mutex)
{
    struct ofgroup *old_group;          /* Modified group. */
    struct ofgroup *new_group;
    enum ofperr error;

    /* The group being modified must exist in the latest version. */
    old_group = ofproto_group_lookup__(ofproto, ogm->gm.group_id,
                                       OVS_VERSION_MAX);
    if (!old_group) {
        return OFPERR_OFPGMFC_UNKNOWN_GROUP;
    }

    /* Inserting or deleting a bucket should not change the group's type or
     * properties, so change the group mod so that these aspects match the old
     * group.  (See EXT-570.) */
    if (ogm->gm.command == OFPGC15_INSERT_BUCKET ||
        ogm->gm.command == OFPGC15_REMOVE_BUCKET) {
        ogm->gm.type = old_group->type;
        ofputil_group_properties_destroy(&ogm->gm.props);
        ofputil_group_properties_copy(&ogm->gm.props, &old_group->props);
    }

    /* A type change consumes a slot of the new type; make sure one is
     * available before committing to anything. */
    if (old_group->type != ogm->gm.type
        && (ofproto->n_groups[ogm->gm.type]
            >= ofproto->ogf.max_groups[ogm->gm.type])) {
        return OFPERR_OFPGMFC_OUT_OF_GROUPS;
    }

    /* Build the replacement group; the old one is never modified in place so
     * that concurrent xlate users holding a pointer to it stay safe. */
    error = init_group(ofproto, &ogm->gm, ogm->version, &ogm->new_group);
    if (error) {
        return error;
    }
    new_group = ogm->new_group;

    /* Manipulate bucket list for bucket commands */
    if (ogm->gm.command == OFPGC15_INSERT_BUCKET) {
        error = copy_buckets_for_insert_bucket(old_group, new_group,
                                               ogm->gm.command_bucket_id);
    } else if (ogm->gm.command == OFPGC15_REMOVE_BUCKET) {
        error = copy_buckets_for_remove_bucket(old_group, new_group,
                                               ogm->gm.command_bucket_id);
    }
    if (error) {
        goto out;
    }

    /* The group creation time does not change during modification. */
    *CONST_CAST(long long int *, &(new_group->created)) = old_group->created;
    *CONST_CAST(long long int *, &(new_group->modified)) = time_msec();

    /* Remember the replaced group so that finish/revert can process it. */
    group_collection_add(&ogm->old_groups, old_group);

    /* Mark the old group for deletion. */
    versions_set_remove_version(&old_group->versions, ogm->version);
    /* Insert replacement group. */
    cmap_insert(&ofproto->groups, &new_group->cmap_node,
                hash_int(new_group->group_id, 0));
    /* Transfer rules. */
    rule_collection_move(&new_group->rules, &old_group->rules);

    /* Keep the per-type accounting correct across a type change. */
    if (old_group->type != new_group->type) {
        ofproto->n_groups[old_group->type]--;
        ofproto->n_groups[new_group->type]++;
    }
    return 0;

out:
    /* Dropping the only reference destroys the half-built replacement. */
    ofproto_group_unref(new_group);
    return error;
}
7228
7229/* Implements the OFPGC11_ADD_OR_MOD command which creates the group when it does not
7230 * exist yet and modifies it otherwise */
7231static enum ofperr
7232add_or_modify_group_start(struct ofproto *ofproto,
7233 struct ofproto_group_mod *ogm)
7234 OVS_REQUIRES(ofproto_mutex)
7235{
7236 enum ofperr error;
7237
7238 if (!ofproto_group_exists(ofproto, ogm->gm.group_id)) {
7239 error = add_group_start(ofproto, ogm);
7240 } else {
7241 error = modify_group_start(ofproto, ogm);
7242 }
7243
7244 return error;
7245}
7246
/* Starts deletion of 'group' in 'version': marks the group and its rules for
 * removal and collects the group into 'groups' for later finish/revert
 * processing.  The statement order below matters: 'being_deleted' must be set
 * before the flow deletion starts. */
static void
delete_group_start(struct ofproto *ofproto, ovs_version_t version,
                   struct group_collection *groups, struct ofgroup *group)
    OVS_REQUIRES(ofproto_mutex)
{
    /* Makes flow deletion code leave the rule pointers in 'group->rules'
     * intact, so that we can later refer to the rules deleted due to the group
     * deletion.  Rule pointers will be removed from all other groups, if any,
     * so we will never try to delete the same rule twice. */
    group->being_deleted = true;

    /* Mark all the referring groups for deletion. */
    delete_flows_start__(ofproto, version, &group->rules);
    group_collection_add(groups, group);
    versions_set_remove_version(&group->versions, version);
    ofproto->n_groups[group->type]--;
}
7264
7265static void
7266delete_group_finish(struct ofproto *ofproto, struct ofgroup *group)
7267 OVS_REQUIRES(ofproto_mutex)
7268{
7269 /* Finish deletion of all flow entries containing this group in a group
7270 * action. */
7271 delete_flows_finish__(ofproto, &group->rules, OFPRR_GROUP_DELETE, NULL);
7272
7273 /* Group removal is postponed by the caller. */
7274}
7275
7276/* Implements OFPGC11_DELETE. */
7277static void
7278delete_groups_start(struct ofproto *ofproto, struct ofproto_group_mod *ogm)
7279 OVS_REQUIRES(ofproto_mutex)
7280{
7281 struct ofgroup *group;
7282
7283 if (ogm->gm.group_id == OFPG_ALL) {
7284 CMAP_FOR_EACH (group, cmap_node, &ofproto->groups) {
7285 if (versions_visible_in_version(&group->versions, ogm->version)) {
7286 delete_group_start(ofproto, ogm->version, &ogm->old_groups,
7287 group);
7288 }
7289 }
7290 } else {
7291 group = ofproto_group_lookup__(ofproto, ogm->gm.group_id, ogm->version);
7292 if (group) {
7293 delete_group_start(ofproto, ogm->version, &ogm->old_groups, group);
7294 }
7295 }
7296}
7297
7298static enum ofperr
7299ofproto_group_mod_start(struct ofproto *ofproto, struct ofproto_group_mod *ogm)
7300 OVS_REQUIRES(ofproto_mutex)
7301{
7302 enum ofperr error;
7303
7304 ogm->new_group = NULL;
7305 group_collection_init(&ogm->old_groups);
7306
7307 switch (ogm->gm.command) {
7308 case OFPGC11_ADD:
7309 error = add_group_start(ofproto, ogm);
7310 break;
7311
7312 case OFPGC11_MODIFY:
7313 error = modify_group_start(ofproto, ogm);
7314 break;
7315
7316 case OFPGC11_ADD_OR_MOD:
7317 error = add_or_modify_group_start(ofproto, ogm);
7318 break;
7319
7320 case OFPGC11_DELETE:
7321 delete_groups_start(ofproto, ogm);
7322 error = 0;
7323 break;
7324
7325 case OFPGC15_INSERT_BUCKET:
7326 error = modify_group_start(ofproto, ogm);
7327 break;
7328
7329 case OFPGC15_REMOVE_BUCKET:
7330 error = modify_group_start(ofproto, ogm);
7331 break;
7332
7333 default:
7334 if (ogm->gm.command > OFPGC11_DELETE) {
7335 VLOG_INFO_RL(&rl, "%s: Invalid group_mod command type %d",
7336 ofproto->name, ogm->gm.command);
7337 }
7338 error = OFPERR_OFPGMFC_BAD_COMMAND;
7339 break;
7340 }
7341 return error;
7342}
7343
/* Undoes the effects of a successful ofproto_group_mod_start(): restores any
 * replaced or deleted groups and discards the not-yet-visible replacement
 * group, if one was created. */
static void
ofproto_group_mod_revert(struct ofproto *ofproto,
                         struct ofproto_group_mod *ogm)
    OVS_REQUIRES(ofproto_mutex)
{
    struct ofgroup *new_group = ogm->new_group;
    struct ofgroup *old_group;

    /* Restore replaced or deleted groups. */
    GROUP_COLLECTION_FOR_EACH (old_group, &ogm->old_groups) {
        /* Re-account the group that was marked for removal. */
        ofproto->n_groups[old_group->type]++;
        if (new_group) {
            /* A replacement exists, so this was a modify: exactly one old
             * group can have been replaced. */
            ovs_assert(group_collection_n(&ogm->old_groups) == 1);
            /* Transfer rules back. */
            rule_collection_move(&old_group->rules, &new_group->rules);
        } else {
            /* No replacement, so this was a delete. */
            old_group->being_deleted = false;
            /* Revert rule deletion. */
            delete_flows_revert__(ofproto, &old_group->rules);
        }
        /* Restore visibility. */
        versions_set_remove_version(&old_group->versions,
                                    OVS_VERSION_NOT_REMOVED);
    }
    if (new_group) {
        /* Remove the new group immediately.  It was never visible to
         * lookups. */
        cmap_remove(&ofproto->groups, &new_group->cmap_node,
                    hash_int(new_group->group_id, 0));
        ofproto->n_groups[new_group->type]--;
        ofproto_group_unref(new_group);
    }
}
7377
/* Completes a successfully started group mod: lets the provider react to a
 * modification, finishes deletion of the old groups' rules, and notifies
 * other controllers via requestforward if 'req' is nonnull. */
static void
ofproto_group_mod_finish(struct ofproto *ofproto,
                         struct ofproto_group_mod *ogm,
                         const struct openflow_mod_requester *req)
    OVS_REQUIRES(ofproto_mutex)
{
    struct ofgroup *new_group = ogm->new_group;
    struct ofgroup *old_group;

    /* Both a replacement group and a replaced group exist only for a
     * modification. */
    if (new_group && group_collection_n(&ogm->old_groups) &&
        ofproto->ofproto_class->group_modify) {
        /* Modify a group. */
        ovs_assert(group_collection_n(&ogm->old_groups) == 1);

        /* XXX: OK to lose old group's stats? */
        ofproto->ofproto_class->group_modify(new_group);
    }

    /* Delete old groups. */
    GROUP_COLLECTION_FOR_EACH(old_group, &ogm->old_groups) {
        delete_group_finish(ofproto, old_group);
    }
    /* Actual freeing of the old groups is deferred until it is RCU-safe. */
    remove_groups_postponed(&ogm->old_groups);

    if (req) {
        /* Forward the group mod to other controllers that asked for it. */
        struct ofputil_requestforward rf;
        rf.xid = req->request->xid;
        rf.reason = OFPRFR_GROUP_MOD;
        rf.group_mod = &ogm->gm;
        rf.new_buckets = new_group ? &new_group->buckets : NULL;
        rf.group_existed = group_collection_n(&ogm->old_groups) > 0;
        connmgr_send_requestforward(ofproto->connmgr, req->ofconn, &rf);
    }
}
7412
7413/* Delete all groups from 'ofproto'.
7414 *
7415 * This is intended for use within an ofproto provider's 'destruct'
7416 * function. */
7417static void
7418ofproto_group_delete_all__(struct ofproto *ofproto)
7419 OVS_REQUIRES(ofproto_mutex)
7420{
7421 struct ofproto_group_mod ogm;
7422 ogm.gm.command = OFPGC11_DELETE;
7423 ogm.gm.group_id = OFPG_ALL;
7424 ogm.version = ofproto->tables_version + 1;
7425
7426 ofproto_group_mod_start(ofproto, &ogm);
7427 ofproto_bump_tables_version(ofproto);
7428 ofproto_group_mod_finish(ofproto, &ogm, NULL);
7429}
7430
7431/* Delete all groups from 'ofproto'.
7432 *
7433 * This is intended for use within an ofproto provider's 'destruct'
7434 * function. */
7435void
7436ofproto_group_delete_all(struct ofproto *ofproto)
7437 OVS_EXCLUDED(ofproto_mutex)
7438{
7439 ovs_mutex_lock(&ofproto_mutex);
7440 ofproto_group_delete_all__(ofproto);
7441 ovs_mutex_unlock(&ofproto_mutex);
7442}
7443
/* Handles an OFPT_GROUP_MOD request from 'ofconn': decodes it, applies it
 * under ofproto_mutex, and bumps the tables version on success.  Returns 0 on
 * success or an OpenFlow error code on failure. */
static enum ofperr
handle_group_mod(struct ofconn *ofconn, const struct ofp_header *oh)
    OVS_EXCLUDED(ofproto_mutex)
{
    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
    struct ofproto_group_mod ogm;
    enum ofperr error;

    /* Slave controllers may not modify state. */
    error = reject_slave_controller(ofconn);
    if (error) {
        return error;
    }

    error = ofputil_decode_group_mod(oh, &ogm.gm);
    if (error) {
        return error;
    }

    ovs_mutex_lock(&ofproto_mutex);
    /* Changes take effect in the next table version. */
    ogm.version = ofproto->tables_version + 1;
    error = ofproto_group_mod_start(ofproto, &ogm);
    if (!error) {
        struct openflow_mod_requester req = { ofconn, oh };

        /* Publish the new version, then complete the mod and flush any
         * pending monitor updates. */
        ofproto_bump_tables_version(ofproto);
        ofproto_group_mod_finish(ofproto, &ogm, &req);
        ofmonitor_flush(ofproto->connmgr);
    }
    ovs_mutex_unlock(&ofproto_mutex);

    /* Release the storage allocated by decoding, regardless of outcome. */
    ofputil_uninit_group_mod(&ogm.gm);

    return error;
}
7478
7479enum ofputil_table_miss
7480ofproto_table_get_miss_config(const struct ofproto *ofproto, uint8_t table_id)
7481{
7482 enum ofputil_table_miss miss;
7483
7484 atomic_read_relaxed(&ofproto->tables[table_id].miss_config, &miss);
7485 return miss;
7486}
7487
/* Applies the already-validated table mod 'tm' to the single table 'oftable':
 * miss behavior, OpenFlow-requested eviction, and vacancy events. */
static void
table_mod__(struct oftable *oftable,
            const struct ofputil_table_mod *tm)
{
    if (tm->miss == OFPUTIL_TABLE_MISS_DEFAULT) {
        /* This is how an OFPT_TABLE_MOD decodes if it doesn't specify any
         * table-miss configuration (because the protocol used doesn't have
         * such a concept), so there's nothing to do. */
    } else {
        atomic_store_relaxed(&oftable->miss_config, tm->miss);
    }

    /* Fold the request into the table's eviction flags; only the
     * OpenFlow-controlled bit changes here. */
    unsigned int new_eviction = oftable->eviction;
    if (tm->eviction == OFPUTIL_TABLE_EVICTION_ON) {
        new_eviction |= EVICTION_OPENFLOW;
    } else if (tm->eviction == OFPUTIL_TABLE_EVICTION_OFF) {
        new_eviction &= ~EVICTION_OPENFLOW;
    }

    if (new_eviction != oftable->eviction) {
        /* Reconfiguring eviction requires the ofproto mutex; the existing
         * eviction fields are preserved. */
        ovs_mutex_lock(&ofproto_mutex);
        oftable_configure_eviction(oftable, new_eviction,
                                   oftable->eviction_fields,
                                   oftable->n_eviction_fields);
        ovs_mutex_unlock(&ofproto_mutex);
    }

    if (tm->vacancy != OFPUTIL_TABLE_VACANCY_DEFAULT) {
        ovs_mutex_lock(&ofproto_mutex);
        oftable->vacancy_down = tm->table_vacancy.vacancy_down;
        oftable->vacancy_up = tm->table_vacancy.vacancy_up;
        if (tm->vacancy == OFPUTIL_TABLE_VACANCY_OFF) {
            oftable->vacancy_event = 0;
        } else if (!oftable->vacancy_event) {
            /* Arm the next vacancy event based on the table's current fill
             * level relative to the new thresholds. */
            uint8_t vacancy = oftable_vacancy(oftable);
            oftable->vacancy_event = (vacancy < oftable->vacancy_up
                                      ? OFPTR_VACANCY_UP
                                      : OFPTR_VACANCY_DOWN);
        }
        ovs_mutex_unlock(&ofproto_mutex);
    }
}
7530
7531static enum ofperr
7532table_mod(struct ofproto *ofproto, const struct ofputil_table_mod *tm)
7533{
7534 if (!check_table_id(ofproto, tm->table_id)) {
7535 return OFPERR_OFPTMFC_BAD_TABLE;
7536 }
7537
7538 /* Don't allow the eviction flags to be changed (except to the only fixed
7539 * value that OVS supports). OF1.4 says this is normal: "The
7540 * OFPTMPT_EVICTION property usually cannot be modified using a
7541 * OFP_TABLE_MOD request, because the eviction mechanism is switch
7542 * defined". */
7543 if (tm->eviction_flags != UINT32_MAX
7544 && tm->eviction_flags != OFPROTO_EVICTION_FLAGS) {
7545 return OFPERR_OFPTMFC_BAD_CONFIG;
7546 }
7547
7548 if (tm->table_id == OFPTT_ALL) {
7549 struct oftable *oftable;
7550 OFPROTO_FOR_EACH_TABLE (oftable, ofproto) {
7551 if (!(oftable->flags & (OFTABLE_HIDDEN | OFTABLE_READONLY))) {
7552 table_mod__(oftable, tm);
7553 }
7554 }
7555 } else {
7556 struct oftable *oftable = &ofproto->tables[tm->table_id];
7557 if (oftable->flags & OFTABLE_READONLY) {
7558 return OFPERR_OFPTMFC_EPERM;
7559 }
7560 table_mod__(oftable, tm);
7561 }
7562
7563 return 0;
7564}
7565
7566static enum ofperr
7567handle_table_mod(struct ofconn *ofconn, const struct ofp_header *oh)
7568{
7569 struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
7570 struct ofputil_table_mod tm;
7571 enum ofperr error;
7572
7573 error = reject_slave_controller(ofconn);
7574 if (error) {
7575 return error;
7576 }
7577
7578 error = ofputil_decode_table_mod(oh, &tm);
7579 if (error) {
7580 return error;
7581 }
7582
7583 return table_mod(ofproto, &tm);
7584}
7585
7586/* Free resources that may be allocated by ofproto_flow_mod_init(). */
7587void
7588ofproto_flow_mod_uninit(struct ofproto_flow_mod *ofm)
7589{
7590 if (ofm->temp_rule) {
7591 ofproto_rule_unref(ofm->temp_rule);
7592 ofm->temp_rule = NULL;
7593 }
7594 if (ofm->criteria.version != OVS_VERSION_NOT_REMOVED) {
7595 rule_criteria_destroy(&ofm->criteria);
7596 }
7597 if (ofm->conjs) {
7598 free(ofm->conjs);
7599 ofm->conjs = NULL;
7600 ofm->n_conjs = 0;
7601 }
7602}
7603
7604/* Initializes 'ofm' with 'ofproto', 'fm', and 'rule'. 'rule' may be null, but
7605 * if it is nonnull then the caller must own a reference to it, which on
7606 * success is transferred to 'ofm' and on failure is unreffed. */
static enum ofperr
ofproto_flow_mod_init(struct ofproto *ofproto, struct ofproto_flow_mod *ofm,
                      const struct ofputil_flow_mod *fm, struct rule *rule)
    OVS_EXCLUDED(ofproto_mutex)
{
    enum ofperr error;

    /* Forward flow mod fields we need later. */
    ofm->command = fm->command;
    ofm->modify_cookie = fm->modify_cookie;

    /* A modify may create the flow if the new cookie is fully specified and
     * no cookie mask restricts the match. */
    ofm->modify_may_add_flow = (fm->new_cookie != OVS_BE64_MAX
                                && fm->cookie_mask == htonll(0));
    /* Old flags must be kept when modifying a flow, but we still must
     * honor the reset counts flag if present in the flow mod. */
    ofm->modify_keep_counts = !(fm->flags & OFPUTIL_FF_RESET_COUNTS);

    /* Initialize state needed by ofproto_flow_mod_uninit(). */
    ofm->temp_rule = rule;                      /* Reference transferred. */
    ofm->criteria.version = OVS_VERSION_NOT_REMOVED;
    ofm->conjs = NULL;
    ofm->n_conjs = 0;

    /* Buffered packets are not supported, so add/modify commands with a
     * buffer id are rejected below. */
    bool check_buffer_id = false;

    switch (ofm->command) {
    case OFPFC_ADD:
        check_buffer_id = true;
        error = add_flow_init(ofproto, ofm, fm);
        break;
    case OFPFC_MODIFY:
        check_buffer_id = true;
        error = modify_flows_init_loose(ofproto, ofm, fm);
        break;
    case OFPFC_MODIFY_STRICT:
        check_buffer_id = true;
        error = modify_flow_init_strict(ofproto, ofm, fm);
        break;
    case OFPFC_DELETE:
        error = delete_flows_init_loose(ofproto, ofm, fm);
        break;
    case OFPFC_DELETE_STRICT:
        error = delete_flows_init_strict(ofproto, ofm, fm);
        break;
    default:
        error = OFPERR_OFPFMFC_BAD_COMMAND;
        break;
    }
    if (!error && check_buffer_id && fm->buffer_id != UINT32_MAX) {
        error = OFPERR_OFPBRC_BUFFER_UNKNOWN;
    }

    /* On failure everything initialized above is released, including the
     * caller's reference to 'rule'. */
    if (error) {
        ofproto_flow_mod_uninit(ofm);
    }
    return error;
}
7664
7665static enum ofperr
7666ofproto_flow_mod_start(struct ofproto *ofproto, struct ofproto_flow_mod *ofm)
7667 OVS_REQUIRES(ofproto_mutex)
7668{
7669 enum ofperr error;
7670
7671 rule_collection_init(&ofm->old_rules);
7672 rule_collection_init(&ofm->new_rules);
7673
7674 switch (ofm->command) {
7675 case OFPFC_ADD:
7676 error = add_flow_start(ofproto, ofm);
7677 break;
7678 case OFPFC_MODIFY:
7679 error = modify_flows_start_loose(ofproto, ofm);
7680 break;
7681 case OFPFC_MODIFY_STRICT:
7682 error = modify_flow_start_strict(ofproto, ofm);
7683 break;
7684 case OFPFC_DELETE:
7685 error = delete_flows_start_loose(ofproto, ofm);
7686 break;
7687 case OFPFC_DELETE_STRICT:
7688 error = delete_flow_start_strict(ofproto, ofm);
7689 break;
7690 default:
7691 OVS_NOT_REACHED();
7692 }
7693 /* Release resources not needed after start. */
7694 ofproto_flow_mod_uninit(ofm);
7695
7696 if (error) {
7697 rule_collection_destroy(&ofm->old_rules);
7698 rule_collection_destroy(&ofm->new_rules);
7699 }
7700 return error;
7701}
7702
7703static void
7704ofproto_flow_mod_revert(struct ofproto *ofproto, struct ofproto_flow_mod *ofm)
7705 OVS_REQUIRES(ofproto_mutex)
7706{
7707 switch (ofm->command) {
7708 case OFPFC_ADD:
7709 add_flow_revert(ofproto, ofm);
7710 break;
7711
7712 case OFPFC_MODIFY:
7713 case OFPFC_MODIFY_STRICT:
7714 modify_flows_revert(ofproto, ofm);
7715 break;
7716
7717 case OFPFC_DELETE:
7718 case OFPFC_DELETE_STRICT:
7719 delete_flows_revert(ofproto, ofm);
7720 break;
7721
7722 default:
7723 break;
7724 }
7725
7726 rule_collection_destroy(&ofm->old_rules);
7727 rule_collection_destroy(&ofm->new_rules);
7728}
7729
7730static void
7731ofproto_flow_mod_finish(struct ofproto *ofproto, struct ofproto_flow_mod *ofm,
7732 const struct openflow_mod_requester *req)
7733 OVS_REQUIRES(ofproto_mutex)
7734{
7735 switch (ofm->command) {
7736 case OFPFC_ADD:
7737 add_flow_finish(ofproto, ofm, req);
7738 break;
7739
7740 case OFPFC_MODIFY:
7741 case OFPFC_MODIFY_STRICT:
7742 modify_flows_finish(ofproto, ofm, req);
7743 break;
7744
7745 case OFPFC_DELETE:
7746 case OFPFC_DELETE_STRICT:
7747 delete_flows_finish(ofproto, ofm, req);
7748 break;
7749
7750 default:
7751 break;
7752 }
7753
7754 rule_collection_destroy(&ofm->old_rules);
7755 rule_collection_destroy(&ofm->new_rules);
7756
7757 if (req) {
7758 ofconn_report_flow_mod(req->ofconn, ofm->command);
7759 }
7760}
7761
7762/* Commit phases (all while locking ofproto_mutex):
7763 *
7764 * 1. Begin: Gather resources and make changes visible in the next version.
7765 * - Mark affected rules for removal in the next version.
7766 * - Create new replacement rules, make visible in the next
7767 * version.
7768 * - Do not send any events or notifications.
7769 *
7770 * 2. Revert: Fail if any errors are found. After this point no errors are
7771 * possible. No visible changes were made, so rollback is minimal (remove
7772 * added invisible rules, restore visibility of rules marked for removal).
7773 *
7774 * 3. Finish: Make the changes visible for lookups. Insert replacement rules to
7775 * the ofproto provider. Remove replaced and deleted rules from ofproto data
7776 * structures, and Schedule postponed removal of deleted rules from the
7777 * classifier. Send notifications, buffered packets, etc.
7778 */
7779static enum ofperr
7780do_bundle_commit(struct ofconn *ofconn, uint32_t id, uint16_t flags)
7781{
7782 struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
7783 ovs_version_t version = ofproto->tables_version + 1;
7784 struct ofp_bundle *bundle;
7785 struct ofp_bundle_entry *be;
7786 enum ofperr error;
7787
7788 bundle = ofconn_get_bundle(ofconn, id);
7789
7790 if (!bundle) {
7791 return OFPERR_OFPBFC_BAD_ID;
7792 }
7793 if (bundle->flags != flags) {
7794 error = OFPERR_OFPBFC_BAD_FLAGS;
7795 } else {
7796 bool prev_is_port_mod = false;
7797
7798 error = 0;
7799 ovs_mutex_lock(&ofproto_mutex);
7800
7801 /* 1. Begin. */
7802 LIST_FOR_EACH (be, node, &bundle->msg_list) {
7803 if (be->type == OFPTYPE_PORT_MOD) {
7804 /* Our port mods are not atomic. */
7805 if (flags & OFPBF_ATOMIC) {
7806 error = OFPERR_OFPBFC_MSG_FAILED;
7807 } else {
7808 prev_is_port_mod = true;
7809 error = port_mod_start(ofconn, &be->opm.pm, &be->opm.port);
7810 }
7811 } else {
7812 /* Flow & group mods between port mods are applied as a single
7813 * version, but the versions are published only after we know
7814 * the commit is successful. */
7815 if (prev_is_port_mod) {
7816 prev_is_port_mod = false;
7817 ++version;
7818 }
7819 if (be->type == OFPTYPE_FLOW_MOD) {
7820 /* Store the version in which the changes should take
7821 * effect. */
7822 be->ofm.version = version;
7823 error = ofproto_flow_mod_start(ofproto, &be->ofm);
7824 } else if (be->type == OFPTYPE_GROUP_MOD) {
7825 /* Store the version in which the changes should take
7826 * effect. */
7827 be->ogm.version = version;
7828 error = ofproto_group_mod_start(ofproto, &be->ogm);
7829 } else if (be->type == OFPTYPE_PACKET_OUT) {
7830 be->opo.version = version;
7831 error = ofproto_packet_out_start(ofproto, &be->opo);
7832 } else {
7833 OVS_NOT_REACHED();
7834 }
7835 }
7836 if (error) {
7837 break;
7838 }
7839 }
7840
7841 if (error) {
7842 /* Send error referring to the original message. */
7843 if (error) {
7844 ofconn_send_error(ofconn, be->msg, error);
7845 error = OFPERR_OFPBFC_MSG_FAILED;
7846 }
7847
7848 /* 2. Revert. Undo all the changes made above. */
7849 LIST_FOR_EACH_REVERSE_CONTINUE(be, node, &bundle->msg_list) {
7850 if (be->type == OFPTYPE_FLOW_MOD) {
7851 ofproto_flow_mod_revert(ofproto, &be->ofm);
7852 } else if (be->type == OFPTYPE_GROUP_MOD) {
7853 ofproto_group_mod_revert(ofproto, &be->ogm);
7854 } else if (be->type == OFPTYPE_PACKET_OUT) {
7855 ofproto_packet_out_revert(ofproto, &be->opo);
7856 }
7857 /* Nothing needs to be reverted for a port mod. */
7858 }
7859 } else {
7860 /* 4. Finish. */
7861 LIST_FOR_EACH (be, node, &bundle->msg_list) {
7862 if (be->type == OFPTYPE_PORT_MOD) {
7863 /* Perform the actual port mod. This is not atomic, i.e.,
7864 * the effects will be immediately seen by upcall
7865 * processing regardless of the lookup version. It should
7866 * be noted that port configuration changes can originate
7867 * also from OVSDB changes asynchronously to all upcall
7868 * processing. */
7869 port_mod_finish(ofconn, &be->opm.pm, be->opm.port);
7870 } else {
7871 version =
7872 (be->type == OFPTYPE_FLOW_MOD) ? be->ofm.version :
7873 (be->type == OFPTYPE_GROUP_MOD) ? be->ogm.version :
7874 (be->type == OFPTYPE_PACKET_OUT) ? be->opo.version :
7875 version;
7876
7877 /* Bump the lookup version to the one of the current
7878 * message. This makes all the changes in the bundle at
7879 * this version visible to lookups at once. */
7880 if (ofproto->tables_version < version) {
7881 ofproto->tables_version = version;
7882 ofproto->ofproto_class->set_tables_version(
7883 ofproto, ofproto->tables_version);
7884 }
7885
7886 struct openflow_mod_requester req = { ofconn, be->msg };
7887
7888 if (be->type == OFPTYPE_FLOW_MOD) {
7889 ofproto_flow_mod_finish(ofproto, &be->ofm, &req);
7890 } else if (be->type == OFPTYPE_GROUP_MOD) {
7891 ofproto_group_mod_finish(ofproto, &be->ogm, &req);
7892 } else if (be->type == OFPTYPE_PACKET_OUT) {
7893 ofproto_packet_out_finish(ofproto, &be->opo);
7894 }
7895 }
7896 }
7897 }
7898
7899 ofmonitor_flush(ofproto->connmgr);
7900 ovs_mutex_unlock(&ofproto_mutex);
7901 }
7902
7903 /* The bundle is discarded regardless the outcome. */
7904 ofp_bundle_remove__(ofconn, bundle);
7905 return error;
7906}
7907
7908static enum ofperr
7909handle_bundle_control(struct ofconn *ofconn, const struct ofp_header *oh)
7910{
7911 struct ofputil_bundle_ctrl_msg bctrl;
7912 struct ofputil_bundle_ctrl_msg reply;
7913 struct ofpbuf *buf;
7914 enum ofperr error;
7915
7916 error = reject_slave_controller(ofconn);
7917 if (error) {
7918 return error;
7919 }
7920
7921 error = ofputil_decode_bundle_ctrl(oh, &bctrl);
7922 if (error) {
7923 return error;
7924 }
7925 reply.flags = 0;
7926 reply.bundle_id = bctrl.bundle_id;
7927
7928 switch (bctrl.type) {
7929 case OFPBCT_OPEN_REQUEST:
7930 error = ofp_bundle_open(ofconn, bctrl.bundle_id, bctrl.flags, oh);
7931 reply.type = OFPBCT_OPEN_REPLY;
7932 break;
7933 case OFPBCT_CLOSE_REQUEST:
7934 error = ofp_bundle_close(ofconn, bctrl.bundle_id, bctrl.flags);
7935 reply.type = OFPBCT_CLOSE_REPLY;
7936 break;
7937 case OFPBCT_COMMIT_REQUEST:
7938 error = do_bundle_commit(ofconn, bctrl.bundle_id, bctrl.flags);
7939 reply.type = OFPBCT_COMMIT_REPLY;
7940 break;
7941 case OFPBCT_DISCARD_REQUEST:
7942 error = ofp_bundle_discard(ofconn, bctrl.bundle_id);
7943 reply.type = OFPBCT_DISCARD_REPLY;
7944 break;
7945
7946 case OFPBCT_OPEN_REPLY:
7947 case OFPBCT_CLOSE_REPLY:
7948 case OFPBCT_COMMIT_REPLY:
7949 case OFPBCT_DISCARD_REPLY:
7950 return OFPERR_OFPBFC_BAD_TYPE;
7951 break;
7952 }
7953
7954 if (!error) {
7955 buf = ofputil_encode_bundle_ctrl_reply(oh, &reply);
7956 ofconn_send_reply(ofconn, buf);
7957 }
7958 return error;
7959}
7960
/* Handles an OFPT_BUNDLE_ADD_MESSAGE: decodes the embedded message into a
 * newly allocated bundle entry and appends the entry to the named bundle.
 * Ownership of the entry passes to the bundle on success; on failure the
 * entry (and anything decoded into it) is released here. */
static enum ofperr
handle_bundle_add(struct ofconn *ofconn, const struct ofp_header *oh)
    OVS_EXCLUDED(ofproto_mutex)
{
    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
    enum ofperr error;
    struct ofputil_bundle_add_msg badd;
    enum ofptype type;

    /* Slave controllers may not modify state. */
    error = reject_slave_controller(ofconn);
    if (error) {
        return error;
    }

    error = ofputil_decode_bundle_add(oh, &badd, &type);
    if (error) {
        return error;
    }

    /* Allocate bundle entry and decode the embedded message. */
    struct ofp_bundle_entry *bmsg = xmalloc(sizeof *bmsg);

    /* Scratch buffer for decoded actions; flow mod and packet out decoding
     * below may grow it past the stack stub. */
    struct ofpbuf ofpacts;
    uint64_t ofpacts_stub[1024 / 8];
    ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);

    if (type == OFPTYPE_PORT_MOD) {
        error = ofputil_decode_port_mod(badd.msg, &bmsg->opm.pm, false);
    } else if (type == OFPTYPE_FLOW_MOD) {
        struct ofputil_flow_mod fm;

        error = ofputil_decode_flow_mod(&fm, badd.msg,
                                        ofconn_get_protocol(ofconn),
                                        ofproto_get_tun_tab(ofproto),
                                        &ofproto->vl_mff_map, &ofpacts,
                                        u16_to_ofp(ofproto->max_ports),
                                        ofproto->n_tables);
        if (!error) {
            /* ofproto_flow_mod_init() copies what it needs out of 'fm';
             * the decoded match is released right after. */
            error = ofproto_flow_mod_init(ofproto, &bmsg->ofm, &fm, NULL);
            minimatch_destroy(&fm.match);
        }
    } else if (type == OFPTYPE_GROUP_MOD) {
        error = ofputil_decode_group_mod(badd.msg, &bmsg->ogm.gm);
    } else if (type == OFPTYPE_PACKET_OUT) {
        struct ofputil_packet_out po;

        COVERAGE_INC(ofproto_packet_out);

        /* Decode message. */
        error = ofputil_decode_packet_out(&po, badd.msg,
                                          ofproto_get_tun_tab(ofproto),
                                          &ofpacts);
        if (!error) {
            po.ofpacts = ofpbuf_steal_data(&ofpacts); /* Move to heap. */
            error = ofproto_packet_out_init(ofproto, ofconn, &bmsg->opo, &po);
        }
    } else {
        OVS_NOT_REACHED();
    }
    ofpbuf_uninit(&ofpacts);
    if (error) {
        free(bmsg);
        return error;
    }

    /* Now that the embedded message has been successfully decoded, finish up
     * initializing the bundle entry. */
    bmsg->type = type;
    bmsg->msg = xmemdup(oh, ntohs(oh->length));

    /* Add bundle entry to bundle. */
    error = ofp_bundle_add_message(ofconn, badd.bundle_id, badd.flags,
                                   bmsg, oh);
    if (error) {
        /* The bundle refused the entry; release it and everything decoded
         * into it. */
        ofp_bundle_entry_free(bmsg);
    }
    return error;
}
8039
/* Handles an NXT_TLV_TABLE_MOD: builds a replacement tunnel metadata table
 * and swaps it in via RCU, keeping the variable-length mf field map in
 * sync. */
static enum ofperr
handle_tlv_table_mod(struct ofconn *ofconn, const struct ofp_header *oh)
{
    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
    struct tun_table *old_tab, *new_tab;
    struct ofputil_tlv_table_mod ttm;
    enum ofperr error;

    /* Slave controllers may not modify state. */
    error = reject_slave_controller(ofconn);
    if (error) {
        return error;
    }

    error = ofputil_decode_tlv_table_mod(oh, &ttm);
    if (error) {
        return error;
    }

    /* Derive a new table from the current one; the old table stays in place
     * until the swap below succeeds. */
    old_tab = ovsrcu_get_protected(struct tun_table *, &ofproto->metadata_tab);
    error = tun_metadata_table_mod(&ttm, old_tab, &new_tab);
    if (!error) {
        /* Keep the variable-length mf field map consistent with the new
         * mappings before publishing them. */
        ovs_mutex_lock(&ofproto->vl_mff_map.mutex);
        error = mf_vl_mff_map_mod_from_tun_metadata(&ofproto->vl_mff_map,
                                                    &ttm);
        ovs_mutex_unlock(&ofproto->vl_mff_map.mutex);
        if (!error) {
            /* Publish the new table; readers may still hold the old one, so
             * its destruction is postponed until an RCU grace period. */
            ovsrcu_set(&ofproto->metadata_tab, new_tab);
            tun_metadata_postpone_free(old_tab);
        } else {
            tun_metadata_free(new_tab);
        }
    }

    /* Release the decoded mappings regardless of outcome. */
    ofputil_uninit_tlv_table(&ttm.mappings);
    return error;
}
8076
8077static enum ofperr
8078handle_tlv_table_request(struct ofconn *ofconn, const struct ofp_header *oh)
8079{
8080 const struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
8081 struct ofputil_tlv_table_reply ttr;
8082 struct ofpbuf *b;
8083
8084 tun_metadata_table_request(ofproto_get_tun_tab(ofproto), &ttr);
8085
8086 b = ofputil_encode_tlv_table_reply(oh, &ttr);
8087 ofputil_uninit_tlv_table(&ttr.mappings);
8088
8089 ofconn_send_reply(ofconn, b);
8090 return 0;
8091}
8092
/* Dispatches the single OpenFlow message in 'msg' to its type-specific
 * handler.  Returns 0 on success or an OpenFlow error code that the caller
 * should send back to the controller. */
static enum ofperr
handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg)
    OVS_EXCLUDED(ofproto_mutex)
{
    const struct ofp_header *oh = msg->data;
    enum ofptype type;
    enum ofperr error;

    error = ofptype_decode(&type, oh);
    if (error) {
        return error;
    }
    if (oh->version >= OFP13_VERSION && ofpmsg_is_stat_request(oh)
        && ofpmp_more(oh)) {
        /* We have no buffer implementation for multipart requests.
         * Report overflow for requests which consists of multiple
         * messages. */
        return OFPERR_OFPBRC_MULTIPART_BUFFER_OVERFLOW;
    }

    switch (type) {
        /* OpenFlow requests. */
    case OFPTYPE_ECHO_REQUEST:
        return handle_echo_request(ofconn, oh);

    case OFPTYPE_FEATURES_REQUEST:
        return handle_features_request(ofconn, oh);

    case OFPTYPE_GET_CONFIG_REQUEST:
        return handle_get_config_request(ofconn, oh);

    case OFPTYPE_SET_CONFIG:
        return handle_set_config(ofconn, oh);

    case OFPTYPE_PACKET_OUT:
        return handle_packet_out(ofconn, oh);

    case OFPTYPE_PORT_MOD:
        return handle_port_mod(ofconn, oh);

    case OFPTYPE_FLOW_MOD:
        return handle_flow_mod(ofconn, oh);

    case OFPTYPE_GROUP_MOD:
        return handle_group_mod(ofconn, oh);

    case OFPTYPE_TABLE_MOD:
        return handle_table_mod(ofconn, oh);

    case OFPTYPE_METER_MOD:
        return handle_meter_mod(ofconn, oh);

    case OFPTYPE_BARRIER_REQUEST:
        return handle_barrier_request(ofconn, oh);

    case OFPTYPE_ROLE_REQUEST:
        return handle_role_request(ofconn, oh);

        /* OpenFlow replies. */
    case OFPTYPE_ECHO_REPLY:
        return 0;

        /* Nicira extension requests. */
    case OFPTYPE_FLOW_MOD_TABLE_ID:
        return handle_nxt_flow_mod_table_id(ofconn, oh);

    case OFPTYPE_SET_FLOW_FORMAT:
        return handle_nxt_set_flow_format(ofconn, oh);

    case OFPTYPE_SET_PACKET_IN_FORMAT:
        return handle_nxt_set_packet_in_format(ofconn, oh);

    case OFPTYPE_SET_CONTROLLER_ID:
        return handle_nxt_set_controller_id(ofconn, oh);

    case OFPTYPE_FLOW_AGE:
        /* Nothing to do. */
        return 0;

    case OFPTYPE_FLOW_MONITOR_CANCEL:
        return handle_flow_monitor_cancel(ofconn, oh);

    case OFPTYPE_SET_ASYNC_CONFIG:
        return handle_nxt_set_async_config(ofconn, oh);

    case OFPTYPE_GET_ASYNC_REQUEST:
        return handle_nxt_get_async_request(ofconn, oh);

    case OFPTYPE_NXT_RESUME:
        return handle_nxt_resume(ofconn, oh);

        /* Statistics requests. */
    case OFPTYPE_DESC_STATS_REQUEST:
        return handle_desc_stats_request(ofconn, oh);

    case OFPTYPE_FLOW_STATS_REQUEST:
        return handle_flow_stats_request(ofconn, oh);

    case OFPTYPE_AGGREGATE_STATS_REQUEST:
        return handle_aggregate_stats_request(ofconn, oh);

    case OFPTYPE_TABLE_STATS_REQUEST:
        return handle_table_stats_request(ofconn, oh);

    case OFPTYPE_TABLE_FEATURES_STATS_REQUEST:
        return handle_table_features_request(ofconn, oh);

    case OFPTYPE_TABLE_DESC_REQUEST:
        return handle_table_desc_request(ofconn, oh);

    case OFPTYPE_PORT_STATS_REQUEST:
        return handle_port_stats_request(ofconn, oh);

    case OFPTYPE_QUEUE_STATS_REQUEST:
        return handle_queue_stats_request(ofconn, oh);

    case OFPTYPE_PORT_DESC_STATS_REQUEST:
        return handle_port_desc_stats_request(ofconn, oh);

    case OFPTYPE_FLOW_MONITOR_STATS_REQUEST:
        return handle_flow_monitor_request(ofconn, oh);

    case OFPTYPE_METER_STATS_REQUEST:
    case OFPTYPE_METER_CONFIG_STATS_REQUEST:
        return handle_meter_request(ofconn, oh, type);

    case OFPTYPE_METER_FEATURES_STATS_REQUEST:
        return handle_meter_features_request(ofconn, oh);

    case OFPTYPE_GROUP_STATS_REQUEST:
        return handle_group_stats_request(ofconn, oh);

    case OFPTYPE_GROUP_DESC_STATS_REQUEST:
        return handle_group_desc_stats_request(ofconn, oh);

    case OFPTYPE_GROUP_FEATURES_STATS_REQUEST:
        return handle_group_features_stats_request(ofconn, oh);

    case OFPTYPE_QUEUE_GET_CONFIG_REQUEST:
        return handle_queue_get_config_request(ofconn, oh);

    case OFPTYPE_BUNDLE_CONTROL:
        return handle_bundle_control(ofconn, oh);

    case OFPTYPE_BUNDLE_ADD_MESSAGE:
        return handle_bundle_add(ofconn, oh);

    case OFPTYPE_NXT_TLV_TABLE_MOD:
        return handle_tlv_table_mod(ofconn, oh);

    case OFPTYPE_NXT_TLV_TABLE_REQUEST:
        return handle_tlv_table_request(ofconn, oh);

    case OFPTYPE_IPFIX_BRIDGE_STATS_REQUEST:
        return handle_ipfix_bridge_stats_request(ofconn, oh);

    case OFPTYPE_IPFIX_FLOW_STATS_REQUEST:
        return handle_ipfix_flow_stats_request(ofconn, oh);

    case OFPTYPE_CT_FLUSH_ZONE:
        return handle_nxt_ct_flush_zone(ofconn, oh);

        /* Asynchronous messages and replies: a switch should only send
         * these, never receive them, so they fall through to the error
         * below. */
    case OFPTYPE_HELLO:
    case OFPTYPE_ERROR:
    case OFPTYPE_FEATURES_REPLY:
    case OFPTYPE_GET_CONFIG_REPLY:
    case OFPTYPE_PACKET_IN:
    case OFPTYPE_FLOW_REMOVED:
    case OFPTYPE_PORT_STATUS:
    case OFPTYPE_BARRIER_REPLY:
    case OFPTYPE_QUEUE_GET_CONFIG_REPLY:
    case OFPTYPE_DESC_STATS_REPLY:
    case OFPTYPE_FLOW_STATS_REPLY:
    case OFPTYPE_QUEUE_STATS_REPLY:
    case OFPTYPE_PORT_STATS_REPLY:
    case OFPTYPE_TABLE_STATS_REPLY:
    case OFPTYPE_AGGREGATE_STATS_REPLY:
    case OFPTYPE_PORT_DESC_STATS_REPLY:
    case OFPTYPE_ROLE_REPLY:
    case OFPTYPE_FLOW_MONITOR_PAUSED:
    case OFPTYPE_FLOW_MONITOR_RESUMED:
    case OFPTYPE_FLOW_MONITOR_STATS_REPLY:
    case OFPTYPE_GET_ASYNC_REPLY:
    case OFPTYPE_GROUP_STATS_REPLY:
    case OFPTYPE_GROUP_DESC_STATS_REPLY:
    case OFPTYPE_GROUP_FEATURES_STATS_REPLY:
    case OFPTYPE_METER_STATS_REPLY:
    case OFPTYPE_METER_CONFIG_STATS_REPLY:
    case OFPTYPE_METER_FEATURES_STATS_REPLY:
    case OFPTYPE_TABLE_FEATURES_STATS_REPLY:
    case OFPTYPE_TABLE_DESC_REPLY:
    case OFPTYPE_ROLE_STATUS:
    case OFPTYPE_REQUESTFORWARD:
    case OFPTYPE_TABLE_STATUS:
    case OFPTYPE_NXT_TLV_TABLE_REPLY:
    case OFPTYPE_IPFIX_BRIDGE_STATS_REPLY:
    case OFPTYPE_IPFIX_FLOW_STATS_REPLY:
    default:
        if (ofpmsg_is_stat_request(oh)) {
            return OFPERR_OFPBRC_BAD_STAT;
        } else {
            return OFPERR_OFPBRC_BAD_TYPE;
        }
    }
}
8298
8299static void
8300handle_openflow(struct ofconn *ofconn, const struct ofpbuf *ofp_msg)
8301 OVS_EXCLUDED(ofproto_mutex)
8302{
8303 enum ofperr error = handle_openflow__(ofconn, ofp_msg);
8304
8305 if (error) {
8306 ofconn_send_error(ofconn, ofp_msg->data, error);
8307 }
8308 COVERAGE_INC(ofproto_recv_openflow);
8309}
8310\f
8311static uint64_t
8312pick_datapath_id(const struct ofproto *ofproto)
8313{
8314 const struct ofport *port;
8315
8316 port = ofproto_get_port(ofproto, OFPP_LOCAL);
8317 if (port) {
8318 struct eth_addr ea;
8319 int error;
8320
8321 error = netdev_get_etheraddr(port->netdev, &ea);
8322 if (!error) {
8323 return eth_addr_to_uint64(ea);
8324 }
8325 VLOG_WARN("%s: could not get MAC address for %s (%s)",
8326 ofproto->name, netdev_get_name(port->netdev),
8327 ovs_strerror(error));
8328 }
8329 return ofproto->fallback_dpid;
8330}
8331
8332static uint64_t
8333pick_fallback_dpid(void)
8334{
8335 struct eth_addr ea;
8336 eth_addr_nicira_random(&ea);
8337 return eth_addr_to_uint64(ea);
8338}
8339\f
8340/* Table overflow policy. */
8341
8342/* Chooses and updates 'rulep' with a rule to evict from 'table'. Sets 'rulep'
8343 * to NULL if the table is not configured to evict rules or if the table
8344 * contains no evictable rules. (Rules with a readlock on their evict rwlock,
8345 * or with no timeouts are not evictable.) */
8346static bool
8347choose_rule_to_evict(struct oftable *table, struct rule **rulep)
8348 OVS_REQUIRES(ofproto_mutex)
8349{
8350 struct eviction_group *evg;
8351
8352 *rulep = NULL;
8353 if (!table->eviction) {
8354 return false;
8355 }
8356
8357 /* In the common case, the outer and inner loops here will each be entered
8358 * exactly once:
8359 *
8360 * - The inner loop normally "return"s in its first iteration. If the
8361 * eviction group has any evictable rules, then it always returns in
8362 * some iteration.
8363 *
8364 * - The outer loop only iterates more than once if the largest eviction
8365 * group has no evictable rules.
8366 *
8367 * - The outer loop can exit only if table's 'max_flows' is all filled up
8368 * by unevictable rules. */
8369 HEAP_FOR_EACH (evg, size_node, &table->eviction_groups_by_size) {
8370 struct rule *rule;
8371
8372 HEAP_FOR_EACH (rule, evg_node, &evg->rules) {
8373 *rulep = rule;
8374 return true;
8375 }
8376 }
8377
8378 return false;
8379}
8380\f
8381/* Eviction groups. */
8382
8383/* Returns the priority to use for an eviction_group that contains 'n_rules'
8384 * rules. The priority contains low-order random bits to ensure that eviction
8385 * groups with the same number of rules are prioritized randomly. */
8386static uint32_t
8387eviction_group_priority(size_t n_rules)
8388{
8389 uint16_t size = MIN(UINT16_MAX, n_rules);
8390 return (size << 16) | random_uint16();
8391}
8392
/* Updates 'evg', an eviction_group within 'table', following a change that
 * adds or removes rules in 'evg'. */
static void
eviction_group_resized(struct oftable *table, struct eviction_group *evg)
    OVS_REQUIRES(ofproto_mutex)
{
    /* Re-key 'evg' in the size-ordered heap so that larger groups remain
     * the preferred victims for eviction (see choose_rule_to_evict()). */
    heap_change(&table->eviction_groups_by_size, &evg->size_node,
                eviction_group_priority(heap_count(&evg->rules)));
}
8402
8403/* Destroys 'evg', an eviction_group within 'table':
8404 *
8405 * - Removes all the rules, if any, from 'evg'. (It doesn't destroy the
8406 * rules themselves, just removes them from the eviction group.)
8407 *
8408 * - Removes 'evg' from 'table'.
8409 *
8410 * - Frees 'evg'. */
8411static void
8412eviction_group_destroy(struct oftable *table, struct eviction_group *evg)
8413 OVS_REQUIRES(ofproto_mutex)
8414{
8415 while (!heap_is_empty(&evg->rules)) {
8416 struct rule *rule;
8417
8418 rule = CONTAINER_OF(heap_pop(&evg->rules), struct rule, evg_node);
8419 rule->eviction_group = NULL;
8420 }
8421 hmap_remove(&table->eviction_groups_by_id, &evg->id_node);
8422 heap_remove(&table->eviction_groups_by_size, &evg->size_node);
8423 heap_destroy(&evg->rules);
8424 free(evg);
8425}
8426
8427/* Removes 'rule' from its eviction group, if any. */
8428static void
8429eviction_group_remove_rule(struct rule *rule)
8430 OVS_REQUIRES(ofproto_mutex)
8431{
8432 if (rule->eviction_group) {
8433 struct oftable *table = &rule->ofproto->tables[rule->table_id];
8434 struct eviction_group *evg = rule->eviction_group;
8435
8436 rule->eviction_group = NULL;
8437 heap_remove(&evg->rules, &rule->evg_node);
8438 if (heap_is_empty(&evg->rules)) {
8439 eviction_group_destroy(table, evg);
8440 } else {
8441 eviction_group_resized(table, evg);
8442 }
8443 }
8444}
8445
/* Hashes the 'rule''s values for the eviction_fields of 'rule''s table, and
 * returns the hash value.  Rules with equal values for all eviction fields
 * hash to the same eviction group. */
static uint32_t
eviction_group_hash_rule(struct rule *rule)
    OVS_REQUIRES(ofproto_mutex)
{
    struct oftable *table = &rule->ofproto->tables[rule->table_id];
    const struct mf_subfield *sf;
    struct flow flow;
    uint32_t hash;

    /* Per-table random basis keeps group IDs unpredictable across tables. */
    hash = table->eviction_group_id_basis;
    miniflow_expand(rule->cr.match.flow, &flow);
    for (sf = table->eviction_fields;
         sf < &table->eviction_fields[table->n_eviction_fields];
         sf++)
    {
        if (mf_are_prereqs_ok(sf->field, &flow, NULL)) {
            union mf_value value;

            mf_get_value(sf->field, &flow, &value);
            /* Mask off the bits below the subfield's offset... */
            if (sf->ofs) {
                bitwise_zero(&value, sf->field->n_bytes, 0, sf->ofs);
            }
            /* ...and above its end, so only the selected bits contribute. */
            if (sf->ofs + sf->n_bits < sf->field->n_bytes * 8) {
                unsigned int start = sf->ofs + sf->n_bits;
                bitwise_zero(&value, sf->field->n_bytes, start,
                             sf->field->n_bytes * 8 - start);
            }
            hash = hash_bytes(&value, sf->field->n_bytes, hash);
        } else {
            /* Prerequisites not met: fold in a constant so the hash still
             * advances for this field position. */
            hash = hash_int(hash, 0);
        }
    }

    return hash;
}
8483
8484/* Returns an eviction group within 'table' with the given 'id', creating one
8485 * if necessary. */
8486static struct eviction_group *
8487eviction_group_find(struct oftable *table, uint32_t id)
8488 OVS_REQUIRES(ofproto_mutex)
8489{
8490 struct eviction_group *evg;
8491
8492 HMAP_FOR_EACH_WITH_HASH (evg, id_node, id, &table->eviction_groups_by_id) {
8493 return evg;
8494 }
8495
8496 evg = xmalloc(sizeof *evg);
8497 hmap_insert(&table->eviction_groups_by_id, &evg->id_node, id);
8498 heap_insert(&table->eviction_groups_by_size, &evg->size_node,
8499 eviction_group_priority(0));
8500 heap_init(&evg->rules);
8501
8502 return evg;
8503}
8504
/* Returns an eviction priority for 'rule'.  The return value should be
 * interpreted so that higher priorities make a rule a more attractive
 * candidate for eviction. */
static uint64_t
rule_eviction_priority(struct ofproto *ofproto, struct rule *rule)
    OVS_REQUIRES(ofproto_mutex)
{
    /* Calculate absolute time when this flow will expire.  If it will never
     * expire, then return 0 to make it unevictable. */
    long long int expiration = LLONG_MAX;
    if (rule->hard_timeout) {
        /* 'modified' needs protection even when we hold 'ofproto_mutex'. */
        ovs_mutex_lock(&rule->mutex);
        long long int modified = rule->modified;
        ovs_mutex_unlock(&rule->mutex);

        expiration = modified + rule->hard_timeout * 1000;
    }
    if (rule->idle_timeout) {
        uint64_t packets, bytes;
        long long int used;
        long long int idle_expiration;

        /* Idle expiration is measured from the rule's last use; take
         * whichever of the two deadlines comes first. */
        ofproto->ofproto_class->rule_get_stats(rule, &packets, &bytes, &used);
        idle_expiration = used + rule->idle_timeout * 1000;
        expiration = MIN(expiration, idle_expiration);
    }
    if (expiration == LLONG_MAX) {
        return 0;
    }

    /* Calculate the time of expiration as a number of (approximate) seconds
     * after program startup.  (The '>> 10' divides milliseconds by 1024,
     * approximating division by 1000.)
     *
     * This should work OK for program runs that last UINT32_MAX seconds or
     * less.  Therefore, please restart OVS at least once every 136 years. */
    uint32_t expiration_ofs = (expiration >> 10) - (time_boot_msec() >> 10);

    /* Combine expiration time with OpenFlow "importance" to form a single
     * priority value.  We want flows with relatively low "importance" to be
     * evicted before even considering expiration time, so put "importance" in
     * the most significant bits and expiration time in the least significant
     * bits.
     *
     * Small 'priority' should be evicted before those with large 'priority'.
     * The caller expects the opposite convention (a large return value being
     * more attractive for eviction) so we invert it before returning. */
    uint64_t priority = ((uint64_t) rule->importance << 32) + expiration_ofs;
    return UINT64_MAX - priority;
}
8555
8556/* Adds 'rule' to an appropriate eviction group for its oftable's
8557 * configuration. Does nothing if 'rule''s oftable doesn't have eviction
8558 * enabled, or if 'rule' is a permanent rule (one that will never expire on its
8559 * own).
8560 *
8561 * The caller must ensure that 'rule' is not already in an eviction group. */
8562static void
8563eviction_group_add_rule(struct rule *rule)
8564 OVS_REQUIRES(ofproto_mutex)
8565{
8566 struct ofproto *ofproto = rule->ofproto;
8567 struct oftable *table = &ofproto->tables[rule->table_id];
8568 bool has_timeout;
8569
8570 /* Timeouts may be modified only when holding 'ofproto_mutex'. We have it
8571 * so no additional protection is needed. */
8572 has_timeout = rule->hard_timeout || rule->idle_timeout;
8573
8574 if (table->eviction && has_timeout) {
8575 struct eviction_group *evg;
8576
8577 evg = eviction_group_find(table, eviction_group_hash_rule(rule));
8578
8579 rule->eviction_group = evg;
8580 heap_insert(&evg->rules, &rule->evg_node,
8581 rule_eviction_priority(ofproto, rule));
8582 eviction_group_resized(table, evg);
8583 }
8584}
8585\f
8586/* oftables. */
8587
8588/* Initializes 'table'. */
8589static void
8590oftable_init(struct oftable *table)
8591{
8592 memset(table, 0, sizeof *table);
8593 classifier_init(&table->cls, flow_segment_u64s);
8594 table->max_flows = UINT_MAX;
8595 table->n_flows = 0;
8596 hmap_init(&table->eviction_groups_by_id);
8597 heap_init(&table->eviction_groups_by_size);
8598 atomic_init(&table->miss_config, OFPUTIL_TABLE_MISS_DEFAULT);
8599
8600 classifier_set_prefix_fields(&table->cls, default_prefix_fields,
8601 ARRAY_SIZE(default_prefix_fields));
8602
8603 atomic_init(&table->n_matched, 0);
8604 atomic_init(&table->n_missed, 0);
8605}
8606
/* Destroys 'table', including its classifier and eviction groups.
 *
 * The caller is responsible for freeing 'table' itself and must already have
 * removed every rule from the table's classifier. */
static void
oftable_destroy(struct oftable *table)
{
    ovs_assert(classifier_is_empty(&table->cls));

    /* Disabling eviction (eviction == 0) tears down all remaining eviction
     * groups. */
    ovs_mutex_lock(&ofproto_mutex);
    oftable_configure_eviction(table, 0, NULL, 0);
    ovs_mutex_unlock(&ofproto_mutex);

    hmap_destroy(&table->eviction_groups_by_id);
    heap_destroy(&table->eviction_groups_by_size);
    classifier_destroy(&table->cls);
    free(table->name);
}
8624
8625/* Changes the name of 'table' to 'name'. If 'name' is NULL or the empty
8626 * string, then 'table' will use its default name.
8627 *
8628 * This only affects the name exposed for a table exposed through the OpenFlow
8629 * OFPST_TABLE (as printed by "ovs-ofctl dump-tables"). */
8630static void
8631oftable_set_name(struct oftable *table, const char *name)
8632{
8633 if (name && name[0]) {
8634 int len = strnlen(name, OFP_MAX_TABLE_NAME_LEN);
8635 if (!table->name || strncmp(name, table->name, len)) {
8636 free(table->name);
8637 table->name = xmemdup0(name, len);
8638 }
8639 } else {
8640 free(table->name);
8641 table->name = NULL;
8642 }
8643}
8644
/* oftables support a choice of two policies when adding a rule would cause the
 * number of flows in the table to exceed the configured maximum number: either
 * they can refuse to add the new flow or they can evict some existing flow.
 * This function configures the latter policy on 'table', with fairness based
 * on the values of the 'n_fields' fields specified in 'fields'.  (Specifying
 * 'n_fields' as 0 disables fairness.) */
static void
oftable_configure_eviction(struct oftable *table, unsigned int eviction,
                           const struct mf_subfield *fields, size_t n_fields)
    OVS_REQUIRES(ofproto_mutex)
{
    struct rule *rule;

    if ((table->eviction != 0) == (eviction != 0)
        && n_fields == table->n_eviction_fields
        && (!n_fields
            || !memcmp(fields, table->eviction_fields,
                       n_fields * sizeof *fields))) {
        /* The set of eviction fields did not change.  If 'eviction' changed,
         * it remains nonzero, so that we can just update table->eviction
         * without fussing with the eviction groups. */
        table->eviction = eviction;
        return;
    }

    /* Destroy existing eviction groups, then destroy and recreate data
     * structures to recover memory. */
    struct eviction_group *evg, *next;
    HMAP_FOR_EACH_SAFE (evg, next, id_node, &table->eviction_groups_by_id) {
        eviction_group_destroy(table, evg);
    }
    hmap_destroy(&table->eviction_groups_by_id);
    hmap_init(&table->eviction_groups_by_id);
    heap_destroy(&table->eviction_groups_by_size);
    heap_init(&table->eviction_groups_by_size);

    /* Replace eviction groups by the new ones, if there is a change.  Free
     * the old fields only after allocating the new ones, because 'fields ==
     * table->eviction_fields' is possible (the caller may pass the table's
     * own array back in). */
    struct mf_subfield *old_fields = table->eviction_fields;
    table->n_eviction_fields = n_fields;
    table->eviction_fields = (fields
                              ? xmemdup(fields, n_fields * sizeof *fields)
                              : NULL);
    free(old_fields);

    /* Add the new eviction groups, if enabled.  Every existing rule is
     * re-hashed into its group under the fresh random basis. */
    table->eviction = eviction;
    if (table->eviction) {
        table->eviction_group_id_basis = random_uint32();
        CLS_FOR_EACH (rule, cr, &table->cls) {
            eviction_group_add_rule(rule);
        }
    }
}
8700
/* Inserts 'rule' from the ofproto data structures BEFORE caller has inserted
 * it to the classifier.  Registers the rule with the cookie index, expiration
 * list, eviction groups, and any meter or groups its actions reference, then
 * marks it RULE_INSERTED. */
static void
ofproto_rule_insert__(struct ofproto *ofproto, struct rule *rule)
    OVS_REQUIRES(ofproto_mutex)
{
    const struct rule_actions *actions = rule_get_actions(rule);

    /* A rule may not be reinserted. */
    ovs_assert(rule->state != RULE_INSERTED);

    /* Only rules with a timeout can expire, so only they go on the
     * expiration list. */
    if (rule->hard_timeout || rule->idle_timeout) {
        ovs_list_insert(&ofproto->expirable, &rule->expirable);
    }
    cookies_insert(ofproto, rule);
    eviction_group_add_rule(rule);
    if (actions->has_meter) {
        meter_insert_rule(rule);
    }
    if (actions->has_groups) {
        const struct ofpact_group *a;
        /* Register the rule with every group its actions reference, so the
         * groups can track the rules that use them. */
        OFPACT_FOR_EACH_TYPE_FLATTENED (a, GROUP, actions->ofpacts,
                                        actions->ofpacts_len) {
            struct ofgroup *group;

            group = ofproto_group_lookup(ofproto, a->group_id, OVS_VERSION_MAX,
                                         false);
            ovs_assert(group != NULL);
            group_add_rule(group, rule);
        }
    }

    rule->state = RULE_INSERTED;
}
8735
/* Removes 'rule' from the ofproto data structures.  Caller may have deferred
 * the removal from the classifier.  Undoes the registrations made by
 * ofproto_rule_insert__() and marks the rule RULE_REMOVED. */
static void
ofproto_rule_remove__(struct ofproto *ofproto, struct rule *rule)
    OVS_REQUIRES(ofproto_mutex)
{
    ovs_assert(rule->state == RULE_INSERTED);

    cookies_remove(ofproto, rule);

    eviction_group_remove_rule(rule);
    /* Non-empty list nodes mean the rule is on the expiration or meter
     * lists; unlink it from each. */
    if (!ovs_list_is_empty(&rule->expirable)) {
        ovs_list_remove(&rule->expirable);
    }
    if (!ovs_list_is_empty(&rule->meter_list_node)) {
        ovs_list_remove(&rule->meter_list_node);
        ovs_list_init(&rule->meter_list_node);
    }

    /* Remove the rule from any groups, except from the group that is being
     * deleted, if any. */
    const struct rule_actions *actions = rule_get_actions(rule);

    if (actions->has_groups) {
        const struct ofpact_group *a;

        OFPACT_FOR_EACH_TYPE_FLATTENED(a, GROUP, actions->ofpacts,
                                        actions->ofpacts_len) {
            struct ofgroup *group;

            group = ofproto_group_lookup(ofproto, a->group_id, OVS_VERSION_MAX,
                                         false);
            ovs_assert(group);

            /* Leave the rule for the group that is being deleted, if any,
             * as we still need the list of rules for clean-up. */
            if (!group->being_deleted) {
                group_remove_rule(group, rule);
            }
        }
    }

    rule->state = RULE_REMOVED;
}
8780\f
8781/* unixctl commands. */
8782
8783struct ofproto *
8784ofproto_lookup(const char *name)
8785{
8786 struct ofproto *ofproto;
8787
8788 HMAP_FOR_EACH_WITH_HASH (ofproto, hmap_node, hash_string(name, 0),
8789 &all_ofprotos) {
8790 if (!strcmp(ofproto->name, name)) {
8791 return ofproto;
8792 }
8793 }
8794 return NULL;
8795}
8796
8797static void
8798ofproto_unixctl_list(struct unixctl_conn *conn, int argc OVS_UNUSED,
8799 const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
8800{
8801 struct ofproto *ofproto;
8802 struct ds results;
8803
8804 ds_init(&results);
8805 HMAP_FOR_EACH (ofproto, hmap_node, &all_ofprotos) {
8806 ds_put_format(&results, "%s\n", ofproto->name);
8807 }
8808 unixctl_command_reply(conn, ds_cstr(&results));
8809 ds_destroy(&results);
8810}
8811
8812static void
8813ofproto_unixctl_init(void)
8814{
8815 static bool registered;
8816 if (registered) {
8817 return;
8818 }
8819 registered = true;
8820
8821 unixctl_command_register("ofproto/list", "", 0, 0,
8822 ofproto_unixctl_list, NULL);
8823}
8824
/* Sets the limit on how many VLAN headers flow translation may match, by
 * delegating to the flow module. */
void
ofproto_set_vlan_limit(int vlan_limit)
{
    flow_limit_vlans(vlan_limit);
}