lib/cfm.c (ovs.git)
1 /*
2 * Copyright (c) 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include "cfm.h"
19
20 #include <stdint.h>
21 #include <stdlib.h>
22 #include <string.h>
23
24 #include "byte-order.h"
25 #include "connectivity.h"
26 #include "dp-packet.h"
27 #include "openvswitch/dynamic-string.h"
28 #include "flow.h"
29 #include "hash.h"
30 #include "openvswitch/hmap.h"
31 #include "netdev.h"
32 #include "ovs-atomic.h"
33 #include "packets.h"
34 #include "poll-loop.h"
35 #include "random.h"
36 #include "seq.h"
37 #include "timer.h"
38 #include "timeval.h"
39 #include "unixctl.h"
40 #include "openvswitch/vlog.h"
41 #include "util.h"
42
43 VLOG_DEFINE_THIS_MODULE(cfm);
44
45 #define CFM_MAX_RMPS 256
46
47 /* Ethernet destination addresses of CCM packets (standard and extended mode). */
48 static const struct eth_addr eth_addr_ccm = {
49 { { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x30 } } };
50 static const struct eth_addr eth_addr_ccm_x = {
51 { { 0x01, 0x23, 0x20, 0x00, 0x00, 0x30 } } };
52
53 #define ETH_TYPE_CFM 0x8902
54
55 /* A 'ccm' represents a Continuity Check Message from the 802.1ag
56 * specification. Continuity Check Messages are broadcast periodically so that
57 * hosts can determine whom they have connectivity to.
58 *
59 * The minimum length of a CCM as specified by IEEE 802.1ag is 75 bytes.
60 * Previous versions of Open vSwitch generated 74-byte CCM messages, so we
61 * accept such messages too. */
62 #define CCM_LEN 75
63 #define CCM_ACCEPT_LEN 74
64 #define CCM_MAID_LEN 48
65 #define CCM_OPCODE 1 /* CFM message opcode meaning CCM. */
66 #define CCM_RDI_MASK 0x80
67 #define CFM_HEALTH_INTERVAL 6
68
69 OVS_PACKED(
70 struct ccm {
71 uint8_t mdlevel_version; /* MD Level and Version */
72 uint8_t opcode;
73 uint8_t flags;
74 uint8_t tlv_offset;
75 ovs_be32 seq;
76 ovs_be16 mpid;
77 uint8_t maid[CCM_MAID_LEN];
78
79 /* Defined by ITU-T Y.1731; should be zero. */
80 ovs_be16 interval_ms_x; /* Transmission interval in ms. */
81 ovs_be64 mpid64; /* MPID in extended mode. */
82 uint8_t opdown; /* Operationally down. */
83 uint8_t zero[5];
84
85 /* TLV space. */
86 uint8_t end_tlv;
87 });
88 BUILD_ASSERT_DECL(CCM_LEN == sizeof(struct ccm));
89
90 struct cfm {
91 const char *name; /* Name of this CFM object. */
92 struct hmap_node hmap_node; /* Node in all_cfms list. */
93
94 struct netdev *netdev;
95 uint64_t rx_packets; /* Packets received by 'netdev'. */
96
97 uint64_t mpid;
98 bool demand; /* Demand mode. */
99 bool booted; /* A full fault interval has occurred. */
100 enum cfm_fault_reason fault; /* Connectivity fault status. */
101 enum cfm_fault_reason recv_fault; /* Bit mask of faults occurring on
102 receive. */
103 bool opup; /* Operational State. */
104 bool remote_opup; /* Remote Operational State. */
105
106 int fault_override; /* Manual override of 'fault' status.
107 Ignored if negative. */
108
109 uint32_t seq; /* The sequence number of our last CCM. */
110 uint8_t ccm_interval; /* The CCM transmission interval. */
111 int ccm_interval_ms; /* 'ccm_interval' in milliseconds. */
112 uint16_t ccm_vlan; /* Vlan tag of CCM PDUs. CFM_RANDOM_VLAN if
113 random. */
114 uint8_t ccm_pcp; /* Priority of CCM PDUs. */
115 uint8_t maid[CCM_MAID_LEN]; /* The MAID of this CFM. */
116
117 struct timer tx_timer; /* Send CCM when expired. */
118 struct timer fault_timer; /* Check for faults when expired. */
119
120 struct hmap remote_mps; /* Remote MPs. */
121
122 /* Result of cfm_get_remote_mpids(). Updated only during fault check to
123 * avoid flapping. */
124 uint64_t *rmps_array; /* Cache of remote_mps. */
125 size_t rmps_array_len; /* Number of rmps in 'rmps_array'. */
126
127 int health; /* Percentage of expected CCM frames actually
128 received, or -1 if undefined. */
129 int health_interval; /* Number of fault_intervals since health was
130 recomputed. */
131 long long int last_tx; /* Last CCM transmission time. */
132
133 /* These bools are atomic to allow readers to check their values
134 * without taking 'mutex'. Such readers do not assume the values they
135 * read are synchronized with any other members. */
136 atomic_bool check_tnl_key; /* Verify the tunnel key of inbound packets? */
137 atomic_bool extended; /* Extended mode. */
138 struct ovs_refcount ref_cnt;
139
140 uint64_t flap_count; /* Count the flaps since boot. */
141
142 /* True when the variables returned by cfm_get_*() have changed
143 * since the last check. */
144 bool status_changed;
145
146 /* When 'cfm->demand' is set, at least one CCM must be received every
147 * 100 * cfm_interval. If no CCM is received within this interval, the
148 * CFM fault is set even if data packets are still being received. */
149 struct timer demand_rx_ccm_t;
150 };
151
152 /* Remote MPs represent foreign network entities that are configured to have
153 * the same MAID as this CFM instance. */
154 struct remote_mp {
155 uint64_t mpid; /* The Maintenance Point ID of this 'remote_mp'. */
156 struct hmap_node node; /* Node in 'remote_mps' map. */
157
158 bool recv; /* CCM was received since last fault check. */
159 bool opup; /* Operational State. */
160 uint32_t seq; /* Most recently received sequence number. */
161 uint8_t num_health_ccm; /* Number of received ccm frames every
162 CFM_HEALTH_INTERVAL * 'fault_interval'. */
163 long long int last_rx; /* Last CCM reception time. */
164
165 };
166
167 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(20, 30);
168
169 static struct ovs_mutex mutex = OVS_MUTEX_INITIALIZER;
170 static struct hmap all_cfms__ = HMAP_INITIALIZER(&all_cfms__);
171 static struct hmap *const all_cfms OVS_GUARDED_BY(mutex) = &all_cfms__;
172
173 static unixctl_cb_func cfm_unixctl_show;
174 static unixctl_cb_func cfm_unixctl_set_fault;
175
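/* Returns the number of packets received by the netdev of 'cfm', or 0 if its
 * statistics cannot be read. */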
176 static uint64_t
177 cfm_rx_packets(const struct cfm *cfm) OVS_REQUIRES(mutex)
178 {
179 struct netdev_stats stats;
180
181 if (!netdev_get_stats(cfm->netdev, &stats)) {
182 return stats.rx_packets;
183 } else {
184 return 0;
185 }
186 }
187
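/* Returns the Ethernet destination address 'cfm' should use for CCMs: the
 * extended-mode address when in extended mode, otherwise the standard CCM
 * multicast address. */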
188 static struct eth_addr
189 cfm_ccm_addr(struct cfm *cfm)
190 {
191 bool extended;
192
193 atomic_read_relaxed(&cfm->extended, &extended);
194
195 return extended ? eth_addr_ccm_x : eth_addr_ccm;
196 }
197
198 /* Returns the string representation of the given cfm_fault_reason 'reason'. */
199 const char *
200 cfm_fault_reason_to_str(int reason)
201 {
202 switch (reason) {
203 #define CFM_FAULT_REASON(NAME, STR) case CFM_FAULT_##NAME: return #STR;
204 CFM_FAULT_REASONS
205 #undef CFM_FAULT_REASON
206 default: return "<unknown>";
207 }
208 }
209
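/* Appends the names of the fault reasons set in the 'fault' bit mask to 'ds',
 * separated by spaces. */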
210 static void
211 ds_put_cfm_fault(struct ds *ds, int fault)
212 {
213 int i;
214
215 for (i = 0; i < CFM_FAULT_N_REASONS; i++) {
216 int reason = 1 << i;
217
218 if (fault & reason) {
219 ds_put_format(ds, "%s ", cfm_fault_reason_to_str(reason));
220 }
221 }
222
223 ds_chomp(ds, ' ');
224 }
225
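/* Fills 'cfm->maid' with the fixed MAID used by Open vSwitch: MD name "ovs"
 * and MA name "ovs". */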
226 static void
227 cfm_generate_maid(struct cfm *cfm) OVS_REQUIRES(mutex)
228 {
229 const char *ovs_md_name = "ovs";
230 const char *ovs_ma_name = "ovs";
231 uint8_t *ma_p;
232 size_t md_len, ma_len;
233
234 memset(cfm->maid, 0, CCM_MAID_LEN);
235
236 md_len = strlen(ovs_md_name);
237 ma_len = strlen(ovs_ma_name);
238
239 ovs_assert(md_len && ma_len && md_len + ma_len + 4 <= CCM_MAID_LEN);
240
241 cfm->maid[0] = 4; /* MD name string format. */
242 cfm->maid[1] = md_len; /* MD name size. */
243 memcpy(&cfm->maid[2], ovs_md_name, md_len); /* MD name. */
244
245 ma_p = cfm->maid + 2 + md_len;
246 ma_p[0] = 2; /* MA name string format. */
247 ma_p[1] = ma_len; /* MA name size. */
248 memcpy(&ma_p[2], ovs_ma_name, ma_len); /* MA name. */
249 }
250
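/* Converts the 3-bit 802.1ag transmission interval code 'interval' (1 through
 * 7) into milliseconds. */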
251 static int
252 ccm_interval_to_ms(uint8_t interval)
253 {
254 switch (interval) {
255 case 0: OVS_NOT_REACHED(); /* Explicitly not supported by 802.1ag. */
256 case 1: return 3; /* Not recommended due to timer resolution. */
257 case 2: return 10; /* Not recommended due to timer resolution. */
258 case 3: return 100;
259 case 4: return 1000;
260 case 5: return 10000;
261 case 6: return 60000;
262 case 7: return 600000;
263 default: OVS_NOT_REACHED(); /* Explicitly not supported by 802.1ag. */
264 }
265
266 OVS_NOT_REACHED();
267 }
268
269 static long long int
270 cfm_fault_interval(struct cfm *cfm) OVS_REQUIRES(mutex)
271 {
272 /* According to the 802.1ag specification we should assume every other MP
273 * with the same MAID has the same transmission interval that we have. If
274 * an MP has a different interval, cfm_process_heartbeat will register it
275 * as a fault (likely due to a configuration error). Thus we can check all
276 * MPs at once making this quite a bit simpler.
277 *
278 * When cfm is not in demand mode, we check when (ccm_interval_ms * 3.5) ms
279 * have passed. When cfm is in demand mode, we check when
280 * (MAX(ccm_interval_ms, 500) * 3.5) ms have passed. This ensures that
281 * ovs-vswitchd has enough time to pull statistics from the datapath. */
282
283 return (MAX(cfm->ccm_interval_ms, cfm->demand ? 500 : cfm->ccm_interval_ms)
284 * 7) / 2;
285 }
286
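/* Returns the largest 802.1ag interval code whose period does not exceed
 * 'interval_ms', or 1 if 'interval_ms' is shorter than every standard
 * period. */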
287 static uint8_t
288 ms_to_ccm_interval(int interval_ms)
289 {
290 uint8_t i;
291
292 for (i = 7; i > 0; i--) {
293 if (ccm_interval_to_ms(i) <= interval_ms) {
294 return i;
295 }
296 }
297
298 return 1;
299 }
300
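/* Hashes 'mpid', used both as the hmap key for 'remote_mps' and, in extended
 * mode, to derive the 16-bit MPID field of outgoing CCMs. */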
301 static uint32_t
302 hash_mpid(uint64_t mpid)
303 {
304 return hash_uint64(mpid);
305 }
306
307 static bool
308 cfm_is_valid_mpid(bool extended, uint64_t mpid)
309 {
310 /* 802.1ag specification requires MPIDs to be within the range [1, 8191].
311 * In extended mode we relax this requirement. */
312 return mpid >= 1 && (extended || mpid <= 8191);
313 }
314
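/* Returns the remote_mp in 'cfm' with MPID 'mpid', or NULL if there is
 * none. */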
315 static struct remote_mp *
316 lookup_remote_mp(const struct cfm *cfm, uint64_t mpid) OVS_REQUIRES(mutex)
317 {
318 struct remote_mp *rmp;
319
320 HMAP_FOR_EACH_IN_BUCKET (rmp, node, hash_mpid(mpid), &cfm->remote_mps) {
321 if (rmp->mpid == mpid) {
322 return rmp;
323 }
324 }
325
326 return NULL;
327 }
328
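/* Registers the "cfm/show" and "cfm/set-fault" unixctl commands. */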
329 void
330 cfm_init(void)
331 {
332 unixctl_command_register("cfm/show", "[interface]", 0, 1, cfm_unixctl_show,
333 NULL);
334 unixctl_command_register("cfm/set-fault", "[interface] normal|false|true",
335 1, 2, cfm_unixctl_set_fault, NULL);
336 }
337
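/* A minimal usage sketch of this module's public API, for illustration only
 * (not taken from the in-tree callers); 'my_netdev', 'settings', and
 * 'src_mac' are hypothetical:
 *
 *     struct cfm *cfm = cfm_create(my_netdev);
 *     cfm_configure(cfm, &settings);
 *     for (;;) {
 *         cfm_run(cfm);
 *         if (cfm_should_send_ccm(cfm)) {
 *             struct dp_packet packet;
 *
 *             dp_packet_init(&packet, 0);
 *             cfm_compose_ccm(cfm, &packet, src_mac);
 *             // transmit 'packet' on the netdev here
 *             dp_packet_uninit(&packet);
 *         }
 *         cfm_wait(cfm);
 *         poll_block();
 *     }
 *     cfm_unref(cfm);
 *
 * Received packets should first be checked with cfm_should_process_flow() and,
 * if it returns true, passed to cfm_process_heartbeat(). */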
338 /* Records the status change and changes the global connectivity seq. */
339 static void
340 cfm_status_changed(struct cfm *cfm) OVS_REQUIRES(mutex)
341 {
342 seq_change(connectivity_seq_get());
343 cfm->status_changed = true;
344 }
345
346 /* Allocates a 'cfm' object named after 'netdev'. The new 'cfm' should be
347 * initialized with cfm_configure() before use. */
348 struct cfm *
349 cfm_create(const struct netdev *netdev) OVS_EXCLUDED(mutex)
350 {
351 struct cfm *cfm;
352
353 cfm = xzalloc(sizeof *cfm);
354 cfm->netdev = netdev_ref(netdev);
355 cfm->name = netdev_get_name(cfm->netdev);
356 hmap_init(&cfm->remote_mps);
357 cfm->remote_opup = true;
358 cfm->fault_override = -1;
359 cfm->health = -1;
360 cfm->last_tx = 0;
361 cfm->flap_count = 0;
362 atomic_init(&cfm->extended, false);
363 atomic_init(&cfm->check_tnl_key, false);
364 ovs_refcount_init(&cfm->ref_cnt);
365
366 ovs_mutex_lock(&mutex);
367 cfm_status_changed(cfm);
368 cfm_generate_maid(cfm);
369 hmap_insert(all_cfms, &cfm->hmap_node, hash_string(cfm->name, 0));
370 ovs_mutex_unlock(&mutex);
371
372 return cfm;
373 }
374
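/* Releases a reference to 'cfm'. Destroys 'cfm' and its remote MPs when the
 * final reference is released. Does nothing if 'cfm' is NULL. */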
375 void
376 cfm_unref(struct cfm *cfm) OVS_EXCLUDED(mutex)
377 {
378 struct remote_mp *rmp;
379
380 if (!cfm) {
381 return;
382 }
383
384 if (ovs_refcount_unref_relaxed(&cfm->ref_cnt) != 1) {
385 return;
386 }
387
388 ovs_mutex_lock(&mutex);
389 cfm_status_changed(cfm);
390 hmap_remove(all_cfms, &cfm->hmap_node);
391 ovs_mutex_unlock(&mutex);
392
393 HMAP_FOR_EACH_POP (rmp, node, &cfm->remote_mps) {
394 free(rmp);
395 }
396
397 hmap_destroy(&cfm->remote_mps);
398 netdev_close(cfm->netdev);
399 free(cfm->rmps_array);
400
401 free(cfm);
402 }
403
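/* Takes a new reference to 'cfm_', which may be NULL. Returns 'cfm_'. */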
404 struct cfm *
405 cfm_ref(const struct cfm *cfm_)
406 {
407 struct cfm *cfm = CONST_CAST(struct cfm *, cfm_);
408 if (cfm) {
409 ovs_refcount_ref(&cfm->ref_cnt);
410 }
411 return cfm;
412 }
413
414 /* Should be run periodically to update the fault status and statistics of 'cfm'. */
415 void
416 cfm_run(struct cfm *cfm) OVS_EXCLUDED(mutex)
417 {
418 ovs_mutex_lock(&mutex);
419 if (timer_expired(&cfm->fault_timer)) {
420 long long int interval = cfm_fault_interval(cfm);
421 struct remote_mp *rmp, *rmp_next;
422 enum cfm_fault_reason old_cfm_fault = cfm->fault;
423 uint64_t old_flap_count = cfm->flap_count;
424 int old_health = cfm->health;
425 size_t old_rmps_array_len = cfm->rmps_array_len;
426 bool old_rmps_deleted = false;
427 bool old_rmp_opup = cfm->remote_opup;
428 bool demand_override;
429 bool rmp_set_opup = false;
430 bool rmp_set_opdown = false;
431
432 cfm->fault = cfm->recv_fault;
433 cfm->recv_fault = 0;
434
435 cfm->rmps_array_len = 0;
436 free(cfm->rmps_array);
437 cfm->rmps_array = xmalloc(hmap_count(&cfm->remote_mps) *
438 sizeof *cfm->rmps_array);
439
440 if (cfm->health_interval == CFM_HEALTH_INTERVAL) {
441 /* Calculate the cfm health of the interface. If the interface has
442 * more than one remote MPID, the health is undefined and reported
443 * as -1. If it has exactly one remote MPID, the health is the
444 * percentage of CCM frames received during the last
445 * CFM_HEALTH_INTERVAL fault intervals. Otherwise it is 0. */
446 if (hmap_count(&cfm->remote_mps) > 1) {
447 cfm->health = -1;
448 } else if (hmap_is_empty(&cfm->remote_mps)) {
449 cfm->health = 0;
450 } else {
451 int exp_ccm_recvd;
452
453 rmp = CONTAINER_OF(hmap_first(&cfm->remote_mps),
454 struct remote_mp, node);
455 exp_ccm_recvd = (CFM_HEALTH_INTERVAL * 7) / 2;
456 /* Calculate the percentage of healthy ccm frames received.
457 * Since the 'fault_interval' is (3.5 * cfm_interval), and
458 * 1 CCM packet must be received every cfm_interval,
459 * the 'remote_mpid' health reports the percentage of
460 * healthy CCM frames received every
461 * 'CFM_HEALTH_INTERVAL'th 'fault_interval'. */
462 cfm->health = (rmp->num_health_ccm * 100) / exp_ccm_recvd;
463 cfm->health = MIN(cfm->health, 100);
464 rmp->num_health_ccm = 0;
465 ovs_assert(cfm->health >= 0 && cfm->health <= 100);
466 }
467 cfm->health_interval = 0;
468 }
469 cfm->health_interval++;
470
471 demand_override = false;
472 if (cfm->demand) {
473 uint64_t rx_packets = cfm_rx_packets(cfm);
474 demand_override = hmap_count(&cfm->remote_mps) == 1
475 && rx_packets > cfm->rx_packets
476 && !timer_expired(&cfm->demand_rx_ccm_t);
477 cfm->rx_packets = rx_packets;
478 }
479
480 HMAP_FOR_EACH_SAFE (rmp, rmp_next, node, &cfm->remote_mps) {
481 if (!rmp->recv) {
482 VLOG_INFO("%s: Received no CCM from RMP %"PRIu64" in the last"
483 " %lldms", cfm->name, rmp->mpid,
484 time_msec() - rmp->last_rx);
485 if (!demand_override) {
486 old_rmps_deleted = true;
487 hmap_remove(&cfm->remote_mps, &rmp->node);
488 free(rmp);
489 }
490 } else {
491 rmp->recv = false;
492
493 if (rmp->opup) {
494 rmp_set_opup = true;
495 } else {
496 rmp_set_opdown = true;
497 }
498
499 cfm->rmps_array[cfm->rmps_array_len++] = rmp->mpid;
500 }
501 }
502
503 if (rmp_set_opdown) {
504 cfm->remote_opup = false;
505 }
506 else if (rmp_set_opup) {
507 cfm->remote_opup = true;
508 }
509
510 if (hmap_is_empty(&cfm->remote_mps)) {
511 cfm->fault |= CFM_FAULT_RECV;
512 }
513
514 if (old_cfm_fault != cfm->fault) {
515 if (!VLOG_DROP_INFO(&rl)) {
516 struct ds ds = DS_EMPTY_INITIALIZER;
517
518 ds_put_cstr(&ds, "from [");
519 ds_put_cfm_fault(&ds, old_cfm_fault);
520 ds_put_cstr(&ds, "] to [");
521 ds_put_cfm_fault(&ds, cfm->fault);
522 ds_put_char(&ds, ']');
523 VLOG_INFO("%s: CFM faults changed %s.", cfm->name, ds_cstr(&ds));
524 ds_destroy(&ds);
525 }
526
527 /* If there is a flap, increment the counter. */
528 if (old_cfm_fault == 0 || cfm->fault == 0) {
529 cfm->flap_count++;
530 }
531 }
532
533 /* These variables represent the CFM session status; it is desirable to
534 * push them to the database immediately after they change. */
535 if (old_health != cfm->health
536 || old_rmp_opup != cfm->remote_opup
537 || (old_rmps_array_len != cfm->rmps_array_len || old_rmps_deleted)
538 || old_cfm_fault != cfm->fault
539 || old_flap_count != cfm->flap_count) {
540 cfm_status_changed(cfm);
541 }
542
543 cfm->booted = true;
544 timer_set_duration(&cfm->fault_timer, interval);
545 VLOG_DBG("%s: new fault interval", cfm->name);
546 }
547 ovs_mutex_unlock(&mutex);
548 }
549
550 /* Should be run periodically to check if the CFM module has a CCM message it
551 * wishes to send. */
552 bool
553 cfm_should_send_ccm(struct cfm *cfm) OVS_EXCLUDED(mutex)
554 {
555 bool ret;
556
557 ovs_mutex_lock(&mutex);
558 ret = timer_expired(&cfm->tx_timer);
559 ovs_mutex_unlock(&mutex);
560 return ret;
561 }
562
563 /* Composes a CCM message into 'packet'. Messages generated with this function
564 * should be sent whenever cfm_should_send_ccm() indicates they should be. */
565 void
566 cfm_compose_ccm(struct cfm *cfm, struct dp_packet *packet,
567 const struct eth_addr eth_src) OVS_EXCLUDED(mutex)
568 {
569 uint16_t ccm_vlan;
570 struct ccm *ccm;
571 bool extended;
572
573 ovs_mutex_lock(&mutex);
574 timer_set_duration(&cfm->tx_timer, cfm->ccm_interval_ms);
575 eth_compose(packet, cfm_ccm_addr(cfm), eth_src, ETH_TYPE_CFM, sizeof *ccm);
576
577 ccm_vlan = (cfm->ccm_vlan != CFM_RANDOM_VLAN
578 ? cfm->ccm_vlan
579 : random_uint16());
580 ccm_vlan = ccm_vlan & VLAN_VID_MASK;
581
582 if (ccm_vlan || cfm->ccm_pcp) {
583 uint16_t tci = ccm_vlan | (cfm->ccm_pcp << VLAN_PCP_SHIFT);
584 eth_push_vlan(packet, htons(ETH_TYPE_VLAN), htons(tci));
585 }
586
587 atomic_read_relaxed(&cfm->extended, &extended);
588
589 ccm = dp_packet_l3(packet);
590 ccm->mdlevel_version = 0;
591 ccm->opcode = CCM_OPCODE;
592 ccm->tlv_offset = 70;
593 ccm->seq = htonl(++cfm->seq);
594 ccm->flags = cfm->ccm_interval;
595 memcpy(ccm->maid, cfm->maid, sizeof ccm->maid);
596 memset(ccm->zero, 0, sizeof ccm->zero);
597 ccm->end_tlv = 0;
598
599 if (extended) {
600 ccm->mpid = htons(hash_mpid(cfm->mpid));
601 ccm->mpid64 = htonll(cfm->mpid);
602 ccm->opdown = !cfm->opup;
603 } else {
604 ccm->mpid = htons(cfm->mpid);
605 ccm->mpid64 = htonll(0);
606 ccm->opdown = 0;
607 }
608
609 if (cfm->ccm_interval == 0) {
610 ovs_assert(extended);
611 ccm->interval_ms_x = htons(cfm->ccm_interval_ms);
612 } else {
613 ccm->interval_ms_x = htons(0);
614 }
615
616 if (cfm->booted && hmap_is_empty(&cfm->remote_mps)) {
617 ccm->flags |= CCM_RDI_MASK;
618 }
619
620 if (cfm->last_tx) {
621 long long int delay = time_msec() - cfm->last_tx;
622 if (delay > (cfm->ccm_interval_ms * 3 / 2)) {
623 VLOG_INFO("%s: long delay of %lldms (expected %dms) sending CCM"
624 " seq %"PRIu32, cfm->name, delay, cfm->ccm_interval_ms,
625 cfm->seq);
626 }
627 }
628 cfm->last_tx = time_msec();
629 ovs_mutex_unlock(&mutex);
630 }
631
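/* Causes the poll loop to wake up when 'cfm' next has work to do and returns
 * that wakeup time. */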
632 long long int
633 cfm_wait(struct cfm *cfm) OVS_EXCLUDED(mutex)
634 {
635 long long int wake_time = cfm_wake_time(cfm);
636 poll_timer_wait_until(wake_time);
637 return wake_time;
638 }
639
640
641 /* Returns the next cfm wakeup time. */
642 long long int
643 cfm_wake_time(struct cfm *cfm) OVS_EXCLUDED(mutex)
644 {
645 long long int retval;
646
647 if (!cfm) {
648 return LLONG_MAX;
649 }
650
651 ovs_mutex_lock(&mutex);
652 retval = MIN(cfm->tx_timer.t, cfm->fault_timer.t);
653 ovs_mutex_unlock(&mutex);
654 return retval;
655 }
656
657
658 /* Configures 'cfm' with settings from 's'. */
659 bool
660 cfm_configure(struct cfm *cfm, const struct cfm_settings *s)
661 OVS_EXCLUDED(mutex)
662 {
663 uint8_t interval;
664 int interval_ms;
665
666 if (!cfm_is_valid_mpid(s->extended, s->mpid) || s->interval <= 0) {
667 return false;
668 }
669
670 ovs_mutex_lock(&mutex);
671 cfm->mpid = s->mpid;
672 cfm->opup = s->opup;
673 interval = ms_to_ccm_interval(s->interval);
674 interval_ms = ccm_interval_to_ms(interval);
675
676 atomic_store_relaxed(&cfm->check_tnl_key, s->check_tnl_key);
677 atomic_store_relaxed(&cfm->extended, s->extended);
678
679 cfm->ccm_vlan = s->ccm_vlan;
680 cfm->ccm_pcp = s->ccm_pcp & (VLAN_PCP_MASK >> VLAN_PCP_SHIFT);
681 if (s->extended && interval_ms != s->interval) {
682 interval = 0;
683 interval_ms = MIN(s->interval, UINT16_MAX);
684 }
685
686 if (s->extended && s->demand) {
687 if (!cfm->demand) {
688 cfm->demand = true;
689 cfm->rx_packets = cfm_rx_packets(cfm);
690 }
691 } else {
692 cfm->demand = false;
693 }
694
695 if (interval != cfm->ccm_interval || interval_ms != cfm->ccm_interval_ms) {
696 cfm->ccm_interval = interval;
697 cfm->ccm_interval_ms = interval_ms;
698
699 timer_set_expired(&cfm->tx_timer);
700 timer_set_duration(&cfm->fault_timer, cfm_fault_interval(cfm));
701 }
702
703 ovs_mutex_unlock(&mutex);
704 return true;
705 }
706
707 /* Must be called when the netdev owned by 'cfm' should change. */
708 void
709 cfm_set_netdev(struct cfm *cfm, const struct netdev *netdev)
710 OVS_EXCLUDED(mutex)
711 {
712 ovs_mutex_lock(&mutex);
713 if (cfm->netdev != netdev) {
714 netdev_close(cfm->netdev);
715 cfm->netdev = netdev_ref(netdev);
716 }
717 ovs_mutex_unlock(&mutex);
718 }
719
720 /* Returns true if 'cfm' should process packets from 'flow'. Sets
721 * fields in 'wc' that were used to make the determination. */
722 bool
723 cfm_should_process_flow(const struct cfm *cfm_, const struct flow *flow,
724 struct flow_wildcards *wc)
725 {
726 struct cfm *cfm = CONST_CAST(struct cfm *, cfm_);
727 bool check_tnl_key;
728
729 /* Most packets are not CFM. */
730 if (OVS_LIKELY(flow->dl_type != htons(ETH_TYPE_CFM))) {
731 return false;
732 }
733
734 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
735 if (OVS_UNLIKELY(!eth_addr_equals(flow->dl_dst, cfm_ccm_addr(cfm)))) {
736 return false;
737 }
738
739 atomic_read_relaxed(&cfm->check_tnl_key, &check_tnl_key);
740
741 if (check_tnl_key) {
742 memset(&wc->masks.tunnel.tun_id, 0xff, sizeof wc->masks.tunnel.tun_id);
743 return flow->tunnel.tun_id == htonll(0);
744 }
745 return true;
746 }
747
748 /* Updates internal statistics relevant to packet 'p'. Should be called on
749 * every packet whose flow returned true when passed to
750 * cfm_should_process_flow. */
751 void
752 cfm_process_heartbeat(struct cfm *cfm, const struct dp_packet *p)
753 OVS_EXCLUDED(mutex)
754 {
755 struct ccm *ccm;
756 struct eth_header *eth;
757 bool extended;
758
759 ovs_mutex_lock(&mutex);
760
761 atomic_read_relaxed(&cfm->extended, &extended);
762
763 eth = dp_packet_eth(p);
764 ccm = dp_packet_at(p, (uint8_t *)dp_packet_l3(p) - (uint8_t *)dp_packet_data(p),
765 CCM_ACCEPT_LEN);
766
767 if (!ccm) {
768 VLOG_INFO_RL(&rl, "%s: Received an unparseable 802.1ag CCM heartbeat.",
769 cfm->name);
770 goto out;
771 }
772
773 if (ccm->opcode != CCM_OPCODE) {
774 VLOG_INFO_RL(&rl, "%s: Received an unsupported 802.1ag message. "
775 "(opcode %u)", cfm->name, ccm->opcode);
776 goto out;
777 }
778
779 /* According to the 802.1ag specification, reception of a CCM with an
780 * incorrect ccm_interval, unexpected MAID, or unexpected MPID should
781 * trigger a fault. We ignore this requirement for several reasons.
782 *
783 * Faults can cause a controller or Open vSwitch to make potentially
784 * expensive changes to the network topology. It seems prudent to trigger
785 * them judiciously, especially when CFM is used to check slave status of
786 * bonds. Furthermore, faults can be maliciously triggered by crafting
787 * unexpected CCMs. */
788 if (memcmp(ccm->maid, cfm->maid, sizeof ccm->maid)) {
789 cfm->recv_fault |= CFM_FAULT_MAID;
790 VLOG_WARN_RL(&rl, "%s: Received unexpected remote MAID from MAC "
791 ETH_ADDR_FMT, cfm->name, ETH_ADDR_ARGS(eth->eth_src));
792 } else {
793 uint8_t ccm_interval = ccm->flags & 0x7;
794 bool ccm_rdi = ccm->flags & CCM_RDI_MASK;
795 uint16_t ccm_interval_ms_x = ntohs(ccm->interval_ms_x);
796
797 struct remote_mp *rmp;
798 uint64_t ccm_mpid;
799 uint32_t ccm_seq;
800 bool ccm_opdown;
801 enum cfm_fault_reason cfm_fault = 0;
802
803 if (extended) {
804 ccm_mpid = ntohll(ccm->mpid64);
805 ccm_opdown = ccm->opdown;
806 } else {
807 ccm_mpid = ntohs(ccm->mpid);
808 ccm_opdown = false;
809 }
810 ccm_seq = ntohl(ccm->seq);
811
812 if (ccm_interval != cfm->ccm_interval) {
813 VLOG_WARN_RL(&rl, "%s: received a CCM with an unexpected interval"
814 " (%"PRIu8") from RMP %"PRIu64, cfm->name,
815 ccm_interval, ccm_mpid);
816 }
817
818 if (extended && ccm_interval == 0
819 && ccm_interval_ms_x != cfm->ccm_interval_ms) {
820 VLOG_WARN_RL(&rl, "%s: received a CCM with an unexpected extended"
821 " interval (%"PRIu16"ms) from RMP %"PRIu64, cfm->name,
822 ccm_interval_ms_x, ccm_mpid);
823 }
824
825 rmp = lookup_remote_mp(cfm, ccm_mpid);
826 if (!rmp) {
827 if (hmap_count(&cfm->remote_mps) < CFM_MAX_RMPS) {
828 rmp = xzalloc(sizeof *rmp);
829 hmap_insert(&cfm->remote_mps, &rmp->node, hash_mpid(ccm_mpid));
830 } else {
831 cfm_fault |= CFM_FAULT_OVERFLOW;
832 VLOG_WARN_RL(&rl,
833 "%s: dropped CCM with MPID %"PRIu64" from MAC "
834 ETH_ADDR_FMT, cfm->name, ccm_mpid,
835 ETH_ADDR_ARGS(eth->eth_src));
836 }
837 }
838
839 if (ccm_rdi) {
840 cfm_fault |= CFM_FAULT_RDI;
841 VLOG_DBG("%s: RDI bit flagged from RMP %"PRIu64, cfm->name,
842 ccm_mpid);
843 }
844
845 VLOG_DBG("%s: received CCM (seq %"PRIu32") (mpid %"PRIu64")"
846 " (interval %"PRIu8") (RDI %s)", cfm->name, ccm_seq,
847 ccm_mpid, ccm_interval, ccm_rdi ? "true" : "false");
848
849 if (rmp) {
850 if (rmp->mpid == cfm->mpid) {
851 cfm_fault |= CFM_FAULT_LOOPBACK;
852 VLOG_WARN_RL(&rl,"%s: received CCM with local MPID"
853 " %"PRIu64, cfm->name, rmp->mpid);
854 }
855
856 if (rmp->seq && ccm_seq != (rmp->seq + 1)) {
857 VLOG_WARN_RL(&rl, "%s: (mpid %"PRIu64") detected sequence"
858 " numbers which indicate possible connectivity"
859 " problems (previous %"PRIu32") (current %"PRIu32
860 ")", cfm->name, ccm_mpid, rmp->seq, ccm_seq);
861 }
862
863 rmp->mpid = ccm_mpid;
864 if (!cfm_fault) {
865 rmp->num_health_ccm++;
866 if (cfm->demand) {
867 timer_set_duration(&cfm->demand_rx_ccm_t,
868 100 * cfm->ccm_interval_ms);
869 }
870 }
871 rmp->recv = true;
872 cfm->recv_fault |= cfm_fault;
873 rmp->seq = ccm_seq;
874 rmp->opup = !ccm_opdown;
875 rmp->last_rx = time_msec();
876 }
877 }
878
879 out:
880 ovs_mutex_unlock(&mutex);
881 }
882
883 /* Returns 'cfm->status_changed' and resets it to false. */
884 bool
885 cfm_check_status_change(struct cfm *cfm) OVS_EXCLUDED(mutex)
886 {
887 bool ret;
888
889 ovs_mutex_lock(&mutex);
890 ret = cfm->status_changed;
891 cfm->status_changed = false;
892 ovs_mutex_unlock(&mutex);
893
894 return ret;
895 }
896
897 static int
898 cfm_get_fault__(const struct cfm *cfm) OVS_REQUIRES(mutex)
899 {
900 if (cfm->fault_override >= 0) {
901 return cfm->fault_override ? CFM_FAULT_OVERRIDE : 0;
902 }
903 return cfm->fault;
904 }
905
906 /* Gets the fault status of 'cfm'. Returns a bit mask of 'cfm_fault_reason's
907 * indicating the cause of the connectivity fault, or zero if there is no
908 * fault. */
909 int
910 cfm_get_fault(const struct cfm *cfm) OVS_EXCLUDED(mutex)
911 {
912 int fault;
913
914 ovs_mutex_lock(&mutex);
915 fault = cfm_get_fault__(cfm);
916 ovs_mutex_unlock(&mutex);
917 return fault;
918 }
919
920 /* Gets the number of CFM fault flaps since start. */
921 uint64_t
922 cfm_get_flap_count(const struct cfm *cfm) OVS_EXCLUDED(mutex)
923 {
924 uint64_t flap_count;
925 ovs_mutex_lock(&mutex);
926 flap_count = cfm->flap_count;
927 ovs_mutex_unlock(&mutex);
928 return flap_count;
929 }
930
931 /* Gets the health of 'cfm'. If there is exactly one remote MPID, returns an
932 * integer between 0 and 100 indicating the health of the link as the
933 * percentage of CCM frames received during the last CFM_HEALTH_INTERVAL
934 * fault intervals. Returns 0 if there are no remote MPIDs and -1 if there
935 * is more than one. */
936 int
937 cfm_get_health(const struct cfm *cfm) OVS_EXCLUDED(mutex)
938 {
939 int health;
940
941 ovs_mutex_lock(&mutex);
942 health = cfm->health;
943 ovs_mutex_unlock(&mutex);
944 return health;
945 }
946
947 static int
948 cfm_get_opup__(const struct cfm *cfm_) OVS_REQUIRES(mutex)
949 {
950 struct cfm *cfm = CONST_CAST(struct cfm *, cfm_);
951 bool extended;
952
953 atomic_read_relaxed(&cfm->extended, &extended);
954
955 return extended ? cfm->remote_opup : -1;
956 }
957
958 /* Gets the operational state of 'cfm'. 'cfm' is considered operationally down
959 * if it has received a CCM with the operationally down bit set from any of its
960 * remote maintenance points. Returns 1 if 'cfm' is operationally up, 0 if
961 * 'cfm' is operationally down, or -1 if 'cfm' has no operational state
962 * (because it isn't in extended mode). */
963 int
964 cfm_get_opup(const struct cfm *cfm) OVS_EXCLUDED(mutex)
965 {
966 int opup;
967
968 ovs_mutex_lock(&mutex);
969 opup = cfm_get_opup__(cfm);
970 ovs_mutex_unlock(&mutex);
971
972 return opup;
973 }
974
975 static void
976 cfm_get_remote_mpids__(const struct cfm *cfm, uint64_t **rmps, size_t *n_rmps)
977 OVS_REQUIRES(mutex)
978 {
979 *rmps = xmemdup(cfm->rmps_array, cfm->rmps_array_len * sizeof **rmps);
980 *n_rmps = cfm->rmps_array_len;
981 }
982
983 /* Populates 'rmps' with an array of the remote maintenance points reachable
984 * by 'cfm' and writes their number to 'n_rmps'. The caller takes ownership
985 * of the array written to 'rmps' and must free() it. */
986 void
987 cfm_get_remote_mpids(const struct cfm *cfm, uint64_t **rmps, size_t *n_rmps)
988 OVS_EXCLUDED(mutex)
989 {
990 ovs_mutex_lock(&mutex);
991 cfm_get_remote_mpids__(cfm, rmps, n_rmps);
992 ovs_mutex_unlock(&mutex);
993 }
994
995 /* Extracts the status of 'cfm' and fills in 's'. */
996 void
997 cfm_get_status(const struct cfm *cfm, struct cfm_status *s) OVS_EXCLUDED(mutex)
998 {
999 ovs_mutex_lock(&mutex);
1000 s->faults = cfm_get_fault__(cfm);
1001 s->remote_opstate = cfm_get_opup__(cfm);
1002 s->flap_count = cfm->flap_count;
1003 s->health = cfm->health;
1004 cfm_get_remote_mpids__(cfm, &s->rmps, &s->n_rmps);
1005 ovs_mutex_unlock(&mutex);
1006 }
1007
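/* Returns the CFM object for the interface named 'name', or NULL if there is
 * none. */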
1008 static struct cfm *
1009 cfm_find(const char *name) OVS_REQUIRES(mutex)
1010 {
1011 struct cfm *cfm;
1012
1013 HMAP_FOR_EACH_WITH_HASH (cfm, hmap_node, hash_string(name, 0), all_cfms) {
1014 if (!strcmp(cfm->name, name)) {
1015 return cfm;
1016 }
1017 }
1018 return NULL;
1019 }
1020
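/* Appends a human-readable summary of 'cfm' and its remote MPs to 'ds'. */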
1021 static void
1022 cfm_print_details(struct ds *ds, struct cfm *cfm) OVS_REQUIRES(mutex)
1023 {
1024 struct remote_mp *rmp;
1025 bool extended;
1026 int fault;
1027
1028 atomic_read_relaxed(&cfm->extended, &extended);
1029
1030 ds_put_format(ds, "---- %s ----\n", cfm->name);
1031 ds_put_format(ds, "MPID %"PRIu64":%s%s\n", cfm->mpid,
1032 extended ? " extended" : "",
1033 cfm->fault_override >= 0 ? " fault_override" : "");
1034
1035 fault = cfm_get_fault__(cfm);
1036 if (fault) {
1037 ds_put_cstr(ds, "\tfault: ");
1038 ds_put_cfm_fault(ds, fault);
1039 ds_put_cstr(ds, "\n");
1040 }
1041
1042 if (cfm->health == -1) {
1043 ds_put_format(ds, "\taverage health: undefined\n");
1044 } else {
1045 ds_put_format(ds, "\taverage health: %d\n", cfm->health);
1046 }
1047 ds_put_format(ds, "\topstate: %s\n", cfm->opup ? "up" : "down");
1048 ds_put_format(ds, "\tremote_opstate: %s\n",
1049 cfm->remote_opup ? "up" : "down");
1050 ds_put_format(ds, "\tinterval: %dms\n", cfm->ccm_interval_ms);
1051 ds_put_format(ds, "\tnext CCM tx: %lldms\n",
1052 timer_msecs_until_expired(&cfm->tx_timer));
1053 ds_put_format(ds, "\tnext fault check: %lldms\n",
1054 timer_msecs_until_expired(&cfm->fault_timer));
1055
1056 HMAP_FOR_EACH (rmp, node, &cfm->remote_mps) {
1057 ds_put_format(ds, "Remote MPID %"PRIu64"\n", rmp->mpid);
1058 ds_put_format(ds, "\trecv since check: %s\n",
1059 rmp->recv ? "true" : "false");
1060 ds_put_format(ds, "\topstate: %s\n", rmp->opup? "up" : "down");
1061 }
1062 }
1063
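/* Handles the "cfm/show" unixctl command: prints details for the named
 * interface, or for all CFM objects if no interface is given. */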
1064 static void
1065 cfm_unixctl_show(struct unixctl_conn *conn, int argc, const char *argv[],
1066 void *aux OVS_UNUSED) OVS_EXCLUDED(mutex)
1067 {
1068 struct ds ds = DS_EMPTY_INITIALIZER;
1069 struct cfm *cfm;
1070
1071 ovs_mutex_lock(&mutex);
1072 if (argc > 1) {
1073 cfm = cfm_find(argv[1]);
1074 if (!cfm) {
1075 unixctl_command_reply_error(conn, "no such CFM object");
1076 goto out;
1077 }
1078 cfm_print_details(&ds, cfm);
1079 } else {
1080 HMAP_FOR_EACH (cfm, hmap_node, all_cfms) {
1081 cfm_print_details(&ds, cfm);
1082 }
1083 }
1084
1085 unixctl_command_reply(conn, ds_cstr(&ds));
1086 ds_destroy(&ds);
1087 out:
1088 ovs_mutex_unlock(&mutex);
1089 }
1090
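/* Handles the "cfm/set-fault" unixctl command: overrides the fault status of
 * the named interface (or of all CFM objects) to true, false, or normal. */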
1091 static void
1092 cfm_unixctl_set_fault(struct unixctl_conn *conn, int argc, const char *argv[],
1093 void *aux OVS_UNUSED) OVS_EXCLUDED(mutex)
1094 {
1095 const char *fault_str = argv[argc - 1];
1096 int fault_override;
1097 struct cfm *cfm;
1098
1099 ovs_mutex_lock(&mutex);
1100 if (!strcasecmp("true", fault_str)) {
1101 fault_override = 1;
1102 } else if (!strcasecmp("false", fault_str)) {
1103 fault_override = 0;
1104 } else if (!strcasecmp("normal", fault_str)) {
1105 fault_override = -1;
1106 } else {
1107 unixctl_command_reply_error(conn, "unknown fault string");
1108 goto out;
1109 }
1110
1111 if (argc > 2) {
1112 cfm = cfm_find(argv[1]);
1113 if (!cfm) {
1114 unixctl_command_reply_error(conn, "no such CFM object");
1115 goto out;
1116 }
1117 cfm->fault_override = fault_override;
1118 cfm_status_changed(cfm);
1119 } else {
1120 HMAP_FOR_EACH (cfm, hmap_node, all_cfms) {
1121 cfm->fault_override = fault_override;
1122 cfm_status_changed(cfm);
1123 }
1124 }
1125
1126 unixctl_command_reply(conn, "OK");
1127
1128 out:
1129 ovs_mutex_unlock(&mutex);
1130 }