]> git.proxmox.com Git - mirror_frr.git/blob - bgpd/bgp_keepalives.c
Merge pull request #2719 from pguibert6WIND/fix_tableno_vrf
[mirror_frr.git] / bgpd / bgp_keepalives.c
1 /* BGP Keepalives.
2 * Implements a producer thread to generate BGP keepalives for peers.
3 * Copyright (C) 2017 Cumulus Networks, Inc.
4 * Quentin Young
5 *
6 * This file is part of FRRouting.
7 *
8 * FRRouting is free software; you can redistribute it and/or modify it under
9 * the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2, or (at your option) any later
11 * version.
12 *
13 * FRRouting is distributed in the hope that it will be useful, but WITHOUT ANY
14 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
15 * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
16 * details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; see the file COPYING; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 /* clang-format off */
24 #include <zebra.h>
25 #include <pthread.h> // for pthread_mutex_lock, pthread_mutex_unlock
26
27 #include "frr_pthread.h" // for frr_pthread
28 #include "hash.h" // for hash, hash_clean, hash_create_size...
29 #include "log.h" // for zlog_debug
30 #include "memory.h" // for MTYPE_TMP, XFREE, XCALLOC, XMALLOC
31 #include "monotime.h" // for monotime, monotime_since
32
33 #include "bgpd/bgpd.h" // for peer, PEER_THREAD_KEEPALIVES_ON, peer...
34 #include "bgpd/bgp_debug.h" // for bgp_debug_neighbor_events
35 #include "bgpd/bgp_packet.h" // for bgp_keepalive_send
36 #include "bgpd/bgp_keepalives.h"
37 /* clang-format on */
38
39 /*
40 * Peer KeepAlive Timer.
41 * Associates a peer with the time of its last keepalive.
42 */
43 struct pkat {
44 /* the peer to send keepalives to */
45 struct peer *peer;
46 /* absolute time of last keepalive sent */
47 struct timeval last;
48 };
49
50 /* List of peers we are sending keepalives for, and associated mutex. */
51 static pthread_mutex_t *peerhash_mtx;
52 static pthread_cond_t *peerhash_cond;
53 static struct hash *peerhash;
54
55 static struct pkat *pkat_new(struct peer *peer)
56 {
57 struct pkat *pkat = XMALLOC(MTYPE_TMP, sizeof(struct pkat));
58 pkat->peer = peer;
59 monotime(&pkat->last);
60 return pkat;
61 }
62
63 static void pkat_del(void *pkat)
64 {
65 XFREE(MTYPE_TMP, pkat);
66 }
67
68
/*
 * Callback for hash_iterate. Determines if a peer needs a keepalive and if so,
 * generates and sends it.
 *
 * For any given peer, if the elapsed time since its last keepalive exceeds its
 * configured keepalive timer, a keepalive is sent to the peer and its
 * last-sent time is reset. Additionally, if the elapsed time does not exceed
 * the configured keepalive timer, but the time until the next keepalive is due
 * is within a hardcoded tolerance, a keepalive is sent as if the configured
 * timer was exceeded. Doing this helps alleviate nanosecond sleeps between
 * ticks by grouping together peers who are due for keepalives at roughly the
 * same time. This tolerance value is arbitrarily chosen to be 100ms.
 *
 * In addition, this function calculates the maximum amount of time that the
 * keepalive thread can sleep before another tick needs to take place. This is
 * equivalent to shortest time until a keepalive is due for any one peer.
 *
 * That result is passed back through 'arg' (a struct timeval *): on entry it
 * holds the running minimum delay (tv_sec < 0 means "none computed yet"); on
 * exit it holds min(previous value, this peer's time-until-due).
 *
 * NOTE(review): the scratch timevals below are 'static' -- this is safe only
 * as long as peer_process() runs solely on the single keepalive pthread.
 */
static void peer_process(struct hash_backet *hb, void *arg)
{
	struct pkat *pkat = hb->data;

	/* running minimum delay, shared across the whole hash iteration */
	struct timeval *next_update = arg;

	static struct timeval elapsed;  // elapsed time since keepalive
	static struct timeval ka = {0}; // peer->v_keepalive as a timeval
	static struct timeval diff;     // ka - elapsed

	/* grouping tolerance: send early if due within 100ms */
	static struct timeval tolerance = {0, 100000};

	/* calculate elapsed time since last keepalive */
	monotime_since(&pkat->last, &elapsed);

	/* calculate difference between elapsed time and configured time */
	ka.tv_sec = pkat->peer->v_keepalive;
	timersub(&ka, &elapsed, &diff);

	/* send if the timer expired outright, or if the next keepalive is due
	 * within the tolerance window */
	int send_keepalive =
		elapsed.tv_sec >= ka.tv_sec || timercmp(&diff, &tolerance, <);

	if (send_keepalive) {
		if (bgp_debug_neighbor_events(pkat->peer))
			zlog_debug("%s [FSM] Timer (keepalive timer expire)",
				   pkat->peer->host);

		bgp_keepalive_send(pkat->peer);
		monotime(&pkat->last);
		/* this peer's next keepalive is now a full interval away */
		memset(&elapsed, 0x00, sizeof(struct timeval));
		diff = ka;
	}

	/* if calculated next update for this peer < current delay, use it */
	if (next_update->tv_sec < 0 || timercmp(&diff, next_update, <))
		*next_update = diff;
}
125
126 static int peer_hash_cmp(const void *f, const void *s)
127 {
128 const struct pkat *p1 = f;
129 const struct pkat *p2 = s;
130 return p1->peer == p2->peer;
131 }
132
133 static unsigned int peer_hash_key(void *arg)
134 {
135 struct pkat *pkat = arg;
136 return (uintptr_t)pkat->peer;
137 }
138
139 /* Cleanup handler / deinitializer. */
140 static void bgp_keepalives_finish(void *arg)
141 {
142 if (peerhash) {
143 hash_clean(peerhash, pkat_del);
144 hash_free(peerhash);
145 }
146
147 peerhash = NULL;
148
149 pthread_mutex_unlock(peerhash_mtx);
150 pthread_mutex_destroy(peerhash_mtx);
151 pthread_cond_destroy(peerhash_cond);
152
153 XFREE(MTYPE_TMP, peerhash_mtx);
154 XFREE(MTYPE_TMP, peerhash_cond);
155 }
156
/*
 * Entry function for peer keepalive generation pthread.
 *
 * Initializes the peer hashtable and its synchronization primitives, then
 * loops: sleep until the earliest keepalive is due (or until woken by
 * bgp_keepalives_wake), tick every registered peer via peer_process(), and
 * compute the next wakeup deadline. Runs until fpt->running is cleared by
 * bgp_keepalives_stop().
 */
void *bgp_keepalives_start(void *arg)
{
	struct frr_pthread *fpt = arg;
	fpt->master->owner = pthread_self();

	struct timeval currtime = {0, 0};
	struct timeval aftertime = {0, 0};
	struct timeval next_update = {0, 0};
	struct timespec next_update_ts = {0, 0};

	peerhash_mtx = XCALLOC(MTYPE_TMP, sizeof(pthread_mutex_t));
	peerhash_cond = XCALLOC(MTYPE_TMP, sizeof(pthread_cond_t));

	/* initialize mutex */
	pthread_mutex_init(peerhash_mtx, NULL);

	/* use monotonic clock with condition variable, so timedwait below is
	 * immune to wall-clock adjustments */
	pthread_condattr_t attrs;
	pthread_condattr_init(&attrs);
	pthread_condattr_setclock(&attrs, CLOCK_MONOTONIC);
	pthread_cond_init(peerhash_cond, &attrs);
	pthread_condattr_destroy(&attrs);

#ifdef GNU_LINUX
	pthread_setname_np(fpt->thread, "bgpd_ka");
#elif defined(OPEN_BSD)
	pthread_set_name_np(fpt->thread, "bgpd_ka");
#endif

	/* initialize peer hashtable */
	peerhash = hash_create_size(2048, peer_hash_key, peer_hash_cmp, NULL);
	pthread_mutex_lock(peerhash_mtx);

	/* register cleanup handler -- tears down the hashtable, mutex and
	 * condvar when this thread exits (see bgp_keepalives_finish) */
	pthread_cleanup_push(&bgp_keepalives_finish, NULL);

	/* notify anybody waiting on us that we are done starting up */
	frr_pthread_notify_running(fpt);

	while (atomic_load_explicit(&fpt->running, memory_order_relaxed)) {
		/* with peers registered, sleep until the next keepalive is
		 * due; with none, block until a peer arrives or we stop */
		if (peerhash->count > 0)
			pthread_cond_timedwait(peerhash_cond, peerhash_mtx,
					       &next_update_ts);
		else
			while (peerhash->count == 0
			       && atomic_load_explicit(&fpt->running,
						       memory_order_relaxed))
				pthread_cond_wait(peerhash_cond, peerhash_mtx);

		monotime(&currtime);

		/* sentinel: peer_process() treats negative tv_sec as "no
		 * deadline computed yet" */
		next_update.tv_sec = -1;

		hash_iterate(peerhash, peer_process, &next_update);
		if (next_update.tv_sec == -1)
			memset(&next_update, 0x00, sizeof(next_update));

		/* NOTE(review): aftertime is written but never read --
		 * presumably leftover instrumentation; confirm before
		 * removing */
		monotime_since(&currtime, &aftertime);

		/* convert the relative delay into an absolute
		 * CLOCK_MONOTONIC deadline for the next timedwait */
		timeradd(&currtime, &next_update, &next_update);
		TIMEVAL_TO_TIMESPEC(&next_update, &next_update_ts);
	}

	/* clean up: pop and run bgp_keepalives_finish */
	pthread_cleanup_pop(1);

	return NULL;
}
228
229 /* --- thread external functions ------------------------------------------- */
230
231 void bgp_keepalives_on(struct peer *peer)
232 {
233 if (CHECK_FLAG(peer->thread_flags, PEER_THREAD_KEEPALIVES_ON))
234 return;
235
236 struct frr_pthread *fpt = frr_pthread_get(PTHREAD_KEEPALIVES);
237 assert(fpt->running);
238
239 /* placeholder bucket data to use for fast key lookups */
240 static struct pkat holder = {0};
241
242 if (!peerhash_mtx) {
243 zlog_warn("%s: call bgp_keepalives_init() first", __func__);
244 return;
245 }
246
247 pthread_mutex_lock(peerhash_mtx);
248 {
249 holder.peer = peer;
250 if (!hash_lookup(peerhash, &holder)) {
251 struct pkat *pkat = pkat_new(peer);
252 hash_get(peerhash, pkat, hash_alloc_intern);
253 peer_lock(peer);
254 }
255 SET_FLAG(peer->thread_flags, PEER_THREAD_KEEPALIVES_ON);
256 }
257 pthread_mutex_unlock(peerhash_mtx);
258 bgp_keepalives_wake();
259 }
260
261 void bgp_keepalives_off(struct peer *peer)
262 {
263 if (!CHECK_FLAG(peer->thread_flags, PEER_THREAD_KEEPALIVES_ON))
264 return;
265
266 struct frr_pthread *fpt = frr_pthread_get(PTHREAD_KEEPALIVES);
267 assert(fpt->running);
268
269 /* placeholder bucket data to use for fast key lookups */
270 static struct pkat holder = {0};
271
272 if (!peerhash_mtx) {
273 zlog_warn("%s: call bgp_keepalives_init() first", __func__);
274 return;
275 }
276
277 pthread_mutex_lock(peerhash_mtx);
278 {
279 holder.peer = peer;
280 struct pkat *res = hash_release(peerhash, &holder);
281 if (res) {
282 pkat_del(res);
283 peer_unlock(peer);
284 }
285 UNSET_FLAG(peer->thread_flags, PEER_THREAD_KEEPALIVES_ON);
286 }
287 pthread_mutex_unlock(peerhash_mtx);
288 }
289
290 void bgp_keepalives_wake()
291 {
292 pthread_mutex_lock(peerhash_mtx);
293 {
294 pthread_cond_signal(peerhash_cond);
295 }
296 pthread_mutex_unlock(peerhash_mtx);
297 }
298
299 int bgp_keepalives_stop(struct frr_pthread *fpt, void **result)
300 {
301 assert(fpt->running);
302
303 atomic_store_explicit(&fpt->running, false, memory_order_relaxed);
304 bgp_keepalives_wake();
305
306 pthread_join(fpt->thread, result);
307 return 0;
308 }