2 * Implements packet I/O in a pthread.
3 * Copyright (C) 2017 Cumulus Networks
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; see the file COPYING; if not, write to the
18 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
22 /* clang-format off */
24 #include <pthread.h> // for pthread_mutex_unlock, pthread_mutex_lock
26 #include "frr_pthread.h" // for frr_pthread_get, frr_pthread
27 #include "linklist.h" // for list_delete, list_delete_all_node, lis...
28 #include "log.h" // for zlog_debug, safe_strerror, zlog_err
29 #include "memory.h" // for MTYPE_TMP, XCALLOC, XFREE
30 #include "network.h" // for ERRNO_IO_RETRY
31 #include "stream.h" // for stream_get_endp, stream_getw_from, str...
32 #include "thread.h" // for THREAD_OFF, THREAD_ARG, thread, thread...
33 #include "zassert.h" // for assert
35 #include "bgpd/bgp_io.h"
36 #include "bgpd/bgp_debug.h" // for bgp_debug_neighbor_events, bgp_type_str
37 #include "bgpd/bgp_fsm.h" // for BGP_EVENT_ADD, bgp_event
38 #include "bgpd/bgp_packet.h" // for bgp_notify_send_with_data, bgp_notify...
39 #include "bgpd/bgpd.h" // for peer, BGP_MARKER_SIZE, bgp_master, bm
42 /* forward declarations */
43 static uint16_t bgp_write(struct peer
*);
44 static uint16_t bgp_read(struct peer
*);
45 static int bgp_process_writes(struct thread
*);
46 static int bgp_process_reads(struct thread
*);
47 static bool validate_header(struct peer
*);
49 /* generic i/o status codes */
50 #define BGP_IO_TRANS_ERR (1 << 0) // EAGAIN or similar occurred
51 #define BGP_IO_FATAL_ERR (1 << 1) // some kind of fatal TCP error
53 /* Start and stop routines for I/O pthread + control variables
54 * ------------------------------------------------------------------------ */
55 _Atomic
bool bgp_io_thread_run
;
56 _Atomic
bool bgp_io_thread_started
;
60 bgp_io_thread_run
= false;
61 bgp_io_thread_started
= false;
/* Placeholder callback; the pipe read event registered with this handler
 * exists only so the I/O thread's event loop has a descriptor to sleep on
 * in poll(). It never does any work. */
static int bgp_io_dummy(struct thread *thread)
{
	return 0;
}
67 void *bgp_io_start(void *arg
)
69 struct frr_pthread
*fpt
= frr_pthread_get(PTHREAD_IO
);
70 fpt
->master
->owner
= pthread_self();
72 // fd so we can sleep in poll()
75 thread_add_read(fpt
->master
, &bgp_io_dummy
, NULL
, sleeper
[0], NULL
);
77 // we definitely don't want to handle signals
78 fpt
->master
->handle_signals
= false;
82 atomic_store_explicit(&bgp_io_thread_run
, true, memory_order_seq_cst
);
83 atomic_store_explicit(&bgp_io_thread_started
, true,
84 memory_order_seq_cst
);
86 while (bgp_io_thread_run
) {
87 if (thread_fetch(fpt
->master
, &task
)) {
98 static int bgp_io_finish(struct thread
*thread
)
100 atomic_store_explicit(&bgp_io_thread_run
, false, memory_order_seq_cst
);
104 int bgp_io_stop(void **result
, struct frr_pthread
*fpt
)
106 thread_add_event(fpt
->master
, &bgp_io_finish
, NULL
, 0, NULL
);
107 pthread_join(fpt
->thread
, result
);
111 /* Extern API -------------------------------------------------------------- */
112 void bgp_io_running(void)
114 while (!atomic_load_explicit(&bgp_io_thread_started
,
115 memory_order_seq_cst
))
119 void bgp_writes_on(struct peer
*peer
)
121 assert(peer
->status
!= Deleted
);
124 assert(peer
->ibuf_work
);
125 assert(!peer
->t_connect_check_r
);
126 assert(!peer
->t_connect_check_w
);
129 struct frr_pthread
*fpt
= frr_pthread_get(PTHREAD_IO
);
131 thread_add_write(fpt
->master
, bgp_process_writes
, peer
, peer
->fd
,
133 SET_FLAG(peer
->thread_flags
, PEER_THREAD_WRITES_ON
);
136 void bgp_writes_off(struct peer
*peer
)
138 struct frr_pthread
*fpt
= frr_pthread_get(PTHREAD_IO
);
140 thread_cancel_async(fpt
->master
, &peer
->t_write
, NULL
);
141 THREAD_OFF(peer
->t_generate_updgrp_packets
);
143 UNSET_FLAG(peer
->thread_flags
, PEER_THREAD_WRITES_ON
);
146 void bgp_reads_on(struct peer
*peer
)
148 assert(peer
->status
!= Deleted
);
151 assert(peer
->ibuf_work
);
153 assert(!peer
->t_connect_check_r
);
154 assert(!peer
->t_connect_check_w
);
157 struct frr_pthread
*fpt
= frr_pthread_get(PTHREAD_IO
);
159 thread_add_read(fpt
->master
, bgp_process_reads
, peer
, peer
->fd
,
162 SET_FLAG(peer
->thread_flags
, PEER_THREAD_READS_ON
);
165 void bgp_reads_off(struct peer
*peer
)
167 struct frr_pthread
*fpt
= frr_pthread_get(PTHREAD_IO
);
169 thread_cancel_async(fpt
->master
, &peer
->t_read
, NULL
);
170 THREAD_OFF(peer
->t_process_packet
);
172 UNSET_FLAG(peer
->thread_flags
, PEER_THREAD_READS_ON
);
175 /* Internal functions ------------------------------------------------------- */
178 * Called from I/O pthread when a file descriptor has become ready for writing.
180 static int bgp_process_writes(struct thread
*thread
)
182 static struct peer
*peer
;
183 peer
= THREAD_ARG(thread
);
191 struct frr_pthread
*fpt
= frr_pthread_get(PTHREAD_IO
);
193 pthread_mutex_lock(&peer
->io_mtx
);
195 status
= bgp_write(peer
);
196 reschedule
= (stream_fifo_head(peer
->obuf
) != NULL
);
198 pthread_mutex_unlock(&peer
->io_mtx
);
200 if (CHECK_FLAG(status
, BGP_IO_TRANS_ERR
)) { /* no problem */
203 if (CHECK_FLAG(status
, BGP_IO_FATAL_ERR
)) {
204 reschedule
= false; /* problem */
209 thread_add_write(fpt
->master
, bgp_process_writes
, peer
,
210 peer
->fd
, &peer
->t_write
);
212 BGP_TIMER_ON(peer
->t_generate_updgrp_packets
,
213 bgp_generate_updgrp_packets
, 0);
220 * Called from I/O pthread when a file descriptor has become ready for reading,
223 * We read as much data as possible, process as many packets as we can and
224 * place them on peer->ibuf for secondary processing by the main thread.
226 static int bgp_process_reads(struct thread
*thread
)
228 /* clang-format off */
229 static struct peer
*peer
; // peer to read from
230 uint16_t status
; // bgp_read status code
231 bool more
= true; // whether we got more data
232 bool fatal
= false; // whether fatal error occurred
233 bool added_pkt
= false; // whether we pushed onto ->ibuf
234 bool header_valid
= true; // whether header is valid
235 /* clang-format on */
237 peer
= THREAD_ARG(thread
);
242 struct frr_pthread
*fpt
= frr_pthread_get(PTHREAD_IO
);
244 pthread_mutex_lock(&peer
->io_mtx
);
246 status
= bgp_read(peer
);
248 pthread_mutex_unlock(&peer
->io_mtx
);
250 /* error checking phase */
251 if (CHECK_FLAG(status
, BGP_IO_TRANS_ERR
)) {
252 /* no problem; just don't process packets */
256 if (CHECK_FLAG(status
, BGP_IO_FATAL_ERR
)) {
257 /* problem; tear down session */
263 /* static buffer for transferring packets */
264 static unsigned char pktbuf
[BGP_MAX_PACKET_SIZE
];
265 /* shorter alias to peer's input buffer */
266 struct stream
*ibw
= peer
->ibuf_work
;
267 /* offset of start of current packet */
268 size_t offset
= stream_get_getp(ibw
);
269 /* packet size as given by header */
270 u_int16_t pktsize
= 0;
272 /* check that we have enough data for a header */
273 if (STREAM_READABLE(ibw
) < BGP_HEADER_SIZE
)
276 /* validate header */
277 header_valid
= validate_header(peer
);
284 /* header is valid; retrieve packet size */
285 pktsize
= stream_getw_from(ibw
, offset
+ BGP_MARKER_SIZE
);
287 /* if this fails we are seriously screwed */
288 assert(pktsize
<= BGP_MAX_PACKET_SIZE
);
290 /* If we have that much data, chuck it into its own
291 * stream and append to input queue for processing. */
292 if (STREAM_READABLE(ibw
) >= pktsize
) {
293 struct stream
*pkt
= stream_new(pktsize
);
294 stream_get(pktbuf
, ibw
, pktsize
);
295 stream_put(pkt
, pktbuf
, pktsize
);
297 pthread_mutex_lock(&peer
->io_mtx
);
299 stream_fifo_push(peer
->ibuf
, pkt
);
301 pthread_mutex_unlock(&peer
->io_mtx
);
310 * 1. Move unread data to stream start to make room for more.
311 * 2. Reschedule and return when we have additional data.
313 * XXX: Heavy abuse of stream API. This needs a ring buffer.
315 if (more
&& STREAM_WRITEABLE(peer
->ibuf_work
) < BGP_MAX_PACKET_SIZE
) {
316 void *from
= stream_pnt(peer
->ibuf_work
);
317 void *to
= peer
->ibuf_work
->data
;
318 size_t siz
= STREAM_READABLE(peer
->ibuf_work
);
319 memmove(to
, from
, siz
);
320 stream_set_getp(peer
->ibuf_work
, 0);
321 stream_set_endp(peer
->ibuf_work
, siz
);
324 assert(STREAM_WRITEABLE(peer
->ibuf_work
) >= BGP_MAX_PACKET_SIZE
);
326 /* handle invalid header */
328 /* wipe buffer just in case someone screwed up */
329 stream_reset(peer
->ibuf_work
);
331 thread_add_read(fpt
->master
, bgp_process_reads
, peer
, peer
->fd
,
334 thread_add_timer_msec(bm
->master
, bgp_process_packet
,
335 peer
, 0, &peer
->t_process_packet
);
342 * Flush peer output buffer.
344 * This function pops packets off of peer->obuf and writes them to peer->fd.
345 * The amount of packets written is equal to the minimum of peer->wpkt_quanta
346 * and the number of packets on the output buffer, unless an error occurs.
348 * If write() returns an error, the appropriate FSM event is generated.
350 * The return value is equal to the number of packets written
351 * (which may be zero).
353 static uint16_t bgp_write(struct peer
*peer
)
358 int update_last_write
= 0;
359 unsigned int count
= 0;
363 uint32_t wpkt_quanta_old
;
365 // save current # updates sent
366 oc
= atomic_load_explicit(&peer
->update_out
, memory_order_relaxed
);
368 // cache current write quanta
370 atomic_load_explicit(&peer
->bgp
->wpkt_quanta
, memory_order_relaxed
);
372 while (count
< wpkt_quanta_old
&& (s
= stream_fifo_head(peer
->obuf
))) {
375 writenum
= stream_get_endp(s
) - stream_get_getp(s
);
376 num
= write(peer
->fd
, STREAM_PNT(s
), writenum
);
379 if (!ERRNO_IO_RETRY(errno
)) {
380 BGP_EVENT_ADD(peer
, TCP_fatal_error
);
381 SET_FLAG(status
, BGP_IO_FATAL_ERR
);
383 SET_FLAG(status
, BGP_IO_TRANS_ERR
);
387 } else if (num
!= writenum
) // incomplete write
388 stream_forward_getp(s
, num
);
390 } while (num
!= writenum
);
392 /* Retrieve BGP packet type. */
393 stream_set_getp(s
, BGP_MARKER_SIZE
+ 2);
394 type
= stream_getc(s
);
398 atomic_fetch_add_explicit(&peer
->open_out
, 1,
399 memory_order_relaxed
);
402 atomic_fetch_add_explicit(&peer
->update_out
, 1,
403 memory_order_relaxed
);
406 atomic_fetch_add_explicit(&peer
->notify_out
, 1,
407 memory_order_relaxed
);
408 /* Double start timer. */
411 /* Overflow check. */
412 if (peer
->v_start
>= (60 * 2))
413 peer
->v_start
= (60 * 2);
415 /* Handle Graceful Restart case where the state changes
416 * to Connect instead of Idle */
417 BGP_EVENT_ADD(peer
, BGP_Stop
);
420 case BGP_MSG_KEEPALIVE
:
421 atomic_fetch_add_explicit(&peer
->keepalive_out
, 1,
422 memory_order_relaxed
);
424 case BGP_MSG_ROUTE_REFRESH_NEW
:
425 case BGP_MSG_ROUTE_REFRESH_OLD
:
426 atomic_fetch_add_explicit(&peer
->refresh_out
, 1,
427 memory_order_relaxed
);
429 case BGP_MSG_CAPABILITY
:
430 atomic_fetch_add_explicit(&peer
->dynamic_cap_out
, 1,
431 memory_order_relaxed
);
437 stream_free(stream_fifo_pop(peer
->obuf
));
438 update_last_write
= 1;
442 /* Update last_update if UPDATEs were written. */
443 uo
= atomic_load_explicit(&peer
->update_out
, memory_order_relaxed
);
445 atomic_store_explicit(&peer
->last_update
, bgp_clock(),
446 memory_order_relaxed
);
448 /* If we TXed any flavor of packet */
449 if (update_last_write
)
450 atomic_store_explicit(&peer
->last_write
, bgp_clock(),
451 memory_order_relaxed
);
458 * Reads a chunk of data from peer->fd into peer->ibuf_work.
460 * @return status flag (see top-of-file)
462 static uint16_t bgp_read(struct peer
*peer
)
464 size_t readsize
; // how many bytes we want to read
465 ssize_t nbytes
; // how many bytes we actually read
468 readsize
= STREAM_WRITEABLE(peer
->ibuf_work
);
470 nbytes
= stream_read_try(peer
->ibuf_work
, peer
->fd
, readsize
);
473 /* Fatal error; tear down session */
475 zlog_err("%s [Error] bgp_read_packet error: %s", peer
->host
,
476 safe_strerror(errno
));
478 if (peer
->status
== Established
) {
479 if (CHECK_FLAG(peer
->sflags
, PEER_STATUS_NSF_MODE
)) {
480 peer
->last_reset
= PEER_DOWN_NSF_CLOSE_SESSION
;
481 SET_FLAG(peer
->sflags
, PEER_STATUS_NSF_WAIT
);
483 peer
->last_reset
= PEER_DOWN_CLOSE_SESSION
;
486 BGP_EVENT_ADD(peer
, TCP_fatal_error
);
487 SET_FLAG(status
, BGP_IO_FATAL_ERR
);
490 /* Received EOF / TCP session closed */
492 if (bgp_debug_neighbor_events(peer
))
493 zlog_debug("%s [Event] BGP connection closed fd %d",
494 peer
->host
, peer
->fd
);
496 if (peer
->status
== Established
) {
497 if (CHECK_FLAG(peer
->sflags
, PEER_STATUS_NSF_MODE
)) {
498 peer
->last_reset
= PEER_DOWN_NSF_CLOSE_SESSION
;
499 SET_FLAG(peer
->sflags
, PEER_STATUS_NSF_WAIT
);
501 peer
->last_reset
= PEER_DOWN_CLOSE_SESSION
;
504 BGP_EVENT_ADD(peer
, TCP_connection_closed
);
505 SET_FLAG(status
, BGP_IO_FATAL_ERR
);
508 /* EAGAIN or EWOULDBLOCK; come back later */
510 SET_FLAG(status
, BGP_IO_TRANS_ERR
);
520 * Called after we have read a BGP packet header. Validates marker, message
521 * type and packet length. If any of these aren't correct, sends a notify.
523 static bool validate_header(struct peer
*peer
)
527 struct stream
*pkt
= peer
->ibuf_work
;
528 size_t getp
= stream_get_getp(pkt
);
530 static uint8_t marker
[BGP_MARKER_SIZE
] = {
531 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
532 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
534 if (memcmp(marker
, stream_pnt(pkt
), BGP_MARKER_SIZE
) != 0) {
535 bgp_notify_send(peer
, BGP_NOTIFY_HEADER_ERR
,
536 BGP_NOTIFY_HEADER_NOT_SYNC
);
540 /* Get size and type in host byte order. */
541 size
= stream_getw_from(pkt
, getp
+ BGP_MARKER_SIZE
);
542 type
= stream_getc_from(pkt
, getp
+ BGP_MARKER_SIZE
+ 2);
544 /* BGP type check. */
545 if (type
!= BGP_MSG_OPEN
&& type
!= BGP_MSG_UPDATE
546 && type
!= BGP_MSG_NOTIFY
&& type
!= BGP_MSG_KEEPALIVE
547 && type
!= BGP_MSG_ROUTE_REFRESH_NEW
548 && type
!= BGP_MSG_ROUTE_REFRESH_OLD
549 && type
!= BGP_MSG_CAPABILITY
) {
550 if (bgp_debug_neighbor_events(peer
))
551 zlog_debug("%s unknown message type 0x%02x", peer
->host
,
554 bgp_notify_send_with_data(peer
, BGP_NOTIFY_HEADER_ERR
,
555 BGP_NOTIFY_HEADER_BAD_MESTYPE
,
560 /* Minimum packet length check. */
561 if ((size
< BGP_HEADER_SIZE
) || (size
> BGP_MAX_PACKET_SIZE
)
562 || (type
== BGP_MSG_OPEN
&& size
< BGP_MSG_OPEN_MIN_SIZE
)
563 || (type
== BGP_MSG_UPDATE
&& size
< BGP_MSG_UPDATE_MIN_SIZE
)
564 || (type
== BGP_MSG_NOTIFY
&& size
< BGP_MSG_NOTIFY_MIN_SIZE
)
565 || (type
== BGP_MSG_KEEPALIVE
&& size
!= BGP_MSG_KEEPALIVE_MIN_SIZE
)
566 || (type
== BGP_MSG_ROUTE_REFRESH_NEW
567 && size
< BGP_MSG_ROUTE_REFRESH_MIN_SIZE
)
568 || (type
== BGP_MSG_ROUTE_REFRESH_OLD
569 && size
< BGP_MSG_ROUTE_REFRESH_MIN_SIZE
)
570 || (type
== BGP_MSG_CAPABILITY
571 && size
< BGP_MSG_CAPABILITY_MIN_SIZE
)) {
572 if (bgp_debug_neighbor_events(peer
)) {
573 zlog_debug("%s bad message length - %d for %s",
575 type
== 128 ? "ROUTE-REFRESH"
576 : bgp_type_str
[(int) type
]);
579 uint16_t nsize
= htons(size
);
581 bgp_notify_send_with_data(peer
, BGP_NOTIFY_HEADER_ERR
,
582 BGP_NOTIFY_HEADER_BAD_MESLEN
,
583 (unsigned char *) &nsize
, 2);