/* BGP I/O.
 * Implements packet I/O in a pthread.
 * Copyright (C) 2017 Cumulus Networks
 * Quentin Young
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
 * MA 02110-1301 USA
 */

/* clang-format off */
#include <zebra.h>
#include <pthread.h>		// for pthread_mutex_unlock, pthread_mutex_lock

#include "frr_pthread.h"	// for frr_pthread_get, frr_pthread
#include "linklist.h"		// for list_delete, list_delete_all_node, lis...
#include "log.h"		// for zlog_debug, safe_strerror, zlog_err
#include "memory.h"		// for MTYPE_TMP, XCALLOC, XFREE
#include "network.h"		// for ERRNO_IO_RETRY
#include "stream.h"		// for stream_get_endp, stream_getw_from, str...
#include "thread.h"		// for THREAD_OFF, THREAD_ARG, thread, thread...
#include "zassert.h"		// for assert

#include "bgpd/bgp_io.h"
#include "bgpd/bgp_debug.h"	// for bgp_debug_neighbor_events, bgp_type_str
#include "bgpd/bgp_fsm.h"	// for BGP_EVENT_ADD, bgp_event
#include "bgpd/bgp_packet.h"	// for bgp_notify_send_with_data, bgp_notify...
#include "bgpd/bgpd.h"		// for peer, BGP_MARKER_SIZE, bgp_master, bm
/* clang-format on */

/* forward declarations */
static uint16_t bgp_write(struct peer *);
static uint16_t bgp_read(struct peer *);
static int bgp_process_writes(struct thread *);
static int bgp_process_reads(struct thread *);
static bool validate_header(struct peer *);

/* generic i/o status codes */
#define BGP_IO_TRANS_ERR (1 << 0) // EAGAIN or similar occurred
#define BGP_IO_FATAL_ERR (1 << 1) // some kind of fatal TCP error
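
/* Illustrative only: callers test these flags with CHECK_FLAG on the value
 * returned by bgp_read() / bgp_write(), e.g.
 *
 *	uint16_t status = bgp_write(peer);
 *	if (CHECK_FLAG(status, BGP_IO_FATAL_ERR))
 *		reschedule = false;
 */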
/* Start and stop routines for I/O pthread + control variables
 * ------------------------------------------------------------------------ */
_Atomic bool bgp_io_thread_run;
_Atomic bool bgp_io_thread_started;

void bgp_io_init(void)
{
	bgp_io_thread_run = false;
	bgp_io_thread_started = false;
}

/* Unused callback for thread_add_read() */
static int bgp_io_dummy(struct thread *thread) { return 0; }

void *bgp_io_start(void *arg)
{
	struct frr_pthread *fpt = frr_pthread_get(PTHREAD_IO);
	fpt->master->owner = pthread_self();

	// fd so we can sleep in poll()
	int sleeper[2];
	if (pipe(sleeper) < 0)
		zlog_err("%s: could not create sleeper pipe", __func__);
	thread_add_read(fpt->master, &bgp_io_dummy, NULL, sleeper[0], NULL);

	// we definitely don't want to handle signals
	fpt->master->handle_signals = false;

	struct thread task;

	atomic_store_explicit(&bgp_io_thread_run, true, memory_order_seq_cst);
	atomic_store_explicit(&bgp_io_thread_started, true,
			      memory_order_seq_cst);

	while (bgp_io_thread_run) {
		if (thread_fetch(fpt->master, &task)) {
			thread_call(&task);
		}
	}

	close(sleeper[1]);
	close(sleeper[0]);

	return NULL;
}

static int bgp_io_finish(struct thread *thread)
{
	atomic_store_explicit(&bgp_io_thread_run, false, memory_order_seq_cst);
	return 0;
}

int bgp_io_stop(void **result, struct frr_pthread *fpt)
{
	thread_add_event(fpt->master, &bgp_io_finish, NULL, 0, NULL);
	pthread_join(fpt->thread, result);
	return 0;
}

/* Extern API -------------------------------------------------------------- */
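
/*
 * Illustrative usage from the main pthread (a sketch; the real call sites
 * live in the FSM and packet-writing code):
 *
 *	bgp_reads_on(peer);	// start servicing peer->fd reads
 *	bgp_writes_on(peer);	// start flushing peer->obuf
 *	...
 *	bgp_writes_off(peer);	// stop before tearing down the session
 *	bgp_reads_off(peer);
 */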

void bgp_writes_on(struct peer *peer)
{
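	/* spin until the I/O pthread finishes initializing; it sets
	 * bgp_io_thread_started as its last startup step (same pattern in
	 * the other on/off functions below) */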
	while (
	    !atomic_load_explicit(&bgp_io_thread_started, memory_order_seq_cst))
		;

	assert(peer->status != Deleted);
	assert(peer->obuf);
	assert(peer->ibuf);
	assert(peer->ibuf_work);
	assert(!peer->t_connect_check_r);
	assert(!peer->t_connect_check_w);
	assert(peer->fd);

	struct frr_pthread *fpt = frr_pthread_get(PTHREAD_IO);

	thread_add_write(fpt->master, bgp_process_writes, peer, peer->fd,
			 &peer->t_write);
	SET_FLAG(peer->thread_flags, PEER_THREAD_WRITES_ON);
}

void bgp_writes_off(struct peer *peer)
{
	while (
	    !atomic_load_explicit(&bgp_io_thread_started, memory_order_seq_cst))
		;

	struct frr_pthread *fpt = frr_pthread_get(PTHREAD_IO);

	thread_cancel_async(fpt->master, &peer->t_write, NULL);
	THREAD_OFF(peer->t_generate_updgrp_packets);

	UNSET_FLAG(peer->thread_flags, PEER_THREAD_WRITES_ON);
}

void bgp_reads_on(struct peer *peer)
{
	while (
	    !atomic_load_explicit(&bgp_io_thread_started, memory_order_seq_cst))
		;

	assert(peer->status != Deleted);
	assert(peer->ibuf);
	assert(peer->fd);
	assert(peer->ibuf_work);
	assert(peer->obuf);
	assert(!peer->t_connect_check_r);
	assert(!peer->t_connect_check_w);

	struct frr_pthread *fpt = frr_pthread_get(PTHREAD_IO);

	thread_add_read(fpt->master, bgp_process_reads, peer, peer->fd,
			&peer->t_read);

	SET_FLAG(peer->thread_flags, PEER_THREAD_READS_ON);
}

void bgp_reads_off(struct peer *peer)
{
	while (
	    !atomic_load_explicit(&bgp_io_thread_started, memory_order_seq_cst))
		;

	struct frr_pthread *fpt = frr_pthread_get(PTHREAD_IO);

	thread_cancel_async(fpt->master, &peer->t_read, NULL);
	THREAD_OFF(peer->t_process_packet);

	UNSET_FLAG(peer->thread_flags, PEER_THREAD_READS_ON);
}

/* Internal functions ------------------------------------------------------ */

/**
 * Called from I/O pthread when a file descriptor has become ready for writing.
 */
static int bgp_process_writes(struct thread *thread)
{
	struct peer *peer = THREAD_ARG(thread);
	uint16_t status;
	bool reschedule;
	bool fatal = false;

	if (peer->fd < 0)
		return -1;

	struct frr_pthread *fpt = frr_pthread_get(PTHREAD_IO);

	pthread_mutex_lock(&peer->io_mtx);
	{
		status = bgp_write(peer);
		reschedule = (stream_fifo_head(peer->obuf) != NULL);
	}
	pthread_mutex_unlock(&peer->io_mtx);

	if (CHECK_FLAG(status, BGP_IO_TRANS_ERR)) { /* no problem */
	}

	if (CHECK_FLAG(status, BGP_IO_FATAL_ERR)) {
		reschedule = false; /* problem */
		fatal = true;
	}

	if (reschedule) {
		thread_add_write(fpt->master, bgp_process_writes, peer,
				 peer->fd, &peer->t_write);
	}
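
	/* Update-group packet generation must run on the main pthread, so it
	 * is scheduled on bm->master rather than on this thread's loop. */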
	if (!fatal) {
		thread_add_timer_msec(bm->master, bgp_generate_updgrp_packets,
				      peer, 0,
				      &peer->t_generate_updgrp_packets);
	}

	return 0;
}

/**
 * Called from I/O pthread when a file descriptor has become ready for reading,
 * or has hung up.
 *
 * We read as much data as possible, process as many packets as we can and
 * place them on peer->ibuf for secondary processing by the main thread.
 */
static int bgp_process_reads(struct thread *thread)
{
	/* clang-format off */
	struct peer *peer;	  // peer to read from
	uint16_t status;	  // bgp_read status code
	bool more = true;	  // whether we got more data
	bool fatal = false;	  // whether fatal error occurred
	bool added_pkt = false;	  // whether we pushed onto ->ibuf
	bool header_valid = true; // whether header is valid
	/* clang-format on */

	peer = THREAD_ARG(thread);

	if (peer->fd < 0)
		return -1;

	struct frr_pthread *fpt = frr_pthread_get(PTHREAD_IO);

	pthread_mutex_lock(&peer->io_mtx);
	{
		status = bgp_read(peer);
	}
	pthread_mutex_unlock(&peer->io_mtx);

	/* error checking phase */
	if (CHECK_FLAG(status, BGP_IO_TRANS_ERR)) {
		/* no problem; just don't process packets */
		more = false;
	}

	if (CHECK_FLAG(status, BGP_IO_FATAL_ERR)) {
		/* problem; tear down session */
		more = false;
		fatal = true;
	}

	while (more) {
		/* static buffer for transferring packets */
		static unsigned char pktbuf[BGP_MAX_PACKET_SIZE];
		/* shorter alias to peer's input buffer */
		struct stream *ibw = peer->ibuf_work;
		/* offset of start of current packet */
		size_t offset = stream_get_getp(ibw);
		/* packet size as given by header */
		uint16_t pktsize = 0;

		/* check that we have enough data for a header */
		if (STREAM_READABLE(ibw) < BGP_HEADER_SIZE)
			break;

		/* validate header */
		header_valid = validate_header(peer);

		if (!header_valid) {
			fatal = true;
			break;
		}

		/* header is valid; retrieve packet size */
		pktsize = stream_getw_from(ibw, offset + BGP_MARKER_SIZE);

		/* if this fails we are seriously screwed */
		assert(pktsize <= BGP_MAX_PACKET_SIZE);

		/* If we have that much data, chuck it into its own
		 * stream and append to input queue for processing. */
		if (STREAM_READABLE(ibw) >= pktsize) {
			struct stream *pkt = stream_new(pktsize);
			stream_get(pktbuf, ibw, pktsize);
			stream_put(pkt, pktbuf, pktsize);

			pthread_mutex_lock(&peer->io_mtx);
			{
				stream_fifo_push(peer->ibuf, pkt);
			}
			pthread_mutex_unlock(&peer->io_mtx);

			added_pkt = true;
		} else
			break;
	}

	/*
	 * After reading:
	 * 1. Move unread data to stream start to make room for more.
	 * 2. Reschedule and return when we have additional data.
	 *
	 * XXX: Heavy abuse of stream API. This needs a ring buffer.
	 */
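	/* (illustrative)
	 *	before:	[ consumed | unread |---- free ----]   getp = offset
	 *	after:	[ unread |--------- free ----------]   getp = 0
	 */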
	if (more && STREAM_WRITEABLE(peer->ibuf_work) < BGP_MAX_PACKET_SIZE) {
		void *from = stream_pnt(peer->ibuf_work);
		void *to = peer->ibuf_work->data;
		size_t siz = STREAM_READABLE(peer->ibuf_work);
		memmove(to, from, siz);
		stream_set_getp(peer->ibuf_work, 0);
		stream_set_endp(peer->ibuf_work, siz);
	}

	assert(STREAM_WRITEABLE(peer->ibuf_work) >= BGP_MAX_PACKET_SIZE);

	/* handle invalid header */
	if (fatal) {
		/* wipe buffer just in case someone screwed up */
		stream_reset(peer->ibuf_work);
	} else {
		thread_add_read(fpt->master, bgp_process_reads, peer, peer->fd,
				&peer->t_read);
		if (added_pkt)
			thread_add_timer_msec(bm->master, bgp_process_packet,
					      peer, 0,
					      &peer->t_process_packet);
	}

	return 0;
}

/**
 * Flush peer output buffer.
 *
 * This function pops packets off of peer->obuf and writes them to peer->fd.
 * The number of packets written is the minimum of peer->bgp->wpkt_quanta and
 * the number of packets on the output buffer, unless an error occurs.
 *
 * If write() returns an error, the appropriate FSM event is generated.
 *
 * @return status flag (see top-of-file)
 */
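/*
 * Illustrative: with a write quanta of 10 and 25 packets queued on
 * peer->obuf, one call writes 10 packets; bgp_process_writes() then sees
 * that obuf is still non-empty and reschedules itself for the remaining 15.
 */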
static uint16_t bgp_write(struct peer *peer)
{
	uint8_t type;
	struct stream *s;
	int num;
	int update_last_write = 0;
	unsigned int count = 0;
	unsigned int oc;
	uint16_t status = 0;
	uint32_t wpkt_quanta_old;

	/* cache current write quanta so the limit is stable for this call */
	wpkt_quanta_old = atomic_load_explicit(&peer->bgp->wpkt_quanta,
					       memory_order_relaxed);

	/* cache the UPDATE counter so we can tell below whether this call
	 * wrote any new UPDATEs */
	oc = atomic_load_explicit(&peer->update_out, memory_order_relaxed);

	while (count < wpkt_quanta_old && (s = stream_fifo_head(peer->obuf))) {
		int writenum;
		do {
			writenum = stream_get_endp(s) - stream_get_getp(s);
			num = write(peer->fd, STREAM_PNT(s), writenum);

			if (num < 0) {
				if (!ERRNO_IO_RETRY(errno)) {
					BGP_EVENT_ADD(peer, TCP_fatal_error);
					SET_FLAG(status, BGP_IO_FATAL_ERR);
				} else {
					SET_FLAG(status, BGP_IO_TRANS_ERR);
				}

				goto done;
			} else if (num != writenum) // incomplete write
				stream_forward_getp(s, num);

		} while (num != writenum);

		/* Retrieve BGP packet type. */
		stream_set_getp(s, BGP_MARKER_SIZE + 2);
		type = stream_getc(s);

		switch (type) {
		case BGP_MSG_OPEN:
			atomic_fetch_add_explicit(&peer->open_out, 1,
						  memory_order_relaxed);
			break;
		case BGP_MSG_UPDATE:
			atomic_fetch_add_explicit(&peer->update_out, 1,
						  memory_order_relaxed);
			break;
		case BGP_MSG_NOTIFY:
			atomic_fetch_add_explicit(&peer->notify_out, 1,
						  memory_order_relaxed);
			/* Double start timer. */
			peer->v_start *= 2;

			/* Overflow check. */
			if (peer->v_start >= (60 * 2))
				peer->v_start = (60 * 2);

			/* Handle Graceful Restart case where the state changes
			 * to Connect instead of Idle */
			BGP_EVENT_ADD(peer, BGP_Stop);
			goto done;

		case BGP_MSG_KEEPALIVE:
			atomic_fetch_add_explicit(&peer->keepalive_out, 1,
						  memory_order_relaxed);
			break;
		case BGP_MSG_ROUTE_REFRESH_NEW:
		case BGP_MSG_ROUTE_REFRESH_OLD:
			atomic_fetch_add_explicit(&peer->refresh_out, 1,
						  memory_order_relaxed);
			break;
		case BGP_MSG_CAPABILITY:
			atomic_fetch_add_explicit(&peer->dynamic_cap_out, 1,
						  memory_order_relaxed);
			break;
		}

		count++;

		stream_free(stream_fifo_pop(peer->obuf));
		update_last_write = 1;
	}

done : {
	/* Update last_update if UPDATEs were written. */
	if (peer->update_out > oc)
		atomic_store_explicit(&peer->last_update, bgp_clock(),
				      memory_order_relaxed);

	/* If we TXed any flavor of packet update last_write */
	if (update_last_write)
		atomic_store_explicit(&peer->last_write, bgp_clock(),
				      memory_order_relaxed);
}

	return status;
}

/**
 * Reads a chunk of data from peer->fd into peer->ibuf_work.
 *
 * @return status flag (see top-of-file)
 */
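/*
 * Note: the switch below relies on stream_read_try()'s return convention:
 * -1 on a fatal error, 0 on EOF, -2 for a transient errno (EAGAIN et al.),
 * otherwise the number of bytes read.
 */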
static uint16_t bgp_read(struct peer *peer)
{
	size_t readsize; // how many bytes we want to read
	ssize_t nbytes;	 // how many bytes we actually read
	uint16_t status = 0;

	readsize = STREAM_WRITEABLE(peer->ibuf_work);

	nbytes = stream_read_try(peer->ibuf_work, peer->fd, readsize);

	switch (nbytes) {
	/* Fatal error; tear down session */
	case -1:
		zlog_err("%s [Error] bgp_read_packet error: %s", peer->host,
			 safe_strerror(errno));

		if (peer->status == Established) {
			if (CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_MODE)) {
				peer->last_reset = PEER_DOWN_NSF_CLOSE_SESSION;
				SET_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT);
			} else
				peer->last_reset = PEER_DOWN_CLOSE_SESSION;
		}

		BGP_EVENT_ADD(peer, TCP_fatal_error);
		SET_FLAG(status, BGP_IO_FATAL_ERR);
		break;

	/* Received EOF / TCP session closed */
	case 0:
		if (bgp_debug_neighbor_events(peer))
			zlog_debug("%s [Event] BGP connection closed fd %d",
				   peer->host, peer->fd);

		if (peer->status == Established) {
			if (CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_MODE)) {
				peer->last_reset = PEER_DOWN_NSF_CLOSE_SESSION;
				SET_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT);
			} else
				peer->last_reset = PEER_DOWN_CLOSE_SESSION;
		}

		BGP_EVENT_ADD(peer, TCP_connection_closed);
		SET_FLAG(status, BGP_IO_FATAL_ERR);
		break;

	/* EAGAIN or EWOULDBLOCK; come back later */
	case -2:
		SET_FLAG(status, BGP_IO_TRANS_ERR);
		break;
	default:
		break;
	}

	return status;
}

/*
 * Called after we have read a BGP packet header. Validates marker, message
 * type and packet length. If any of these aren't correct, sends a notify.
 */
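/*
 * BGP message header layout (RFC 4271):
 *
 *	0            16        18     19
 *	+------------+---------+------+
 *	|   marker   | length  | type |
 *	+------------+---------+------+
 *
 * 16 octets of 0xff, a two-octet total message length, a one-octet type.
 */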
static bool validate_header(struct peer *peer)
{
	uint16_t size;
	uint8_t type;
	struct stream *pkt = peer->ibuf_work;
	size_t getp = stream_get_getp(pkt);

	static uint8_t marker[BGP_MARKER_SIZE] = {
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

	if (memcmp(marker, stream_pnt(pkt), BGP_MARKER_SIZE) != 0) {
		bgp_notify_send(peer, BGP_NOTIFY_HEADER_ERR,
				BGP_NOTIFY_HEADER_NOT_SYNC);
		return false;
	}

	/* Get size and type in host byte order. */
	size = stream_getw_from(pkt, getp + BGP_MARKER_SIZE);
	type = stream_getc_from(pkt, getp + BGP_MARKER_SIZE + 2);

	/* BGP type check. */
	if (type != BGP_MSG_OPEN && type != BGP_MSG_UPDATE
	    && type != BGP_MSG_NOTIFY && type != BGP_MSG_KEEPALIVE
	    && type != BGP_MSG_ROUTE_REFRESH_NEW
	    && type != BGP_MSG_ROUTE_REFRESH_OLD
	    && type != BGP_MSG_CAPABILITY) {
		if (bgp_debug_neighbor_events(peer))
			zlog_debug("%s unknown message type 0x%02x", peer->host,
				   type);

		bgp_notify_send_with_data(peer, BGP_NOTIFY_HEADER_ERR,
					  BGP_NOTIFY_HEADER_BAD_MESTYPE,
					  &type, 1);
		return false;
	}

	/* Minimum packet length check. */
	if ((size < BGP_HEADER_SIZE) || (size > BGP_MAX_PACKET_SIZE)
	    || (type == BGP_MSG_OPEN && size < BGP_MSG_OPEN_MIN_SIZE)
	    || (type == BGP_MSG_UPDATE && size < BGP_MSG_UPDATE_MIN_SIZE)
	    || (type == BGP_MSG_NOTIFY && size < BGP_MSG_NOTIFY_MIN_SIZE)
	    || (type == BGP_MSG_KEEPALIVE && size != BGP_MSG_KEEPALIVE_MIN_SIZE)
	    || (type == BGP_MSG_ROUTE_REFRESH_NEW
		&& size < BGP_MSG_ROUTE_REFRESH_MIN_SIZE)
	    || (type == BGP_MSG_ROUTE_REFRESH_OLD
		&& size < BGP_MSG_ROUTE_REFRESH_MIN_SIZE)
	    || (type == BGP_MSG_CAPABILITY
		&& size < BGP_MSG_CAPABILITY_MIN_SIZE)) {
		if (bgp_debug_neighbor_events(peer)) {
			zlog_debug("%s bad message length - %d for %s",
				   peer->host, size,
				   type == 128 ? "ROUTE-REFRESH"
					       : bgp_type_str[(int)type]);
		}

		uint16_t nsize = htons(size);

		bgp_notify_send_with_data(peer, BGP_NOTIFY_HEADER_ERR,
					  BGP_NOTIFY_HEADER_BAD_MESLEN,
					  (unsigned char *)&nsize, 2);
		return false;
	}

	return true;
}