2 * Copyright (c) 2015-19 David Lamparter, for NetDEF, Inc.
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 #include <sys/types.h>
31 #ifdef HAVE_PTHREAD_NP_H
32 #include <pthread_np.h>
35 #include <sys/syscall.h>
47 #include <mach/mach_traps.h>
55 #include "libfrr_trace.h"
/* Memory-type and hook declarations for the zlog core.
 * NOTE(review): this file was damaged by extraction — each original line is
 * split across several lines and the original line numbers are embedded in
 * the text. Tokens below are kept exactly as found.
 */
/* MTYPE for heap-allocated (overflow) log message text. */
57 DEFINE_MTYPE_STATIC(LIB
, LOG_MESSAGE
, "log message");
/* MTYPE for the per-thread log buffer struct. */
58 DEFINE_MTYPE_STATIC(LIB
, LOG_TLSBUF
, "log thread-local buffer");
/* Hook invoked from zlog_init() so log targets can set themselves up. */
60 DEFINE_HOOK(zlog_init
, (const char *progname
, const char *protoname
,
61 unsigned short instance
, uid_t uid
, gid_t gid
),
62 (progname
, protoname
, instance
, uid
, gid
));
/* KOOH = hook called in reverse order, for shutdown. */
63 DEFINE_KOOH(zlog_fini
, (), ());
/* Hook for early/auxiliary log setup (before full zlog_init).
 * NOTE(review): the forwarded-argument list of this DEFINE_HOOK is not
 * visible in this view — presumably "(prefix, prio_min));"; confirm against
 * the original file.
 */
64 DEFINE_HOOK(zlog_aux_init
, (const char *prefix
, int prio_min
),
/* File-scope state for the logging core (lines garbled by extraction;
 * tokens preserved as-is).
 */
/* Prefix prepended to each log line, e.g. "bgpd[1234]: ". */
67 char zlog_prefix
[128];
/* fd of the per-process directory under /var/tmp/frr holding the
 * crash/thread log buffers; -1 until zlog_init() opens it.
 */
69 int zlog_tmpdirfd
= -1;
/* Runtime toggles for the "[XXXX-XXXXX]" xref-ID and "[EC n]" error-code
 * prefixes; atomic because they are read from any logging thread.
 */
71 static atomic_bool zlog_ec
= true, zlog_xid
= true;
73 /* these are kept around because logging is initialized (and directories
74 * & files created) before zprivs code switches to the FRR user; therefore
75 * we need to chown() things so we don't get permission errors later when
76 * trying to delete things on shutdown
78 static uid_t zlog_uid
= -1;
79 static gid_t zlog_gid
= -1;
/* Atomic (lock-free) list of active log targets. */
81 DECLARE_ATOMLIST(zlog_targets
, struct zlog_target
, head
);
82 static struct zlog_targets_head zlog_targets
;
/* Fragments of "struct zlog_msg" and "struct zlog_tls".
 * NOTE(review): the struct headers and several members are missing from this
 * view; only the surviving member declarations and comments are shown.
 */
84 /* cf. zlog.h for additional comments on this struct.
86 * Note: you MUST NOT pass the format string + va_list to non-FRR format
87 * string functions (e.g. vsyslog, sd_journal_printv, ...) since FRR uses an
88 * extended prinf() with additional formats (%pI4 and the like).
90 * Also remember to use va_copy() on args.
/* Cross-reference record identifying the source location of the message. */
99 const struct xref_logmsg
*xref
;
106 /* This is always ISO8601 with sub-second precision 9 here, it's
107 * converted for callers as needed. ts_dot points to the "."
108 * separating sub-seconds. ts_zonetail is "Z" or "+00:00" for the
111 * Valid if ZLOG_TS_ISO8601 is set.
112 * (0 if timestamp has not been formatted yet)
115 char ts_str
[32], *ts_dot
, ts_zonetail
[8];
118 /* thread-local log message buffering
120 * This is strictly optional and set up by calling zlog_tls_buffer_init()
121 * on a particular thread.
123 * If in use, this will create a temporary file in /var/tmp which is used as
124 * memory-mapped MAP_SHARED log message buffer. The idea there is that buffer
125 * access doesn't require any syscalls, but in case of a crash the kernel
126 * knows to sync the memory back to disk. This way the user can still get the
127 * last log messages if there were any left unwritten in the buffer.
129 * Sizing this dynamically isn't particularly useful, so here's an 8k buffer
130 * with a message limit of 64 messages. Message metadata (e.g. priority,
131 * timestamp) aren't in the mmap region, so they're lost on crash, but we can
/* The mmap-backed path needs openat()/unlinkat(); otherwise an anonymous
 * buffer is used (see the #else branch further down).
 */
135 #if defined(HAVE_OPENAT) && defined(HAVE_UNLINKAT)
139 #define TLS_LOG_BUF_SIZE 8192
140 #define TLS_LOG_MAXMSG 64
/* Fixed-size message slots plus an array of pointers handed to targets. */
147 struct zlog_msg msgs
[TLS_LOG_MAXMSG
];
148 struct zlog_msg
*msgp
[TLS_LOG_MAXMSG
];
/* Per-thread storage access: pthread-key fallback vs. native ELF TLS.
 * NOTE(review): the #ifdef/#else scaffolding selecting between the two
 * variants, the function braces, and the ELF-TLS get/set bodies are missing
 * from this view.
 */
/* Forward declaration; also used as the pthread_key destructor below. */
151 static inline void zlog_tls_free(void *arg
);
153 /* proper ELF TLS is a bit faster than pthread_[gs]etspecific, so if it's
154 * available we'll use it here
158 static pthread_key_t zlog_tls_key
;
/* _CONSTRUCTOR(500): run at load time to create the key (destructor frees
 * the per-thread buffer when a thread exits).
 */
160 static void zlog_tls_key_init(void) __attribute__((_CONSTRUCTOR(500)));
161 static void zlog_tls_key_init(void)
163 pthread_key_create(&zlog_tls_key
, zlog_tls_free
);
166 static void zlog_tls_key_fini(void) __attribute__((_DESTRUCTOR(500)));
167 static void zlog_tls_key_fini(void)
169 pthread_key_delete(zlog_tls_key
);
/* pthread-based getter/setter for the per-thread zlog_tls pointer. */
172 static inline struct zlog_tls
*zlog_tls_get(void)
174 return pthread_getspecific(zlog_tls_key
);
177 static inline void zlog_tls_set(struct zlog_tls
*val
)
179 pthread_setspecific(zlog_tls_key
, val
);
/* ELF TLS variant: a plain thread_local pointer, initial-exec model for
 * cheap access from the main executable/DSO.
 */
182 # ifndef thread_local
183 # define thread_local __thread
186 static thread_local
struct zlog_tls
*zlog_tls_var
187 __attribute__((tls_model("initial-exec")));
/* Bodies of these two (returning/assigning zlog_tls_var) are not visible
 * in this view.
 */
189 static inline struct zlog_tls
*zlog_tls_get(void)
194 static inline void zlog_tls_set(struct zlog_tls
*val
)
/* Return a numeric per-thread ID, used to name the per-thread logbuf file.
 * Platform-specific; NOTE(review): several #elif branches, the "rv"
 * declaration, the braces and the final "return rv;" are missing from this
 * view.
 */
201 static long zlog_gettid(void)
204 #ifdef HAVE_PTHREAD_GETTHREADID_NP
205 rv
= pthread_getthreadid_np();
/* Linux: no stable wrapper historically, use the raw syscall. */
207 rv
= syscall(__NR_gettid
);
208 #elif defined(__NetBSD__)
210 #elif defined(__FreeBSD__)
212 #elif defined(__DragonFly__)
214 #elif defined(__OpenBSD__)
218 #elif defined(__APPLE__)
/* Mach returns a port right that must be released again. */
219 rv
= mach_thread_self();
220 mach_port_deallocate(mach_task_self(), rv
);
/* Set up this thread's log buffer: preferred path is a file under the
 * per-process tmpdir, mmap'd MAP_SHARED so the kernel persists unwritten
 * messages across a crash; fallback (second half) is an anonymous private
 * mapping. Idempotent: returns early if the thread already has a buffer or
 * the tmpdir was never opened.
 * NOTE(review): braces, the "mmfd"/"i" declarations, several error-path
 * lines and the unlink-after-mmap sequence are missing from this view.
 */
225 void zlog_tls_buffer_init(void)
227 struct zlog_tls
*zlog_tls
;
228 char mmpath
[MAXPATHLEN
];
232 zlog_tls
= zlog_tls_get();
/* already initialized, or no tmpdir to put the file in */
234 if (zlog_tls
|| zlog_tmpdirfd
< 0)
237 zlog_tls
= XCALLOC(MTYPE_LOG_TLSBUF
, sizeof(*zlog_tls
));
/* point each msgp[] slot at its fixed msgs[] slot once, up front */
238 for (i
= 0; i
< array_size(zlog_tls
->msgp
); i
++)
239 zlog_tls
->msgp
[i
] = &zlog_tls
->msgs
[i
];
/* one buffer file per thread, named by thread ID */
241 snprintfrr(mmpath
, sizeof(mmpath
), "logbuf.%ld", zlog_gettid());
243 mmfd
= openat(zlog_tmpdirfd
, mmpath
,
244 O_RDWR
| O_CREAT
| O_EXCL
| O_CLOEXEC
, 0600);
246 zlog_err("failed to open thread log buffer \"%s\": %s",
247 mmpath
, strerror(errno
));
/* cf. zlog_uid/zlog_gid comment: created before privilege drop */
250 fchown(mmfd
, zlog_uid
, zlog_gid
);
/* reserve backing store so later writes through the mapping can't SIGBUS */
252 #ifdef HAVE_POSIX_FALLOCATE
253 if (posix_fallocate(mmfd
, 0, TLS_LOG_BUF_SIZE
) != 0)
254 /* note next statement is under above if() */
256 if (ftruncate(mmfd
, TLS_LOG_BUF_SIZE
) < 0) {
257 zlog_err("failed to allocate thread log buffer \"%s\": %s",
258 mmpath
, strerror(errno
));
259 goto out_anon_unlink
;
262 zlog_tls
->mmbuf
= mmap(NULL
, TLS_LOG_BUF_SIZE
, PROT_READ
| PROT_WRITE
,
263 MAP_SHARED
, mmfd
, 0);
264 if (zlog_tls
->mmbuf
== MAP_FAILED
) {
265 zlog_err("failed to mmap thread log buffer \"%s\": %s",
266 mmpath
, strerror(errno
));
267 goto out_anon_unlink
;
271 zlog_tls_set(zlog_tls
);
/* fallback: anonymous mapping — no crash persistence, but same fast path */
279 #ifndef MAP_ANONYMOUS
280 #define MAP_ANONYMOUS MAP_ANON
282 zlog_tls
->mmbuf
= mmap(NULL
, TLS_LOG_BUF_SIZE
, PROT_READ
| PROT_WRITE
,
283 MAP_ANONYMOUS
| MAP_PRIVATE
, -1, 0);
285 if (!zlog_tls
->mmbuf
) {
286 zlog_err("failed to anonymous-mmap thread log buffer: %s",
/* give up entirely: free the struct, thread logs unbuffered */
288 XFREE(MTYPE_LOG_TLSBUF
, zlog_tls
);
293 zlog_tls_set(zlog_tls
);
/* Tear down this thread's log buffer: flush pending messages, free the
 * buffer/mapping, then remove the on-disk logbuf file (clean exit means
 * nothing to recover). NOTE(review): braces and any guard lines are missing
 * from this view.
 */
296 void zlog_tls_buffer_fini(void)
298 char mmpath
[MAXPATHLEN
];
300 zlog_tls_buffer_flush();
/* unmaps + frees; also detaches via the pthread key destructor path */
302 zlog_tls_free(zlog_tls_get());
/* same name scheme as zlog_tls_buffer_init() */
305 snprintfrr(mmpath
, sizeof(mmpath
), "logbuf.%ld", zlog_gettid());
306 if (unlinkat(zlog_tmpdirfd
, mmpath
, 0))
307 zlog_err("unlink logbuf: %s (%d)", strerror(errno
), errno
);
/* No-TLS build: init/fini become stubs (bodies not visible here), and
 * zlog_tls_free() releases one thread's buffer state.
 * NOTE(review): the stub bodies, a NULL-check on arg and the function braces
 * are missing from this view.
 */
310 #else /* !CAN_DO_TLS */
311 void zlog_tls_buffer_init(void)
315 void zlog_tls_buffer_fini(void)
/* pthread_key destructor / direct teardown helper: unmap the message
 * buffer, then free the zlog_tls struct itself.
 */
320 static inline void zlog_tls_free(void *arg
)
322 struct zlog_tls
*zlog_tls
= arg
;
327 munmap(zlog_tls
->mmbuf
, TLS_LOG_BUF_SIZE
);
328 XFREE(MTYPE_LOG_TLSBUF
, zlog_tls
);
/* Push all buffered messages on this thread out to every log target, then
 * reset the buffer write position. NOTE(review): the early-return guards,
 * per-target logfn NULL check, the nmsgs reset and braces are missing from
 * this view.
 */
331 void zlog_tls_buffer_flush(void)
333 struct zlog_target
*zt
;
334 struct zlog_tls
*zlog_tls
= zlog_tls_get();
/* nothing buffered -> nothing to do */
338 if (!zlog_tls
->nmsgs
)
/* hand the whole msgp[] batch to each target in one call */
342 frr_each (zlog_targets
, &zlog_targets
, zt
) {
346 zt
->logfn(zt
, zlog_tls
->msgp
, zlog_tls
->nmsgs
);
/* restart text accumulation at the front of the mmap buffer */
350 zlog_tls
->bufpos
= 0;
/* Log one message without per-thread buffering: format into a stack-based
 * zlog_msg and deliver it immediately to each interested target.
 * NOTE(review): the stackbuf declaration, the "msg = &stackmsg" line,
 * remaining initializers, va_end and braces are missing from this view.
 */
355 static void vzlog_notls(const struct xref_logmsg
*xref
, int prio
,
356 const char *fmt
, va_list ap
)
358 struct zlog_target
*zt
;
/* message lives entirely on the stack in this path */
359 struct zlog_msg stackmsg
= {
360 .prio
= prio
& LOG_PRIMASK
,
366 clock_gettime(CLOCK_REALTIME
, &msg
->ts
);
/* per the file-top comment: always va_copy, targets may format repeatedly */
367 va_copy(msg
->args
, ap
);
368 msg
->stackbuf
= stackbuf
;
369 msg
->stackbufsz
= sizeof(stackbuf
);
372 frr_each (zlog_targets
, &zlog_targets
, zt
) {
/* numerically higher prio = less severe; skip uninterested targets */
373 if (prio
> zt
->prio_min
)
378 zt
->logfn(zt
, &msg
, 1);
/* text was heap-allocated only if it outgrew the stack buffer */
383 if (msg
->text
&& msg
->text
!= stackbuf
)
384 XFREE(MTYPE_LOG_MESSAGE
, msg
->text
);
/* Log one message using the thread's mmap'd buffer: format directly into
 * the mmap region (crash-persistent), defer delivery until flush unless the
 * message is high-priority ("immediate"). NOTE(review): numerous lines are
 * missing from this view — the ignoremsg/immediate decision branches, the
 * early flushes when the slot table or byte buffer is full, "buf"
 * declaration, nmsgs increment, and braces.
 */
387 static void vzlog_tls(struct zlog_tls
*zlog_tls
, const struct xref_logmsg
*xref
,
388 int prio
, const char *fmt
, va_list ap
)
390 struct zlog_target
*zt
;
391 struct zlog_msg
*msg
;
393 bool ignoremsg
= true;
394 bool immediate
= false;
396 /* avoid further processing cost if no target wants this message */
398 frr_each (zlog_targets
, &zlog_targets
, zt
) {
399 if (prio
> zt
->prio_min
)
/* claim the next free message slot */
409 msg
= &zlog_tls
->msgs
[zlog_tls
->nmsgs
];
/* slot table full -> (flush; line not visible here) */
411 if (zlog_tls
->nmsgs
== array_size(zlog_tls
->msgs
))
414 memset(msg
, 0, sizeof(*msg
));
415 clock_gettime(CLOCK_REALTIME
, &msg
->ts
);
416 va_copy(msg
->args
, ap
);
/* format target = remaining space in the mmap'd buffer */
417 msg
->stackbuf
= buf
= zlog_tls
->mmbuf
+ zlog_tls
->bufpos
;
418 msg
->stackbufsz
= TLS_LOG_BUF_SIZE
- zlog_tls
->bufpos
- 1;
420 msg
->prio
= prio
& LOG_PRIMASK
;
/* LOG_INFO and worse can wait for flush; more severe goes out now */
422 if (msg
->prio
< LOG_INFO
)
426 /* messages written later need to take the formatting cost
427 * immediately since we can't hold a reference on varargs
429 zlog_msg_text(msg
, NULL
);
431 if (msg
->text
!= buf
)
432 /* zlog_msg_text called malloc() on us :( */
/* account for the text plus its terminating NUL */
435 zlog_tls
->bufpos
+= msg
->textlen
+ 1;
436 /* write a second \0 to mark current end position
437 * (in case of crash this signals end of unwritten log
438 * messages in mmap'd logbuf file)
440 zlog_tls
->mmbuf
[zlog_tls
->bufpos
] = '\0';
442 /* avoid malloc() for next message */
443 if (TLS_LOG_BUF_SIZE
- zlog_tls
->bufpos
< 256)
449 zlog_tls_buffer_flush();
/* immediate path: free heap text if formatting spilled out of the buffer */
452 if (msg
->text
&& msg
->text
!= buf
)
453 XFREE(MTYPE_LOG_MESSAGE
, msg
->text
);
/* Public va_list entry point: emit tracepoints (if built in), then dispatch
 * to the buffered (TLS) or unbuffered path depending on whether this thread
 * has a buffer. NOTE(review): the tracing #ifdef, the va_copy into "copy",
 * the switch on priority, the if/else joining the two dispatch calls, and
 * braces are all missing from this view.
 */
456 void vzlogx(const struct xref_logmsg
*xref
, int prio
,
457 const char *fmt
, va_list ap
)
459 struct zlog_tls
*zlog_tls
= zlog_tls_get();
/* tracing path formats its own copy of the message text */
464 char *msg
= vasprintfrr(MTYPE_LOG_MESSAGE
, fmt
, copy
);
/* one tracepoint per syslog severity level */
468 frrtracelog(TRACE_ERR
, msg
);
471 frrtracelog(TRACE_WARNING
, msg
);
474 frrtracelog(TRACE_DEBUG
, msg
);
477 frrtracelog(TRACE_DEBUG
, msg
);
481 frrtracelog(TRACE_INFO
, msg
);
486 XFREE(MTYPE_LOG_MESSAGE
, msg
);
/* buffered if this thread ran zlog_tls_buffer_init(), else direct */
490 vzlog_tls(zlog_tls
, xref
, prio
, fmt
, ap
);
492 vzlog_notls(xref
, prio
, fmt
, ap
);
/* Async-signal-safe logging (for crash handlers): split preformatted text
 * on newlines and hand each line to targets that provide a sigsafe
 * callback. No allocation, no formatting. NOTE(review): the outer
 * line-splitting loop, the nlpos-NULL fallback, the text-advance lines,
 * "continue" statements and braces are missing from this view.
 */
495 void zlog_sigsafe(const char *text
, size_t len
)
497 struct zlog_target
*zt
;
498 const char *end
= text
+ len
, *nlpos
;
501 nlpos
= memchr(text
, '\n', end
- text
);
505 frr_each (zlog_targets
, &zlog_targets
, zt
) {
/* crash output is LOG_CRIT; skip targets filtering it out */
506 if (LOG_CRIT
> zt
->prio_min
)
/* sigsafe delivery is optional per target */
508 if (!zt
->logfn_sigsafe
)
511 zt
->logfn_sigsafe(zt
, text
, nlpos
- text
);
/* Trivial accessors for log targets. NOTE(review): the bodies (presumably
 * returning msg->prio and msg->xref) are missing from this view — confirm
 * against the original file.
 */
521 int zlog_msg_prio(struct zlog_msg
*msg
)
526 const struct xref_logmsg
*zlog_msg_xref(struct zlog_msg
*msg
)
/* Lazily format a message's text: "[xref-uid][EC n] " header followed by
 * the printfrr-expanded format string. Result is cached in msg->text /
 * msg->textlen; formats into the caller-provided stack buffer first and
 * falls back to XMALLOC when it doesn't fit. NOTE(review): the "if
 * (!msg->text)" guard, struct fbuf initializer header, va_end calls,
 * "hdrlen = need" assignment, the outsz/return handling and braces are
 * missing from this view.
 */
531 const char *zlog_msg_text(struct zlog_msg
*msg
, size_t *textlen
)
536 size_t need
= 0, hdrlen
;
/* fbuf bounded-output descriptor over the stack buffer */
538 .buf
= msg
->stackbuf
,
539 .pos
= msg
->stackbuf
,
540 .len
= msg
->stackbufsz
,
/* snapshot the prefix toggles once per message */
543 do_ec
= atomic_load_explicit(&zlog_ec
, memory_order_relaxed
);
544 do_xid
= atomic_load_explicit(&zlog_xid
, memory_order_relaxed
);
/* "[XXXXX-XXXXX]" unique message ID, if present and enabled */
546 if (msg
->xref
&& do_xid
&& msg
->xref
->xref
.xrefdata
->uid
[0]) {
547 need
+= bputch(&fb
, '[');
548 need
+= bputs(&fb
, msg
->xref
->xref
.xrefdata
->uid
);
549 need
+= bputch(&fb
, ']');
/* "[EC n]" error code, if present and enabled */
551 if (msg
->xref
&& do_ec
&& msg
->xref
->ec
)
552 need
+= bprintfrr(&fb
, "[EC %u]", msg
->xref
->ec
);
554 need
+= bputch(&fb
, ' ');
/* header must always fit in the stack buffer */
557 assert(hdrlen
< msg
->stackbufsz
);
559 va_copy(args
, msg
->args
);
560 need
+= vbprintfrr(&fb
, msg
->fmt
, args
);
564 need
+= bputch(&fb
, '\0');
566 if (need
<= msg
->stackbufsz
)
567 msg
->text
= msg
->stackbuf
;
/* didn't fit: allocate exact size, keep the already-formatted header,
 * re-run only the body formatting into the new buffer
 */
569 msg
->text
= XMALLOC(MTYPE_LOG_MESSAGE
, need
);
571 memcpy(msg
->text
, msg
->stackbuf
, hdrlen
);
575 fb
.pos
= msg
->text
+ hdrlen
;
577 va_copy(args
, msg
->args
);
578 vbprintfrr(&fb
, msg
->fmt
, args
);
585 *textlen
= msg
->textlen
;
/* Timestamp flag helpers for zlog_msg_ts():
 * ZLOG_TS_FORMAT — mask of the mutually-exclusive output format bits.
 * ZLOG_TS_FLAGS  — everything except the sub-second precision field.
 * Fix: parenthesize the ZLOG_TS_FLAGS expansion; an unparenthesized
 * "~ZLOG_TS_PREC" invites operator-precedence surprises when the macro is
 * combined with other operators at its expansion site.
 */
#define ZLOG_TS_FORMAT (ZLOG_TS_ISO8601 | ZLOG_TS_LEGACY)
#define ZLOG_TS_FLAGS (~ZLOG_TS_PREC)
/* Format a message's timestamp into "out" per "flags": ISO8601 or legacy
 * ("/" + space-separated) format, local or UTC, with 0-9 sub-second digits
 * (ZLOG_TS_PREC). The full-precision ISO8601 string is cached in
 * msg->ts_str and only re-rendered when flags change the UTC-ness.
 * NOTE(review): the "struct tm tm;"/"size_t len1" declarations, early
 * "return 0" lines, the zonetail "%+03d:%02d" format-string line, the
 * character-swap loop body for legacy format, and braces are missing from
 * this view.
 */
592 size_t zlog_msg_ts(struct zlog_msg
*msg
, char *out
, size_t outsz
,
/* caller must pick exactly one output format */
597 if (!(flags
& ZLOG_TS_FORMAT
))
/* (re)render cache if empty or cached UTC-ness differs from request */
600 if (!(msg
->ts_flags
& ZLOG_TS_FORMAT
) ||
601 ((msg
->ts_flags
^ flags
) & ZLOG_TS_UTC
)) {
604 if (flags
& ZLOG_TS_UTC
)
605 gmtime_r(&msg
->ts
.tv_sec
, &tm
);
607 localtime_r(&msg
->ts
.tv_sec
, &tm
);
/* seconds-precision ISO8601 base */
609 strftime(msg
->ts_str
, sizeof(msg
->ts_str
),
610 "%Y-%m-%dT%H:%M:%S", &tm
);
/* zone suffix: "Z" for UTC, "+hh:mm" offset otherwise */
612 if (flags
& ZLOG_TS_UTC
) {
613 msg
->ts_zonetail
[0] = 'Z';
614 msg
->ts_zonetail
[1] = '\0';
616 snprintfrr(msg
->ts_zonetail
, sizeof(msg
->ts_zonetail
),
618 (int)(tm
.tm_gmtoff
/ 3600),
619 (int)(labs(tm
.tm_gmtoff
) / 60) % 60);
/* append ".%09lu" nanoseconds; ts_dot remembers where "." begins */
621 msg
->ts_dot
= msg
->ts_str
+ strlen(msg
->ts_str
);
622 snprintfrr(msg
->ts_dot
,
623 msg
->ts_str
+ sizeof(msg
->ts_str
) - msg
->ts_dot
,
624 ".%09lu", (unsigned long)msg
->ts
.tv_nsec
);
626 msg
->ts_flags
= ZLOG_TS_ISO8601
| (flags
& ZLOG_TS_UTC
);
/* truncate to requested precision: base length + "." + N digits */
629 len1
= flags
& ZLOG_TS_PREC
;
630 len1
= (msg
->ts_dot
- msg
->ts_str
) + (len1
? len1
+ 1 : 0);
632 if (len1
> strlen(msg
->ts_str
))
633 len1
= strlen(msg
->ts_str
);
635 if (flags
& ZLOG_TS_LEGACY
) {
636 if (len1
+ 1 > outsz
)
639 /* just swap out the formatting, faster than redoing it */
640 for (char *p
= msg
->ts_str
; p
< msg
->ts_str
+ len1
; p
++) {
/* ISO8601 output: timestamp + zone tail, NUL-terminated */
655 size_t len2
= strlen(msg
->ts_zonetail
);
657 if (len1
+ len2
+ 1 > outsz
)
659 memcpy(out
, msg
->ts_str
, len1
);
660 memcpy(out
+ len1
, msg
->ts_zonetail
, len2
);
661 out
[len1
+ len2
] = '\0';
666 void zlog_set_prefix_ec(bool enable
)
668 atomic_store_explicit(&zlog_ec
, enable
, memory_order_relaxed
);
671 bool zlog_get_prefix_ec(void)
673 return atomic_load_explicit(&zlog_ec
, memory_order_relaxed
);
676 void zlog_set_prefix_xid(bool enable
)
678 atomic_store_explicit(&zlog_xid
, enable
, memory_order_relaxed
);
681 bool zlog_get_prefix_xid(void)
683 return atomic_load_explicit(&zlog_xid
, memory_order_relaxed
);
686 /* setup functions */
/* Allocate a new log target of "size" bytes (targets embed struct
 * zlog_target at the front) and copy the basic fields from "oldzt".
 * NOTE(review): braces, the presumable "if (oldzt)" guard around the field
 * copies, and the "return newzt;" line are missing from this view.
 */
688 struct zlog_target
*zlog_target_clone(struct memtype
*mt
,
689 struct zlog_target
*oldzt
, size_t size
)
691 struct zlog_target
*newzt
;
/* zeroed allocation; caller's mt lets each target type use its own MTYPE */
693 newzt
= XCALLOC(mt
, size
);
695 newzt
->prio_min
= oldzt
->prio_min
;
696 newzt
->logfn
= oldzt
->logfn
;
697 newzt
->logfn_sigsafe
= oldzt
->logfn_sigsafe
;
/* Atomically swap a log target: add the replacement to the active list,
 * then remove the old one. NOTE(review): braces, the presumable NULL guards
 * on newzt/oldzt, and the return statement (presumably returning oldzt for
 * the caller to free) are missing from this view — confirm against the
 * original file.
 */
703 struct zlog_target
*zlog_target_replace(struct zlog_target
*oldzt
,
704 struct zlog_target
*newzt
)
/* add first so there is no window with zero targets */
707 zlog_targets_add_tail(&zlog_targets
, newzt
);
709 zlog_targets_del(&zlog_targets
, oldzt
);
/* Base directory for per-process crash/thread log buffer directories. */
716 #define TMPBASEDIR "/var/tmp/frr"
/* Full path of this process's tmpdir, filled in by zlog_init(). */
718 static char zlog_tmpdir
[MAXPATHLEN
];
/* Early/minimal log setup usable before zlog_init(): install a log-line
 * prefix and notify auxiliary-init hook subscribers.
 * NOTE(review): braces and a presumable "if (prefix)" guard before the
 * strlcpy are missing from this view.
 */
720 void zlog_aux_init(const char *prefix
, int prio_min
)
723 strlcpy(zlog_prefix
, prefix
, sizeof(zlog_prefix
));
725 hook_call(zlog_aux_init
, prefix
, prio_min
);
/* Full logging setup: record uid/gid for later chown of log files, build
 * the "proto[instance]: " prefix, create /var/tmp/frr/<progname>[-inst].<pid>
 * for crash/thread log buffers, open it as zlog_tmpdirfd, and fire the
 * zlog_init hook. Runs before privileges are dropped (cf. zlog_uid/zlog_gid
 * comment near the top of the file). NOTE(review): braces, the
 * instance/no-instance if/else structure, the zlog_uid/zlog_gid
 * assignments, the no-instance prefix format string, several error-path
 * lines and the #ifdef O_PATH selection are missing from this view.
 */
728 void zlog_init(const char *progname
, const char *protoname
,
729 unsigned short instance
, uid_t uid
, gid_t gid
)
/* with an instance number: "<progname>-<instance>.<pid>" tmpdir name */
735 snprintfrr(zlog_tmpdir
, sizeof(zlog_tmpdir
),
736 "/var/tmp/frr/%s-%d.%ld",
737 progname
, instance
, (long)getpid());
739 zlog_prefixsz
= snprintfrr(zlog_prefix
, sizeof(zlog_prefix
),
740 "%s[%d]: ", protoname
, instance
);
/* without an instance number: "<progname>.<pid>" tmpdir name */
742 snprintfrr(zlog_tmpdir
, sizeof(zlog_tmpdir
),
743 "/var/tmp/frr/%s.%ld",
744 progname
, (long)getpid());
746 zlog_prefixsz
= snprintfrr(zlog_prefix
, sizeof(zlog_prefix
),
/* EEXIST on the shared base dir is fine (another daemon made it) */
750 if (mkdir(TMPBASEDIR
, 0700) != 0) {
751 if (errno
!= EEXIST
) {
752 zlog_err("failed to mkdir \"%s\": %s",
753 TMPBASEDIR
, strerror(errno
));
/* chown now so cleanup works after the privilege drop */
757 chown(TMPBASEDIR
, zlog_uid
, zlog_gid
);
759 if (mkdir(zlog_tmpdir
, 0700) != 0) {
760 zlog_err("failed to mkdir \"%s\": %s",
761 zlog_tmpdir
, strerror(errno
));
/* O_PATH variant where available; plain O_DIRECTORY otherwise */
766 zlog_tmpdirfd
= open(zlog_tmpdir
,
767 O_PATH
| O_RDONLY
| O_CLOEXEC
);
769 zlog_tmpdirfd
= open(zlog_tmpdir
,
770 O_DIRECTORY
| O_RDONLY
| O_CLOEXEC
);
772 if (zlog_tmpdirfd
< 0) {
773 zlog_err("failed to open \"%s\": %s",
774 zlog_tmpdir
, strerror(errno
));
/* fchownat-on-fd where AT_EMPTY_PATH exists, path-based chown otherwise */
779 fchownat(zlog_tmpdirfd
, "", zlog_uid
, zlog_gid
, AT_EMPTY_PATH
);
781 chown(zlog_tmpdir
, zlog_uid
, zlog_gid
);
784 hook_call(zlog_init
, progname
, protoname
, instance
, uid
, gid
);
/* error-path variant: no tmpdir, so no crashlog / per-thread buffers */
788 zlog_err("crashlog and per-thread log buffering unavailable!");
789 hook_call(zlog_init
, progname
, protoname
, instance
, uid
, gid
);
/* Shutdown cleanup — NOTE(review): the enclosing function header is not
 * visible in this view (presumably "void zlog_fini(void)"); only the body
 * fragment survives. Fires the fini KOOH (reverse hook order), closes the
 * tmpdir fd and removes the per-process tmpdir.
 */
794 hook_call(zlog_fini
);
796 if (zlog_tmpdirfd
>= 0) {
797 close(zlog_tmpdirfd
);
800 if (rmdir(zlog_tmpdir
))
/* non-empty dir (leftover logbuf files kept for crash analysis) or error */
801 zlog_err("failed to rmdir \"%s\": %s",
802 zlog_tmpdir
, strerror(errno
));