lib/zlog.c
/*
 * Copyright (c) 2015-19 David Lamparter, for NetDEF, Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "zebra.h"

#include <unistd.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <time.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#include <pthread.h>

/* gettid() & co. */
#ifdef HAVE_PTHREAD_NP_H
#include <pthread_np.h>
#endif
#ifdef linux
#include <sys/syscall.h>
#endif
#ifdef __FreeBSD__
#include <sys/thr.h>
#endif
#ifdef __NetBSD__
#include <lwp.h>
#endif
#ifdef __DragonFly__
#include <sys/lwp.h>
#endif
#ifdef __APPLE__
#include <mach/mach_traps.h>
#endif

#include "memory.h"
#include "atomlist.h"
#include "printfrr.h"
#include "frrcu.h"
#include "zlog.h"
#include "libfrr_trace.h"

DEFINE_MTYPE_STATIC(LIB, LOG_MESSAGE, "log message");
DEFINE_MTYPE_STATIC(LIB, LOG_TLSBUF, "log thread-local buffer");

DEFINE_HOOK(zlog_init, (const char *progname, const char *protoname,
            unsigned short instance, uid_t uid, gid_t gid),
            (progname, protoname, instance, uid, gid));
DEFINE_KOOH(zlog_fini, (), ());
DEFINE_HOOK(zlog_aux_init, (const char *prefix, int prio_min),
            (prefix, prio_min));

char zlog_prefix[128];
size_t zlog_prefixsz;
int zlog_tmpdirfd = -1;

static atomic_bool zlog_ec = true, zlog_xid = true;

/* these are kept around because logging is initialized (and directories
 * & files created) before zprivs code switches to the FRR user; therefore
 * we need to chown() things so we don't get permission errors later when
 * trying to delete things on shutdown
 */
static uid_t zlog_uid = -1;
static gid_t zlog_gid = -1;

DECLARE_ATOMLIST(zlog_targets, struct zlog_target, head);
static struct zlog_targets_head zlog_targets;

/* Global setting for buffered vs immediate output. The default is
 * per-pthread buffering.
 */
static bool default_immediate;

/* cf. zlog.h for additional comments on this struct.
 *
 * Note: you MUST NOT pass the format string + va_list to non-FRR format
 * string functions (e.g. vsyslog, sd_journal_printv, ...) since FRR uses an
 * extended printf() with additional formats (%pI4 and the like).
 *
 * Also remember to use va_copy() on args.
 */
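
/* For illustration, a target that forwards messages to a non-FRR sink should
 * therefore render them first and pass only the finished string.  A minimal
 * sketch (my_logfn is a hypothetical target callback, not part of this file):
 *
 *	static void my_logfn(struct zlog_target *zt, struct zlog_msg *msgs[],
 *			     size_t nmsgs)
 *	{
 *		for (size_t i = 0; i < nmsgs; i++) {
 *			size_t len;
 *			const char *text = zlog_msg_text(msgs[i], &len);
 *
 *			syslog(zlog_msg_prio(msgs[i]), "%s", text);
 *		}
 *	}
 */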

struct zlog_msg {
        struct timespec ts;
        int prio;

        const char *fmt;
        va_list args;
        const struct xref_logmsg *xref;

        char *stackbuf;
        size_t stackbufsz;
        char *text;
        size_t textlen;

        /* This is always ISO8601 with sub-second precision 9 here, it's
         * converted for callers as needed.  ts_dot points to the "."
         * separating sub-seconds.  ts_zonetail is "Z" or "+00:00" for the
         * local time offset.
         *
         * Valid if ZLOG_TS_ISO8601 is set.
         * (0 if timestamp has not been formatted yet)
         */
        uint32_t ts_flags;
        char ts_str[32], *ts_dot, ts_zonetail[8];
};

/* thread-local log message buffering
 *
 * This is strictly optional and set up by calling zlog_tls_buffer_init()
 * on a particular thread.
 *
 * If in use, this will create a temporary file in /var/tmp which is used as
 * memory-mapped MAP_SHARED log message buffer.  The idea there is that buffer
 * access doesn't require any syscalls, but in case of a crash the kernel
 * knows to sync the memory back to disk.  This way the user can still get the
 * last log messages if there were any left unwritten in the buffer.
 *
 * Sizing this dynamically isn't particularly useful, so here's an 8k buffer
 * with a message limit of 64 messages.  Message metadata (e.g. priority,
 * timestamp) aren't in the mmap region, so they're lost on crash, but we can
 * live with that.
 */
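
/* Layout of the mmap'd buffer (as written by vzlog_tls() below):
 *
 *	"first message\0second message\0last message\0\0<stale data...>"
 *
 * i.e. messages are appended back to back, each NUL-terminated, with one
 * extra NUL marking the current end.  After a crash, reading strings until
 * an empty one is hit recovers the messages that were never written out.
 */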

#if defined(HAVE_OPENAT) && defined(HAVE_UNLINKAT)
#define CAN_DO_TLS 1
#endif

#define TLS_LOG_BUF_SIZE 8192
#define TLS_LOG_MAXMSG 64

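/* msgs[] holds the per-message metadata; msgp[] is the same set as an array
 * of pointers, which is what the zlog_target logfn() callbacks take.  It is
 * filled in once at init so flushing doesn't have to rebuild it each time.
 */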
struct zlog_tls {
        char *mmbuf;
        size_t bufpos;
        bool do_unlink;

        size_t nmsgs;
        struct zlog_msg msgs[TLS_LOG_MAXMSG];
        struct zlog_msg *msgp[TLS_LOG_MAXMSG];
};

static inline void zlog_tls_free(void *arg);

/* proper ELF TLS is a bit faster than pthread_[gs]etspecific, so if it's
 * available we'll use it here
 */

#ifdef __OpenBSD__
static pthread_key_t zlog_tls_key;

static void zlog_tls_key_init(void) __attribute__((_CONSTRUCTOR(500)));
static void zlog_tls_key_init(void)
{
        pthread_key_create(&zlog_tls_key, zlog_tls_free);
}

static void zlog_tls_key_fini(void) __attribute__((_DESTRUCTOR(500)));
static void zlog_tls_key_fini(void)
{
        pthread_key_delete(zlog_tls_key);
}

static inline struct zlog_tls *zlog_tls_get(void)
{
        return pthread_getspecific(zlog_tls_key);
}

static inline void zlog_tls_set(struct zlog_tls *val)
{
        pthread_setspecific(zlog_tls_key, val);
}
#else
# ifndef thread_local
# define thread_local __thread
# endif

static thread_local struct zlog_tls *zlog_tls_var
        __attribute__((tls_model("initial-exec")));

static inline struct zlog_tls *zlog_tls_get(void)
{
        return zlog_tls_var;
}

static inline void zlog_tls_set(struct zlog_tls *val)
{
        zlog_tls_var = val;
}
#endif

#ifdef CAN_DO_TLS
static long zlog_gettid(void)
{
        long rv = -1;
#ifdef HAVE_PTHREAD_GETTHREADID_NP
        rv = pthread_getthreadid_np();
#elif defined(linux)
        rv = syscall(__NR_gettid);
#elif defined(__NetBSD__)
        rv = _lwp_self();
#elif defined(__FreeBSD__)
        thr_self(&rv);
#elif defined(__DragonFly__)
        rv = lwp_gettid();
#elif defined(__OpenBSD__)
        rv = getthrid();
#elif defined(__sun)
        rv = pthread_self();
#elif defined(__APPLE__)
        rv = mach_thread_self();
        mach_port_deallocate(mach_task_self(), rv);
#endif
        return rv;
}

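/* Set up the calling thread's log buffer.  This creates and mmaps a
 * "logbuf.<tid>" file under zlog_tmpdirfd so buffered messages survive a
 * crash; if that fails, an anonymous private mapping is used instead (still
 * buffered, but lost on crash).  Does nothing if the buffer already exists
 * or zlog_init() hasn't set up the temporary directory yet.
 */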
void zlog_tls_buffer_init(void)
{
        struct zlog_tls *zlog_tls;
        char mmpath[MAXPATHLEN];
        int mmfd;
        size_t i;

        zlog_tls = zlog_tls_get();

        if (zlog_tls || zlog_tmpdirfd < 0)
                return;

        zlog_tls = XCALLOC(MTYPE_LOG_TLSBUF, sizeof(*zlog_tls));
        for (i = 0; i < array_size(zlog_tls->msgp); i++)
                zlog_tls->msgp[i] = &zlog_tls->msgs[i];

        snprintfrr(mmpath, sizeof(mmpath), "logbuf.%ld", zlog_gettid());

        mmfd = openat(zlog_tmpdirfd, mmpath,
                      O_RDWR | O_CREAT | O_EXCL | O_CLOEXEC, 0600);
        if (mmfd < 0) {
                zlog_err("failed to open thread log buffer \"%s\": %s",
                         mmpath, strerror(errno));
                goto out_anon;
        }
        fchown(mmfd, zlog_uid, zlog_gid);

#ifdef HAVE_POSIX_FALLOCATE
        if (posix_fallocate(mmfd, 0, TLS_LOG_BUF_SIZE) != 0)
                /* note next statement is under above if() */
#endif
        if (ftruncate(mmfd, TLS_LOG_BUF_SIZE) < 0) {
                zlog_err("failed to allocate thread log buffer \"%s\": %s",
                         mmpath, strerror(errno));
                goto out_anon_unlink;
        }

        zlog_tls->mmbuf = mmap(NULL, TLS_LOG_BUF_SIZE, PROT_READ | PROT_WRITE,
                               MAP_SHARED, mmfd, 0);
        if (zlog_tls->mmbuf == MAP_FAILED) {
                zlog_err("failed to mmap thread log buffer \"%s\": %s",
                         mmpath, strerror(errno));
                goto out_anon_unlink;
        }
        zlog_tls->do_unlink = true;

        close(mmfd);
        zlog_tls_set(zlog_tls);
        return;

out_anon_unlink:
        unlinkat(zlog_tmpdirfd, mmpath, 0);
        close(mmfd);
out_anon:

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
        zlog_tls->mmbuf = mmap(NULL, TLS_LOG_BUF_SIZE, PROT_READ | PROT_WRITE,
                               MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

        if (zlog_tls->mmbuf == MAP_FAILED) {
                zlog_err("failed to anonymous-mmap thread log buffer: %s",
                         strerror(errno));
                XFREE(MTYPE_LOG_TLSBUF, zlog_tls);
                zlog_tls_set(NULL);
                return;
        }

        zlog_tls_set(zlog_tls);
}

void zlog_tls_buffer_fini(void)
{
        char mmpath[MAXPATHLEN];
        struct zlog_tls *zlog_tls = zlog_tls_get();
        bool do_unlink = zlog_tls ? zlog_tls->do_unlink : false;

        zlog_tls_buffer_flush();

        zlog_tls_free(zlog_tls);
        zlog_tls_set(NULL);

        snprintfrr(mmpath, sizeof(mmpath), "logbuf.%ld", zlog_gettid());
        if (do_unlink && unlinkat(zlog_tmpdirfd, mmpath, 0))
                zlog_err("unlink logbuf: %s (%d)", strerror(errno), errno);
}

#else /* !CAN_DO_TLS */
void zlog_tls_buffer_init(void)
{
}

void zlog_tls_buffer_fini(void)
{
}
#endif

static inline void zlog_tls_free(void *arg)
{
        struct zlog_tls *zlog_tls = arg;

        if (!zlog_tls)
                return;

        munmap(zlog_tls->mmbuf, TLS_LOG_BUF_SIZE);
        XFREE(MTYPE_LOG_TLSBUF, zlog_tls);
}

void zlog_tls_buffer_flush(void)
{
        struct zlog_target *zt;
        struct zlog_tls *zlog_tls = zlog_tls_get();

        if (!zlog_tls)
                return;
        if (!zlog_tls->nmsgs)
                return;

        rcu_read_lock();
        frr_each (zlog_targets, &zlog_targets, zt) {
                if (!zt->logfn)
                        continue;

                zt->logfn(zt, zlog_tls->msgp, zlog_tls->nmsgs);
        }
        rcu_read_unlock();

        zlog_tls->bufpos = 0;
        zlog_tls->nmsgs = 0;
}


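/* Fallback path for threads without a TLS buffer (not initialized, or the
 * platform can't do it): format onto a 512-byte stack buffer and hand the
 * message to all targets immediately.
 */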
static void vzlog_notls(const struct xref_logmsg *xref, int prio,
                        const char *fmt, va_list ap)
{
        struct zlog_target *zt;
        struct zlog_msg stackmsg = {
                .prio = prio & LOG_PRIMASK,
                .fmt = fmt,
                .xref = xref,
        }, *msg = &stackmsg;
        char stackbuf[512];

        clock_gettime(CLOCK_REALTIME, &msg->ts);
        va_copy(msg->args, ap);
        msg->stackbuf = stackbuf;
        msg->stackbufsz = sizeof(stackbuf);

        rcu_read_lock();
        frr_each (zlog_targets, &zlog_targets, zt) {
                if (prio > zt->prio_min)
                        continue;
                if (!zt->logfn)
                        continue;

                zt->logfn(zt, &msg, 1);
        }
        rcu_read_unlock();

        va_end(msg->args);
        if (msg->text && msg->text != stackbuf)
                XFREE(MTYPE_LOG_MESSAGE, msg->text);
}

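/* Buffered path: append the message to the thread's buffer.  The text is
 * rendered right away when the message will only be written out later (the
 * va_list can't be kept around); messages more severe than LOG_INFO, a full
 * buffer, or global "immediate" mode force a flush on the spot.
 */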
static void vzlog_tls(struct zlog_tls *zlog_tls, const struct xref_logmsg *xref,
                      int prio, const char *fmt, va_list ap)
{
        struct zlog_target *zt;
        struct zlog_msg *msg;
        char *buf;
        bool ignoremsg = true;
        bool immediate = default_immediate;

        /* avoid further processing cost if no target wants this message */
        rcu_read_lock();
        frr_each (zlog_targets, &zlog_targets, zt) {
                if (prio > zt->prio_min)
                        continue;
                ignoremsg = false;
                break;
        }
        rcu_read_unlock();

        if (ignoremsg)
                return;

        msg = &zlog_tls->msgs[zlog_tls->nmsgs];
        zlog_tls->nmsgs++;
        if (zlog_tls->nmsgs == array_size(zlog_tls->msgs))
                immediate = true;

        memset(msg, 0, sizeof(*msg));
        clock_gettime(CLOCK_REALTIME, &msg->ts);
        va_copy(msg->args, ap);
        msg->stackbuf = buf = zlog_tls->mmbuf + zlog_tls->bufpos;
        msg->stackbufsz = TLS_LOG_BUF_SIZE - zlog_tls->bufpos - 1;
        msg->fmt = fmt;
        msg->prio = prio & LOG_PRIMASK;
        msg->xref = xref;
        if (msg->prio < LOG_INFO)
                immediate = true;

        if (!immediate) {
                /* messages that will only be written out later must pay the
                 * formatting cost now, since we can't keep a reference to
                 * the varargs
                 */
                zlog_msg_text(msg, NULL);

                if (msg->text != buf)
                        /* zlog_msg_text called malloc() on us :( */
                        immediate = true;
                else {
                        zlog_tls->bufpos += msg->textlen + 1;
                        /* write a second \0 to mark current end position
                         * (in case of crash this signals end of unwritten log
                         * messages in mmap'd logbuf file)
                         */
                        zlog_tls->mmbuf[zlog_tls->bufpos] = '\0';

                        /* avoid malloc() for next message */
                        if (TLS_LOG_BUF_SIZE - zlog_tls->bufpos < 256)
                                immediate = true;
                }
        }

        if (immediate)
                zlog_tls_buffer_flush();

        va_end(msg->args);
        if (msg->text && msg->text != buf)
                XFREE(MTYPE_LOG_MESSAGE, msg->text);
}

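/* Common entry point behind the zlog_*() logging macros (they pass their
 * xref and priority through here): picks the buffered or unbuffered path
 * depending on whether this thread called zlog_tls_buffer_init().
 */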
void vzlogx(const struct xref_logmsg *xref, int prio,
            const char *fmt, va_list ap)
{
        struct zlog_tls *zlog_tls = zlog_tls_get();

#ifdef HAVE_LTTNG
        va_list copy;
        va_copy(copy, ap);
        char *msg = vasprintfrr(MTYPE_LOG_MESSAGE, fmt, copy);

        switch (prio) {
        case LOG_ERR:
                frrtracelog(TRACE_ERR, msg);
                break;
        case LOG_WARNING:
                frrtracelog(TRACE_WARNING, msg);
                break;
        case LOG_DEBUG:
                frrtracelog(TRACE_DEBUG, msg);
                break;
        case LOG_NOTICE:
                frrtracelog(TRACE_DEBUG, msg);
                break;
        case LOG_INFO:
        default:
                frrtracelog(TRACE_INFO, msg);
                break;
        }

        va_end(copy);
        XFREE(MTYPE_LOG_MESSAGE, msg);
#endif

        if (zlog_tls)
                vzlog_tls(zlog_tls, xref, prio, fmt, ap);
        else
                vzlog_notls(xref, prio, fmt, ap);
}

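/* Async-signal-safe output of pre-formatted text, e.g. from crash handlers:
 * the text is split on newlines and each line is handed to every target that
 * provides a logfn_sigsafe callback and accepts LOG_CRIT-level messages.
 * Nothing is allocated or formatted here.
 */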
void zlog_sigsafe(const char *text, size_t len)
{
        struct zlog_target *zt;
        const char *end = text + len, *nlpos;

        while (text < end) {
                nlpos = memchr(text, '\n', end - text);
                if (!nlpos)
                        nlpos = end;

                frr_each (zlog_targets, &zlog_targets, zt) {
                        if (LOG_CRIT > zt->prio_min)
                                continue;
                        if (!zt->logfn_sigsafe)
                                continue;

                        zt->logfn_sigsafe(zt, text, nlpos - text);
                }

                if (nlpos == end)
                        break;
                text = nlpos + 1;
        }
}


int zlog_msg_prio(struct zlog_msg *msg)
{
        return msg->prio;
}

const struct xref_logmsg *zlog_msg_xref(struct zlog_msg *msg)
{
        return msg->xref;
}

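/* Render (and cache) the message text.  The result is
 * "[<xref unique id>][EC <code>] <formatted message>", built into the
 * caller-provided stack/mmap buffer if it fits, otherwise into a
 * MTYPE_LOG_MESSAGE heap allocation.  Repeated calls (e.g. once per target)
 * reuse the cached text.
 */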
const char *zlog_msg_text(struct zlog_msg *msg, size_t *textlen)
{
        if (!msg->text) {
                va_list args;
                bool do_xid, do_ec;
                size_t need = 0, hdrlen;
                struct fbuf fb = {
                        .buf = msg->stackbuf,
                        .pos = msg->stackbuf,
                        .len = msg->stackbufsz,
                };

                do_ec = atomic_load_explicit(&zlog_ec, memory_order_relaxed);
                do_xid = atomic_load_explicit(&zlog_xid, memory_order_relaxed);

                if (msg->xref && do_xid && msg->xref->xref.xrefdata->uid[0]) {
                        need += bputch(&fb, '[');
                        need += bputs(&fb, msg->xref->xref.xrefdata->uid);
                        need += bputch(&fb, ']');
                }
                if (msg->xref && do_ec && msg->xref->ec)
                        need += bprintfrr(&fb, "[EC %u]", msg->xref->ec);
                if (need)
                        need += bputch(&fb, ' ');

                hdrlen = need;
                assert(hdrlen < msg->stackbufsz);

                va_copy(args, msg->args);
                need += vbprintfrr(&fb, msg->fmt, args);
                va_end(args);

                msg->textlen = need;
                need += bputch(&fb, '\0');

                if (need <= msg->stackbufsz)
                        msg->text = msg->stackbuf;
                else {
                        msg->text = XMALLOC(MTYPE_LOG_MESSAGE, need);

                        memcpy(msg->text, msg->stackbuf, hdrlen);

                        fb.buf = msg->text;
                        fb.len = need;
                        fb.pos = msg->text + hdrlen;

                        va_copy(args, msg->args);
                        vbprintfrr(&fb, msg->fmt, args);
                        va_end(args);

                        bputch(&fb, '\0');
                }
        }
        if (textlen)
                *textlen = msg->textlen;
        return msg->text;
}

#define ZLOG_TS_FORMAT (ZLOG_TS_ISO8601 | ZLOG_TS_LEGACY)
#define ZLOG_TS_FLAGS ~ZLOG_TS_PREC

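/* Format the message timestamp into out/outsz and return the length written
 * (0 on failure or if no format flag is given).  flags select ZLOG_TS_ISO8601
 * or ZLOG_TS_LEGACY ("YYYY/MM/DD HH:MM:SS") output, ZLOG_TS_UTC for UTC
 * instead of local time, and the ZLOG_TS_PREC bits give the number of
 * sub-second digits to include.
 */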
size_t zlog_msg_ts(struct zlog_msg *msg, char *out, size_t outsz,
                   uint32_t flags)
{
        size_t len1;

        if (!(flags & ZLOG_TS_FORMAT))
                return 0;

        if (!(msg->ts_flags & ZLOG_TS_FORMAT) ||
            ((msg->ts_flags ^ flags) & ZLOG_TS_UTC)) {
                struct tm tm;

                if (flags & ZLOG_TS_UTC)
                        gmtime_r(&msg->ts.tv_sec, &tm);
                else
                        localtime_r(&msg->ts.tv_sec, &tm);

                strftime(msg->ts_str, sizeof(msg->ts_str),
                         "%Y-%m-%dT%H:%M:%S", &tm);

                if (flags & ZLOG_TS_UTC) {
                        msg->ts_zonetail[0] = 'Z';
                        msg->ts_zonetail[1] = '\0';
                } else
                        snprintfrr(msg->ts_zonetail, sizeof(msg->ts_zonetail),
                                   "%+03d:%02d",
                                   (int)(tm.tm_gmtoff / 3600),
                                   (int)(labs(tm.tm_gmtoff) / 60) % 60);

                msg->ts_dot = msg->ts_str + strlen(msg->ts_str);
                snprintfrr(msg->ts_dot,
                           msg->ts_str + sizeof(msg->ts_str) - msg->ts_dot,
                           ".%09lu", (unsigned long)msg->ts.tv_nsec);

                msg->ts_flags = ZLOG_TS_ISO8601 | (flags & ZLOG_TS_UTC);
        }

        len1 = flags & ZLOG_TS_PREC;
        len1 = (msg->ts_dot - msg->ts_str) + (len1 ? len1 + 1 : 0);

        if (len1 > strlen(msg->ts_str))
                len1 = strlen(msg->ts_str);

        if (flags & ZLOG_TS_LEGACY) {
                if (len1 + 1 > outsz)
                        return 0;

                /* just swap out the formatting, faster than redoing it */
                for (char *p = msg->ts_str; p < msg->ts_str + len1; p++) {
                        switch (*p) {
                        case '-':
                                *out++ = '/';
                                break;
                        case 'T':
                                *out++ = ' ';
                                break;
                        default:
                                *out++ = *p;
                        }
                }
                *out = '\0';
                return len1;
        } else {
                size_t len2 = strlen(msg->ts_zonetail);

                if (len1 + len2 + 1 > outsz)
                        return 0;
                memcpy(out, msg->ts_str, len1);
                memcpy(out + len1, msg->ts_zonetail, len2);
                out[len1 + len2] = '\0';
                return len1 + len2;
        }
}

void zlog_set_prefix_ec(bool enable)
{
        atomic_store_explicit(&zlog_ec, enable, memory_order_relaxed);
}

bool zlog_get_prefix_ec(void)
{
        return atomic_load_explicit(&zlog_ec, memory_order_relaxed);
}

void zlog_set_prefix_xid(bool enable)
{
        atomic_store_explicit(&zlog_xid, enable, memory_order_relaxed);
}

bool zlog_get_prefix_xid(void)
{
        return atomic_load_explicit(&zlog_xid, memory_order_relaxed);
}

/* setup functions */

struct zlog_target *zlog_target_clone(struct memtype *mt,
                                      struct zlog_target *oldzt, size_t size)
{
        struct zlog_target *newzt;

        newzt = XCALLOC(mt, size);
        if (oldzt) {
                newzt->prio_min = oldzt->prio_min;
                newzt->logfn = oldzt->logfn;
                newzt->logfn_sigsafe = oldzt->logfn_sigsafe;
        }

        return newzt;
}

struct zlog_target *zlog_target_replace(struct zlog_target *oldzt,
                                        struct zlog_target *newzt)
{
        if (newzt)
                zlog_targets_add_tail(&zlog_targets, newzt);
        if (oldzt)
                zlog_targets_del(&zlog_targets, oldzt);
        return oldzt;
}
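
/* A typical target update therefore looks roughly like this (sketch only;
 * MTYPE_EXAMPLE_TARGET and my_logfn are placeholders, not defined here):
 *
 *	newzt = zlog_target_clone(MTYPE_EXAMPLE_TARGET, oldzt, sizeof(*newzt));
 *	newzt->prio_min = LOG_DEBUG;
 *	newzt->logfn = my_logfn;
 *	oldzt = zlog_target_replace(oldzt, newzt);
 *
 * Since log calls walk the target list under rcu_read_lock(), the returned
 * old target must only be freed after an RCU grace period (e.g. rcu_free()).
 */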

/*
 * Enable or disable 'immediate' output - default is to buffer
 * each pthread's messages.
 */
void zlog_set_immediate(bool set_p)
{
        default_immediate = set_p;
}

/* common init */

#define TMPBASEDIR "/var/tmp/frr"

static char zlog_tmpdir[MAXPATHLEN];

void zlog_aux_init(const char *prefix, int prio_min)
{
        if (prefix)
                strlcpy(zlog_prefix, prefix, sizeof(zlog_prefix));

        hook_call(zlog_aux_init, prefix, prio_min);
}

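/* Full initialization: records uid/gid for later chown()s (see comment at
 * the top of this file), sets the log prefix, and creates the per-process
 * directory /var/tmp/frr/<progname>[-<instance>].<pid> that holds the
 * per-thread crashlog buffers.  On any failure, logging still works but
 * without crashlog / per-thread buffering.
 */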
void zlog_init(const char *progname, const char *protoname,
               unsigned short instance, uid_t uid, gid_t gid)
{
        zlog_uid = uid;
        zlog_gid = gid;

        if (instance) {
                snprintfrr(zlog_tmpdir, sizeof(zlog_tmpdir),
                           "/var/tmp/frr/%s-%d.%ld",
                           progname, instance, (long)getpid());

                zlog_prefixsz = snprintfrr(zlog_prefix, sizeof(zlog_prefix),
                                           "%s[%d]: ", protoname, instance);
        } else {
                snprintfrr(zlog_tmpdir, sizeof(zlog_tmpdir),
                           "/var/tmp/frr/%s.%ld",
                           progname, (long)getpid());

                zlog_prefixsz = snprintfrr(zlog_prefix, sizeof(zlog_prefix),
                                           "%s: ", protoname);
        }

        if (mkdir(TMPBASEDIR, 0700) != 0) {
                if (errno != EEXIST) {
                        zlog_err("failed to mkdir \"%s\": %s",
                                 TMPBASEDIR, strerror(errno));
                        goto out_warn;
                }
        }
        chown(TMPBASEDIR, zlog_uid, zlog_gid);

        if (mkdir(zlog_tmpdir, 0700) != 0) {
                zlog_err("failed to mkdir \"%s\": %s",
                         zlog_tmpdir, strerror(errno));
                goto out_warn;
        }

#ifdef O_PATH
        zlog_tmpdirfd = open(zlog_tmpdir,
                             O_PATH | O_RDONLY | O_CLOEXEC);
#else
        zlog_tmpdirfd = open(zlog_tmpdir,
                             O_DIRECTORY | O_RDONLY | O_CLOEXEC);
#endif
        if (zlog_tmpdirfd < 0) {
                zlog_err("failed to open \"%s\": %s",
                         zlog_tmpdir, strerror(errno));
                goto out_warn;
        }

#ifdef AT_EMPTY_PATH
        fchownat(zlog_tmpdirfd, "", zlog_uid, zlog_gid, AT_EMPTY_PATH);
#else
        chown(zlog_tmpdir, zlog_uid, zlog_gid);
#endif

        hook_call(zlog_init, progname, protoname, instance, uid, gid);
        return;

out_warn:
        zlog_err("crashlog and per-thread log buffering unavailable!");
        hook_call(zlog_init, progname, protoname, instance, uid, gid);
}

void zlog_fini(void)
{
        hook_call(zlog_fini);

        if (zlog_tmpdirfd >= 0) {
                close(zlog_tmpdirfd);
                zlog_tmpdirfd = -1;

                if (rmdir(zlog_tmpdir))
                        zlog_err("failed to rmdir \"%s\": %s",
                                 zlog_tmpdir, strerror(errno));
        }
}