1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
9 #include "common/errno.h"
10 #include "common/safe_io.h"
11 #include "common/Clock.h"
12 #include "common/Graylog.h"
13 #include "common/valgrind.h"
15 #include "include/assert.h"
16 #include "include/compat.h"
17 #include "include/on_exit.h"
20 #include "SubsystemMap.h"
22 #define DEFAULT_MAX_NEW 100
23 #define DEFAULT_MAX_RECENT 10000
25 #define PREALLOC 1000000
// Process-wide registry of exit-time callbacks; set_flush_on_exit()
// registers log_on_exit() here so pending entries can be flushed when
// the program terminates.
31 static OnExitManager exit_callbacks
;
// Exit-time callback.  `p` is the heap-allocated Log** cell created by
// Log::set_flush_on_exit().  NOTE(review): this chunk is a mangled
// extraction -- the lines between the signature and the delete are not
// visible here (presumably a final flush through *p; confirm against
// the full file).
33 static void log_on_exit(void *p
)
// Frees only the indirect pointer cell; the Log object itself is
// deliberately leaked (see set_flush_on_exit()).
38 delete (Log
**)p
;// Delete allocated pointer (not Log object, the pointer only!)
// Construct a Log bound to SubsystemMap `s`.  Initializes the flush and
// queue mutexes plus the logger/flusher condition variables, and sets
// default per-sink thresholds and queue limits.
// NOTE(review): mangled extraction -- several initializer-list entries
// (e.g. the fd/uid/gid members) and the assert()s after each init call
// are not visible in this chunk.
41 Log::Log(SubsystemMap
*s
)
42 : m_indirect_this(NULL
),
44 m_queue_mutex_holder(0),
45 m_flush_mutex_holder(0),
// Sink thresholds: _flush() emits an entry to a sink when the sink's
// threshold >= the entry priority, so the negative defaults leave
// syslog/graylog effectively disabled until configured.
51 m_syslog_log(-2), m_syslog_crash(-2),
52 m_stderr_log(1), m_stderr_crash(-1),
53 m_graylog_log(-3), m_graylog_crash(-3),
55 m_max_new(DEFAULT_MAX_NEW
),
56 m_max_recent(DEFAULT_MAX_RECENT
),
61 ret
= pthread_mutex_init(&m_flush_mutex
, NULL
);
64 ret
= pthread_mutex_init(&m_queue_mutex
, NULL
);
67 ret
= pthread_cond_init(&m_cond_loggers
, NULL
);
70 ret
= pthread_cond_init(&m_cond_flusher
, NULL
);
// Pre-populate m_recent with blank entries for prealloc testing.
// NOTE(review): the guard that normally disables this loop is not
// visible in this chunk -- confirm against the full file.
73 // kludge for prealloc testing
75 for (int i
=0; i
< PREALLOC
; i
++)
76 m_recent
.enqueue(new Entry
);
// Destructor (signature line not visible in this mangled chunk).
// Clears the leaked indirect self-pointer so log_on_exit() sees a dead
// logger, closes the log fd, and destroys the sync primitives.  Must
// not run while the flusher thread is active (assert below).
81 if (m_indirect_this
) {
82 *m_indirect_this
= NULL
;
85 assert(!is_started());
// EINTR-safe close of the log file descriptor.  NOTE(review): the
// usual "m_fd >= 0" guard is not visible here -- confirm against the
// full file.
87 VOID_TEMP_FAILURE_RETRY(::close(m_fd
));
89 pthread_mutex_destroy(&m_queue_mutex
);
90 pthread_mutex_destroy(&m_flush_mutex
);
91 pthread_cond_destroy(&m_cond_loggers
);
92 pthread_cond_destroy(&m_cond_flusher
);
98 void Log::set_flush_on_exit()
100 // Make sure we flush on shutdown. We do this by deliberately
101 // leaking an indirect pointer to ourselves (on_exit() can't
102 // unregister a callback). This is not racy only becuase we
103 // assume that exit() won't race with ~Log().
104 if (m_indirect_this
== NULL
) {
105 m_indirect_this
= new (Log
*)(this);
106 exit_callbacks
.add_callback(log_on_exit
, m_indirect_this
);
110 void Log::set_max_new(int n
)
115 void Log::set_max_recent(int n
)
117 pthread_mutex_lock(&m_flush_mutex
);
118 m_flush_mutex_holder
= pthread_self();
120 m_flush_mutex_holder
= 0;
121 pthread_mutex_unlock(&m_flush_mutex
);
124 void Log::set_log_file(string fn
)
129 void Log::set_log_stderr_prefix(const std::string
& p
)
131 m_log_stderr_prefix
= p
;
// Close and reopen the log file at m_log_file (append mode, 0644),
// re-applying ownership when m_uid/m_gid are set.  Serialized with
// flushing via m_flush_mutex.  NOTE(review): mangled extraction -- the
// fd guard before close, the error-branch lines around the cerr
// message, and the else branch for an empty path are not visible here.
134 void Log::reopen_log_file()
136 pthread_mutex_lock(&m_flush_mutex
);
137 m_flush_mutex_holder
= pthread_self();
139 VOID_TEMP_FAILURE_RETRY(::close(m_fd
));
140 if (m_log_file
.length()) {
141 m_fd
= ::open(m_log_file
.c_str(), O_CREAT
|O_WRONLY
|O_APPEND
, 0644);
// Re-apply configured ownership (dropping privileges scenario).
142 if (m_fd
>= 0 && (m_uid
|| m_gid
)) {
143 int r
= ::fchown(m_fd
, m_uid
, m_gid
);
// Report (but tolerate) chown failure on stderr.
146 cerr
<< "failed to chown " << m_log_file
<< ": " << cpp_strerror(r
)
153 m_flush_mutex_holder
= 0;
154 pthread_mutex_unlock(&m_flush_mutex
);
// Change ownership of the currently open log fd to uid:gid, under
// m_flush_mutex.  NOTE(review): mangled extraction -- the "m_fd >= 0"
// guard and the error-branch lines around the cerr message are not
// visible in this chunk; failure appears to be reported but tolerated.
157 void Log::chown_log_file(uid_t uid
, gid_t gid
)
159 pthread_mutex_lock(&m_flush_mutex
);
161 int r
= ::fchown(m_fd
, uid
, gid
);
164 cerr
<< "failed to chown " << m_log_file
<< ": " << cpp_strerror(r
)
168 pthread_mutex_unlock(&m_flush_mutex
);
171 void Log::set_syslog_level(int log
, int crash
)
173 pthread_mutex_lock(&m_flush_mutex
);
175 m_syslog_crash
= crash
;
176 pthread_mutex_unlock(&m_flush_mutex
);
179 void Log::set_stderr_level(int log
, int crash
)
181 pthread_mutex_lock(&m_flush_mutex
);
183 m_stderr_crash
= crash
;
184 pthread_mutex_unlock(&m_flush_mutex
);
187 void Log::set_graylog_level(int log
, int crash
)
189 pthread_mutex_lock(&m_flush_mutex
);
191 m_graylog_crash
= crash
;
192 pthread_mutex_unlock(&m_flush_mutex
);
195 void Log::start_graylog()
197 pthread_mutex_lock(&m_flush_mutex
);
198 if (! m_graylog
.get())
199 m_graylog
= std::make_shared
<Graylog
>(m_subs
, "dlog");
200 pthread_mutex_unlock(&m_flush_mutex
);
204 void Log::stop_graylog()
206 pthread_mutex_lock(&m_flush_mutex
);
208 pthread_mutex_unlock(&m_flush_mutex
);
211 void Log::submit_entry(Entry
*e
)
213 pthread_mutex_lock(&m_queue_mutex
);
214 m_queue_mutex_holder
= pthread_self();
217 *(volatile int *)(0) = 0xdead;
219 // wait for flush to catch up
220 while (m_new
.m_len
> m_max_new
)
221 pthread_cond_wait(&m_cond_loggers
, &m_queue_mutex
);
224 pthread_cond_signal(&m_cond_flusher
);
225 m_queue_mutex_holder
= 0;
226 pthread_mutex_unlock(&m_queue_mutex
);
// Allocate a new Entry stamped with the current time, calling thread,
// level and subsystem.  NOTE(review): mangled extraction -- the branch
// structure is missing here; the "kludge for perf testing" path below
// appears to be an alternate (presumably disabled) recycling path that
// reuses an Entry dequeued from m_recent.  Confirm against full file.
230 Entry
*Log::create_entry(int level
, int subsys
)
233 return new Entry(ceph_clock_now(),
237 // kludge for perf testing
238 Entry
*e
= m_recent
.dequeue();
239 e
->m_stamp
= ceph_clock_now();
240 e
->m_thread
= pthread_self();
242 e
->m_subsys
= subsys
;
// Allocate an Entry with its payload buffer in a single allocation:
// reads *expected_size (racily, by design -- hence the benign-race
// annotation and relaxed atomic load), allocates sizeof(Entry)+size,
// and placement-constructs the Entry with the trailing bytes as its
// buffer.  NOTE(review): mangled extraction -- the branch structure is
// missing; the "kludge for perf testing" path appears to be an
// alternate recycling path reusing m_recent entries.
247 Entry
*Log::create_entry(int level
, int subsys
, size_t* expected_size
)
250 ANNOTATE_BENIGN_RACE_SIZED(expected_size
, sizeof(*expected_size
),
252 size_t size
= __atomic_load_n(expected_size
, __ATOMIC_RELAXED
);
253 void *ptr
= ::operator new(sizeof(Entry
) + size
);
// Placement-new: Entry header followed by its own payload buffer;
// expected_size is passed through so the Entry can update the hint.
254 return new(ptr
) Entry(ceph_clock_now(),
255 pthread_self(), level
, subsys
,
256 reinterpret_cast<char*>(ptr
) + sizeof(Entry
), size
, expected_size
);
258 // kludge for perf testing
259 Entry
*e
= m_recent
.dequeue();
260 e
->m_stamp
= ceph_clock_now();
261 e
->m_thread
= pthread_self();
263 e
->m_subsys
= subsys
;
// Fragment of what appears to be Log::flush() -- the signature and the
// local EntryQueue t (presumably swapped with m_new under the queue
// lock) are not visible in this mangled chunk.  Takes the flush lock,
// grabs pending entries under the queue lock, wakes any loggers blocked
// on back-pressure, emits via _flush(), then trims the recent ring.
270 pthread_mutex_lock(&m_flush_mutex
);
271 m_flush_mutex_holder
= pthread_self();
272 pthread_mutex_lock(&m_queue_mutex
);
273 m_queue_mutex_holder
= pthread_self();
// Wake submitters blocked in submit_entry() now that the queue drained.
276 pthread_cond_broadcast(&m_cond_loggers
);
277 m_queue_mutex_holder
= 0;
278 pthread_mutex_unlock(&m_queue_mutex
);
279 _flush(&t
, &m_recent
, false);
// Trim the recent ring down to m_max_recent, destroying old entries.
282 while (m_recent
.m_len
> m_max_recent
) {
283 m_recent
.dequeue()->destroy();
286 m_flush_mutex_holder
= 0;
287 pthread_mutex_unlock(&m_flush_mutex
);
// Drain queue *t, emitting each entry to every enabled sink (fd,
// syslog, stderr, graylog); `crash` forces emission regardless of the
// per-subsystem log level.  Caller holds m_flush_mutex.
// NOTE(review): mangled extraction -- the declarations of `e`, `buf`,
// `buflen`, several braces, the newline append before the fd write,
// the dynamic-buffer delete[], and the final requeue of e into
// *requeue are not visible in this chunk.
290 void Log::_flush(EntryQueue
*t
, EntryQueue
*requeue
, bool crash
)
293 while ((e
= t
->dequeue()) != NULL
) {
294 unsigned sub
= e
->m_subsys
;
// Emit when crashing, or when the entry priority passes the
// subsystem's configured level; each sink then applies its own
// threshold below.
296 bool should_log
= crash
|| m_subs
->get_log_level(sub
) >= e
->m_prio
;
297 bool do_fd
= m_fd
>= 0 && should_log
;
298 bool do_syslog
= m_syslog_crash
>= e
->m_prio
&& should_log
;
299 bool do_stderr
= m_stderr_crash
>= e
->m_prio
&& should_log
;
300 bool do_graylog2
= m_graylog_crash
>= e
->m_prio
&& should_log
;
// The text sinks share one formatted buffer: small lines live in a
// stack VLA, oversized ones (>= 64 KiB) fall back to heap allocation.
303 if (do_fd
|| do_syslog
|| do_stderr
) {
307 size_t buf_size
= 80 + e
->size();
308 bool need_dynamic
= buf_size
>= 0x10000; //avoids >64K buffers
309 //allocation at stack
310 char buf0
[need_dynamic
? 1 : buf_size
];
312 buf
= new char[buf_size
];
// Line header: negative queue position, timestamp, thread id, prio.
318 buflen
+= snprintf(buf
, buf_size
, "%6d> ", -t
->m_len
);
319 buflen
+= e
->m_stamp
.sprintf(buf
+ buflen
, buf_size
-buflen
);
320 buflen
+= snprintf(buf
+ buflen
, buf_size
-buflen
, " %lx %2d ",
321 (unsigned long)e
->m_thread
, e
->m_prio
);
// Append the payload, then clamp buflen so the extra byte written by
// the fd path (buflen+1 below) stays inside the buffer.
323 buflen
+= e
->snprintf(buf
+ buflen
, buf_size
- buflen
- 1);
324 if (buflen
> buf_size
- 1) { //paranoid check, buf was declared
326 buflen
= buf_size
- 1;
331 syslog(LOG_USER
|LOG_INFO
, "%s", buf
);
335 cerr
<< m_log_stderr_prefix
<< buf
<< std::endl
;
// Report fd write errors only when the result changes from the last
// one seen (m_fd_last_error), to avoid spamming stderr.
339 int r
= safe_write(m_fd
, buf
, buflen
+1);
340 if (r
!= m_fd_last_error
) {
342 cerr
<< "problem writing to " << m_log_file
343 << ": " << cpp_strerror(r
)
// The graylog sink consumes the Entry object directly, no text buffer.
351 if (do_graylog2
&& m_graylog
) {
352 m_graylog
->log_entry(e
);
// Emit a single pre-formatted message line `s` to the fd, syslog and
// stderr sinks, choosing the crash or normal threshold per `crash`.
// Used by dump_recent() for its framing/summary lines.
// NOTE(review): mangled extraction -- the construction of the local
// string `b` (presumably s + '\n') and several guard braces are not
// visible in this chunk.
359 void Log::_log_message(const char *s
, bool crash
)
362 size_t len
= strlen(s
);
367 int r
= safe_write(m_fd
, b
.c_str(), b
.size());
369 cerr
<< "problem writing to " << m_log_file
<< ": " << cpp_strerror(r
) << std::endl
;
// A sink is enabled when its selected threshold is non-negative.
371 if ((crash
? m_syslog_crash
: m_syslog_log
) >= 0) {
372 syslog(LOG_USER
|LOG_INFO
, "%s", s
);
375 if ((crash
? m_stderr_crash
: m_stderr_log
) >= 0) {
376 cerr
<< s
<< std::endl
;
// Crash-dump: flush anything pending, then replay the retained recent
// entries (through _flush with crash=true, bypassing level filters),
// followed by a summary of per-subsystem levels and logger settings.
// NOTE(review): mangled extraction -- the local EntryQueue t/old, the
// char buf[] declaration, t.swap(m_new), and loop increments are not
// visible in this chunk.
380 void Log::dump_recent()
382 pthread_mutex_lock(&m_flush_mutex
);
383 m_flush_mutex_holder
= pthread_self();
385 pthread_mutex_lock(&m_queue_mutex
);
386 m_queue_mutex_holder
= pthread_self();
391 m_queue_mutex_holder
= 0;
392 pthread_mutex_unlock(&m_queue_mutex
);
// First flush whatever was still queued, as in a normal flush().
393 _flush(&t
, &m_recent
, false);
// Then replay the recent ring unconditionally (crash=true).
396 _log_message("--- begin dump of recent events ---", true);
397 _flush(&m_recent
, &old
, true);
400 _log_message("--- logging levels ---", true);
401 for (vector
<Subsystem
>::iterator p
= m_subs
->m_subsys
.begin();
402 p
!= m_subs
->m_subsys
.end();
404 snprintf(buf
, sizeof(buf
), " %2d/%2d %s", p
->log_level
, p
->gather_level
, p
->name
.c_str());
405 _log_message(buf
, true);
408 sprintf(buf
, " %2d/%2d (syslog threshold)", m_syslog_log
, m_syslog_crash
);
409 _log_message(buf
, true);
410 sprintf(buf
, " %2d/%2d (stderr threshold)", m_stderr_log
, m_stderr_crash
);
411 _log_message(buf
, true);
412 sprintf(buf
, " max_recent %9d", m_max_recent
);
413 _log_message(buf
, true);
414 sprintf(buf
, " max_new %9d", m_max_new
);
415 _log_message(buf
, true);
416 sprintf(buf
, " log_file %s", m_log_file
.c_str());
417 _log_message(buf
, true);
419 _log_message("--- end dump of recent events ---", true);
421 m_flush_mutex_holder
= 0;
422 pthread_mutex_unlock(&m_flush_mutex
);
// Fragment of what appears to be Log::start() -- the signature, the
// m_stop reset and the thread-create call are not visible in this
// mangled chunk.  Precondition: the flusher thread is not yet running.
427 assert(!is_started());
428 pthread_mutex_lock(&m_queue_mutex
);
430 pthread_mutex_unlock(&m_queue_mutex
);
// Fragment of what appears to be Log::stop() -- under the queue lock,
// wake the flusher and every logger blocked on back-pressure so they
// can observe shutdown.  The stop-flag assignment and the join() are
// not visible in this mangled chunk.
437 pthread_mutex_lock(&m_queue_mutex
);
439 pthread_cond_signal(&m_cond_flusher
);
440 pthread_cond_broadcast(&m_cond_loggers
);
441 pthread_mutex_unlock(&m_queue_mutex
);
// Fragment of the flusher thread body (appears to be Log::entry()):
// holding the queue lock, when m_new has entries it drops the lock
// (presumably to call flush()) and re-acquires it; otherwise it sleeps
// on m_cond_flusher.  The enclosing while loop, the flush() calls and
// the return are not visible in this mangled chunk.
448 pthread_mutex_lock(&m_queue_mutex
);
449 m_queue_mutex_holder
= pthread_self();
451 if (!m_new
.empty()) {
// Release the lock while flushing so submitters are not blocked.
452 m_queue_mutex_holder
= 0;
453 pthread_mutex_unlock(&m_queue_mutex
);
455 pthread_mutex_lock(&m_queue_mutex
);
456 m_queue_mutex_holder
= pthread_self();
// Nothing queued: wait until submit_entry()/stop() signals.
460 pthread_cond_wait(&m_cond_flusher
, &m_queue_mutex
);
462 m_queue_mutex_holder
= 0;
463 pthread_mutex_unlock(&m_queue_mutex
);
468 bool Log::is_inside_log_lock()
471 pthread_self() == m_queue_mutex_holder
||
472 pthread_self() == m_flush_mutex_holder
;
475 void Log::inject_segv()
477 m_inject_segv
= true;
480 void Log::reset_segv()
482 m_inject_segv
= false;