1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
6 #include "common/errno.h"
7 #include "common/safe_io.h"
8 #include "common/Graylog.h"
9 #include "common/valgrind.h"
11 #include "include/ceph_assert.h"
12 #include "include/compat.h"
13 #include "include/on_exit.h"
17 #include "SubsystemMap.h"
25 #define MAX_LOG_BUF 65536
// File-scope registry of callbacks to run at process exit; populated by
// Log::set_flush_on_exit() below (on_exit() callbacks cannot be unregistered).
30 static OnExitManager exit_callbacks
;
// Exit-time callback registered by Log::set_flush_on_exit().  `p` is the
// heap-allocated Log** deliberately leaked by set_flush_on_exit().
// NOTE(review): lines elided from this view presumably dereference the
// pointer and flush the Log instance before the delete -- confirm upstream.
32 static void log_on_exit(void *p
)
37 delete (Log
**)p
;// Delete allocated pointer (not Log object, the pointer only!)
// Construct a Log bound to a SubsystemMap (borrowed pointer, not owned).
// m_indirect_this stays null until set_flush_on_exit() allocates it.
// (Additional member initializers are elided from this view.)
40 Log::Log(const SubsystemMap
*s
)
41 : m_indirect_this(nullptr),
43 m_recent(DEFAULT_MAX_RECENT
)
// Pre-size the formatting buffer so steady-state logging avoids reallocation.
45 m_log_buf
.reserve(MAX_LOG_BUF
);
// (Destructor body fragment -- signature elided from this view.)
// Neutralize the exit-callback's back-pointer so a later log_on_exit()
// sees nullptr instead of a dangling Log*.
50 if (m_indirect_this
) {
51 *m_indirect_this
= nullptr;
// The flusher thread must already have been stopped before destruction.
54 ceph_assert(!is_started());
// Close the log file descriptor, retrying on EINTR.
56 VOID_TEMP_FAILURE_RETRY(::close(m_fd
));
// Toggle coarse (cheaper, lower-resolution) timestamps for log entries.
// The actual clock switch is in lines elided from this view; only the
// flush-lock acquisition is visible here.
61 void Log::set_coarse_timestamps(bool coarse
) {
62 std::scoped_lock
lock(m_flush_mutex
);
// Arrange for pending log entries to be handled when the process calls
// exit(), by registering log_on_exit() with the file-scope exit_callbacks.
69 void Log::set_flush_on_exit()
71 std::scoped_lock
lock(m_flush_mutex
);
72 // Make sure we flush on shutdown. We do this by deliberately
73 // leaking an indirect pointer to ourselves (on_exit() can't
74 // unregister a callback). This is not racy only because we
75 // assume that exit() won't race with ~Log().
// Register at most once per Log instance: m_indirect_this doubles as the
// "already registered" flag.
76 if (m_indirect_this
== NULL
) {
// Allocates a Log* (a pointer cell, not a Log); log_on_exit() frees it.
77 m_indirect_this
= new (Log
*)(this);
78 exit_callbacks
.add_callback(log_on_exit
, m_indirect_this
);
// Cap the number of queued-but-unflushed entries (submit_entry() blocks
// past this limit).  Takes the queue lock, not the flush lock; the
// assignment itself is in lines elided from this view.
82 void Log::set_max_new(std::size_t n
)
84 std::scoped_lock
lock(m_queue_mutex
);
// Set how many already-flushed entries are retained for dump_recent().
// The assignment/resize is in lines elided from this view.
88 void Log::set_max_recent(std::size_t n
)
90 std::scoped_lock
lock(m_flush_mutex
);
// Record the log file path; takes effect on the next reopen_log_file().
// The assignment itself is in lines elided from this view.
94 void Log::set_log_file(std::string_view fn
)
96 std::scoped_lock
lock(m_flush_mutex
);
100 void Log::set_log_stderr_prefix(std::string_view p
)
102 std::scoped_lock
lock(m_flush_mutex
);
103 m_log_stderr_prefix
= p
;
// Close and reopen the configured log file (e.g. after log rotation).
// Records the holder thread id so is_inside_log_lock() can detect that
// this thread holds the flush lock.
106 void Log::reopen_log_file()
108 std::scoped_lock
lock(m_flush_mutex
);
112 m_flush_mutex_holder
= pthread_self();
// Close any previously open fd, retrying on EINTR.
114 VOID_TEMP_FAILURE_RETRY(::close(m_fd
));
115 if (m_log_file
.length()) {
// Append-only, close-on-exec; 0644 so operators can read the log.
116 m_fd
= ::open(m_log_file
.c_str(), O_CREAT
|O_WRONLY
|O_APPEND
|O_CLOEXEC
, 0644);
// Re-apply configured ownership to the freshly created file, if any.
117 if (m_fd
>= 0 && (m_uid
|| m_gid
)) {
118 if (::fchown(m_fd
, m_uid
, m_gid
) < 0) {
// Best-effort: report the chown failure on stderr and keep going.
120 std::cerr
<< "failed to chown " << m_log_file
<< ": " << cpp_strerror(e
)
// Clear the holder id before the scoped_lock releases m_flush_mutex.
127 m_flush_mutex_holder
= 0;
// Change ownership of the currently open log fd (best-effort).
// NOTE(review): lines elided from this view presumably check r < 0 before
// the error print and record uid/gid for future reopens -- confirm upstream.
130 void Log::chown_log_file(uid_t uid
, gid_t gid
)
132 std::scoped_lock
lock(m_flush_mutex
);
134 int r
= ::fchown(m_fd
, uid
, gid
);
137 std::cerr
<< "failed to chown " << m_log_file
<< ": " << cpp_strerror(r
)
// Set syslog verbosity thresholds for normal logging and crash dumps.
// Only the crash-threshold assignment is visible here; the normal-log
// assignment is in a line elided from this view.
143 void Log::set_syslog_level(int log
, int crash
)
145 std::scoped_lock
lock(m_flush_mutex
);
147 m_syslog_crash
= crash
;
// Set stderr verbosity thresholds for normal logging and crash dumps.
// Only the crash-threshold assignment is visible here; the normal-log
// assignment is in a line elided from this view.
150 void Log::set_stderr_level(int log
, int crash
)
152 std::scoped_lock
lock(m_flush_mutex
);
154 m_stderr_crash
= crash
;
// Set Graylog verbosity thresholds for normal logging and crash dumps.
// Only the crash-threshold assignment is visible here; the normal-log
// assignment is in a line elided from this view.
157 void Log::set_graylog_level(int log
, int crash
)
159 std::scoped_lock
lock(m_flush_mutex
);
161 m_graylog_crash
= crash
;
164 void Log::start_graylog()
166 std::scoped_lock
lock(m_flush_mutex
);
167 if (! m_graylog
.get())
168 m_graylog
= std::make_shared
<Graylog
>(m_subs
, "dlog");
// Detach the Graylog sink; the actual teardown (releasing m_graylog) is
// in lines elided from this view.
172 void Log::stop_graylog()
174 std::scoped_lock
lock(m_flush_mutex
);
// Move a log entry onto the pending queue and wake the flusher thread.
// Applies backpressure: blocks while the queue exceeds m_max_new, unless
// the log is stopping.
178 void Log::submit_entry(Entry
&& e
)
180 std::unique_lock
lock(m_queue_mutex
);
181 m_queue_mutex_holder
= pthread_self();
// Deliberate crash for tests: write through a null pointer when armed
// via inject_segv().
183 if (unlikely(m_inject_segv
))
184 *(volatile int *)(0) = 0xdead;
186 // wait for flush to catch up
187 while (is_started() &&
188 m_new
.size() > m_max_new
) {
189 if (m_stop
) break; // force addition
// Sleeps on m_cond_loggers; the flusher notifies after draining.
190 m_cond_loggers
.wait(lock
);
193 m_new
.emplace_back(std::move(e
));
194 m_cond_flusher
.notify_all();
// Clear the holder id before the unique_lock releases m_queue_mutex.
195 m_queue_mutex_holder
= 0;
// (Body fragment -- the enclosing signature is elided from this view;
// appears to be the synchronous flush entry point.)  Lock order is
// flush mutex first, then queue mutex, matching dump_recent() below.
200 std::scoped_lock
lock1(m_flush_mutex
);
201 m_flush_mutex_holder
= pthread_self();
204 std::scoped_lock
lock2(m_queue_mutex
);
205 m_queue_mutex_holder
= pthread_self();
// m_flush must be empty before swapping/moving the pending queue into it.
206 assert(m_flush
.empty());
// Wake writers blocked on the m_max_new backpressure in submit_entry().
208 m_cond_loggers
.notify_all();
209 m_queue_mutex_holder
= 0;
// Write out the collected entries; requeue=true keeps them for dump_recent().
212 _flush(m_flush
, true, false);
213 m_flush_mutex_holder
= 0;
// Write a formatted chunk to the log fd via safe_write (handles short
// writes/EINTR).  Errors are reported on stderr only when the error state
// changes (compared against m_fd_last_error) to avoid spamming.
216 void Log::_log_safe_write(std::string_view sv
)
220 int r
= safe_write(m_fd
, sv
.data(), sv
.size());
221 if (r
!= m_fd_last_error
) {
223 std::cerr
<< "problem writing to " << m_log_file
224 << ": " << cpp_strerror(r
)
// Drain the accumulated format buffer to the log fd if non-empty.
// (The buffer reset after the write is in lines elided from this view.)
230 void Log::_flush_logbuf()
232 if (m_log_buf
.size()) {
233 _log_safe_write(std::string_view(m_log_buf
.data(), m_log_buf
.size()));
// Core flush loop: format each entry in `t` and fan it out to the log
// file, syslog, stderr, and Graylog according to per-sink thresholds.
// `crash` forces logging regardless of subsystem level (used by
// dump_recent()); `requeue` retains entries in m_recent afterwards.
// (Loop header and several statements are elided from this view.)
238 void Log::_flush(EntryVector
& t
, bool requeue
, bool crash
)
// Nothing to do when there are no entries and nothing to requeue.
244 if (!requeue
&& t
.empty()) {
// Per-entry metadata pulled out once for the formatting below.
248 auto prio
= e
.m_prio
;
249 auto stamp
= e
.m_stamp
;
250 auto sub
= e
.m_subsys
;
251 auto thread
= e
.m_thread
;
// Gate on subsystem log level unless this is a crash dump.
254 bool should_log
= crash
|| m_subs
->get_log_level(sub
) >= prio
;
// Per-sink decisions: file, syslog, stderr, graylog.
255 bool do_fd
= m_fd
>= 0 && should_log
;
256 bool do_syslog
= m_syslog_crash
>= prio
&& should_log
;
257 bool do_stderr
= m_stderr_crash
>= prio
&& should_log
;
258 bool do_graylog2
= m_graylog_crash
>= prio
&& should_log
;
260 if (do_fd
|| do_syslog
|| do_stderr
) {
// Append the formatted line to m_log_buf in place: reserve a worst-case
// slice (entry size + 80 bytes of header) past the current end.
261 const std::size_t cur
= m_log_buf
.size();
262 std::size_t used
= 0;
263 const std::size_t allocated
= e
.size() + 80;
264 m_log_buf
.resize(cur
+ allocated
);
266 char* const start
= m_log_buf
.data();
267 char* pos
= start
+ cur
;
// Header: negative recency index, timestamp, thread id, priority.
270 used
+= (std::size_t)snprintf(pos
+ used
, allocated
- used
, "%6ld> ", -(--len
));
272 used
+= (std::size_t)append_time(stamp
, pos
+ used
, allocated
- used
);
273 used
+= (std::size_t)snprintf(pos
+ used
, allocated
- used
, " %lx %2d ", (unsigned long)thread
, prio
);
// Message body is copied verbatim after the header.
274 memcpy(pos
+ used
, str
.data(), str
.size());
// Must leave room for the trailing newline added below.
277 ceph_assert((used
+ 1 /* '\n' */) < allocated
);
280 syslog(LOG_USER
|LOG_INFO
, "%s", pos
);
284 std::cerr
<< m_log_stderr_prefix
<< std::string_view(pos
, used
) << std::endl
;
287 /* now add newline */
// Shrink the buffer back to the bytes actually written.
291 m_log_buf
.resize(cur
+ used
);
// Flush early once the accumulation buffer outgrows its reserve.
296 if (m_log_buf
.size() > MAX_LOG_BUF
) {
301 if (do_graylog2
&& m_graylog
) {
302 m_graylog
->log_entry(e
);
// Retain the entry for dump_recent().
306 m_recent
.push_back(std::move(e
));
// Emit an out-of-band message (e.g. dump_recent() banners) to the file,
// syslog, and stderr sinks.  `crash` selects which threshold pair
// (crash vs normal) gates syslog/stderr output.  The construction of the
// buffer `b` written to the fd is in lines elided from this view.
314 void Log::_log_message(const char *s
, bool crash
)
317 size_t len
= strlen(s
);
322 int r
= safe_write(m_fd
, b
.c_str(), b
.size());
324 std::cerr
<< "problem writing to " << m_log_file
<< ": " << cpp_strerror(r
) << std::endl
;
326 if ((crash
? m_syslog_crash
: m_syslog_log
) >= 0) {
327 syslog(LOG_USER
|LOG_INFO
, "%s", s
);
330 if ((crash
? m_stderr_crash
: m_stderr_log
) >= 0) {
331 std::cerr
<< s
<< std::endl
;
// Dump the retained m_recent entries plus current logging configuration,
// typically on crash.  Entries are re-flushed with crash=true so level
// gating is bypassed.  Lock order (flush then queue) matches flush().
// (The declarations of `t` and `buf` are in lines elided from this view.)
335 void Log::dump_recent()
337 std::scoped_lock
lock1(m_flush_mutex
);
338 m_flush_mutex_holder
= pthread_self();
341 std::scoped_lock
lock2(m_queue_mutex
);
342 m_queue_mutex_holder
= pthread_self();
343 assert(m_flush
.empty());
345 m_queue_mutex_holder
= 0;
// First drain anything still pending, at normal (non-crash) gating.
348 _flush(m_flush
, true, false);
351 _log_message("--- begin dump of recent events ---", true);
// Move the retained entries into a scratch vector and re-emit them all.
354 t
.insert(t
.end(), std::make_move_iterator(m_recent
.begin()), std::make_move_iterator(m_recent
.end()));
356 _flush(t
, true, true);
// Report the per-subsystem log/gather levels and sink thresholds.
360 _log_message("--- logging levels ---", true);
361 for (const auto& p
: m_subs
->m_subsys
) {
362 snprintf(buf
, sizeof(buf
), " %2d/%2d %s", p
.log_level
, p
.gather_level
, p
.name
);
363 _log_message(buf
, true);
366 sprintf(buf
, " %2d/%2d (syslog threshold)", m_syslog_log
, m_syslog_crash
);
367 _log_message(buf
, true);
368 sprintf(buf
, " %2d/%2d (stderr threshold)", m_stderr_log
, m_stderr_crash
);
369 _log_message(buf
, true);
370 sprintf(buf
, " max_recent %9zu", m_max_recent
);
371 _log_message(buf
, true);
372 sprintf(buf
, " max_new %9zu", m_max_new
);
373 _log_message(buf
, true);
374 sprintf(buf
, " log_file %s", m_log_file
.c_str());
375 _log_message(buf
, true);
377 _log_message("--- end dump of recent events ---", true);
381 m_flush_mutex_holder
= 0;
// (Body fragment -- signature elided; appears to start the flusher
// thread.)  Must not already be running.
386 ceph_assert(!is_started());
388 std::scoped_lock
lock(m_queue_mutex
);
// (Body fragment -- signature elided; appears to stop the flusher
// thread.)  Wake both the flusher and any writers blocked on
// backpressure so they observe the stop condition.
398 std::scoped_lock
lock(m_queue_mutex
);
400 m_cond_flusher
.notify_one();
401 m_cond_loggers
.notify_all();
// (Body fragment -- signature elided; appears to be the flusher thread's
// main loop: drain m_new when non-empty, otherwise sleep on the
// flusher condition variable.)
411 std::unique_lock
lock(m_queue_mutex
);
412 m_queue_mutex_holder
= pthread_self();
414 if (!m_new
.empty()) {
// Release the holder marker while flushing outside the queue lock.
415 m_queue_mutex_holder
= 0;
419 m_queue_mutex_holder
= pthread_self();
423 m_cond_flusher
.wait(lock
);
425 m_queue_mutex_holder
= 0;
431 bool Log::is_inside_log_lock()
434 pthread_self() == m_queue_mutex_holder
||
435 pthread_self() == m_flush_mutex_holder
;
438 void Log::inject_segv()
440 m_inject_segv
= true;
443 void Log::reset_segv()
445 m_inject_segv
= false;