1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
6 #include "common/errno.h"
7 #include "common/safe_io.h"
8 #include "common/Graylog.h"
9 #include "common/valgrind.h"
11 #include "include/ceph_assert.h"
12 #include "include/compat.h"
13 #include "include/on_exit.h"
17 #include "SubsystemMap.h"
26 #define MAX_LOG_BUF 65536
// Process-wide exit-callback registry; set_flush_on_exit() registers
// log_on_exit() here so pending log entries are flushed at exit().
31 static OnExitManager exit_callbacks
;
// Callback run by exit_callbacks at process exit.  p is the heap-allocated
// Log** registered in set_flush_on_exit().
// NOTE(review): interior lines are elided from this chunk; presumably the
// pointed-to Log (if still alive, i.e. *p non-null) is flushed before the
// pointer itself is freed -- confirm against the full source.
33 static void log_on_exit(void *p
)
38 delete (Log
**)p
;// Delete allocated pointer (not Log object, the pointer only!)
// Construct a Log bound to a (non-owning) SubsystemMap used for
// per-subsystem level checks in _flush().  m_indirect_this starts null and
// is only allocated by set_flush_on_exit().
// NOTE(review): some initializer-list entries are elided from this chunk.
41 Log::Log(const SubsystemMap
*s
)
42 : m_indirect_this(nullptr),
44 m_recent(DEFAULT_MAX_RECENT
)
// Pre-reserve the format buffer (MAX_LOG_BUF = 64 KiB) so the hot _flush()
// path does not reallocate for typical batches.
46 m_log_buf
.reserve(MAX_LOG_BUF
);
// Destructor body (the ~Log() signature is not visible in this chunk).
// Nulling *m_indirect_this disarms the registered log_on_exit() callback --
// on_exit() callbacks cannot be unregistered, only made no-ops.
51 if (m_indirect_this
) {
52 *m_indirect_this
= nullptr;
// The flusher thread must already be stopped before destruction.
55 ceph_assert(!is_started());
// Close the log fd, retrying on EINTR and discarding the result.
57 VOID_TEMP_FAILURE_RETRY(::close(m_fd
));
// Toggle coarse vs. fine-grained timestamps for new entries; serialized
// against concurrent flushes by m_flush_mutex.  (Body elided in this chunk.)
62 void Log::set_coarse_timestamps(bool coarse
) {
63 std::scoped_lock
lock(m_flush_mutex
);
// Arrange for the log to be flushed when the process exits.
70 void Log::set_flush_on_exit()
72 std::scoped_lock
lock(m_flush_mutex
);
73 // Make sure we flush on shutdown. We do this by deliberately
74 // leaking an indirect pointer to ourselves (on_exit() can't
75 // unregister a callback). This is not racy only because we
76 // assume that exit() won't race with ~Log().
77 if (m_indirect_this
== NULL
) {
// Heap-allocate the Log* so ~Log() can disarm the callback by nulling
// *m_indirect_this; log_on_exit() frees the pointer itself.
78 m_indirect_this
= new (Log
*)(this);
79 exit_callbacks
.add_callback(log_on_exit
, m_indirect_this
);
// Cap the number of queued-but-unflushed entries; submit_entry() blocks for
// backpressure once m_new grows past this.  Guarded by m_queue_mutex.
// (Assignment elided in this chunk.)
83 void Log::set_max_new(std::size_t n
)
85 std::scoped_lock
lock(m_queue_mutex
);
// Set how many already-flushed entries are retained (m_recent) for
// dump_recent().  Guarded by m_flush_mutex since the flush path appends to
// m_recent.  (Assignment elided in this chunk.)
89 void Log::set_max_recent(std::size_t n
)
91 std::scoped_lock
lock(m_flush_mutex
);
// Record the log file path; takes effect on the next reopen_log_file().
// (Assignment elided in this chunk.)
95 void Log::set_log_file(std::string_view fn
)
97 std::scoped_lock
lock(m_flush_mutex
);
// Set the prefix prepended to every entry echoed to stderr by _flush().
101 void Log::set_log_stderr_prefix(std::string_view p
)
103 std::scoped_lock
lock(m_flush_mutex
);
104 m_log_stderr_prefix
= p
;
// Close and reopen the log file (e.g. after log rotation or a
// set_log_file() change), reapplying ownership when m_uid/m_gid are set.
107 void Log::reopen_log_file()
109 std::scoped_lock
lock(m_flush_mutex
);
// Record the holder tid so is_inside_log_lock() can detect re-entry.
113 m_flush_mutex_holder
= pthread_self();
// Drop the previous fd first; close is retried on EINTR.
115 VOID_TEMP_FAILURE_RETRY(::close(m_fd
));
116 if (m_log_file
.length()) {
// O_CLOEXEC keeps the log fd from leaking into exec'd children.
117 m_fd
= ::open(m_log_file
.c_str(), O_CREAT
|O_WRONLY
|O_APPEND
|O_CLOEXEC
, 0644);
118 if (m_fd
>= 0 && (m_uid
|| m_gid
)) {
119 if (::fchown(m_fd
, m_uid
, m_gid
) < 0) {
// NOTE(review): 'e' is declared on a line elided from this chunk --
// presumably captured from errno right after the failed fchown.
121 std::cerr
<< "failed to chown " << m_log_file
<< ": " << cpp_strerror(e
)
128 m_flush_mutex_holder
= 0;
// Best-effort ownership change of the currently-open log fd; failures are
// reported to stderr but not returned to the caller.
131 void Log::chown_log_file(uid_t uid
, gid_t gid
)
133 std::scoped_lock
lock(m_flush_mutex
);
135 int r
= ::fchown(m_fd
, uid
, gid
);
// NOTE(review): 'r' is -1 on failure here; an elided line presumably
// normalizes it to -errno before it reaches cpp_strerror -- confirm.
138 std::cerr
<< "failed to chown " << m_log_file
<< ": " << cpp_strerror(r
)
// Set syslog priority thresholds: 'log' for normal operation (its
// assignment is elided from this chunk) and 'crash' for dump-time output;
// consumed by _flush() and _log_message().
144 void Log::set_syslog_level(int log
, int crash
)
146 std::scoped_lock
lock(m_flush_mutex
);
148 m_syslog_crash
= crash
;
// Set stderr priority thresholds: 'log' for normal operation (its
// assignment is elided from this chunk) and 'crash' for dump-time output.
151 void Log::set_stderr_level(int log
, int crash
)
153 std::scoped_lock
lock(m_flush_mutex
);
155 m_stderr_crash
= crash
;
// Set Graylog priority thresholds: 'log' for normal operation (its
// assignment is elided from this chunk) and 'crash' for dump-time output.
158 void Log::set_graylog_level(int log
, int crash
)
160 std::scoped_lock
lock(m_flush_mutex
);
162 m_graylog_crash
= crash
;
// Lazily create the Graylog sink; no-op if one already exists.
165 void Log::start_graylog()
167 std::scoped_lock
lock(m_flush_mutex
);
168 if (! m_graylog
.get())
// "dlog" identifies this logger instance to the Graylog endpoint.
169 m_graylog
= std::make_shared
<Graylog
>(m_subs
, "dlog");
// Tear down the Graylog sink (the reset itself is elided from this chunk);
// guarded by m_flush_mutex like the other sink mutators.
173 void Log::stop_graylog()
175 std::scoped_lock
lock(m_flush_mutex
);
// Queue one entry for the flusher thread.  May block (backpressure) while
// m_new exceeds m_max_new, unless the log is stopping.
179 void Log::submit_entry(Entry
&& e
)
181 std::unique_lock
lock(m_queue_mutex
);
182 m_queue_mutex_holder
= pthread_self();
// Test hook: deliberately segfault if inject_segv() was called.
184 if (unlikely(m_inject_segv
))
185 *(volatile int *)(0) = 0xdead;
187 // wait for flush to catch up
188 while (is_started() &&
189 m_new
.size() > m_max_new
) {
190 if (m_stop
) break; // force addition
191 m_cond_loggers
.wait(lock
);
194 m_new
.emplace_back(std::move(e
));
// Wake the flusher; clear the holder tid before the lock is released.
195 m_cond_flusher
.notify_all();
196 m_queue_mutex_holder
= 0;
// Flush body (the enclosing signature -- presumably Log::flush() -- is not
// visible in this chunk).  Holds m_flush_mutex for the whole operation and
// m_queue_mutex only long enough to steal the pending queue into m_flush.
201 std::scoped_lock
lock1(m_flush_mutex
);
202 m_flush_mutex_holder
= pthread_self();
205 std::scoped_lock
lock2(m_queue_mutex
);
206 m_queue_mutex_holder
= pthread_self();
207 assert(m_flush
.empty());
// Wake any submitters blocked on backpressure in submit_entry().
209 m_cond_loggers
.notify_all();
210 m_queue_mutex_holder
= 0;
// Format and emit the stolen entries outside the queue lock.
213 _flush(m_flush
, false);
214 m_flush_mutex_holder
= 0;
// Write an already-formatted buffer to the log fd.  A write problem is
// reported to stderr only when the result differs from m_fd_last_error, so
// a persistent failure is not reported on every entry.
217 void Log::_log_safe_write(std::string_view sv
)
221 int r
= safe_write(m_fd
, sv
.data(), sv
.size());
222 if (r
!= m_fd_last_error
) {
224 std::cerr
<< "problem writing to " << m_log_file
225 << ": " << cpp_strerror(r
)
// Push any formatted-but-unwritten bytes in m_log_buf to the log fd.
// NOTE(review): the buffer clear after the write is on a line elided from
// this chunk -- confirm against the full source.
231 void Log::_flush_logbuf()
233 if (m_log_buf
.size()) {
234 _log_safe_write(std::string_view(m_log_buf
.data(), m_log_buf
.size()));
// Format and dispatch a batch of entries.  For each entry (the loop header
// over 't' is elided from this chunk) the per-sink decisions are computed,
// the text is rendered into m_log_buf, and the entry is finally retired
// into m_recent.  'crash' forces logging regardless of subsystem level.
239 void Log::_flush(EntryVector
& t
, bool crash
)
// The shared format buffer must have been drained by the previous flush.
243 assert(m_log_buf
.empty());
250 auto prio
= e
.m_prio
;
251 auto stamp
= e
.m_stamp
;
252 auto sub
= e
.m_subsys
;
253 auto thread
= e
.m_thread
;
// An entry is emitted when crash-dumping or when its priority clears the
// subsystem's configured level.
256 bool should_log
= crash
|| m_subs
->get_log_level(sub
) >= prio
;
257 bool do_fd
= m_fd
>= 0 && should_log
;
258 bool do_syslog
= m_syslog_crash
>= prio
&& should_log
;
259 bool do_stderr
= m_stderr_crash
>= prio
&& should_log
;
260 bool do_graylog2
= m_graylog_crash
>= prio
&& should_log
;
262 if (do_fd
|| do_syslog
|| do_stderr
) {
// Grow m_log_buf in place: 'cur' is the already-filled length, 'allocated'
// the worst-case room for this entry (payload + ~80 bytes of header).
263 const std::size_t cur
= m_log_buf
.size();
264 std::size_t used
= 0;
265 const std::size_t allocated
= e
.size() + 80;
266 m_log_buf
.resize(cur
+ allocated
);
268 char* const start
= m_log_buf
.data();
269 char* pos
= start
+ cur
;
// Header: negative recent-event index ('len' is declared on an elided
// line), then timestamp, then thread id and priority.
272 used
+= (std::size_t)snprintf(pos
+ used
, allocated
- used
, "%6ld> ", -(--len
));
274 used
+= (std::size_t)append_time(stamp
, pos
+ used
, allocated
- used
);
275 used
+= (std::size_t)snprintf(pos
+ used
, allocated
- used
, " %lx %2d ", (unsigned long)thread
, prio
);
// Append the message body ('str' comes from an elided line, presumably the
// entry's text).
276 memcpy(pos
+ used
, str
.data(), str
.size());
// Ensure room remains for the trailing newline added below.
279 ceph_assert((used
+ 1 /* '\n' */) < allocated
);
282 syslog(LOG_USER
|LOG_INFO
, "%s", pos
);
286 std::cerr
<< m_log_stderr_prefix
<< std::string_view(pos
, used
) << std::endl
;
289 /* now add newline */
// Shrink the buffer back to the bytes actually written for this entry.
293 m_log_buf
.resize(cur
+ used
);
// Drain the buffer to the fd once it grows past MAX_LOG_BUF.
298 if (m_log_buf
.size() > MAX_LOG_BUF
) {
303 if (do_graylog2
&& m_graylog
) {
304 m_graylog
->log_entry(e
);
// Retire the entry into the recent-events ring for dump_recent().
307 m_recent
.push_back(std::move(e
));
// Emit a single pre-formatted message line to the enabled sinks (fd,
// syslog, stderr), used by dump_recent() for its framing/summary lines.
// 'crash' selects the crash thresholds instead of the normal ones.
314 void Log::_log_message(const char *s
, bool crash
)
317 size_t len
= strlen(s
);
// NOTE(review): 'b' is built on lines elided from this chunk -- presumably
// 's' plus a trailing newline.
322 int r
= safe_write(m_fd
, b
.c_str(), b
.size());
324 std::cerr
<< "problem writing to " << m_log_file
<< ": " << cpp_strerror(r
) << std::endl
;
326 if ((crash
? m_syslog_crash
: m_syslog_log
) >= 0) {
327 syslog(LOG_USER
|LOG_INFO
, "%s", s
);
330 if ((crash
? m_stderr_crash
: m_stderr_log
) >= 0) {
331 std::cerr
<< s
<< std::endl
;
// Dump the retained recent entries plus logging configuration and a
// pthread-id/name mapping, typically on crash.  Takes both mutexes in the
// same order as flush() (flush first, then queue) to avoid deadlock.
335 void Log::dump_recent()
337 std::scoped_lock
lock1(m_flush_mutex
);
338 m_flush_mutex_holder
= pthread_self();
341 std::scoped_lock
lock2(m_queue_mutex
);
342 m_queue_mutex_holder
= pthread_self();
343 assert(m_flush
.empty());
345 m_queue_mutex_holder
= 0;
// First flush anything still pending so the dump is complete.
348 _flush(m_flush
, false);
350 _log_message("--- begin dump of recent events ---", true);
// Collect the distinct thread ids seen in the dumped entries so their
// names can be printed below.
351 std::set
<pthread_t
> recent_pthread_ids
;
// Move the retained ring into a local vector ('t' is declared on an elided
// line) for iteration.
354 t
.insert(t
.end(), std::make_move_iterator(m_recent
.begin()), std::make_move_iterator(m_recent
.end()));
356 for (const auto& e
: t
) {
357 recent_pthread_ids
.emplace(e
.m_thread
);
363 _log_message("--- logging levels ---", true);
// Per-subsystem configured log/gather levels.  ('buf' is declared on an
// elided line; the sprintf calls below assume it is large enough.)
364 for (const auto& p
: m_subs
->m_subsys
) {
365 snprintf(buf
, sizeof(buf
), " %2d/%2d %s", p
.log_level
, p
.gather_level
, p
.name
);
366 _log_message(buf
, true);
368 sprintf(buf
, " %2d/%2d (syslog threshold)", m_syslog_log
, m_syslog_crash
);
369 _log_message(buf
, true);
370 sprintf(buf
, " %2d/%2d (stderr threshold)", m_stderr_log
, m_stderr_crash
);
371 _log_message(buf
, true);
373 _log_message("--- pthread ID / name mapping for recent threads ---", true);
374 for (const auto pthread_id
: recent_pthread_ids
)
376 char pthread_name
[16] = {0}; //limited by 16B include terminating null byte.
377 ceph_pthread_getname(pthread_id
, pthread_name
, sizeof(pthread_name
));
378 snprintf(buf
, sizeof(buf
), " %lx / %s", pthread_id
, pthread_name
);
379 _log_message(buf
, true);
382 sprintf(buf
, " max_recent %9zu", m_max_recent
);
383 _log_message(buf
, true);
384 sprintf(buf
, " max_new %9zu", m_max_new
);
385 _log_message(buf
, true);
386 sprintf(buf
, " log_file %s", m_log_file
.c_str());
387 _log_message(buf
, true);
389 _log_message("--- end dump of recent events ---", true);
// Everything produced above must have been drained before returning.
391 assert(m_log_buf
.empty());
393 m_flush_mutex_holder
= 0;
// Start-up fragment (the enclosing signature -- presumably Log::start() --
// is not visible in this chunk): asserts the flusher thread is not already
// running, then manipulates queue state under m_queue_mutex.
398 ceph_assert(!is_started());
400 std::scoped_lock
lock(m_queue_mutex
);
// Shutdown fragment (enclosing signature -- presumably Log::stop() -- not
// visible in this chunk): under the queue lock, wakes the flusher and all
// blocked submitters so they can observe the stop condition.
410 std::scoped_lock
lock(m_queue_mutex
);
412 m_cond_flusher
.notify_one();
413 m_cond_loggers
.notify_all();
// Flusher-thread loop fragment (enclosing signature -- presumably
// Log::entry() -- not visible in this chunk): waits on m_cond_flusher for
// new entries, releasing the queue lock (and clearing the holder tid)
// around the actual flush work.
423 std::unique_lock
lock(m_queue_mutex
);
424 m_queue_mutex_holder
= pthread_self();
426 if (!m_new
.empty()) {
// Holder tid is cleared whenever the lock is about to be released.
427 m_queue_mutex_holder
= 0;
431 m_queue_mutex_holder
= pthread_self();
// Sleep until submit_entry()/stop() signals more work or shutdown.
435 m_cond_flusher
.wait(lock
);
437 m_queue_mutex_holder
= 0;
// True if the calling thread currently holds either log mutex, judged by
// the recorded holder tids.  Used to detect re-entrant logging (e.g. from
// a signal/crash handler).  (The 'return' keyword is on an elided line.)
443 bool Log::is_inside_log_lock()
446 pthread_self() == m_queue_mutex_holder
||
447 pthread_self() == m_flush_mutex_holder
;
// Test hook: arm the deliberate null-pointer write in submit_entry().
450 void Log::inject_segv()
452 m_inject_segv
= true;
// Test hook: disarm the deliberate fault armed by inject_segv().
455 void Log::reset_segv()
457 m_inject_segv
= false;