]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- |
2 | // vim: ts=8 sw=2 smarttab | |
3 | ||
4 | #include "Log.h" | |
5 | ||
6 | #include <errno.h> | |
7 | #include <syslog.h> | |
8 | ||
9 | #include "common/errno.h" | |
10 | #include "common/safe_io.h" | |
11 | #include "common/Clock.h" | |
12 | #include "common/Graylog.h" | |
13 | #include "common/valgrind.h" | |
14 | ||
15 | #include "include/assert.h" | |
16 | #include "include/compat.h" | |
17 | #include "include/on_exit.h" | |
18 | ||
19 | #include "Entry.h" | |
20 | #include "SubsystemMap.h" | |
21 | ||
22 | #define DEFAULT_MAX_NEW 100 | |
23 | #define DEFAULT_MAX_RECENT 10000 | |
24 | ||
25 | #define PREALLOC 1000000 | |
26 | ||
27 | ||
28 | namespace ceph { | |
29 | namespace logging { | |
30 | ||
31 | static OnExitManager exit_callbacks; | |
32 | ||
33 | static void log_on_exit(void *p) | |
34 | { | |
35 | Log *l = *(Log **)p; | |
36 | if (l) | |
37 | l->flush(); | |
38 | delete (Log **)p;// Delete allocated pointer (not Log object, the pointer only!) | |
39 | } | |
40 | ||
// Construct an idle Log bound to a subsystem map.  No output fd is open
// (m_fd == -1) and the flusher thread is not running until start();
// reopen_log_file() must be called before file output can happen.
// Negative sink thresholds appear to disable that sink -- see the
// comparisons in _flush()/_log_message() (TODO confirm level range).
Log::Log(SubsystemMap *s)
  : m_indirect_this(NULL),
    m_subs(s),
    m_queue_mutex_holder(0),
    m_flush_mutex_holder(0),
    m_new(), m_recent(),
    m_fd(-1),
    m_uid(0),
    m_gid(0),
    m_fd_last_error(0),
    m_syslog_log(-2), m_syslog_crash(-2),
    m_stderr_log(1), m_stderr_crash(-1),
    m_graylog_log(-3), m_graylog_crash(-3),
    m_stop(false),
    m_max_new(DEFAULT_MAX_NEW),
    m_max_recent(DEFAULT_MAX_RECENT),
    m_inject_segv(false)
{
  int ret;

  // Initialize the two locks and their condition variables; failure here
  // is unrecoverable, hence the asserts.
  ret = pthread_mutex_init(&m_flush_mutex, NULL);
  assert(ret == 0);

  ret = pthread_mutex_init(&m_queue_mutex, NULL);
  assert(ret == 0);

  ret = pthread_cond_init(&m_cond_loggers, NULL);
  assert(ret == 0);

  ret = pthread_cond_init(&m_cond_flusher, NULL);
  assert(ret == 0);

  // kludge for prealloc testing -- deliberately dead code, kept so the
  // PREALLOC path can be re-enabled by hand for benchmarks.
  if (false)
    for (int i=0; i < PREALLOC; i++)
      m_recent.enqueue(new Entry);
}
78 | ||
// Tear down the Log.  The flusher thread must already be stopped
// (asserted below); the exit callback is disarmed by nulling the
// indirect pointer registered in set_flush_on_exit().
Log::~Log()
{
  if (m_indirect_this) {
    // log_on_exit() checks this pointer, so it becomes a no-op for us.
    *m_indirect_this = NULL;
  }

  assert(!is_started());
  if (m_fd >= 0)
    VOID_TEMP_FAILURE_RETRY(::close(m_fd));

  // Destroy synchronization primitives last, after no thread can use them.
  pthread_mutex_destroy(&m_queue_mutex);
  pthread_mutex_destroy(&m_flush_mutex);
  pthread_cond_destroy(&m_cond_loggers);
  pthread_cond_destroy(&m_cond_flusher);
}
94 | ||
95 | ||
96 | /// | |
97 | ||
// Make sure we flush on shutdown.  We do this by deliberately
// leaking an indirect pointer to ourselves (on_exit() can't
// unregister a callback).  This is not racy only because we
// assume that exit() won't race with ~Log().
void Log::set_flush_on_exit()
{
  if (m_indirect_this == NULL) {
    // Heap-allocate a Log* cell: ~Log() nulls it to disarm the callback,
    // and log_on_exit() frees the cell itself at exit.
    m_indirect_this = new (Log*)(this);
    exit_callbacks.add_callback(log_on_exit, m_indirect_this);
  }
}
109 | ||
// Set the backpressure limit for the incoming queue (see submit_entry()).
// NOTE(review): written without holding m_queue_mutex while submit_entry()
// reads it under that lock -- presumably tolerated as a benign race; confirm.
void Log::set_max_new(int n)
{
  m_max_new = n;
}
114 | ||
// Set how many flushed entries are retained in m_recent for dump_recent().
// Taken under m_flush_mutex because flush() trims against this value.
void Log::set_max_recent(int n)
{
  pthread_mutex_lock(&m_flush_mutex);
  m_flush_mutex_holder = pthread_self();
  m_max_recent = n;
  m_flush_mutex_holder = 0;
  pthread_mutex_unlock(&m_flush_mutex);
}
123 | ||
// Record the log file path.  Takes effect only on the next
// reopen_log_file() call; does not touch the currently open fd.
void Log::set_log_file(string fn)
{
  m_log_file = fn;
}
128 | ||
b32b8144 FG |
// Set the prefix string prepended to every entry echoed to stderr
// (used by _flush() when do_stderr is set).
void Log::set_log_stderr_prefix(const std::string& p)
{
  m_log_stderr_prefix = p;
}
133 | ||
7c673cae FG |
134 | void Log::reopen_log_file() |
135 | { | |
136 | pthread_mutex_lock(&m_flush_mutex); | |
137 | m_flush_mutex_holder = pthread_self(); | |
138 | if (m_fd >= 0) | |
139 | VOID_TEMP_FAILURE_RETRY(::close(m_fd)); | |
140 | if (m_log_file.length()) { | |
141 | m_fd = ::open(m_log_file.c_str(), O_CREAT|O_WRONLY|O_APPEND, 0644); | |
142 | if (m_fd >= 0 && (m_uid || m_gid)) { | |
143 | int r = ::fchown(m_fd, m_uid, m_gid); | |
144 | if (r < 0) { | |
145 | r = -errno; | |
146 | cerr << "failed to chown " << m_log_file << ": " << cpp_strerror(r) | |
147 | << std::endl; | |
148 | } | |
149 | } | |
150 | } else { | |
151 | m_fd = -1; | |
152 | } | |
153 | m_flush_mutex_holder = 0; | |
154 | pthread_mutex_unlock(&m_flush_mutex); | |
155 | } | |
156 | ||
157 | void Log::chown_log_file(uid_t uid, gid_t gid) | |
158 | { | |
159 | pthread_mutex_lock(&m_flush_mutex); | |
160 | if (m_fd >= 0) { | |
161 | int r = ::fchown(m_fd, uid, gid); | |
162 | if (r < 0) { | |
163 | r = -errno; | |
164 | cerr << "failed to chown " << m_log_file << ": " << cpp_strerror(r) | |
165 | << std::endl; | |
166 | } | |
167 | } | |
168 | pthread_mutex_unlock(&m_flush_mutex); | |
169 | } | |
170 | ||
171 | void Log::set_syslog_level(int log, int crash) | |
172 | { | |
173 | pthread_mutex_lock(&m_flush_mutex); | |
174 | m_syslog_log = log; | |
175 | m_syslog_crash = crash; | |
176 | pthread_mutex_unlock(&m_flush_mutex); | |
177 | } | |
178 | ||
179 | void Log::set_stderr_level(int log, int crash) | |
180 | { | |
181 | pthread_mutex_lock(&m_flush_mutex); | |
182 | m_stderr_log = log; | |
183 | m_stderr_crash = crash; | |
184 | pthread_mutex_unlock(&m_flush_mutex); | |
185 | } | |
186 | ||
187 | void Log::set_graylog_level(int log, int crash) | |
188 | { | |
189 | pthread_mutex_lock(&m_flush_mutex); | |
190 | m_graylog_log = log; | |
191 | m_graylog_crash = crash; | |
192 | pthread_mutex_unlock(&m_flush_mutex); | |
193 | } | |
194 | ||
195 | void Log::start_graylog() | |
196 | { | |
197 | pthread_mutex_lock(&m_flush_mutex); | |
198 | if (! m_graylog.get()) | |
199 | m_graylog = std::make_shared<Graylog>(m_subs, "dlog"); | |
200 | pthread_mutex_unlock(&m_flush_mutex); | |
201 | } | |
202 | ||
203 | ||
204 | void Log::stop_graylog() | |
205 | { | |
206 | pthread_mutex_lock(&m_flush_mutex); | |
207 | m_graylog.reset(); | |
208 | pthread_mutex_unlock(&m_flush_mutex); | |
209 | } | |
210 | ||
// Queue one entry for the flusher thread, taking ownership of e.
// Applies backpressure: blocks the calling (logging) thread while more
// than m_max_new entries are already pending, until flush() drains them
// and broadcasts m_cond_loggers.
void Log::submit_entry(Entry *e)
{
  pthread_mutex_lock(&m_queue_mutex);
  m_queue_mutex_holder = pthread_self();

  // Test hook (see inject_segv()): crash deliberately while holding the
  // queue lock so the crash-dump path can be exercised.
  if (m_inject_segv)
    *(volatile int *)(0) = 0xdead;

  // wait for flush to catch up
  while (m_new.m_len > m_max_new)
    pthread_cond_wait(&m_cond_loggers, &m_queue_mutex);

  m_new.enqueue(e);
  // Wake the flusher thread waiting in entry().
  pthread_cond_signal(&m_cond_flusher);
  m_queue_mutex_holder = 0;
  pthread_mutex_unlock(&m_queue_mutex);
}
228 | ||
229 | ||
// Allocate a fresh Entry stamped with the current time, calling thread,
// priority and subsystem.  Caller passes it to submit_entry(), which
// takes ownership.
Entry *Log::create_entry(int level, int subsys)
{
  if (true) {
    return new Entry(ceph_clock_now(),
		     pthread_self(),
		     level, subsys);
  } else {
    // kludge for perf testing: recycle entries preallocated into m_recent
    // (see the PREALLOC block in the constructor).  Dead code unless both
    // branches are toggled by hand; note dequeue() is not checked for NULL.
    Entry *e = m_recent.dequeue();
    e->m_stamp = ceph_clock_now();
    e->m_thread = pthread_self();
    e->m_prio = level;
    e->m_subsys = subsys;
    return e;
  }
}
246 | ||
// Allocate an Entry with an inline payload buffer of *expected_size bytes
// placed immediately after the Entry object (single allocation, placement
// new).  expected_size is a racy size hint updated by Entry (hence the
// relaxed atomic load and the valgrind benign-race annotation).
Entry *Log::create_entry(int level, int subsys, size_t* expected_size)
{
  if (true) {
    ANNOTATE_BENIGN_RACE_SIZED(expected_size, sizeof(*expected_size),
			       "Log hint");
    size_t size = __atomic_load_n(expected_size, __ATOMIC_RELAXED);
    // One block holds the Entry followed by its character buffer.
    void *ptr = ::operator new(sizeof(Entry) + size);
    return new(ptr) Entry(ceph_clock_now(),
       pthread_self(), level, subsys,
       reinterpret_cast<char*>(ptr) + sizeof(Entry), size, expected_size);
  } else {
    // kludge for perf testing: recycle preallocated entries (dead code;
    // see the sibling create_entry overload).
    Entry *e = m_recent.dequeue();
    e->m_stamp = ceph_clock_now();
    e->m_thread = pthread_self();
    e->m_prio = level;
    e->m_subsys = subsys;
    return e;
  }
}
267 | ||
// Drain m_new to the configured sinks and retire flushed entries into
// m_recent (trimmed to m_max_recent).  Lock order: m_flush_mutex is taken
// first, then m_queue_mutex only long enough to steal the pending queue,
// so loggers are blocked only briefly.
void Log::flush()
{
  pthread_mutex_lock(&m_flush_mutex);
  m_flush_mutex_holder = pthread_self();
  pthread_mutex_lock(&m_queue_mutex);
  m_queue_mutex_holder = pthread_self();
  // Steal the whole pending queue in O(1) and release the queue lock.
  EntryQueue t;
  t.swap(m_new);
  // Wake any loggers blocked on the m_max_new backpressure limit.
  pthread_cond_broadcast(&m_cond_loggers);
  m_queue_mutex_holder = 0;
  pthread_mutex_unlock(&m_queue_mutex);
  _flush(&t, &m_recent, false);

  // trim -- oldest retired entries are deleted once past m_max_recent
  while (m_recent.m_len > m_max_recent) {
    delete m_recent.dequeue();
  }

  m_flush_mutex_holder = 0;
  pthread_mutex_unlock(&m_flush_mutex);
}
289 | ||
// Write every entry in *t to the enabled sinks (file fd, syslog, stderr,
// graylog) and move it to *requeue.  With crash=true (dump_recent path)
// every entry is logged regardless of subsystem level and each line is
// prefixed with a countdown index.  Caller must hold m_flush_mutex.
void Log::_flush(EntryQueue *t, EntryQueue *requeue, bool crash)
{
  Entry *e;
  while ((e = t->dequeue()) != NULL) {
    unsigned sub = e->m_subsys;

    bool should_log = crash || m_subs->get_log_level(sub) >= e->m_prio;
    bool do_fd = m_fd >= 0 && should_log;
    // NOTE(review): the syslog/stderr/graylog gates compare against the
    // *_crash thresholds even when crash == false; the non-crash *_log
    // thresholds are only consulted in _log_message().  Verify this is
    // intentional.
    bool do_syslog = m_syslog_crash >= e->m_prio && should_log;
    bool do_stderr = m_stderr_crash >= e->m_prio && should_log;
    bool do_graylog2 = m_graylog_crash >= e->m_prio && should_log;

    // Feed the entry's size back into the allocation hint used by
    // create_entry(level, subsys, expected_size).
    e->hint_size();
    if (do_fd || do_syslog || do_stderr) {
      size_t buflen = 0;

      char *buf;
      size_t buf_size = 80 + e->size();  // 80 bytes of headroom for prefixes
      bool need_dynamic = buf_size >= 0x10000; //avoids >64K buffers
					       //allocation at stack
      // VLA: 1 byte when the heap buffer is used, full size otherwise.
      char buf0[need_dynamic ? 1 : buf_size];
      if (need_dynamic) {
	buf = new char[buf_size];
      } else {
	buf = buf0;
      }

      // Crash dumps count down to the newest entry: "-N> ".
      if (crash)
	buflen += snprintf(buf, buf_size, "%6d> ", -t->m_len);
      buflen += e->m_stamp.sprintf(buf + buflen, buf_size-buflen);
      buflen += snprintf(buf + buflen, buf_size-buflen, " %lx %2d ",
			(unsigned long)e->m_thread, e->m_prio);

      // Reserve the final byte for the '\n' appended on the fd path.
      buflen += e->snprintf(buf + buflen, buf_size - buflen - 1);
      if (buflen > buf_size - 1) { //paranoid check, buf was declared
				   //to hold everything
	buflen = buf_size - 1;
	buf[buflen] = 0;
      }

      if (do_syslog) {
	syslog(LOG_USER|LOG_INFO, "%s", buf);
      }

      if (do_stderr) {
	cerr << m_log_stderr_prefix << buf << std::endl;
      }
      if (do_fd) {
	buf[buflen] = '\n';
	int r = safe_write(m_fd, buf, buflen+1);
	// Report write errors only on state change to avoid spamming stderr.
	if (r != m_fd_last_error) {
	  if (r < 0)
	    cerr << "problem writing to " << m_log_file
		 << ": " << cpp_strerror(r)
		 << std::endl;
	  m_fd_last_error = r;
	}
      }
      if (need_dynamic)
	delete[] buf;
    }
    if (do_graylog2 && m_graylog) {
      m_graylog->log_entry(e);
    }

    // Entry ownership passes to *requeue (normally m_recent).
    requeue->enqueue(e);
  }
}
358 | ||
359 | void Log::_log_message(const char *s, bool crash) | |
360 | { | |
361 | if (m_fd >= 0) { | |
362 | size_t len = strlen(s); | |
363 | std::string b; | |
364 | b.reserve(len + 1); | |
365 | b.append(s, len); | |
366 | b += '\n'; | |
367 | int r = safe_write(m_fd, b.c_str(), b.size()); | |
368 | if (r < 0) | |
369 | cerr << "problem writing to " << m_log_file << ": " << cpp_strerror(r) << std::endl; | |
370 | } | |
371 | if ((crash ? m_syslog_crash : m_syslog_log) >= 0) { | |
372 | syslog(LOG_USER|LOG_INFO, "%s", s); | |
373 | } | |
374 | ||
375 | if ((crash ? m_stderr_crash : m_stderr_log) >= 0) { | |
376 | cerr << s << std::endl; | |
377 | } | |
378 | } | |
379 | ||
380 | void Log::dump_recent() | |
381 | { | |
382 | pthread_mutex_lock(&m_flush_mutex); | |
383 | m_flush_mutex_holder = pthread_self(); | |
384 | ||
385 | pthread_mutex_lock(&m_queue_mutex); | |
386 | m_queue_mutex_holder = pthread_self(); | |
387 | ||
388 | EntryQueue t; | |
389 | t.swap(m_new); | |
390 | ||
391 | m_queue_mutex_holder = 0; | |
392 | pthread_mutex_unlock(&m_queue_mutex); | |
393 | _flush(&t, &m_recent, false); | |
394 | ||
395 | EntryQueue old; | |
396 | _log_message("--- begin dump of recent events ---", true); | |
397 | _flush(&m_recent, &old, true); | |
398 | ||
399 | char buf[4096]; | |
400 | _log_message("--- logging levels ---", true); | |
401 | for (vector<Subsystem>::iterator p = m_subs->m_subsys.begin(); | |
402 | p != m_subs->m_subsys.end(); | |
403 | ++p) { | |
404 | snprintf(buf, sizeof(buf), " %2d/%2d %s", p->log_level, p->gather_level, p->name.c_str()); | |
405 | _log_message(buf, true); | |
406 | } | |
407 | ||
408 | sprintf(buf, " %2d/%2d (syslog threshold)", m_syslog_log, m_syslog_crash); | |
409 | _log_message(buf, true); | |
410 | sprintf(buf, " %2d/%2d (stderr threshold)", m_stderr_log, m_stderr_crash); | |
411 | _log_message(buf, true); | |
412 | sprintf(buf, " max_recent %9d", m_max_recent); | |
413 | _log_message(buf, true); | |
414 | sprintf(buf, " max_new %9d", m_max_new); | |
415 | _log_message(buf, true); | |
416 | sprintf(buf, " log_file %s", m_log_file.c_str()); | |
417 | _log_message(buf, true); | |
418 | ||
419 | _log_message("--- end dump of recent events ---", true); | |
420 | ||
421 | m_flush_mutex_holder = 0; | |
422 | pthread_mutex_unlock(&m_flush_mutex); | |
423 | } | |
424 | ||
// Start the flusher thread (runs Log::entry()).  Must not already be
// started; m_stop is cleared under the queue lock so entry() sees it.
void Log::start()
{
  assert(!is_started());
  pthread_mutex_lock(&m_queue_mutex);
  m_stop = false;
  pthread_mutex_unlock(&m_queue_mutex);
  // Thread name "log"; create() comes from the Thread base class.
  create("log");
}
433 | ||
// Ask the flusher thread to exit and join it.  Both condition variables
// are woken: the flusher (to notice m_stop) and any loggers blocked on
// backpressure in submit_entry().
void Log::stop()
{
  assert(is_started());
  pthread_mutex_lock(&m_queue_mutex);
  m_stop = true;
  pthread_cond_signal(&m_cond_flusher);
  pthread_cond_broadcast(&m_cond_loggers);
  pthread_mutex_unlock(&m_queue_mutex);
  join();
}
444 | ||
// Flusher thread main loop: sleep on m_cond_flusher until entries arrive
// (or m_stop is set), flushing as they appear.  The queue lock is dropped
// around flush() since flush() re-acquires it itself; a final flush drains
// anything queued between the stop signal and loop exit.
void *Log::entry()
{
  pthread_mutex_lock(&m_queue_mutex);
  m_queue_mutex_holder = pthread_self();
  while (!m_stop) {
    if (!m_new.empty()) {
      // Release the lock before flushing; flush() takes both locks.
      m_queue_mutex_holder = 0;
      pthread_mutex_unlock(&m_queue_mutex);
      flush();
      pthread_mutex_lock(&m_queue_mutex);
      m_queue_mutex_holder = pthread_self();
      continue;
    }

    pthread_cond_wait(&m_cond_flusher, &m_queue_mutex);
  }
  m_queue_mutex_holder = 0;
  pthread_mutex_unlock(&m_queue_mutex);
  // Final drain after m_stop so no submitted entry is lost.
  flush();
  return NULL;
}
466 | ||
467 | bool Log::is_inside_log_lock() | |
468 | { | |
469 | return | |
470 | pthread_self() == m_queue_mutex_holder || | |
471 | pthread_self() == m_flush_mutex_holder; | |
472 | } | |
473 | ||
// Arm the test hook: the next submit_entry() will dereference NULL while
// holding the queue lock (exercises crash-dump handling).
void Log::inject_segv()
{
  m_inject_segv = true;
}
478 | ||
// Disarm the inject_segv() test hook.
void Log::reset_segv()
{
  m_inject_segv = false;
}
483 | ||
484 | } // ceph::logging:: | |
485 | } // ceph:: |