From: Steven Rostedt (Red Hat)
Date: Tue, 5 Mar 2013 04:05:12 +0000 (-0500)
Subject: tracing: Only clear trace buffer on module unload if event was traced
X-Git-Tag: Ubuntu-snapdragon-4.4.0-1050.54~12690^2~58
X-Git-Url: https://git.proxmox.com/?a=commitdiff_plain;h=575380da8b46969a2c6a7e14a51056a63b30fe2e;p=mirror_ubuntu-artful-kernel.git

tracing: Only clear trace buffer on module unload if event was traced

Currently, when a module with events is unloaded, the trace buffer is
cleared. This is just a safety net in case the module might have some
strange callback when its event is output. But there is no reason to
reset the buffer if none of the module's events were ever traced.

Add a flag, WAS_ENABLED, to the event "call" structure that is set
whenever the event is enabled and is never cleared. When a module is
unloaded, the trace buffer is cleared only if at least one of its
events has this flag set.

Signed-off-by: Steven Rostedt
---

diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 0b0814d90164..d6964244e567 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -197,6 +197,7 @@ enum {
 	TRACE_EVENT_FL_CAP_ANY_BIT,
 	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
 	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
+	TRACE_EVENT_FL_WAS_ENABLED_BIT,
 };
 
 /*
@@ -205,12 +206,16 @@ enum {
  *  CAP_ANY	  - Any user can enable for perf
  *  NO_SET_FILTER - Set when filter has error and is to be ignored
  *  IGNORE_ENABLE - For ftrace internal events, do not enable with debugfs file
+ *  WAS_ENABLED   - Set and stays set when an event was ever enabled
+ *                    (used for module unloading, if a module event is enabled,
+ *                     it is best to clear the buffers that used it).
  */
 enum {
 	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
 	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
 	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
 	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
+	TRACE_EVENT_FL_WAS_ENABLED	= (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
 };
 
 struct ftrace_event_call {
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 0f1307a29fcf..9a7dc4bf1171 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -245,6 +245,9 @@ static int ftrace_event_enable_disable(struct ftrace_event_file *file,
 				break;
 			}
 			file->flags |= FTRACE_EVENT_FL_ENABLED;
+
+			/* WAS_ENABLED gets set but never cleared. */
+			call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
 		}
 		break;
 	}
@@ -1626,12 +1629,13 @@ static void trace_module_remove_events(struct module *mod)
 {
 	struct ftrace_module_file_ops *file_ops;
 	struct ftrace_event_call *call, *p;
-	bool found = false;
+	bool clear_trace = false;
 
 	down_write(&trace_event_mutex);
 	list_for_each_entry_safe(call, p, &ftrace_events, list) {
 		if (call->mod == mod) {
-			found = true;
+			if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
+				clear_trace = true;
 			__trace_remove_event_call(call);
 		}
 	}
@@ -1648,9 +1652,9 @@ static void trace_module_remove_events(struct module *mod)
 
 	/*
 	 * It is safest to reset the ring buffer if the module being unloaded
-	 * registered any events.
+	 * registered any events that were used.
 	 */
-	if (found)
+	if (clear_trace)
 		tracing_reset_current_online_cpus();
 	up_write(&trace_event_mutex);
 }
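
The change relies on a "sticky" status bit: TRACE_EVENT_FL_WAS_ENABLED is ORed
into call->flags on every enable and is never cleared, so the module-unload
path only needs one bitmask test per event to decide whether the buffer was
ever touched. The user-space sketch below illustrates the same pattern in
isolation; the names (event_call, event_enable, module_needs_buffer_reset) are
hypothetical stand-ins, not the kernel API.

/*
 * Minimal user-space sketch of the sticky-flag pattern used by the patch,
 * under assumed names. Build with: cc -std=c99 sketch.c
 */
#include <stdbool.h>
#include <stdio.h>

enum {
	EVENT_FL_ENABLED	= (1 << 0),	/* event is currently enabled */
	EVENT_FL_WAS_ENABLED	= (1 << 1),	/* set once, never cleared */
};

struct event_call {
	const char	*name;
	unsigned int	 flags;
};

static void event_enable(struct event_call *call)
{
	call->flags |= EVENT_FL_ENABLED;
	/* Sticky: remember the event was used at least once. */
	call->flags |= EVENT_FL_WAS_ENABLED;
}

static void event_disable(struct event_call *call)
{
	/* ENABLED is cleared; WAS_ENABLED intentionally is not. */
	call->flags &= ~EVENT_FL_ENABLED;
}

/* Decide whether an unloading module's buffers need clearing. */
static bool module_needs_buffer_reset(struct event_call *calls, int n)
{
	bool clear_trace = false;
	int i;

	for (i = 0; i < n; i++) {
		if (calls[i].flags & EVENT_FL_WAS_ENABLED)
			clear_trace = true;
	}
	return clear_trace;
}

int main(void)
{
	struct event_call calls[] = {
		{ .name = "mod_event_a", .flags = 0 },
		{ .name = "mod_event_b", .flags = 0 },
	};

	/* Nothing was ever traced: no reset needed. */
	printf("reset? %d\n", module_needs_buffer_reset(calls, 2));

	/* Enable then disable one event: the sticky bit survives. */
	event_enable(&calls[0]);
	event_disable(&calls[0]);
	printf("reset? %d\n", module_needs_buffer_reset(calls, 2));

	return 0;
}

The appeal of the pattern is that it needs no extra bookkeeping structure:
disabling an event clears ENABLED but deliberately leaves WAS_ENABLED set,
mirroring the comment the patch adds in ftrace_event_enable_disable().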