ftrace: Add information on number of page groups allocated
author Steven Rostedt (VMware) <rostedt@goodmis.org>
Tue, 1 Oct 2019 18:38:07 +0000 (14:38 -0400)
committer Steven Rostedt (VMware) <rostedt@goodmis.org>
Wed, 13 Nov 2019 14:37:28 +0000 (09:37 -0500)
When looking for ways to shrink the size of the dyn_ftrace structure, knowing
how many pages are allocated for it, and how many groups those pages fall
into, is useful in working out the best ways to save memory.

This adds an info print of how many pages and groups of pages were used to
allocate the ftrace dyn_ftrace structures, and also reports the number of
pages and groups in the dyn_ftrace_total_info file (which is used for debugging).
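
For context, each "group" here is one higher-order page allocation, so a group
allocated at a given order contributes (1 << order) pages. The following is a
minimal standalone userspace C sketch of that accounting, not kernel code; the
variable and function names are illustrative only:

    #include <stdio.h>

    /*
     * Sketch of the page/group accounting added by this patch, assuming
     * each group is a single allocation of (1 << order) pages.
     */
    static unsigned long nr_pages;
    static unsigned long nr_groups;

    static void account_alloc(int order)
    {
            nr_pages += 1UL << order;   /* pages in this group */
            nr_groups++;                /* one group per allocation */
    }

    static void account_free(int order)
    {
            nr_pages -= 1UL << order;
            nr_groups--;
    }

    int main(void)
    {
            account_alloc(3);   /* e.g. one order-3 group = 8 pages */
            account_alloc(0);   /* one order-0 group = 1 page */
            printf("allocated %lu pages with %lu groups\n",
                   nr_pages, nr_groups);
            account_free(0);
            return 0;
    }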

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace.h

index f9456346ec6673097658a675cfb0d867b9f2168e..d2d488c43a6a019ea47fb1189662bef2e788b03e 100644 (file)
@@ -2991,6 +2991,8 @@ static void ftrace_shutdown_sysctl(void)
 
 static u64             ftrace_update_time;
 unsigned long          ftrace_update_tot_cnt;
+unsigned long          ftrace_number_of_pages;
+unsigned long          ftrace_number_of_groups;
 
 static inline int ops_traces_mod(struct ftrace_ops *ops)
 {
@@ -3115,6 +3117,9 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
                goto again;
        }
 
+       ftrace_number_of_pages += 1 << order;
+       ftrace_number_of_groups++;
+
        cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
        pg->size = cnt;
 
@@ -3170,6 +3175,8 @@ ftrace_allocate_pages(unsigned long num_to_init)
                start_pg = pg->next;
                kfree(pg);
                pg = start_pg;
+               ftrace_number_of_pages -= 1 << order;
+               ftrace_number_of_groups--;
        }
        pr_info("ftrace: FAILED to allocate memory for functions\n");
        return NULL;
@@ -6173,6 +6180,8 @@ void ftrace_release_mod(struct module *mod)
                free_pages((unsigned long)pg->records, order);
                tmp_page = pg->next;
                kfree(pg);
+               ftrace_number_of_pages -= 1 << order;
+               ftrace_number_of_groups--;
        }
 }
 
@@ -6514,6 +6523,8 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
                        *last_pg = pg->next;
                        order = get_count_order(pg->size / ENTRIES_PER_PAGE);
                        free_pages((unsigned long)pg->records, order);
+                       ftrace_number_of_pages -= 1 << order;
+                       ftrace_number_of_groups--;
                        kfree(pg);
                        pg = container_of(last_pg, struct ftrace_page, next);
                        if (!(*last_pg))
@@ -6569,6 +6580,9 @@ void __init ftrace_init(void)
                                  __start_mcount_loc,
                                  __stop_mcount_loc);
 
+       pr_info("ftrace: allocated %ld pages with %ld groups\n",
+               ftrace_number_of_pages, ftrace_number_of_groups);
+
        set_ftrace_early_filters();
 
        return;
index 6a0ee91783656afc85e9fe6ec37a802f2b7a4dd1..5ea8c7c0f2d77d0eaba80860e45839729cc3e2b0 100644 (file)
@@ -7583,14 +7583,23 @@ static ssize_t
 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
                  size_t cnt, loff_t *ppos)
 {
-       unsigned long *p = filp->private_data;
-       char buf[64]; /* Not too big for a shallow stack */
+       ssize_t ret;
+       char *buf;
        int r;
 
-       r = scnprintf(buf, 63, "%ld", *p);
-       buf[r++] = '\n';
+       /* 256 should be plenty to hold the amount needed */
+       buf = kmalloc(256, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
 
-       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+       r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
+                     ftrace_update_tot_cnt,
+                     ftrace_number_of_pages,
+                     ftrace_number_of_groups);
+
+       ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+       kfree(buf);
+       return ret;
 }
 
 static const struct file_operations tracing_dyn_info_fops = {
@@ -8782,7 +8791,7 @@ static __init int tracer_init_tracefs(void)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
        trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
-                       &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
+                       NULL, &tracing_dyn_info_fops);
 #endif
 
        create_trace_instances(d_tracer);
index d685c61085c0ddd028ce90db179dc22c8c4630c4..8b590f10bc72b4eae90cafa8dfa94ab9a2161ece 100644 (file)
@@ -804,6 +804,8 @@ extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 extern unsigned long ftrace_update_tot_cnt;
+extern unsigned long ftrace_number_of_pages;
+extern unsigned long ftrace_number_of_groups;
 void ftrace_init_trace_array(struct trace_array *tr);
 #else
 static inline void ftrace_init_trace_array(struct trace_array *tr) { }