// SPDX-License-Identifier: GPL-2.0
#define CREATE_TRACE_POINTS
#include <trace/events/mmap_lock.h>

#include <linux/mm.h>
#include <linux/cgroup.h>
#include <linux/memcontrol.h>
#include <linux/mmap_lock.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/trace_events.h>
#include <linux/local_lock.h>
EXPORT_TRACEPOINT_SYMBOL(mmap_lock_start_locking);
EXPORT_TRACEPOINT_SYMBOL(mmap_lock_acquire_returned);
EXPORT_TRACEPOINT_SYMBOL(mmap_lock_released);
#ifdef CONFIG_MEMCG

/*
 * Our various events all share the same buffer (because we don't want or need
 * to allocate a set of buffers *per event type*), so we need to protect against
 * concurrent _reg() and _unreg() calls, and count how many _reg() calls have
 * been made.
 */
static DEFINE_MUTEX(reg_lock);
static int reg_refcount; /* Protected by reg_lock. */
/*
 * Size of the buffer for memcg path names. Ignoring stack trace support,
 * trace_events_hist.c uses MAX_FILTER_STR_VAL for this, so we also use it.
 */
#define MEMCG_PATH_BUF_SIZE MAX_FILTER_STR_VAL
/*
 * How many contexts our trace events might be called in: normal, softirq, irq,
 * and possibly NMI.
 */
#define CONTEXT_COUNT 4
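
/*
 * Each CPU's buffer holds CONTEXT_COUNT slots of MEMCG_PATH_BUF_SIZE bytes,
 * one per context that can nest on that CPU. buf_idx counts how many bytes of
 * the buffer are currently claimed, so an event that interrupts another one on
 * the same CPU simply claims the next slot rather than overwriting a path that
 * is still in use below it.
 */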
struct memcg_path {
	local_lock_t lock;
	char __rcu *buf;
	local_t buf_idx;
};
static DEFINE_PER_CPU(struct memcg_path, memcg_paths) = {
	.lock = INIT_LOCAL_LOCK(lock),
	.buf_idx = LOCAL_INIT(0),
};

static char **tmp_bufs;
/* Called with reg_lock held. */
static void free_memcg_path_bufs(void)
{
	struct memcg_path *memcg_path;
	int cpu;
	char **old = tmp_bufs;

	for_each_possible_cpu(cpu) {
		memcg_path = per_cpu_ptr(&memcg_paths, cpu);
		*(old++) = rcu_dereference_protected(memcg_path->buf,
						     lockdep_is_held(&reg_lock));
		rcu_assign_pointer(memcg_path->buf, NULL);
	}

	/* Wait for inflight memcg_path_buf users to finish. */
	synchronize_rcu();

	old = tmp_bufs;
	for_each_possible_cpu(cpu) {
		kfree(*(old++));
	}

	kfree(tmp_bufs);
	tmp_bufs = NULL;
}
int trace_mmap_lock_reg(void)
{
	int cpu;
	char *new;

	mutex_lock(&reg_lock);

	/* If the refcount is going 0->1, proceed with allocating buffers. */
	if (reg_refcount++)
		goto out;

	tmp_bufs = kmalloc_array(num_possible_cpus(), sizeof(*tmp_bufs),
				 GFP_KERNEL);
	if (tmp_bufs == NULL)
		goto out_fail;

	for_each_possible_cpu(cpu) {
		new = kmalloc(MEMCG_PATH_BUF_SIZE * CONTEXT_COUNT, GFP_KERNEL);
		if (new == NULL)
			goto out_fail_free;
		rcu_assign_pointer(per_cpu_ptr(&memcg_paths, cpu)->buf, new);
		/* Don't need to wait for inflights, they'd have gotten NULL. */
	}

out:
	mutex_unlock(&reg_lock);
	return 0;

out_fail_free:
	free_memcg_path_bufs();
out_fail:
	/* Since we failed, undo the earlier ref increment. */
	--reg_refcount;

	mutex_unlock(&reg_lock);
	return -ENOMEM;
}
void trace_mmap_lock_unreg(void)
{
	mutex_lock(&reg_lock);

	/* If the refcount is going 1->0, proceed with freeing buffers. */
	if (--reg_refcount)
		goto out;

	free_memcg_path_bufs();

out:
	mutex_unlock(&reg_lock);
}
static inline char *get_memcg_path_buf(void)
{
	struct memcg_path *memcg_path = this_cpu_ptr(&memcg_paths);
	char *buf;
	int idx;

	rcu_read_lock();
	buf = rcu_dereference(memcg_path->buf);
	if (buf == NULL) {
		rcu_read_unlock();
		return NULL;
	}
	idx = local_add_return(MEMCG_PATH_BUF_SIZE, &memcg_path->buf_idx) -
	      MEMCG_PATH_BUF_SIZE;
	return &buf[idx];
}
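
/*
 * Release the slot claimed by the matching get_memcg_path_buf() call and leave
 * the RCU read-side critical section it entered.
 */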
static inline void put_memcg_path_buf(void)
{
	local_sub(MEMCG_PATH_BUF_SIZE, &this_cpu_ptr(&memcg_paths)->buf_idx);
	rcu_read_unlock();
}
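
/*
 * Wrapper used by the __mmap_lock_do_trace_*() functions below: take this
 * CPU's lock so the task cannot migrate away from the buffer it is using,
 * format the memcg path into a slot, emit the trace event, then release the
 * slot again.
 */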
#define TRACE_MMAP_LOCK_EVENT(type, mm, ...)                                   \
	do {                                                                   \
		const char *memcg_path;                                        \
		local_lock(&memcg_paths.lock);                                 \
		memcg_path = get_mm_memcg_path(mm);                            \
		trace_mmap_lock_##type(mm,                                     \
				       memcg_path != NULL ? memcg_path : "",   \
				       ##__VA_ARGS__);                         \
		if (likely(memcg_path != NULL))                                \
			put_memcg_path_buf();                                  \
		local_unlock(&memcg_paths.lock);                               \
	} while (0)
#else /* !CONFIG_MEMCG */

int trace_mmap_lock_reg(void)
{
	return 0;
}

void trace_mmap_lock_unreg(void)
{
}

#define TRACE_MMAP_LOCK_EVENT(type, mm, ...) \
	trace_mmap_lock_##type(mm, "", ##__VA_ARGS__)

#endif /* CONFIG_MEMCG */
#ifdef CONFIG_TRACING
#ifdef CONFIG_MEMCG
/*
 * Write the given mm_struct's memcg path to a percpu buffer, and return a
 * pointer to it. If the path cannot be determined, or no buffer was available
 * (because the trace event is being unregistered), NULL is returned.
 *
 * Note: buffers are allocated per-cpu to avoid locking, so preemption must be
 * disabled by the caller before calling us, and re-enabled only after the
 * caller is done with the pointer.
 *
 * The caller must call put_memcg_path_buf() once the buffer is no longer
 * needed. This must be done while preemption is still disabled.
 */
static const char *get_mm_memcg_path(struct mm_struct *mm)
{
	char *buf = NULL;
	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);

	if (memcg == NULL)
		goto out;
	if (unlikely(memcg->css.cgroup == NULL))
		goto out_put;

	buf = get_memcg_path_buf();
	if (buf == NULL)
		goto out_put;

	cgroup_path(memcg->css.cgroup, buf, MEMCG_PATH_BUF_SIZE);

out_put:
	css_put(&memcg->css);
out:
	return buf;
}
#endif /* CONFIG_MEMCG */
/*
 * Trace calls must be in a separate file, as otherwise there's a circular
 * dependency between linux/mmap_lock.h and trace/events/mmap_lock.h.
 */
void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write)
{
	TRACE_MMAP_LOCK_EVENT(start_locking, mm, write);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_start_locking);
void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
					   bool success)
{
	TRACE_MMAP_LOCK_EVENT(acquire_returned, mm, write, success);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_acquire_returned);
void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write)
{
	TRACE_MMAP_LOCK_EVENT(released, mm, write);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_released);
#endif /* CONFIG_TRACING */