1 /* SPDX-License-Identifier: GPL-2.0 */
3 #define TRACE_SYSTEM kmem
5 #if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
8 #include <linux/types.h>
9 #include <linux/tracepoint.h>
10 #include <trace/events/mmflags.h>
12 DECLARE_EVENT_CLASS(kmem_alloc
,
14 TP_PROTO(unsigned long call_site
,
20 TP_ARGS(call_site
, ptr
, bytes_req
, bytes_alloc
, gfp_flags
),
23 __field( unsigned long, call_site
)
24 __field( const void *, ptr
)
25 __field( size_t, bytes_req
)
26 __field( size_t, bytes_alloc
)
27 __field( gfp_t
, gfp_flags
)
31 __entry
->call_site
= call_site
;
33 __entry
->bytes_req
= bytes_req
;
34 __entry
->bytes_alloc
= bytes_alloc
;
35 __entry
->gfp_flags
= gfp_flags
;
38 TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
39 (void *)__entry
->call_site
,
43 show_gfp_flags(__entry
->gfp_flags
))
46 DEFINE_EVENT(kmem_alloc
, kmalloc
,
48 TP_PROTO(unsigned long call_site
, const void *ptr
,
49 size_t bytes_req
, size_t bytes_alloc
, gfp_t gfp_flags
),
51 TP_ARGS(call_site
, ptr
, bytes_req
, bytes_alloc
, gfp_flags
)
54 DEFINE_EVENT(kmem_alloc
, kmem_cache_alloc
,
56 TP_PROTO(unsigned long call_site
, const void *ptr
,
57 size_t bytes_req
, size_t bytes_alloc
, gfp_t gfp_flags
),
59 TP_ARGS(call_site
, ptr
, bytes_req
, bytes_alloc
, gfp_flags
)
62 DECLARE_EVENT_CLASS(kmem_alloc_node
,
64 TP_PROTO(unsigned long call_site
,
71 TP_ARGS(call_site
, ptr
, bytes_req
, bytes_alloc
, gfp_flags
, node
),
74 __field( unsigned long, call_site
)
75 __field( const void *, ptr
)
76 __field( size_t, bytes_req
)
77 __field( size_t, bytes_alloc
)
78 __field( gfp_t
, gfp_flags
)
83 __entry
->call_site
= call_site
;
85 __entry
->bytes_req
= bytes_req
;
86 __entry
->bytes_alloc
= bytes_alloc
;
87 __entry
->gfp_flags
= gfp_flags
;
91 TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
92 (void *)__entry
->call_site
,
96 show_gfp_flags(__entry
->gfp_flags
),
100 DEFINE_EVENT(kmem_alloc_node
, kmalloc_node
,
102 TP_PROTO(unsigned long call_site
, const void *ptr
,
103 size_t bytes_req
, size_t bytes_alloc
,
104 gfp_t gfp_flags
, int node
),
106 TP_ARGS(call_site
, ptr
, bytes_req
, bytes_alloc
, gfp_flags
, node
)
109 DEFINE_EVENT(kmem_alloc_node
, kmem_cache_alloc_node
,
111 TP_PROTO(unsigned long call_site
, const void *ptr
,
112 size_t bytes_req
, size_t bytes_alloc
,
113 gfp_t gfp_flags
, int node
),
115 TP_ARGS(call_site
, ptr
, bytes_req
, bytes_alloc
, gfp_flags
, node
)
120 TP_PROTO(unsigned long call_site
, const void *ptr
),
122 TP_ARGS(call_site
, ptr
),
125 __field( unsigned long, call_site
)
126 __field( const void *, ptr
)
130 __entry
->call_site
= call_site
;
134 TP_printk("call_site=%pS ptr=%p",
135 (void *)__entry
->call_site
, __entry
->ptr
)
138 TRACE_EVENT(kmem_cache_free
,
140 TP_PROTO(unsigned long call_site
, const void *ptr
, const char *name
),
142 TP_ARGS(call_site
, ptr
, name
),
145 __field( unsigned long, call_site
)
146 __field( const void *, ptr
)
147 __string( name
, name
)
151 __entry
->call_site
= call_site
;
153 __assign_str(name
, name
);
156 TP_printk("call_site=%pS ptr=%p name=%s",
157 (void *)__entry
->call_site
, __entry
->ptr
, __get_str(name
))
160 TRACE_EVENT(mm_page_free
,
162 TP_PROTO(struct page
*page
, unsigned int order
),
164 TP_ARGS(page
, order
),
167 __field( unsigned long, pfn
)
168 __field( unsigned int, order
)
172 __entry
->pfn
= page_to_pfn(page
);
173 __entry
->order
= order
;
176 TP_printk("page=%p pfn=0x%lx order=%d",
177 pfn_to_page(__entry
->pfn
),
182 TRACE_EVENT(mm_page_free_batched
,
184 TP_PROTO(struct page
*page
),
189 __field( unsigned long, pfn
)
193 __entry
->pfn
= page_to_pfn(page
);
196 TP_printk("page=%p pfn=0x%lx order=0",
197 pfn_to_page(__entry
->pfn
),
201 TRACE_EVENT(mm_page_alloc
,
203 TP_PROTO(struct page
*page
, unsigned int order
,
204 gfp_t gfp_flags
, int migratetype
),
206 TP_ARGS(page
, order
, gfp_flags
, migratetype
),
209 __field( unsigned long, pfn
)
210 __field( unsigned int, order
)
211 __field( gfp_t
, gfp_flags
)
212 __field( int, migratetype
)
216 __entry
->pfn
= page
? page_to_pfn(page
) : -1UL;
217 __entry
->order
= order
;
218 __entry
->gfp_flags
= gfp_flags
;
219 __entry
->migratetype
= migratetype
;
222 TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d gfp_flags=%s",
223 __entry
->pfn
!= -1UL ? pfn_to_page(__entry
->pfn
) : NULL
,
224 __entry
->pfn
!= -1UL ? __entry
->pfn
: 0,
226 __entry
->migratetype
,
227 show_gfp_flags(__entry
->gfp_flags
))
230 DECLARE_EVENT_CLASS(mm_page
,
232 TP_PROTO(struct page
*page
, unsigned int order
, int migratetype
),
234 TP_ARGS(page
, order
, migratetype
),
237 __field( unsigned long, pfn
)
238 __field( unsigned int, order
)
239 __field( int, migratetype
)
243 __entry
->pfn
= page
? page_to_pfn(page
) : -1UL;
244 __entry
->order
= order
;
245 __entry
->migratetype
= migratetype
;
248 TP_printk("page=%p pfn=0x%lx order=%u migratetype=%d percpu_refill=%d",
249 __entry
->pfn
!= -1UL ? pfn_to_page(__entry
->pfn
) : NULL
,
250 __entry
->pfn
!= -1UL ? __entry
->pfn
: 0,
252 __entry
->migratetype
,
256 DEFINE_EVENT(mm_page
, mm_page_alloc_zone_locked
,
258 TP_PROTO(struct page
*page
, unsigned int order
, int migratetype
),
260 TP_ARGS(page
, order
, migratetype
)
263 TRACE_EVENT(mm_page_pcpu_drain
,
265 TP_PROTO(struct page
*page
, unsigned int order
, int migratetype
),
267 TP_ARGS(page
, order
, migratetype
),
270 __field( unsigned long, pfn
)
271 __field( unsigned int, order
)
272 __field( int, migratetype
)
276 __entry
->pfn
= page
? page_to_pfn(page
) : -1UL;
277 __entry
->order
= order
;
278 __entry
->migratetype
= migratetype
;
281 TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d",
282 pfn_to_page(__entry
->pfn
), __entry
->pfn
,
283 __entry
->order
, __entry
->migratetype
)
286 TRACE_EVENT(mm_page_alloc_extfrag
,
288 TP_PROTO(struct page
*page
,
289 int alloc_order
, int fallback_order
,
290 int alloc_migratetype
, int fallback_migratetype
),
293 alloc_order
, fallback_order
,
294 alloc_migratetype
, fallback_migratetype
),
297 __field( unsigned long, pfn
)
298 __field( int, alloc_order
)
299 __field( int, fallback_order
)
300 __field( int, alloc_migratetype
)
301 __field( int, fallback_migratetype
)
302 __field( int, change_ownership
)
306 __entry
->pfn
= page_to_pfn(page
);
307 __entry
->alloc_order
= alloc_order
;
308 __entry
->fallback_order
= fallback_order
;
309 __entry
->alloc_migratetype
= alloc_migratetype
;
310 __entry
->fallback_migratetype
= fallback_migratetype
;
311 __entry
->change_ownership
= (alloc_migratetype
==
312 get_pageblock_migratetype(page
));
315 TP_printk("page=%p pfn=0x%lx alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
316 pfn_to_page(__entry
->pfn
),
318 __entry
->alloc_order
,
319 __entry
->fallback_order
,
321 __entry
->alloc_migratetype
,
322 __entry
->fallback_migratetype
,
323 __entry
->fallback_order
< pageblock_order
,
324 __entry
->change_ownership
)
328 * Required for uniquely and securely identifying mm in rss_stat tracepoint.
330 #ifndef __PTR_TO_HASHVAL
331 static unsigned int __maybe_unused
mm_ptr_to_hash(const void *ptr
)
334 unsigned long hashval
;
336 ret
= ptr_to_hashval(ptr
, &hashval
);
340 /* The hashed value is only 32-bit */
341 return (unsigned int)hashval
;
343 #define __PTR_TO_HASHVAL
/*
 * NOTE(review): the member list and #undef scaffolding were lost in this
 * chunk; restored per the standard EM/EMe tracepoint-enum pattern using
 * the MM_* rss counters — confirm the list against the upstream header.
 * First expansion registers the enums with the tracing core
 * (TRACE_DEFINE_ENUM); the second builds the symbol table used by
 * __print_symbolic() in the rss_stat event below.
 */
#define TRACE_MM_PAGES		\
	EM(MM_FILEPAGES)	\
	EM(MM_ANONPAGES)	\
	EM(MM_SWAPENTS)		\
	EMe(MM_SHMEMPAGES)

#undef EM
#undef EMe

#define EM(a)	TRACE_DEFINE_ENUM(a);
#define EMe(a)	TRACE_DEFINE_ENUM(a);

TRACE_MM_PAGES

#undef EM
#undef EMe

#define EM(a)	{ a, #a },
#define EMe(a)	{ a, #a }
366 TRACE_EVENT(rss_stat
,
368 TP_PROTO(struct mm_struct
*mm
,
372 TP_ARGS(mm
, member
, count
),
375 __field(unsigned int, mm_id
)
376 __field(unsigned int, curr
)
382 __entry
->mm_id
= mm_ptr_to_hash(mm
);
383 __entry
->curr
= !!(current
->mm
== mm
);
384 __entry
->member
= member
;
385 __entry
->size
= (count
<< PAGE_SHIFT
);
388 TP_printk("mm_id=%u curr=%d type=%s size=%ldB",
391 __print_symbolic(__entry
->member
, TRACE_MM_PAGES
),
394 #endif /* _TRACE_KMEM_H */
396 /* This part must be outside protection */
397 #include <trace/define_trace.h>