// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#define pr_fmt(fmt)	"DMA-API: " fmt

#include <linux/sched/task_stack.h>
#include <linux/scatterlist.h>
#include <linux/dma-map-ops.h>
#include <linux/sched/task.h>
#include <linux/stacktrace.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>

#define HASH_SIZE       16384ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
/* If the pool runs out, add this many new entries at once */
#define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry))

enum {
	dma_debug_single,
	dma_debug_sg,
	dma_debug_coherent,
	dma_debug_resource,
};

enum map_err_types {
	MAP_ERR_CHECK_NOT_APPLICABLE,
	MAP_ERR_NOT_CHECKED,
	MAP_ERR_CHECKED,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

/**
 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
 * @list: node on pre-allocated free_entries list
 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
 * @size: length of the mapping
 * @type: single, page, sg, coherent
 * @direction: enum dma_data_direction
 * @sg_call_ents: 'nents' from dma_map_sg
 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
 * @pfn: page frame of the start address
 * @offset: offset of mapping relative to pfn
 * @map_err_type: track whether dma_mapping_error() was checked
 * @stack_len: number of entries in @stack_entries
 * @stack_entries: stack of the mapping caller, to support backtraces when
 *                 a violation is detected
 */
struct dma_debug_entry {
	struct list_head	list;
	struct device		*dev;
	u64			dev_addr;
	u64			size;
	int			type;
	int			direction;
	int			sg_call_ents;
	int			sg_mapped_ents;
	unsigned long		pfn;
	size_t			offset;
	enum map_err_types	map_err_type;
#ifdef CONFIG_STACKTRACE
	unsigned int		stack_len;
	unsigned long		stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
} ____cacheline_aligned_in_smp;

typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
};

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Early initialization disable flag, set at the end of dma_debug_init */
static bool dma_debug_initialized __read_mostly;

static inline bool dma_debug_disabled(void)
{
	return global_disable || !dma_debug_initialized;
}

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;

/* per-driver filter related state */

#define NAME_MAX_LEN	64

static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver                    __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);

static const char *const maperr2str[] = {
	[MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
	[MAP_ERR_NOT_CHECKED] = "dma map error not checked",
	[MAP_ERR_CHECKED] = "dma map error checked",
};

static const char *type2name[] = {
	[dma_debug_single] = "single",
	[dma_debug_sg] = "scatter-gather",
	[dma_debug_coherent] = "coherent",
	[dma_debug_resource] = "resource",
};

static const char *dir2name[] = {
	[DMA_BIDIRECTIONAL] = "DMA_BIDIRECTIONAL",
	[DMA_TO_DEVICE] = "DMA_TO_DEVICE",
	[DMA_FROM_DEVICE] = "DMA_FROM_DEVICE",
	[DMA_NONE] = "DMA_NONE",
};

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		pr_warn("Mapped at:\n");
		stack_trace_print(entry->stack_entries, entry->stack_len, 0);
	}
#endif
}

static bool driver_filter(struct device *dev)
{
	struct device_driver *drv;
	unsigned long flags;
	bool ret;

	/* driver filter off */
	if (likely(!current_driver_name[0]))
		return true;

	/* driver filter on and initialized */
	if (current_driver && dev && dev->driver == current_driver)
		return true;

	/* driver filter on, but we can't filter on a NULL device... */
	if (!dev)
		return false;

	if (current_driver || !current_driver_name[0])
		return false;

	/* driver filter on but not yet initialized */
	drv = dev->driver;
	if (!drv)
		return false;

	/* lock to protect against change of current_driver_name */
	read_lock_irqsave(&driver_name_lock, flags);

	ret = false;
	if (drv->name &&
	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
		current_driver = drv;
		ret = true;
	}

	read_unlock_irqrestore(&driver_name_lock, flags);

	return ret;
}

#define err_printk(dev, entry, format, arg...) do {			\
		error_count += 1;					\
		if (driver_filter(dev) &&				\
		    (show_all_errors || show_num_errors > 0)) {		\
			WARN(1, pr_fmt("%s %s: ") format,		\
			     dev ? dev_driver_string(dev) : "NULL",	\
			     dev ? dev_name(dev) : "NULL", ## arg);	\
			dump_entry_trace(entry);			\
		}							\
		if (!show_all_errors && show_num_errors > 0)		\
			show_num_errors -= 1;				\
	} while (0);
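
/*
 * A triggered check lands in the kernel log as a WARN splat prefixed by
 * pr_fmt, roughly along these lines (illustrative output, the driver and
 * device names are made up):
 *
 *   DMA-API: e1000e 0000:00:19.0: device driver frees DMA memory with
 *   different size [device address=0x00000000123456f0]
 *   [map size=1544 bytes] [unmap size=64 bytes]
 */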

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * Hash function is based on the dma address.
	 * We use bits 13-26 of the dma address as the index into the hash.
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}
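
/*
 * Worked example, assuming the defines above: a mapping at dev_addr
 * 0x12345678 hashes to bucket (0x12345678 >> 13) & 0x3fff == 0x11a2, so
 * all entries whose dma addresses share bits 13-26 end up on the same
 * hash list, protected by the same bucket lock.
 */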

/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
	__acquires(&dma_entry_hash[idx].lock)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long flags)
	__releases(&bucket->lock)
{
	spin_unlock_irqrestore(&bucket->lock, flags);
}

static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
	return (a->dev_addr == b->dev_addr) && (a->dev == b->dev);
}

static bool containing_match(struct dma_debug_entry *a,
			     struct dma_debug_entry *b)
{
	if (a->dev != b->dev)
		return false;

	if ((b->dev_addr <= a->dev_addr) &&
	    ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
		return true;

	return false;
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
						  struct dma_debug_entry *ref,
						  match_fn match)
{
	struct dma_debug_entry *entry, *ret = NULL;
	int matches = 0, match_lvl, last_lvl = -1;

	list_for_each_entry(entry, &bucket->list, list) {
		if (!match(ref, entry))
			continue;

		/*
		 * Some drivers map the same physical address multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which returns the entry from
		 * the hash which fits best to the reference value
		 * instead of the first-fit.
		 */
		matches += 1;
		match_lvl = 0;
		entry->size         == ref->size         ? ++match_lvl : 0;
		entry->type         == ref->type         ? ++match_lvl : 0;
		entry->direction    == ref->direction    ? ++match_lvl : 0;
		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;

		if (match_lvl == 4) {
			/* perfect-fit - return the result */
			return entry;
		} else if (match_lvl > last_lvl) {
			/*
			 * We found an entry that fits better than the
			 * previous one or it is the 1st match.
			 */
			last_lvl = match_lvl;
			ret      = entry;
		}
	}

	/*
	 * If we have multiple matches but no perfect-fit, just return
	 * NULL.
	 */
	ret = (matches == 1) ? ret : NULL;

	return ret;
}
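
/*
 * Example of the best-fit scoring above: if one buffer was mapped with
 * dma_map_single() and another with dma_map_sg() at the same dev_addr,
 * an unmap ref of type "single" matches both entries, but only the
 * single mapping scores on the type comparison (match_lvl counts equal
 * size, type, direction and sg_call_ents), so that entry wins.
 */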

static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
						 struct dma_debug_entry *ref)
{
	return __hash_bucket_find(bucket, ref, exact_match);
}

static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
						   struct dma_debug_entry *ref,
						   unsigned long *flags)
{
	unsigned int max_range = dma_get_max_seg_size(ref->dev);
	struct dma_debug_entry *entry, index = *ref;
	unsigned int range = 0;

	while (range <= max_range) {
		entry = __hash_bucket_find(*bucket, ref, containing_match);

		if (entry)
			return entry;

		/*
		 * Nothing found, go back a hash bucket
		 */
		put_hash_bucket(*bucket, *flags);
		range          += (1 << HASH_FN_SHIFT);
		index.dev_addr -= (1 << HASH_FN_SHIFT);
		*bucket = get_hash_bucket(&index, flags);
	}

	return NULL;
}
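
/*
 * The backwards walk is needed because a sync may target the middle of a
 * mapping: the containing entry was hashed by its *start* address, which
 * can live in a lower bucket. Each step back covers one hash granule
 * (1 << HASH_FN_SHIFT == 8192 bytes of dma address space), and the walk
 * gives up once it has gone further back than the device's maximum
 * segment size, since no mapping should be larger than that.
 */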

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}

static unsigned long long phys_addr(struct dma_debug_entry *entry)
{
	if (entry->type == dma_debug_resource)
		return __pfn_to_phys(entry->pfn) + entry->offset;

	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);

		list_for_each_entry(entry, &bucket->list, list) {
			if (!dev || dev == entry->dev) {
				dev_info(entry->dev,
					 "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
					 type2name[entry->type], idx,
					 phys_addr(entry), entry->pfn,
					 entry->dev_addr, entry->size,
					 dir2name[entry->direction],
					 maperr2str[entry->map_err_type]);
			}
		}

		spin_unlock_irqrestore(&bucket->lock, flags);
	}
}

/*
 * For each mapping (initial cacheline in the case of
 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
 * scatterlist, or the cacheline specified in dma_map_single) insert
 * into this tree using the cacheline as the key. At
 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
 * the entry already exists at insertion time add a tag as a reference
 * count for the overlapping mappings. For now, the overlap tracking
 * just ensures that 'unmaps' balance 'maps' before marking the
 * cacheline idle, but we should also be flagging overlaps as an API
 * violation.
 *
 * Memory usage is mostly constrained by the maximum number of available
 * dma-debug entries in that we need a free dma_debug_entry before
 * inserting into the tree. In the case of dma_map_page and
 * dma_alloc_coherent there is only one dma_debug_entry and one
 * dma_active_cacheline entry to track per event. dma_map_sg(), on the
 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
 * entries into the tree.
 */
static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
static DEFINE_SPINLOCK(radix_lock);
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)

static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
{
	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
		(entry->offset >> L1_CACHE_SHIFT);
}
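
/*
 * Worked example, assuming 4K pages (PAGE_SHIFT == 12) and 64-byte
 * cachelines (L1_CACHE_SHIFT == 6): CACHELINE_PER_PAGE_SHIFT is 6, so an
 * entry with pfn 0x100 and offset 0x80 maps to cacheline number
 * (0x100 << 6) + (0x80 >> 6) == 0x4002.
 */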

static int active_cacheline_read_overlap(phys_addr_t cln)
{
	int overlap = 0, i;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
			overlap |= 1 << i;
	return overlap;
}

static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
	int i;

	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
		return overlap;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (overlap & 1 << i)
			radix_tree_tag_set(&dma_active_cacheline, cln, i);
		else
			radix_tree_tag_clear(&dma_active_cacheline, cln, i);

	return overlap;
}
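
/*
 * The overlap count is stored as a binary number in the radix tree's
 * per-entry tags. With RADIX_TREE_MAX_TAGS == 3 the count saturates at
 * ACTIVE_CACHELINE_MAX_OVERLAP == 7; a count of 5, for instance, is
 * represented by setting tags 2 and 0 (binary 101) on the cacheline's
 * tree slot.
 */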

static void active_cacheline_inc_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	overlap = active_cacheline_set_overlap(cln, ++overlap);

	/* If we overflowed the overlap counter then we're potentially
	 * leaking dma-mappings.
	 */
	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
		  pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}

static int active_cacheline_dec_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	return active_cacheline_set_overlap(cln, --overlap);
}

static int active_cacheline_insert(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;
	int rc;

	/* If the device is not writing memory then we don't have any
	 * concerns about the cpu consuming stale data. This mitigates
	 * legitimate usages of overlapping mappings.
	 */
	if (entry->direction == DMA_TO_DEVICE)
		return 0;

	spin_lock_irqsave(&radix_lock, flags);
	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
	if (rc == -EEXIST)
		active_cacheline_inc_overlap(cln);
	spin_unlock_irqrestore(&radix_lock, flags);

	return rc;
}

static void active_cacheline_remove(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;

	/* ...mirror the insert case */
	if (entry->direction == DMA_TO_DEVICE)
		return;

	spin_lock_irqsave(&radix_lock, flags);
	/* since we are counting overlaps the final put of the
	 * cacheline will occur when the overlap count is 0.
	 * active_cacheline_dec_overlap() returns -1 in that case
	 */
	if (active_cacheline_dec_overlap(cln) < 0)
		radix_tree_delete(&dma_active_cacheline, cln);
	spin_unlock_irqrestore(&radix_lock, flags);
}

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
{
	struct hash_bucket *bucket;
	unsigned long flags;
	int rc;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, flags);

	rc = active_cacheline_insert(entry);
	if (rc == -ENOMEM) {
		pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
		global_disable = true;
	} else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		err_printk(entry->dev, entry,
			   "cacheline tracking EEXIST, overlapping mappings aren't supported\n");
	}
}

static int dma_debug_create_entries(gfp_t gfp)
{
	struct dma_debug_entry *entry;
	int i;

	entry = (void *)get_zeroed_page(gfp);
	if (!entry)
		return -ENOMEM;

	for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++)
		list_add_tail(&entry[i].list, &free_entries);

	num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
	nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES;

	return 0;
}

static struct dma_debug_entry *__dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

	return entry;
}

static void __dma_entry_alloc_check_leak(void)
{
	u32 tmp = nr_total_entries % nr_prealloc_entries;

	/* Shout each time we tick over some multiple of the initial pool */
	if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
		pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
			nr_total_entries,
			(nr_total_entries / nr_prealloc_entries));
	}
}
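
/*
 * For example, with the default 2^16 preallocated entries, growing the
 * pool to 131072 entries prints "dma_debug_entry pool grown to 131072
 * (200%)" - the "%u00%%" format turns the integer ratio into a rough
 * percentage.
 */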

/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&free_entries_lock, flags);
	if (num_free_entries == 0) {
		if (dma_debug_create_entries(GFP_ATOMIC)) {
			global_disable = true;
			spin_unlock_irqrestore(&free_entries_lock, flags);
			pr_err("debugging out of memory - disabling\n");
			return NULL;
		}
		__dma_entry_alloc_check_leak();
	}

	entry = __dma_entry_alloc();

	spin_unlock_irqrestore(&free_entries_lock, flags);

#ifdef CONFIG_STACKTRACE
	entry->stack_len = stack_trace_save(entry->stack_entries,
					    ARRAY_SIZE(entry->stack_entries),
					    1);
#endif
	return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	active_cacheline_remove(entry);

	/*
	 * add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static ssize_t filter_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN + 1];
	unsigned long flags;
	int len;

	if (!current_driver_name[0])
		return 0;

	/*
	 * We can't copy to userspace directly because current_driver_name can
	 * only be read under the driver_name_lock with irqs disabled. So
	 * create a temporary copy first.
	 */
	read_lock_irqsave(&driver_name_lock, flags);
	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
	read_unlock_irqrestore(&driver_name_lock, flags);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t filter_write(struct file *file, const char __user *userbuf,
			    size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN];
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * We can't copy from userspace directly. Access to
	 * current_driver_name is protected with a write_lock with irqs
	 * disabled. Since copy_from_user can fault and may sleep we
	 * need to copy to temporary buffer first
	 */
	len = min(count, (size_t)(NAME_MAX_LEN - 1));
	if (copy_from_user(buf, userbuf, len))
		return -EFAULT;

	buf[len] = 0;

	write_lock_irqsave(&driver_name_lock, flags);

	/*
	 * Now handle the string we got from userspace very carefully.
	 * The rules are:
	 *         - only use the first token we got
	 *         - token delimiter is everything looking like a space
	 *           character (' ', '\n', '\t' ...)
	 */
	if (!isalnum(buf[0])) {
		/*
		 * If the first character userspace gave us is not
		 * alphanumerical then assume the filter should be
		 * switched off.
		 */
		if (current_driver_name[0])
			pr_info("switching off dma-debug driver filter\n");
		current_driver_name[0] = 0;
		current_driver = NULL;
		goto out_unlock;
	}

	/*
	 * Now parse out the first token and use it as the name for the
	 * driver to filter for.
	 */
	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
		current_driver_name[i] = buf[i];
		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
			break;
	}
	current_driver_name[i] = 0;
	current_driver = NULL;

	pr_info("enable driver filter for driver [%s]\n",
		current_driver_name);

out_unlock:
	write_unlock_irqrestore(&driver_name_lock, flags);

	return count;
}
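
/*
 * With debugfs mounted at /sys/kernel/debug, the filter is driven from
 * userspace roughly like this (the driver name "e1000e" is just an
 * example):
 *
 *   echo e1000e > /sys/kernel/debug/dma-api/driver_filter  # warn only
 *                                                          # for e1000e
 *   echo ""     > /sys/kernel/debug/dma-api/driver_filter  # filter off
 *
 * An empty write starts with '\n', which is not alphanumeric and
 * therefore switches the filter off.
 */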

static const struct file_operations filter_fops = {
	.read   = filter_read,
	.write  = filter_write,
	.llseek = default_llseek,
};

static int dump_show(struct seq_file *seq, void *v)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);
		list_for_each_entry(entry, &bucket->list, list) {
			seq_printf(seq,
				   "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx %s %s\n",
				   dev_name(entry->dev),
				   dev_driver_string(entry->dev),
				   type2name[entry->type], idx,
				   phys_addr(entry), entry->pfn,
				   entry->dev_addr, entry->size,
				   dir2name[entry->direction],
				   maperr2str[entry->map_err_type]);
		}
		spin_unlock_irqrestore(&bucket->lock, flags);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dump);

static int __init dma_debug_fs_init(void)
{
	struct dentry *dentry = debugfs_create_dir("dma-api", NULL);

	debugfs_create_bool("disabled", 0444, dentry, &global_disable);
	debugfs_create_u32("error_count", 0444, dentry, &error_count);
	debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors);
	debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors);
	debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries);
	debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries);
	debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
	debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
	debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);

	return 0;
}
core_initcall_sync(dma_debug_fs_init);
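
/*
 * The resulting files live under <debugfs>/dma-api/; for example
 * "cat /sys/kernel/debug/dma-api/dump" lists all active mappings, and
 * "echo 1 > /sys/kernel/debug/dma-api/all_errors" makes every error
 * visible instead of only the first show_num_errors ones.
 */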

static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
{
	struct dma_debug_entry *entry;
	unsigned long flags;
	int count = 0, i;

	for (i = 0; i < HASH_SIZE; ++i) {
		spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
			if (entry->dev == dev) {
				count += 1;
				*out_entry = entry;
			}
		}
		spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
	}

	return count;
}

static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;
	struct dma_debug_entry *entry;
	int count;

	if (dma_debug_disabled())
		return 0;

	switch (action) {
	case BUS_NOTIFY_UNBOUND_DRIVER:
		count = device_dma_allocations(dev, &entry);
		if (count == 0)
			break;
		err_printk(dev, entry, "device driver has pending "
				"DMA allocations while released from device "
				"[count=%d]\n"
				"One of leaked entries details: "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[mapped with %s] [mapped as %s]\n",
			count, entry->dev_addr, entry->size,
			dir2name[entry->direction], type2name[entry->type]);
		break;
	default:
		break;
	}

	return 0;
}

void dma_debug_add_bus(struct bus_type *bus)
{
	struct notifier_block *nb;

	if (dma_debug_disabled())
		return;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (nb == NULL) {
		pr_err("dma_debug_add_bus: out of memory\n");
		return;
	}

	nb->notifier_call = dma_debug_device_change;

	bus_register_notifier(bus, nb);
}

static int dma_debug_init(void)
{
	int i, nr_pages;

	/* Do not use dma_debug_initialized here, since we really want to be
	 * called to set dma_debug_initialized == true.
	 */
	if (global_disable)
		return 0;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
	for (i = 0; i < nr_pages; ++i)
		dma_debug_create_entries(GFP_KERNEL);
	if (num_free_entries >= nr_prealloc_entries) {
		pr_info("preallocated %d debug entries\n", nr_total_entries);
	} else if (num_free_entries > 0) {
		pr_warn("%d debug entries requested but only %d allocated\n",
			nr_prealloc_entries, nr_total_entries);
	} else {
		pr_err("debugging out of memory error - disabled\n");
		global_disable = true;

		return 0;
	}
	min_free_entries = num_free_entries;

	dma_debug_initialized = true;

	pr_info("debugging enabled by kernel config\n");
	return 0;
}
core_initcall(dma_debug_init);

static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		pr_info("debugging disabled on kernel command line\n");
		global_disable = true;
	}

	return 1;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	if (!str)
		return -EINVAL;
	if (!get_option(&str, &nr_prealloc_entries))
		nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
	return 1;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);
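
/*
 * Both knobs are boot parameters; for example "dma_debug=off" disables
 * the checks entirely, and "dma_debug_entries=131072" asks for a larger
 * initial pool (the count given here is only an example value).
 */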

static void check_unmap(struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);
	entry = bucket_find_exact(bucket, ref);

	if (!entry) {
		/* must drop lock before calling dma_mapping_error */
		put_hash_bucket(bucket, flags);

		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
			err_printk(ref->dev, NULL,
				   "device driver tries to free an "
				   "invalid DMA memory address\n");
		} else {
			err_printk(ref->dev, NULL,
				   "device driver tries to free DMA "
				   "memory it has not allocated [device "
				   "address=0x%016llx] [size=%llu bytes]\n",
				   ref->dev_addr, ref->size);
		}
		return;
	}

	if (ref->size != entry->size) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different size "
			   "[device address=0x%016llx] [map size=%llu bytes] "
			   "[unmap size=%llu bytes]\n",
			   ref->dev_addr, entry->size, ref->size);
	}

	if (ref->type != entry->type) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with wrong function "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s] [unmapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type], type2name[ref->type]);
	} else if ((entry->type == dma_debug_coherent) &&
		   (phys_addr(ref) != phys_addr(entry))) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different CPU address "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[cpu alloc address=0x%016llx] "
			   "[cpu free address=0x%016llx]",
			   ref->dev_addr, ref->size,
			   phys_addr(entry),
			   phys_addr(ref));
	}

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA sg list with different entry count "
			   "[map count=%d] [unmap count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

	/*
	 * This may not be a bug in reality - but most implementations of the
	 * DMA API don't handle this properly, so check for it here
	 */
	if (ref->direction != entry->direction) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [unmapped with %s]\n",
			   ref->dev_addr, ref->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	/*
	 * Drivers should use dma_mapping_error() to check the returned
	 * addresses of dma_map_single() and dma_map_page().
	 * If not, print this warning message. See Documentation/core-api/dma-api.rst.
	 */
	if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
		err_printk(ref->dev, entry,
			   "device driver failed to check map error "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type]);
	}

	hash_bucket_del(entry);
	dma_entry_free(entry);

	put_hash_bucket(bucket, flags);
}

static void check_for_stack(struct device *dev,
			    struct page *page, size_t offset)
{
	void *addr;
	struct vm_struct *stack_vm_area = task_stack_vm_area(current);

	if (!stack_vm_area) {
		/* Stack is direct-mapped. */
		if (PageHighMem(page))
			return;
		addr = page_address(page) + offset;
		if (object_is_on_stack(addr))
			err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
	} else {
		/* Stack is vmalloced. */
		int i;

		for (i = 0; i < stack_vm_area->nr_pages; i++) {
			if (page != stack_vm_area->pages[i])
				continue;

			addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
			err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
			break;
		}
	}
}

static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
{
	if (memory_intersects(_stext, _etext, addr, len) ||
	    memory_intersects(__start_rodata, __end_rodata, addr, len))
		err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
}

static void check_sync(struct device *dev,
		       struct dma_debug_entry *ref,
		       bool to_cpu)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);

	entry = bucket_find_contain(&bucket, ref, &flags);

	if (!entry) {
		err_printk(dev, NULL, "device driver tries "
			   "to sync DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   (unsigned long long)ref->dev_addr, ref->size);
		goto out;
	}

	if (ref->size > entry->size) {
		err_printk(dev, entry, "device driver syncs"
			   " DMA memory outside allocated range "
			   "[device address=0x%016llx] "
			   "[allocation size=%llu bytes] "
			   "[sync offset+size=%llu]\n",
			   entry->dev_addr, entry->size,
			   ref->size);
	}

	if (entry->direction == DMA_BIDIRECTIONAL)
		goto out;

	if (ref->direction != entry->direction) {
		err_printk(dev, entry, "device driver syncs "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
		      !(ref->direction == DMA_TO_DEVICE))
		err_printk(dev, entry, "device driver syncs "
			   "device read-only DMA memory for cpu "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
		       !(ref->direction == DMA_FROM_DEVICE))
		err_printk(dev, entry, "device driver syncs "
			   "device write-only DMA memory to device "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "device driver syncs "
			   "DMA sg list with different entry count "
			   "[map count=%d] [sync count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

out:
	put_hash_bucket(bucket, flags);
}

static void check_sg_segment(struct device *dev, struct scatterlist *sg)
{
#ifdef CONFIG_DMA_API_DEBUG_SG
	unsigned int max_seg = dma_get_max_seg_size(dev);
	u64 start, end, boundary = dma_get_seg_boundary(dev);

	/*
	 * Either the driver forgot to set dma_parms appropriately, or
	 * whoever generated the list forgot to check them.
	 */
	if (sg->length > max_seg)
		err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
			   sg->length, max_seg);
	/*
	 * In some cases this could potentially be the DMA API
	 * implementation's fault, but it would usually imply that
	 * the scatterlist was built inappropriately to begin with.
	 */
	start = sg_dma_address(sg);
	end = start + sg_dma_len(sg) - 1;
	if ((start ^ end) & ~boundary)
		err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
			   start, end, boundary);
#endif
}
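
/*
 * The (start ^ end) & ~boundary test is non-zero exactly when start and
 * end differ in some bit above the boundary mask, i.e. when the segment
 * crosses a boundary. Example with a 64K boundary (mask 0xffff):
 * start = 0xfff0, end = 0x10010 gives (start ^ end) & ~0xffff == 0x10000,
 * so the segment straddles the 64K line and is flagged.
 */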

void debug_dma_map_single(struct device *dev, const void *addr,
			  unsigned long len)
{
	if (unlikely(dma_debug_disabled()))
		return;

	if (!virt_addr_valid(addr))
		err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
			   addr, len);

	if (is_vmalloc_addr(addr))
		err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
			   addr, len);
}
EXPORT_SYMBOL(debug_dma_map_single);

void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr,
			unsigned long attrs)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (dma_mapping_error(dev, dma_addr))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev       = dev;
	entry->type      = dma_debug_single;
	entry->pfn       = page_to_pfn(page);
	entry->offset    = offset;
	entry->dev_addr  = dma_addr;
	entry->size      = size;
	entry->direction = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	check_for_stack(dev, page, offset);

	if (!PageHighMem(page)) {
		void *addr = page_address(page) + offset;

		check_for_illegal_area(dev, addr, size);
	}

	add_dma_entry(entry, attrs);
}

void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_debug_entry ref;
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.dev = dev;
	ref.dev_addr = dma_addr;
	bucket = get_hash_bucket(&ref, &flags);

	list_for_each_entry(entry, &bucket->list, list) {
		if (!exact_match(&ref, entry))
			continue;

		/*
		 * The same physical address can be mapped multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which updates the first entry
		 * from the hash which fits the reference value and is
		 * not currently listed as being checked.
		 */
		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
			entry->map_err_type = MAP_ERR_CHECKED;
			break;
		}
	}

	put_hash_bucket(bucket, flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);

void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
			  size_t size, int direction)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_single,
		.dev            = dev,
		.dev_addr       = addr,
		.size           = size,
		.direction      = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;
	check_unmap(&ref);
}

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction,
		      unsigned long attrs)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nents, i) {
		check_for_stack(dev, sg_page(s), s->offset);
		if (!PageHighMem(sg_page(s)))
			check_for_illegal_area(dev, sg_virt(s), s->length);
	}

	for_each_sg(sg, s, mapped_ents, i) {
		entry = dma_entry_alloc();
		if (!entry)
			return;

		entry->type           = dma_debug_sg;
		entry->dev            = dev;
		entry->pfn            = page_to_pfn(sg_page(s));
		entry->offset         = s->offset;
		entry->size           = sg_dma_len(s);
		entry->dev_addr       = sg_dma_address(s);
		entry->direction      = direction;
		entry->sg_call_ents   = nents;
		entry->sg_mapped_ents = mapped_ents;

		check_sg_segment(dev, s);

		add_dma_entry(entry, attrs);
	}
}

static int get_nr_mapped_entries(struct device *dev,
				 struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;
	int mapped_ents;

	bucket       = get_hash_bucket(ref, &flags);
	entry        = bucket_find_exact(bucket, ref);
	mapped_ents  = 0;

	if (entry)
		mapped_ents = entry->sg_mapped_ents;
	put_hash_bucket(bucket, flags);

	return mapped_ents;
}

void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sglist, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.pfn            = page_to_pfn(sg_page(s)),
			.offset         = s->offset,
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = dir,
			.sg_call_ents   = nelems,
		};

		if (mapped_ents && i >= mapped_ents)
			break;

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		check_unmap(&ref);
	}
}

void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt,
			      unsigned long attrs)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (unlikely(virt == NULL))
		return;

	/* handle vmalloc and linear addresses */
	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type      = dma_debug_coherent;
	entry->dev       = dev;
	entry->offset    = offset_in_page(virt);
	entry->size      = size;
	entry->dev_addr  = dma_addr;
	entry->direction = DMA_BIDIRECTIONAL;

	if (is_vmalloc_addr(virt))
		entry->pfn = vmalloc_to_pfn(virt);
	else
		entry->pfn = page_to_pfn(virt_to_page(virt));

	add_dma_entry(entry, attrs);
}

void debug_dma_free_coherent(struct device *dev, size_t size,
			     void *virt, dma_addr_t addr)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_coherent,
		.dev            = dev,
		.offset         = offset_in_page(virt),
		.dev_addr       = addr,
		.size           = size,
		.direction      = DMA_BIDIRECTIONAL,
	};

	/* handle vmalloc and linear addresses */
	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
		return;

	if (is_vmalloc_addr(virt))
		ref.pfn = vmalloc_to_pfn(virt);
	else
		ref.pfn = page_to_pfn(virt_to_page(virt));

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}

void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
			    int direction, dma_addr_t dma_addr,
			    unsigned long attrs)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type		= dma_debug_resource;
	entry->dev		= dev;
	entry->pfn		= PHYS_PFN(addr);
	entry->offset		= offset_in_page(addr);
	entry->size		= size;
	entry->dev_addr		= dma_addr;
	entry->direction	= direction;
	entry->map_err_type	= MAP_ERR_NOT_CHECKED;

	add_dma_entry(entry, attrs);
}

void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_resource,
		.dev            = dev,
		.dev_addr       = dma_addr,
		.size           = size,
		.direction      = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}

void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}

void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.pfn            = page_to_pfn(sg_page(s)),
			.offset         = s->offset,
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = direction,
			.sg_call_ents   = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, true);
	}
}

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.pfn            = page_to_pfn(sg_page(s)),
			.offset         = s->offset,
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = direction,
			.sg_call_ents   = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, false);
	}
}

static __init int dma_debug_driver_setup(char *str)
{
	int i;

	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
		current_driver_name[i] = *str;
		if (*str == 0)
			break;
	}

	if (current_driver_name[0])
		pr_info("enable driver filter for driver [%s]\n",
			current_driver_name);

	return 1;
}
__setup("dma_debug_driver=", dma_debug_driver_setup);
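
/*
 * Like the debugfs file above, the filter can also be armed at boot,
 * e.g. with "dma_debug_driver=e1000e" on the kernel command line (the
 * driver name here is only an example).
 */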