/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/sched/task_stack.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/sched/task.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>
#define HASH_SIZE       1024ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

enum {
        dma_debug_single,
        dma_debug_page,
        dma_debug_sg,
        dma_debug_coherent,
        dma_debug_resource,
};

enum map_err_types {
        MAP_ERR_CHECK_NOT_APPLICABLE,
        MAP_ERR_NOT_CHECKED,
        MAP_ERR_CHECKED,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5
/**
 * struct dma_debug_entry - track a dma_map_* or dma_alloc_coherent mapping
 * @list: node on pre-allocated free_entries list
 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
 * @type: single, page, sg, coherent
 * @pfn: page frame of the start address
 * @offset: offset of mapping relative to pfn
 * @dev_addr: device address of the mapping
 * @size: length of the mapping
 * @direction: enum dma_data_direction
 * @sg_call_ents: 'nents' from dma_map_sg
 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
 * @map_err_type: track whether dma_mapping_error() was checked
 * @stacktrace: support backtraces when a violation is detected
 */
struct dma_debug_entry {
        struct list_head   list;
        struct device      *dev;
        int                type;
        unsigned long      pfn;
        size_t             offset;
        u64                dev_addr;
        u64                size;
        int                direction;
        int                sg_call_ents;
        int                sg_mapped_ents;
        enum map_err_types map_err_type;
#ifdef CONFIG_STACKTRACE
        struct stack_trace stacktrace;
        unsigned long      st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};
typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

struct hash_bucket {
        struct list_head list;
        spinlock_t lock;
} ____cacheline_aligned_in_smp;
/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Early initialization disable flag, set at the end of dma_debug_init */
static bool dma_debug_initialized __read_mostly;

static inline bool dma_debug_disabled(void)
{
        return global_disable || !dma_debug_initialized;
}

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;
/* debugfs dentry's for the stuff above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;
static struct dentry *filter_dent           __read_mostly;

/* per-driver filter related state */

#define NAME_MAX_LEN    64

static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver                    __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);
static const char *const maperr2str[] = {
        [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
        [MAP_ERR_NOT_CHECKED] = "dma map error not checked",
        [MAP_ERR_CHECKED] = "dma map error checked",
};

static const char *type2name[5] = { "single", "page",
                                    "scatter-gather", "coherent",
                                    "resource" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
                                   "DMA_FROM_DEVICE", "DMA_NONE" };
/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
        if (entry) {
                pr_warning("Mapped at:\n");
                print_stack_trace(&entry->stacktrace, 0);
        }
#endif
}
static bool driver_filter(struct device *dev)
{
        struct device_driver *drv;
        unsigned long flags;
        bool ret;

        /* driver filter off */
        if (likely(!current_driver_name[0]))
                return true;

        /* driver filter on and initialized */
        if (current_driver && dev && dev->driver == current_driver)
                return true;

        /* driver filter on, but we can't filter on a NULL device... */
        if (!dev)
                return false;

        if (current_driver || !current_driver_name[0])
                return false;

        /* driver filter on but not yet initialized */
        drv = dev->driver;
        if (!drv)
                return false;

        /* lock to protect against change of current_driver_name */
        read_lock_irqsave(&driver_name_lock, flags);

        ret = false;
        if (drv->name &&
            strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
                current_driver = drv;
                ret = true;
        }

        read_unlock_irqrestore(&driver_name_lock, flags);

        return ret;
}
#define err_printk(dev, entry, format, arg...) do {                    \
                error_count += 1;                                       \
                if (driver_filter(dev) &&                               \
                    (show_all_errors || show_num_errors > 0)) {         \
                        WARN(1, "%s %s: " format,                       \
                             dev ? dev_driver_string(dev) : "NULL",     \
                             dev ? dev_name(dev) : "NULL", ## arg);     \
                        dump_entry_trace(entry);                        \
                }                                                       \
                if (!show_all_errors && show_num_errors > 0)            \
                        show_num_errors -= 1;                           \
        } while (0);
/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
        /*
         * Hash function is based on the dma address.
         * With HASH_FN_SHIFT == 13 and a 10-bit mask we use bits 13-22
         * of the dma address as the index into the hash.
         */
        return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}
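/*
 * Worked example (illustrative, not part of the upstream file): with
 * HASH_FN_SHIFT == 13 and HASH_FN_MASK == 0x3ff, a dma address of
 * 0x12345678 hashes to (0x12345678 >> 13) & 0x3ff = 0x91a2 & 0x3ff =
 * 0x1a2, i.e. bucket 418 of the 1024 buckets. All addresses inside the
 * same 8 KiB window share a bucket, which bucket_find_contain() below
 * relies on when it walks backwards to neighboring buckets.
 */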
/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
                                           unsigned long *flags)
        __acquires(&dma_entry_hash[idx].lock)
{
        int idx = hash_fn(entry);
        unsigned long __flags;

        spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
        *flags = __flags;
        return &dma_entry_hash[idx];
}
/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
                            unsigned long *flags)
        __releases(&bucket->lock)
{
        unsigned long __flags = *flags;

        spin_unlock_irqrestore(&bucket->lock, __flags);
}

static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
        return ((a->dev_addr == b->dev_addr) &&
                (a->dev == b->dev)) ? true : false;
}
static bool containing_match(struct dma_debug_entry *a,
                             struct dma_debug_entry *b)
{
        if (a->dev != b->dev)
                return false;

        if ((b->dev_addr <= a->dev_addr) &&
            ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
                return true;

        return false;
}
/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
                                                  struct dma_debug_entry *ref,
                                                  match_fn match)
{
        struct dma_debug_entry *entry, *ret = NULL;
        int matches = 0, match_lvl, last_lvl = -1;

        list_for_each_entry(entry, &bucket->list, list) {
                if (!match(ref, entry))
                        continue;

                /*
                 * Some drivers map the same physical address multiple
                 * times. Without a hardware IOMMU this results in the
                 * same device addresses being put into the dma-debug
                 * hash multiple times too. This can result in false
                 * positives being reported. Therefore we implement a
                 * best-fit algorithm here which returns the entry from
                 * the hash which fits best to the reference value
                 * instead of the first-fit.
                 */
                matches += 1;
                match_lvl = 0;
                entry->size         == ref->size         ? ++match_lvl : 0;
                entry->type         == ref->type         ? ++match_lvl : 0;
                entry->direction    == ref->direction    ? ++match_lvl : 0;
                entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;

                if (match_lvl == 4) {
                        /* perfect-fit - return the result */
                        return entry;
                } else if (match_lvl > last_lvl) {
                        /*
                         * We found an entry that fits better than the
                         * previous one or it is the 1st match.
                         */
                        last_lvl = match_lvl;
                        ret      = entry;
                }
        }

        /*
         * If we have multiple matches but no perfect-fit, just return
         * NULL.
         */
        ret = (matches == 1) ? ret : NULL;

        return ret;
}
static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
                                                 struct dma_debug_entry *ref)
{
        return __hash_bucket_find(bucket, ref, exact_match);
}
static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
                                                   struct dma_debug_entry *ref,
                                                   unsigned long *flags)
{
        unsigned int max_range = dma_get_max_seg_size(ref->dev);
        struct dma_debug_entry *entry, index = *ref;
        unsigned int range = 0;

        while (range <= max_range) {
                entry = __hash_bucket_find(*bucket, ref, containing_match);

                if (entry)
                        return entry;

                /*
                 * Nothing found, go back a hash bucket
                 */
                put_hash_bucket(*bucket, flags);
                range          += (1 << HASH_FN_SHIFT);
                index.dev_addr -= (1 << HASH_FN_SHIFT);
                *bucket = get_hash_bucket(&index, flags);
        }

        return NULL;
}
/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
                            struct dma_debug_entry *entry)
{
        list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
        list_del(&entry->list);
}

static unsigned long long phys_addr(struct dma_debug_entry *entry)
{
        if (entry->type == dma_debug_resource)
                return __pfn_to_phys(entry->pfn) + entry->offset;

        return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
}
/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
        int idx;

        for (idx = 0; idx < HASH_SIZE; idx++) {
                struct hash_bucket *bucket = &dma_entry_hash[idx];
                struct dma_debug_entry *entry;
                unsigned long flags;

                spin_lock_irqsave(&bucket->lock, flags);

                list_for_each_entry(entry, &bucket->list, list) {
                        if (!dev || dev == entry->dev) {
                                dev_info(entry->dev,
                                         "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
                                         type2name[entry->type], idx,
                                         phys_addr(entry), entry->pfn,
                                         entry->dev_addr, entry->size,
                                         dir2name[entry->direction],
                                         maperr2str[entry->map_err_type]);
                        }
                }

                spin_unlock_irqrestore(&bucket->lock, flags);
        }
}
EXPORT_SYMBOL(debug_dma_dump_mappings);
/*
 * For each mapping (initial cacheline in the case of
 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
 * scatterlist, or the cacheline specified in dma_map_single) insert
 * into this tree using the cacheline as the key. At
 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
 * the entry already exists at insertion time add a tag as a reference
 * count for the overlapping mappings. For now, the overlap tracking
 * just ensures that 'unmaps' balance 'maps' before marking the
 * cacheline idle, but we should also be flagging overlaps as an API
 * violation.
 *
 * Memory usage is mostly constrained by the maximum number of available
 * dma-debug entries in that we need a free dma_debug_entry before
 * inserting into the tree. In the case of dma_map_page and
 * dma_alloc_coherent there is only one dma_debug_entry and one
 * dma_active_cacheline entry to track per event. dma_map_sg(), on the
 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
 * entries into the tree.
 *
 * At any time debug_dma_assert_idle() can be called to trigger a
 * warning if any cachelines in the given page are in the active set.
 */
static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
static DEFINE_SPINLOCK(radix_lock);
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
{
        return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
                (entry->offset >> L1_CACHE_SHIFT);
}
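/*
 * Worked example (illustrative, assuming 4 KiB pages and 64-byte
 * cachelines, i.e. PAGE_SHIFT == 12 and L1_CACHE_SHIFT == 6): a page
 * then holds 1 << (12 - 6) = 64 cachelines, so an entry with pfn == 2
 * and offset == 0x80 maps to cacheline number 2 * 64 + (0x80 >> 6) =
 * 130. Cacheline numbers are therefore globally unique and ordered,
 * which is what lets debug_dma_assert_idle() do one gang lookup per
 * page further down.
 */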
static int active_cacheline_read_overlap(phys_addr_t cln)
{
        int overlap = 0, i;

        for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
                if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
                        overlap |= 1 << i;
        return overlap;
}
static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
        int i;

        if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
                return overlap;

        for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
                if (overlap & 1 << i)
                        radix_tree_tag_set(&dma_active_cacheline, cln, i);
                else
                        radix_tree_tag_clear(&dma_active_cacheline, cln, i);

        return overlap;
}
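/*
 * The radix tree can hold only one entry per cacheline number, so
 * overlapping mappings are counted in the entry's tag bits instead:
 * the overlap count is stored in binary across the RADIX_TREE_MAX_TAGS
 * tags. With the usual RADIX_TREE_MAX_TAGS == 3 this allows counts
 * 0..7 (ACTIVE_CACHELINE_MAX_OVERLAP); e.g. a count of 5 (0b101) is
 * kept as tag 2 set, tag 1 clear, tag 0 set.
 */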
static void active_cacheline_inc_overlap(phys_addr_t cln)
{
        int overlap = active_cacheline_read_overlap(cln);

        overlap = active_cacheline_set_overlap(cln, ++overlap);

        /* If we overflowed the overlap counter then we're potentially
         * leaking dma-mappings. Otherwise, if maps and unmaps are
         * balanced then this overflow may cause false negatives in
         * debug_dma_assert_idle() as the cacheline may be marked idle
         * prematurely.
         */
        WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
                  "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
                  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}
static int active_cacheline_dec_overlap(phys_addr_t cln)
{
        int overlap = active_cacheline_read_overlap(cln);

        return active_cacheline_set_overlap(cln, --overlap);
}
static int active_cacheline_insert(struct dma_debug_entry *entry)
{
        phys_addr_t cln = to_cacheline_number(entry);
        unsigned long flags;
        int rc;

        /* If the device is not writing memory then we don't have any
         * concerns about the cpu consuming stale data. This mitigates
         * legitimate usages of overlapping mappings.
         */
        if (entry->direction == DMA_TO_DEVICE)
                return 0;

        spin_lock_irqsave(&radix_lock, flags);
        rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
        if (rc == -EEXIST)
                active_cacheline_inc_overlap(cln);
        spin_unlock_irqrestore(&radix_lock, flags);

        return rc;
}
static void active_cacheline_remove(struct dma_debug_entry *entry)
{
        phys_addr_t cln = to_cacheline_number(entry);
        unsigned long flags;

        /* ...mirror the insert case */
        if (entry->direction == DMA_TO_DEVICE)
                return;

        spin_lock_irqsave(&radix_lock, flags);
        /* since we are counting overlaps the final put of the
         * cacheline will occur when the overlap count is 0.
         * active_cacheline_dec_overlap() returns -1 in that case
         */
        if (active_cacheline_dec_overlap(cln) < 0)
                radix_tree_delete(&dma_active_cacheline, cln);
        spin_unlock_irqrestore(&radix_lock, flags);
}
/**
 * debug_dma_assert_idle() - assert that a page is not undergoing dma
 * @page: page to lookup in the dma_active_cacheline tree
 *
 * Place a call to this routine in cases where the cpu touching the page
 * before the dma completes (page is dma_unmapped) will lead to data
 * corruption.
 */
void debug_dma_assert_idle(struct page *page)
{
        static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
        struct dma_debug_entry *entry = NULL;
        void **results = (void **) &ents;
        unsigned int nents, i;
        unsigned long flags;
        phys_addr_t cln;

        if (dma_debug_disabled())
                return;

        if (!page)
                return;

        cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
        spin_lock_irqsave(&radix_lock, flags);
        nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
                                       CACHELINES_PER_PAGE);
        for (i = 0; i < nents; i++) {
                phys_addr_t ent_cln = to_cacheline_number(ents[i]);

                if (ent_cln == cln) {
                        entry = ents[i];
                        break;
                } else if (ent_cln >= cln + CACHELINES_PER_PAGE)
                        break;
        }
        spin_unlock_irqrestore(&radix_lock, flags);

        if (!entry)
                return;

        cln = to_cacheline_number(entry);
        err_printk(entry->dev, entry,
                   "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
                   &cln);
}
/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
        struct hash_bucket *bucket;
        unsigned long flags;
        int rc;

        bucket = get_hash_bucket(entry, &flags);
        hash_bucket_add(bucket, entry);
        put_hash_bucket(bucket, &flags);

        rc = active_cacheline_insert(entry);
        if (rc == -ENOMEM) {
                pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
                global_disable = true;
        }

        /* TODO: report -EEXIST errors here as overlapping mappings are
         * not supported by the DMA API
         */
}
static struct dma_debug_entry *__dma_entry_alloc(void)
{
        struct dma_debug_entry *entry;

        entry = list_entry(free_entries.next, struct dma_debug_entry, list);
        list_del(&entry->list);
        memset(entry, 0, sizeof(*entry));

        num_free_entries -= 1;
        if (num_free_entries < min_free_entries)
                min_free_entries = num_free_entries;

        return entry;
}
/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
        struct dma_debug_entry *entry;
        unsigned long flags;

        spin_lock_irqsave(&free_entries_lock, flags);

        if (list_empty(&free_entries)) {
                global_disable = true;
                spin_unlock_irqrestore(&free_entries_lock, flags);
                pr_err("DMA-API: debugging out of memory - disabling\n");
                return NULL;
        }

        entry = __dma_entry_alloc();

        spin_unlock_irqrestore(&free_entries_lock, flags);

#ifdef CONFIG_STACKTRACE
        entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
        entry->stacktrace.entries = entry->st_entries;
        entry->stacktrace.skip = 2;
        save_stack_trace(&entry->stacktrace);
#endif

        return entry;
}
static void dma_entry_free(struct dma_debug_entry *entry)
{
        unsigned long flags;

        active_cacheline_remove(entry);

        /*
         * add to beginning of the list - this way the entries are
         * more likely cache hot when they are reallocated.
         */
        spin_lock_irqsave(&free_entries_lock, flags);
        list_add(&entry->list, &free_entries);
        num_free_entries += 1;
        spin_unlock_irqrestore(&free_entries_lock, flags);
}
int dma_debug_resize_entries(u32 num_entries)
{
        int i, delta, ret = 0;
        unsigned long flags;
        struct dma_debug_entry *entry;
        LIST_HEAD(tmp);

        spin_lock_irqsave(&free_entries_lock, flags);

        if (nr_total_entries < num_entries) {
                delta = num_entries - nr_total_entries;

                spin_unlock_irqrestore(&free_entries_lock, flags);

                for (i = 0; i < delta; i++) {
                        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                        if (!entry)
                                break;

                        list_add_tail(&entry->list, &tmp);
                }

                spin_lock_irqsave(&free_entries_lock, flags);

                list_splice(&tmp, &free_entries);
                nr_total_entries += i;
                num_free_entries += i;
        } else {
                delta = nr_total_entries - num_entries;

                for (i = 0; i < delta && !list_empty(&free_entries); i++) {
                        entry = __dma_entry_alloc();
                        kfree(entry);
                }

                nr_total_entries -= i;
        }

        if (nr_total_entries != num_entries)
                ret = 1;

        spin_unlock_irqrestore(&free_entries_lock, flags);

        return ret;
}
EXPORT_SYMBOL(dma_debug_resize_entries);
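/*
 * Usage sketch (illustrative): an architecture or IOMMU driver that
 * knows it will create far more mappings than the default prealloc
 * covers can grow the pool at init time, e.g.:
 *
 *      if (dma_debug_resize_entries(PREALLOC_DMA_DEBUG_ENTRIES << 1))
 *              pr_warn("could not resize dma-debug entry pool\n");
 *
 * A non-zero return means the pool could not be brought to exactly the
 * requested size; PREALLOC_DMA_DEBUG_ENTRIES stands for whatever
 * constant the caller already passes to dma_debug_init().
 */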
/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static int prealloc_memory(u32 num_entries)
{
        struct dma_debug_entry *entry, *next_entry;
        int i;

        for (i = 0; i < num_entries; ++i) {
                entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry)
                        goto out_err;

                list_add_tail(&entry->list, &free_entries);
        }

        num_free_entries = num_entries;
        min_free_entries = num_entries;

        pr_info("DMA-API: preallocated %d debug entries\n", num_entries);

        return 0;

out_err:

        list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
                list_del(&entry->list);
                kfree(entry);
        }

        return -ENOMEM;
}
static ssize_t filter_read(struct file *file, char __user *user_buf,
                           size_t count, loff_t *ppos)
{
        char buf[NAME_MAX_LEN + 1];
        unsigned long flags;
        int len;

        if (!current_driver_name[0])
                return 0;

        /*
         * We can't copy to userspace directly because current_driver_name can
         * only be read under the driver_name_lock with irqs disabled. So
         * create a temporary copy first.
         */
        read_lock_irqsave(&driver_name_lock, flags);
        len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
        read_unlock_irqrestore(&driver_name_lock, flags);

        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t filter_write(struct file *file, const char __user *userbuf,
                            size_t count, loff_t *ppos)
{
        char buf[NAME_MAX_LEN];
        unsigned long flags;
        size_t len;
        int i;

        /*
         * We can't copy from userspace directly. Access to
         * current_driver_name is protected with a write_lock with irqs
         * disabled. Since copy_from_user can fault and may sleep we
         * need to copy to temporary buffer first
         */
        len = min(count, (size_t)(NAME_MAX_LEN - 1));
        if (copy_from_user(buf, userbuf, len))
                return -EFAULT;

        buf[len] = 0;

        write_lock_irqsave(&driver_name_lock, flags);

        /*
         * Now handle the string we got from userspace very carefully.
         * The rules are:
         *         - only use the first token we got
         *         - token delimiter is everything looking like a space
         *           character (' ', '\n', '\t' ...)
         */
        if (!isalnum(buf[0])) {
                /*
                 * If the first character userspace gave us is not
                 * alphanumerical then assume the filter should be
                 * switched off.
                 */
                if (current_driver_name[0])
                        pr_info("DMA-API: switching off dma-debug driver filter\n");
                current_driver_name[0] = 0;
                current_driver = NULL;
                goto out_unlock;
        }

        /*
         * Now parse out the first token and use it as the name for the
         * driver to filter for.
         */
        for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
                current_driver_name[i] = buf[i];
                if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
                        break;
        }
        current_driver_name[i] = 0;
        current_driver = NULL;

        pr_info("DMA-API: enable driver filter for driver [%s]\n",
                current_driver_name);

out_unlock:
        write_unlock_irqrestore(&driver_name_lock, flags);

        return count;
}
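/*
 * Usage sketch (illustrative): with debugfs mounted at
 * /sys/kernel/debug, the filter above is driven from userspace like
 * this:
 *
 *      # echo "e1000e" > /sys/kernel/debug/dma-api/driver_filter
 *      # echo ""       > /sys/kernel/debug/dma-api/driver_filter
 *
 * The first command limits err_printk() reports to the e1000e driver;
 * the second (any non-alphanumeric first character) switches the
 * filter off again.
 */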
static const struct file_operations filter_fops = {
        .read  = filter_read,
        .write = filter_write,
        .llseek = default_llseek,
};
static int dma_debug_fs_init(void)
{
        dma_debug_dent = debugfs_create_dir("dma-api", NULL);
        if (!dma_debug_dent) {
                pr_err("DMA-API: can not create debugfs directory\n");
                return -ENOMEM;
        }

        global_disable_dent = debugfs_create_bool("disabled", 0444,
                        dma_debug_dent, &global_disable);
        if (!global_disable_dent)
                goto out_err;

        error_count_dent = debugfs_create_u32("error_count", 0444,
                        dma_debug_dent, &error_count);
        if (!error_count_dent)
                goto out_err;

        show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
                        dma_debug_dent, &show_all_errors);
        if (!show_all_errors_dent)
                goto out_err;

        show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
                        dma_debug_dent, &show_num_errors);
        if (!show_num_errors_dent)
                goto out_err;

        num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
                        dma_debug_dent, &num_free_entries);
        if (!num_free_entries_dent)
                goto out_err;

        min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
                        dma_debug_dent, &min_free_entries);
        if (!min_free_entries_dent)
                goto out_err;

        filter_dent = debugfs_create_file("driver_filter", 0644,
                                          dma_debug_dent, NULL, &filter_fops);
        if (!filter_dent)
                goto out_err;

        return 0;

out_err:
        debugfs_remove_recursive(dma_debug_dent);

        return -ENOMEM;
}
static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
{
        struct dma_debug_entry *entry;
        unsigned long flags;
        int count = 0, i;

        local_irq_save(flags);

        for (i = 0; i < HASH_SIZE; ++i) {
                spin_lock(&dma_entry_hash[i].lock);
                list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
                        if (entry->dev == dev) {
                                count += 1;
                                *out_entry = entry;
                        }
                }
                spin_unlock(&dma_entry_hash[i].lock);
        }

        local_irq_restore(flags);

        return count;
}
static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{
        struct device *dev = data;
        struct dma_debug_entry *uninitialized_var(entry);
        int count;

        if (dma_debug_disabled())
                return 0;

        switch (action) {
        case BUS_NOTIFY_UNBOUND_DRIVER:
                count = device_dma_allocations(dev, &entry);
                if (count == 0)
                        break;
                err_printk(dev, entry, "DMA-API: device driver has pending "
                                "DMA allocations while released from device "
                                "[count=%d]\n"
                                "One of leaked entries details: "
                                "[device address=0x%016llx] [size=%llu bytes] "
                                "[mapped with %s] [mapped as %s]\n",
                        count, entry->dev_addr, entry->size,
                        dir2name[entry->direction], type2name[entry->type]);
                break;
        default:
                break;
        }

        return 0;
}
void dma_debug_add_bus(struct bus_type *bus)
{
        struct notifier_block *nb;

        if (dma_debug_disabled())
                return;

        nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
        if (nb == NULL) {
                pr_err("dma_debug_add_bus: out of memory\n");
                return;
        }

        nb->notifier_call = dma_debug_device_change;

        bus_register_notifier(bus, nb);
}
/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
        int i;

        /* Do not use dma_debug_initialized here, since we really want to be
         * called to set dma_debug_initialized == true.
         */
        if (global_disable)
                return;

        for (i = 0; i < HASH_SIZE; ++i) {
                INIT_LIST_HEAD(&dma_entry_hash[i].list);
                spin_lock_init(&dma_entry_hash[i].lock);
        }

        if (dma_debug_fs_init() != 0) {
                pr_err("DMA-API: error creating debugfs entries - disabling\n");
                global_disable = true;

                return;
        }

        if (req_entries)
                num_entries = req_entries;

        if (prealloc_memory(num_entries) != 0) {
                pr_err("DMA-API: debugging out of memory error - disabled\n");
                global_disable = true;

                return;
        }

        nr_total_entries = num_free_entries;

        dma_debug_initialized = true;

        pr_info("DMA-API: debugging enabled by kernel config\n");
}
static __init int dma_debug_cmdline(char *str)
{
        if (!str)
                return -EINVAL;

        if (strncmp(str, "off", 3) == 0) {
                pr_info("DMA-API: debugging disabled on kernel command line\n");
                global_disable = true;
        }

        return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
        int res;

        if (!str)
                return -EINVAL;

        res = get_option(&str, &req_entries);

        if (!res)
                req_entries = 0;

        return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);
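/*
 * Usage sketch (illustrative): typical kernel command lines using the
 * parameters above,
 *
 *      dma_debug=off           - disable the checks entirely
 *      dma_debug_entries=8192  - preallocate 8192 entries instead of
 *                                the architecture default
 *
 * together with dma_debug_driver= (see dma_debug_driver_setup() at the
 * end of this file) to pre-set the driver filter at boot.
 */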
static void check_unmap(struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(ref, &flags);
        entry = bucket_find_exact(bucket, ref);

        if (!entry) {
                /* must drop lock before calling dma_mapping_error */
                put_hash_bucket(bucket, &flags);

                if (dma_mapping_error(ref->dev, ref->dev_addr)) {
                        err_printk(ref->dev, NULL,
                                   "DMA-API: device driver tries to free an "
                                   "invalid DMA memory address\n");
                } else {
                        err_printk(ref->dev, NULL,
                                   "DMA-API: device driver tries to free DMA "
                                   "memory it has not allocated [device "
                                   "address=0x%016llx] [size=%llu bytes]\n",
                                   ref->dev_addr, ref->size);
                }
                return;
        }

        if (ref->size != entry->size) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different size "
                           "[device address=0x%016llx] [map size=%llu bytes] "
                           "[unmap size=%llu bytes]\n",
                           ref->dev_addr, entry->size, ref->size);
        }

        if (ref->type != entry->type) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with wrong function "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped as %s] [unmapped as %s]\n",
                           ref->dev_addr, ref->size,
                           type2name[entry->type], type2name[ref->type]);
        } else if ((entry->type == dma_debug_coherent) &&
                   (phys_addr(ref) != phys_addr(entry))) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different CPU address "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[cpu alloc address=0x%016llx] "
                           "[cpu free address=0x%016llx]",
                           ref->dev_addr, ref->size,
                           phys_addr(entry),
                           phys_addr(ref));
        }

        if (ref->sg_call_ents && ref->type == dma_debug_sg &&
            ref->sg_call_ents != entry->sg_call_ents) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA sg list with different entry count "
                           "[map count=%d] [unmap count=%d]\n",
                           entry->sg_call_ents, ref->sg_call_ents);
        }

        /*
         * This may be no bug in reality - but most implementations of the
         * DMA API don't handle this properly, so check for it here
         */
        if (ref->direction != entry->direction) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different direction "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [unmapped with %s]\n",
                           ref->dev_addr, ref->size,
                           dir2name[entry->direction],
                           dir2name[ref->direction]);
        }

        /*
         * Drivers should use dma_mapping_error() to check the returned
         * addresses of dma_map_single() and dma_map_page().
         * If not, print this warning message. See Documentation/DMA-API.txt.
         */
        if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
                err_printk(ref->dev, entry,
                           "DMA-API: device driver failed to check map error "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped as %s]",
                           ref->dev_addr, ref->size,
                           type2name[entry->type]);
        }

        hash_bucket_del(entry);
        dma_entry_free(entry);

        put_hash_bucket(bucket, &flags);
}
static void check_for_stack(struct device *dev,
                            struct page *page, size_t offset)
{
        void *addr;
        struct vm_struct *stack_vm_area = task_stack_vm_area(current);

        if (!stack_vm_area) {
                /* Stack is direct-mapped. */
                if (PageHighMem(page))
                        return;
                addr = page_address(page) + offset;
                if (object_is_on_stack(addr))
                        err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [addr=%p]\n", addr);
        } else {
                /* Stack is vmalloced. */
                int i;

                for (i = 0; i < stack_vm_area->nr_pages; i++) {
                        if (page != stack_vm_area->pages[i])
                                continue;

                        addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
                        err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [probable addr=%p]\n", addr);
                        break;
                }
        }
}
static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
{
        unsigned long a1 = (unsigned long)addr;
        unsigned long b1 = a1 + len;
        unsigned long a2 = (unsigned long)start;
        unsigned long b2 = (unsigned long)end;

        return !(b1 <= a2 || a1 >= b2);
}
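/*
 * The check treats [addr, addr + len) and [start, end) as half-open
 * intervals: they intersect exactly when neither lies completely
 * before the other. E.g. (illustrative) a1 = 0x1000, len = 0x200
 * (so b1 = 0x1200) and a2 = 0x11ff, b2 = 0x2000 overlap, since
 * b1 > a2 and a1 < b2.
 */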
static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
{
        if (overlap(addr, len, _stext, _etext) ||
            overlap(addr, len, __start_rodata, __end_rodata))
                err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
}
static void check_sync(struct device *dev,
                       struct dma_debug_entry *ref,
                       bool to_cpu)
{
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(ref, &flags);

        entry = bucket_find_contain(&bucket, ref, &flags);

        if (!entry) {
                err_printk(dev, NULL, "DMA-API: device driver tries "
                           "to sync DMA memory it has not allocated "
                           "[device address=0x%016llx] [size=%llu bytes]\n",
                           (unsigned long long)ref->dev_addr, ref->size);
                goto out;
        }

        if (ref->size > entry->size) {
                err_printk(dev, entry, "DMA-API: device driver syncs"
                                " DMA memory outside allocated range "
                                "[device address=0x%016llx] "
                                "[allocation size=%llu bytes] "
                                "[sync offset+size=%llu]\n",
                                entry->dev_addr, entry->size,
                                ref->size);
        }

        if (entry->direction == DMA_BIDIRECTIONAL)
                goto out;

        if (ref->direction != entry->direction) {
                err_printk(dev, entry, "DMA-API: device driver syncs "
                                "DMA memory with different direction "
                                "[device address=0x%016llx] [size=%llu bytes] "
                                "[mapped with %s] [synced with %s]\n",
                                (unsigned long long)ref->dev_addr, entry->size,
                                dir2name[entry->direction],
                                dir2name[ref->direction]);
        }

        if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
                      !(ref->direction == DMA_TO_DEVICE))
                err_printk(dev, entry, "DMA-API: device driver syncs "
                                "device read-only DMA memory for cpu "
                                "[device address=0x%016llx] [size=%llu bytes] "
                                "[mapped with %s] [synced with %s]\n",
                                (unsigned long long)ref->dev_addr, entry->size,
                                dir2name[entry->direction],
                                dir2name[ref->direction]);

        if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
                       !(ref->direction == DMA_FROM_DEVICE))
                err_printk(dev, entry, "DMA-API: device driver syncs "
                                "device write-only DMA memory to device "
                                "[device address=0x%016llx] [size=%llu bytes] "
                                "[mapped with %s] [synced with %s]\n",
                                (unsigned long long)ref->dev_addr, entry->size,
                                dir2name[entry->direction],
                                dir2name[ref->direction]);

        if (ref->sg_call_ents && ref->type == dma_debug_sg &&
            ref->sg_call_ents != entry->sg_call_ents) {
                err_printk(ref->dev, entry, "DMA-API: device driver syncs "
                           "DMA sg list with different entry count "
                           "[map count=%d] [sync count=%d]\n",
                           entry->sg_call_ents, ref->sg_call_ents);
        }

out:
        put_hash_bucket(bucket, &flags);
}
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
                        size_t size, int direction, dma_addr_t dma_addr,
                        bool map_single)
{
        struct dma_debug_entry *entry;

        if (unlikely(dma_debug_disabled()))
                return;

        if (dma_mapping_error(dev, dma_addr))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->dev       = dev;
        entry->type      = dma_debug_page;
        entry->pfn       = page_to_pfn(page);
        entry->offset    = offset;
        entry->dev_addr  = dma_addr;
        entry->size      = size;
        entry->direction = direction;
        entry->map_err_type = MAP_ERR_NOT_CHECKED;

        if (map_single)
                entry->type = dma_debug_single;

        check_for_stack(dev, page, offset);

        if (!PageHighMem(page)) {
                void *addr = page_address(page) + offset;

                check_for_illegal_area(dev, addr, size);
        }

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);
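/*
 * Usage sketch (illustrative): the driver-side pattern this hook checks
 * for. "my_dev" and "my_page" are made-up names, not part of this file:
 *
 *      dma_addr_t handle = dma_map_page(my_dev, my_page, 0, size,
 *                                       DMA_TO_DEVICE);
 *      if (dma_mapping_error(my_dev, handle))
 *              return -ENOMEM;
 *
 * dma_map_page() lands here and records the mapping with
 * MAP_ERR_NOT_CHECKED; the dma_mapping_error() call reaches
 * debug_dma_mapping_error() below, which flips the state to
 * MAP_ERR_CHECKED so that check_unmap() stays quiet.
 */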
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        struct dma_debug_entry ref;
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        if (unlikely(dma_debug_disabled()))
                return;

        ref.dev = dev;
        ref.dev_addr = dma_addr;
        bucket = get_hash_bucket(&ref, &flags);

        list_for_each_entry(entry, &bucket->list, list) {
                if (!exact_match(&ref, entry))
                        continue;

                /*
                 * The same physical address can be mapped multiple
                 * times. Without a hardware IOMMU this results in the
                 * same device addresses being put into the dma-debug
                 * hash multiple times too. This can result in false
                 * positives being reported. Therefore we implement a
                 * best-fit algorithm here which updates the first entry
                 * from the hash which fits the reference value and is
                 * not currently listed as being checked.
                 */
                if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
                        entry->map_err_type = MAP_ERR_CHECKED;
                        break;
                }
        }

        put_hash_bucket(bucket, &flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);
void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
                          size_t size, int direction, bool map_single)
{
        struct dma_debug_entry ref = {
                .type           = dma_debug_page,
                .dev            = dev,
                .dev_addr       = addr,
                .size           = size,
                .direction      = direction,
        };

        if (unlikely(dma_debug_disabled()))
                return;

        if (map_single)
                ref.type = dma_debug_single;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);
void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
                      int nents, int mapped_ents, int direction)
{
        struct dma_debug_entry *entry;
        struct scatterlist *s;
        int i;

        if (unlikely(dma_debug_disabled()))
                return;

        for_each_sg(sg, s, mapped_ents, i) {
                entry = dma_entry_alloc();
                if (!entry)
                        return;

                entry->type           = dma_debug_sg;
                entry->dev            = dev;
                entry->pfn            = page_to_pfn(sg_page(s));
                entry->offset         = s->offset;
                entry->size           = sg_dma_len(s);
                entry->dev_addr       = sg_dma_address(s);
                entry->direction      = direction;
                entry->sg_call_ents   = nents;
                entry->sg_mapped_ents = mapped_ents;

                check_for_stack(dev, sg_page(s), s->offset);

                if (!PageHighMem(sg_page(s))) {
                        check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
                }

                add_dma_entry(entry);
        }
}
EXPORT_SYMBOL(debug_dma_map_sg);
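/*
 * Usage sketch (illustrative): dma_map_sg() may legally map fewer
 * segments than it was given, so drivers must use its return value,
 * e.g. ("my_dev"/"sglist" are made-up names):
 *
 *      int count = dma_map_sg(my_dev, sglist, nents, DMA_FROM_DEVICE);
 *      if (count == 0)
 *              return -EIO;
 *
 * That is why one debug entry per mapped_ents is created above while
 * sg_call_ents remembers the original nents; check_unmap() compares
 * the two again at dma_unmap_sg() time.
 */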
static int get_nr_mapped_entries(struct device *dev,
                                 struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;
        int mapped_ents;

        bucket      = get_hash_bucket(ref, &flags);
        entry       = bucket_find_exact(bucket, ref);
        mapped_ents = 0;

        if (entry)
                mapped_ents = entry->sg_mapped_ents;
        put_hash_bucket(bucket, &flags);

        return mapped_ents;
}
void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                        int nelems, int dir)
{
        struct scatterlist *s;
        int mapped_ents = 0, i;

        if (unlikely(dma_debug_disabled()))
                return;

        for_each_sg(sglist, s, nelems, i) {

                struct dma_debug_entry ref = {
                        .type           = dma_debug_sg,
                        .dev            = dev,
                        .pfn            = page_to_pfn(sg_page(s)),
                        .offset         = s->offset,
                        .dev_addr       = sg_dma_address(s),
                        .size           = sg_dma_len(s),
                        .direction      = dir,
                        .sg_call_ents   = nelems,
                };

                if (mapped_ents && i >= mapped_ents)
                        break;

                if (!i)
                        mapped_ents = get_nr_mapped_entries(dev, &ref);

                check_unmap(&ref);
        }
}
EXPORT_SYMBOL(debug_dma_unmap_sg);
void debug_dma_alloc_coherent(struct device *dev, size_t size,
                              dma_addr_t dma_addr, void *virt)
{
        struct dma_debug_entry *entry;

        if (unlikely(dma_debug_disabled()))
                return;

        if (unlikely(virt == NULL))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->type      = dma_debug_coherent;
        entry->dev       = dev;
        entry->pfn       = page_to_pfn(virt_to_page(virt));
        entry->offset    = (size_t) virt & ~PAGE_MASK;
        entry->size      = size;
        entry->dev_addr  = dma_addr;
        entry->direction = DMA_BIDIRECTIONAL;

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);
void debug_dma_free_coherent(struct device *dev, size_t size,
                             void *virt, dma_addr_t addr)
{
        struct dma_debug_entry ref = {
                .type           = dma_debug_coherent,
                .dev            = dev,
                .pfn            = page_to_pfn(virt_to_page(virt)),
                .offset         = (size_t) virt & ~PAGE_MASK,
                .dev_addr       = addr,
                .size           = size,
                .direction      = DMA_BIDIRECTIONAL,
        };

        if (unlikely(dma_debug_disabled()))
                return;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);
void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
                            int direction, dma_addr_t dma_addr)
{
        struct dma_debug_entry *entry;

        if (unlikely(dma_debug_disabled()))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->type             = dma_debug_resource;
        entry->dev              = dev;
        entry->pfn              = PHYS_PFN(addr);
        entry->offset           = offset_in_page(addr);
        entry->size             = size;
        entry->dev_addr         = dma_addr;
        entry->direction        = direction;
        entry->map_err_type     = MAP_ERR_NOT_CHECKED;

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_resource);
void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
                              size_t size, int direction)
{
        struct dma_debug_entry ref = {
                .type           = dma_debug_resource,
                .dev            = dev,
                .dev_addr       = dma_addr,
                .size           = size,
                .direction      = direction,
        };

        if (unlikely(dma_debug_disabled()))
                return;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_resource);
void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                   size_t size, int direction)
{
        struct dma_debug_entry ref;

        if (unlikely(dma_debug_disabled()))
                return;

        ref.type         = dma_debug_single;
        ref.dev          = dev;
        ref.dev_addr     = dma_handle;
        ref.size         = size;
        ref.direction    = direction;
        ref.sg_call_ents = 0;

        check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
                                      dma_addr_t dma_handle, size_t size,
                                      int direction)
{
        struct dma_debug_entry ref;

        if (unlikely(dma_debug_disabled()))
                return;

        ref.type         = dma_debug_single;
        ref.dev          = dev;
        ref.dev_addr     = dma_handle;
        ref.size         = size;
        ref.direction    = direction;
        ref.sg_call_ents = 0;

        check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);
void debug_dma_sync_single_range_for_cpu(struct device *dev,
                                         dma_addr_t dma_handle,
                                         unsigned long offset, size_t size,
                                         int direction)
{
        struct dma_debug_entry ref;

        if (unlikely(dma_debug_disabled()))
                return;

        ref.type         = dma_debug_single;
        ref.dev          = dev;
        ref.dev_addr     = dma_handle;
        ref.size         = offset + size;
        ref.direction    = direction;
        ref.sg_call_ents = 0;

        check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

void debug_dma_sync_single_range_for_device(struct device *dev,
                                            dma_addr_t dma_handle,
                                            unsigned long offset,
                                            size_t size, int direction)
{
        struct dma_debug_entry ref;

        if (unlikely(dma_debug_disabled()))
                return;

        ref.type         = dma_debug_single;
        ref.dev          = dev;
        ref.dev_addr     = dma_handle;
        ref.size         = offset + size;
        ref.direction    = direction;
        ref.sg_call_ents = 0;

        check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                               int nelems, int direction)
{
        struct scatterlist *s;
        int mapped_ents = 0, i;

        if (unlikely(dma_debug_disabled()))
                return;

        for_each_sg(sg, s, nelems, i) {

                struct dma_debug_entry ref = {
                        .type           = dma_debug_sg,
                        .dev            = dev,
                        .pfn            = page_to_pfn(sg_page(s)),
                        .offset         = s->offset,
                        .dev_addr       = sg_dma_address(s),
                        .size           = sg_dma_len(s),
                        .direction      = direction,
                        .sg_call_ents   = nelems,
                };

                if (!i)
                        mapped_ents = get_nr_mapped_entries(dev, &ref);

                if (i >= mapped_ents)
                        break;

                check_sync(dev, &ref, true);
        }
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                                  int nelems, int direction)
{
        struct scatterlist *s;
        int mapped_ents = 0, i;

        if (unlikely(dma_debug_disabled()))
                return;

        for_each_sg(sg, s, nelems, i) {

                struct dma_debug_entry ref = {
                        .type           = dma_debug_sg,
                        .dev            = dev,
                        .pfn            = page_to_pfn(sg_page(s)),
                        .offset         = s->offset,
                        .dev_addr       = sg_dma_address(s),
                        .size           = sg_dma_len(s),
                        .direction      = direction,
                        .sg_call_ents   = nelems,
                };

                if (!i)
                        mapped_ents = get_nr_mapped_entries(dev, &ref);

                if (i >= mapped_ents)
                        break;

                check_sync(dev, &ref, false);
        }
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
static int __init dma_debug_driver_setup(char *str)
{
        int i;

        for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
                current_driver_name[i] = *str;
                if (*str == 0)
                        break;
        }

        if (current_driver_name[0])
                pr_info("DMA-API: enable driver filter for driver [%s]\n",
                        current_driver_name);

        return 1;
}
__setup("dma_debug_driver=", dma_debug_driver_setup);