/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/sched/task.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>

#define HASH_SIZE	1024ULL
#define HASH_FN_SHIFT	13
#define HASH_FN_MASK	(HASH_SIZE - 1)

enum {
	dma_debug_single,
	dma_debug_page,
	dma_debug_sg,
	dma_debug_coherent,
	dma_debug_resource,
};

enum map_err_types {
	MAP_ERR_CHECK_NOT_APPLICABLE,
	MAP_ERR_NOT_CHECKED,
	MAP_ERR_CHECKED,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

/**
 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
 * @list: node on pre-allocated free_entries list
 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
 * @type: single, page, sg, coherent
 * @pfn: page frame of the start address
 * @offset: offset of mapping relative to pfn
 * @dev_addr: device address of this mapping
 * @size: length of the mapping
 * @direction: enum dma_data_direction
 * @sg_call_ents: 'nents' from dma_map_sg
 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
 * @map_err_type: track whether dma_mapping_error() was checked
 * @stacktrace: support backtraces when a violation is detected
 */
struct dma_debug_entry {
	struct list_head	list;
	struct device		*dev;
	int			type;
	unsigned long		pfn;
	size_t			offset;
	u64			dev_addr;
	u64			size;
	int			direction;
	int			sg_call_ents;
	int			sg_mapped_ents;
	enum map_err_types	map_err_type;
#ifdef CONFIG_STACKTRACE
	struct stack_trace	stacktrace;
	unsigned long		st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Early initialization disable flag, set at the end of dma_debug_init */
static bool dma_debug_initialized __read_mostly;

static inline bool dma_debug_disabled(void)
{
	return global_disable || !dma_debug_initialized;
}

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/* debugfs dentries for the stuff above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;
static struct dentry *filter_dent           __read_mostly;

/* per-driver filter related state */

#define NAME_MAX_LEN 64

static char current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);

static const char *const maperr2str[] = {
	[MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
	[MAP_ERR_NOT_CHECKED] = "dma map error not checked",
	[MAP_ERR_CHECKED] = "dma map error checked",
};

static const char *type2name[5] = { "single", "page",
				    "scatter-gather", "coherent",
				    "resource" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
				   "DMA_FROM_DEVICE", "DMA_NONE" };

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		pr_warning("Mapped at:\n");
		print_stack_trace(&entry->stacktrace, 0);
	}
#endif
}

static bool driver_filter(struct device *dev)
{
	struct device_driver *drv;
	unsigned long flags;
	bool ret;

	/* driver filter off */
	if (likely(!current_driver_name[0]))
		return true;

	/* driver filter on and initialized */
	if (current_driver && dev && dev->driver == current_driver)
		return true;

	/* driver filter on, but we can't filter on a NULL device... */
	if (!dev)
		return false;

	if (current_driver || !current_driver_name[0])
		return false;

	/* driver filter on but not yet initialized */
	drv = dev->driver;
	if (!drv)
		return false;

	/* lock to protect against change of current_driver_name */
	read_lock_irqsave(&driver_name_lock, flags);

	ret = false;
	if (drv->name &&
	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
		current_driver = drv;
		ret = true;
	}

	read_unlock_irqrestore(&driver_name_lock, flags);

	return ret;
}

#define err_printk(dev, entry, format, arg...) do {			\
		error_count += 1;					\
		if (driver_filter(dev) &&				\
		    (show_all_errors || show_num_errors > 0)) {		\
			WARN(1, "%s %s: " format,			\
			     dev ? dev_driver_string(dev) : "NULL",	\
			     dev ? dev_name(dev) : "NULL", ## arg);	\
			dump_entry_trace(entry);			\
		}							\
		if (!show_all_errors && show_num_errors > 0)		\
			show_num_errors -= 1;				\
	} while (0)

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * Hash function is based on the dma address.
	 * We use bits 13-22 here as the index into the hash
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}
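
/*
 * For example (arithmetic only): a mapping with dev_addr 0x12345678
 * lands in bucket (0x12345678 >> 13) & 0x3ff = 0x1a2; all entries
 * whose device addresses share bits 13-22 hash to the same bucket.
 */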

/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
	__acquires(&dma_entry_hash[idx].lock)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long *flags)
	__releases(&bucket->lock)
{
	unsigned long __flags = *flags;

	spin_unlock_irqrestore(&bucket->lock, __flags);
}

static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
	return ((a->dev_addr == b->dev_addr) &&
		(a->dev == b->dev)) ? true : false;
}

static bool containing_match(struct dma_debug_entry *a,
			     struct dma_debug_entry *b)
{
	if (a->dev != b->dev)
		return false;

	if ((b->dev_addr <= a->dev_addr) &&
	    ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
		return true;

	return false;
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
						  struct dma_debug_entry *ref,
						  match_fn match)
{
	struct dma_debug_entry *entry, *ret = NULL;
	int matches = 0, match_lvl, last_lvl = -1;

	list_for_each_entry(entry, &bucket->list, list) {
		if (!match(ref, entry))
			continue;

		/*
		 * Some drivers map the same physical address multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which returns the entry from
		 * the hash which fits best to the reference value
		 * instead of the first-fit.
		 */
		matches += 1;
		match_lvl = 0;
		entry->size         == ref->size         ? ++match_lvl : 0;
		entry->type         == ref->type         ? ++match_lvl : 0;
		entry->direction    == ref->direction    ? ++match_lvl : 0;
		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;

		if (match_lvl == 4) {
			/* perfect-fit - return the result */
			return entry;
		} else if (match_lvl > last_lvl) {
			/*
			 * We found an entry that fits better than the
			 * previous one or it is the 1st match.
			 */
			last_lvl = match_lvl;
			ret      = entry;
		}
	}

	/*
	 * If we have multiple matches but no perfect-fit, just return
	 * NULL.
	 */
	ret = (matches == 1) ? ret : NULL;

	return ret;
}
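
/*
 * Illustration of the best-fit rule above: if the same dev_addr was
 * mapped twice, once with size 0x100 and once with size 0x200, both
 * entries match an unmap reference by address. Only a reference that
 * agrees on all four attributes (size, type, direction, sg_call_ents)
 * reaches match_lvl == 4 and is returned; with several matches and no
 * such perfect fit the lookup deliberately returns NULL.
 */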

static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
						 struct dma_debug_entry *ref)
{
	return __hash_bucket_find(bucket, ref, exact_match);
}

static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
						   struct dma_debug_entry *ref,
						   unsigned long *flags)
{
	unsigned int max_range = dma_get_max_seg_size(ref->dev);
	struct dma_debug_entry *entry, index = *ref;
	unsigned int range = 0;

	while (range <= max_range) {
		entry = __hash_bucket_find(*bucket, ref, containing_match);

		if (entry)
			return entry;

		/*
		 * Nothing found, go back a hash bucket
		 */
		put_hash_bucket(*bucket, flags);
		range          += (1 << HASH_FN_SHIFT);
		index.dev_addr -= (1 << HASH_FN_SHIFT);
		*bucket = get_hash_bucket(&index, flags);
	}

	return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}

static unsigned long long phys_addr(struct dma_debug_entry *entry)
{
	if (entry->type == dma_debug_resource)
		return __pfn_to_phys(entry->pfn) + entry->offset;

	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);

		list_for_each_entry(entry, &bucket->list, list) {
			if (!dev || dev == entry->dev) {
				dev_info(entry->dev,
					 "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
					 type2name[entry->type], idx,
					 phys_addr(entry), entry->pfn,
					 entry->dev_addr, entry->size,
					 dir2name[entry->direction],
					 maperr2str[entry->map_err_type]);
			}
		}

		spin_unlock_irqrestore(&bucket->lock, flags);
	}
}
EXPORT_SYMBOL(debug_dma_dump_mappings);

/*
 * For each mapping (initial cacheline in the case of
 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
 * scatterlist, or the cacheline specified in dma_map_single) insert
 * into this tree using the cacheline as the key. At
 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
 * the entry already exists at insertion time add a tag as a reference
 * count for the overlapping mappings. For now, the overlap tracking
 * just ensures that 'unmaps' balance 'maps' before marking the
 * cacheline idle, but we should also be flagging overlaps as an API
 * violation.
 *
 * Memory usage is mostly constrained by the maximum number of available
 * dma-debug entries in that we need a free dma_debug_entry before
 * inserting into the tree. In the case of dma_map_page and
 * dma_alloc_coherent there is only one dma_debug_entry and one
 * dma_active_cacheline entry to track per event. dma_map_sg(), on the
 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
 * entries into the tree.
 *
 * At any time debug_dma_assert_idle() can be called to trigger a
 * warning if any cachelines in the given page are in the active set.
 */
static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
static DEFINE_SPINLOCK(radix_lock);
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
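
/*
 * The overlap count for a cacheline is stored in the radix tree's tag
 * bits. Assuming RADIX_TREE_MAX_TAGS == 3 (its value at the time of
 * writing), ACTIVE_CACHELINE_MAX_OVERLAP is (1 << 3) - 1 = 7, i.e. the
 * saturating counter below can distinguish at most 7 overlapping
 * mappings of one cacheline.
 */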

static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
{
	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
		(entry->offset >> L1_CACHE_SHIFT);
}
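
/*
 * Worked example (assuming PAGE_SHIFT == 12 and L1_CACHE_SHIFT == 6,
 * i.e. 4k pages and 64-byte cachelines, so CACHELINES_PER_PAGE == 64):
 * an entry with pfn == 0x100 and offset == 0x80 yields cacheline
 * number (0x100 << 6) + (0x80 >> 6) = 0x4002.
 */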

static int active_cacheline_read_overlap(phys_addr_t cln)
{
	int overlap = 0, i;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
			overlap |= 1 << i;
	return overlap;
}

static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
	int i;

	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
		return overlap;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (overlap & 1 << i)
			radix_tree_tag_set(&dma_active_cacheline, cln, i);
		else
			radix_tree_tag_clear(&dma_active_cacheline, cln, i);

	return overlap;
}

static void active_cacheline_inc_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	overlap = active_cacheline_set_overlap(cln, ++overlap);

	/* If we overflowed the overlap counter then we're potentially
	 * leaking dma-mappings. Otherwise, if maps and unmaps are
	 * balanced then this overflow may cause false negatives in
	 * debug_dma_assert_idle() as the cacheline may be marked idle
	 * prematurely.
	 */
	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
		  "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}

static int active_cacheline_dec_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	return active_cacheline_set_overlap(cln, --overlap);
}

static int active_cacheline_insert(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;
	int rc;

	/* If the device is not writing memory then we don't have any
	 * concerns about the cpu consuming stale data. This mitigates
	 * legitimate usages of overlapping mappings.
	 */
	if (entry->direction == DMA_TO_DEVICE)
		return 0;

	spin_lock_irqsave(&radix_lock, flags);
	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
	if (rc == -EEXIST)
		active_cacheline_inc_overlap(cln);
	spin_unlock_irqrestore(&radix_lock, flags);

	return rc;
}

static void active_cacheline_remove(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;

	/* ...mirror the insert case */
	if (entry->direction == DMA_TO_DEVICE)
		return;

	spin_lock_irqsave(&radix_lock, flags);
	/* since we are counting overlaps the final put of the
	 * cacheline will occur when the overlap count is 0.
	 * active_cacheline_dec_overlap() returns -1 in that case
	 */
	if (active_cacheline_dec_overlap(cln) < 0)
		radix_tree_delete(&dma_active_cacheline, cln);
	spin_unlock_irqrestore(&radix_lock, flags);
}

/**
 * debug_dma_assert_idle() - assert that a page is not undergoing dma
 * @page: page to lookup in the dma_active_cacheline tree
 *
 * Place a call to this routine in cases where the cpu touching the page
 * before the dma completes (i.e. before the page is dma_unmapped) would
 * lead to data corruption.
 */
void debug_dma_assert_idle(struct page *page)
{
	static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
	struct dma_debug_entry *entry = NULL;
	void **results = (void **) &ents;
	unsigned int nents, i;
	unsigned long flags;
	phys_addr_t cln;

	if (dma_debug_disabled())
		return;

	if (!page)
		return;

	cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
	spin_lock_irqsave(&radix_lock, flags);
	nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
				       CACHELINES_PER_PAGE);
	for (i = 0; i < nents; i++) {
		phys_addr_t ent_cln = to_cacheline_number(ents[i]);

		if (ent_cln == cln) {
			entry = ents[i];
			break;
		} else if (ent_cln >= cln + CACHELINES_PER_PAGE)
			break;
	}
	spin_unlock_irqrestore(&radix_lock, flags);

	if (!entry)
		return;

	cln = to_cacheline_number(entry);
	err_printk(entry->dev, entry,
		   "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
		   &cln);
}
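
/*
 * Illustrative caller (a sketch, not part of this file): code that is
 * about to let the cpu write into a page that might still be mapped
 * for DMA can assert idleness first:
 *
 *	debug_dma_assert_idle(src);
 *	copy_highpage(dst, src);
 *
 * If any cacheline of 'src' is still in the active set this triggers
 * the err_printk() report above.
 */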

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
	struct hash_bucket *bucket;
	unsigned long flags;
	int rc;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, &flags);

	rc = active_cacheline_insert(entry);
	if (rc == -ENOMEM) {
		pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
		global_disable = true;
	}

	/* TODO: report -EEXIST errors here as overlapping mappings are
	 * not supported by the DMA API
	 */
}

static struct dma_debug_entry *__dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

	return entry;
}

/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&free_entries_lock, flags);

	if (list_empty(&free_entries)) {
		global_disable = true;
		spin_unlock_irqrestore(&free_entries_lock, flags);
		pr_err("DMA-API: debugging out of memory - disabling\n");
		return NULL;
	}

	entry = __dma_entry_alloc();

	spin_unlock_irqrestore(&free_entries_lock, flags);

#ifdef CONFIG_STACKTRACE
	entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
	entry->stacktrace.entries = entry->st_entries;
	entry->stacktrace.skip = 2;
	save_stack_trace(&entry->stacktrace);
#endif

	return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	active_cacheline_remove(entry);

	/*
	 * add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}

int dma_debug_resize_entries(u32 num_entries)
{
	int i, delta, ret = 0;
	unsigned long flags;
	struct dma_debug_entry *entry;
	LIST_HEAD(tmp);

	spin_lock_irqsave(&free_entries_lock, flags);

	if (nr_total_entries < num_entries) {
		delta = num_entries - nr_total_entries;

		spin_unlock_irqrestore(&free_entries_lock, flags);

		for (i = 0; i < delta; i++) {
			entry = kzalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry)
				break;

			list_add_tail(&entry->list, &tmp);
		}

		spin_lock_irqsave(&free_entries_lock, flags);

		list_splice(&tmp, &free_entries);
		nr_total_entries += i;
		num_free_entries += i;
	} else {
		delta = nr_total_entries - num_entries;

		for (i = 0; i < delta && !list_empty(&free_entries); i++) {
			entry = __dma_entry_alloc();
			kfree(entry);
		}

		nr_total_entries -= i;
	}

	if (nr_total_entries != num_entries)
		ret = 1;

	spin_unlock_irqrestore(&free_entries_lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_debug_resize_entries);

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */
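
/*
 * A minimal sketch of the architecture-side call (the constant name
 * and the init hook differ between architectures; the value below is
 * only an illustrative choice):
 *
 *	#define PREALLOC_DMA_DEBUG_ENTRIES	(1 << 16)
 *
 *	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
 *
 * dma_debug_init() below consumes this number via prealloc_memory().
 */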

static int prealloc_memory(u32 num_entries)
{
	struct dma_debug_entry *entry, *next_entry;
	int i;

	for (i = 0; i < num_entries; ++i) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			goto out_err;

		list_add_tail(&entry->list, &free_entries);
	}

	num_free_entries = num_entries;
	min_free_entries = num_entries;

	pr_info("DMA-API: preallocated %d debug entries\n", num_entries);

	return 0;

out_err:

	list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	return -ENOMEM;
}

static ssize_t filter_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN + 1];
	unsigned long flags;
	int len;

	if (!current_driver_name[0])
		return 0;

	/*
	 * We can't copy to userspace directly because current_driver_name can
	 * only be read under the driver_name_lock with irqs disabled. So
	 * create a temporary copy first.
	 */
	read_lock_irqsave(&driver_name_lock, flags);
	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
	read_unlock_irqrestore(&driver_name_lock, flags);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t filter_write(struct file *file, const char __user *userbuf,
			    size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN];
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * We can't copy from userspace directly. Access to
	 * current_driver_name is protected with a write_lock with irqs
	 * disabled. Since copy_from_user can fault and may sleep we
	 * need to copy to temporary buffer first
	 */
	len = min(count, (size_t)(NAME_MAX_LEN - 1));
	if (copy_from_user(buf, userbuf, len))
		return -EFAULT;

	buf[len] = 0;

	write_lock_irqsave(&driver_name_lock, flags);

	/*
	 * Now handle the string we got from userspace very carefully.
	 * The rules are:
	 * - only use the first token we got
	 * - token delimiter is everything looking like a space
	 *   character (' ', '\n', '\t' ...)
	 */
	if (!isalnum(buf[0])) {
		/*
		 * If the first character userspace gave us is not
		 * alphanumerical then assume the filter should be
		 * switched off.
		 */
		if (current_driver_name[0])
			pr_info("DMA-API: switching off dma-debug driver filter\n");
		current_driver_name[0] = 0;
		current_driver = NULL;
		goto out_unlock;
	}

	/*
	 * Now parse out the first token and use it as the name for the
	 * driver to filter for.
	 */
	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
		current_driver_name[i] = buf[i];
		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
			break;
	}
	current_driver_name[i] = 0;
	current_driver = NULL;

	pr_info("DMA-API: enable driver filter for driver [%s]\n",
		current_driver_name);

out_unlock:
	write_unlock_irqrestore(&driver_name_lock, flags);

	return count;
}
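
/*
 * Usage sketch for the debugfs file backed by these handlers (paths
 * assume debugfs is mounted at /sys/kernel/debug; the driver name is
 * an arbitrary example):
 *
 *	echo e1000e > /sys/kernel/debug/dma-api/driver_filter
 *	echo ""     > /sys/kernel/debug/dma-api/driver_filter	# filter off
 */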

static const struct file_operations filter_fops = {
	.read  = filter_read,
	.write = filter_write,
	.llseek = default_llseek,
};

static int dma_debug_fs_init(void)
{
	dma_debug_dent = debugfs_create_dir("dma-api", NULL);
	if (!dma_debug_dent) {
		pr_err("DMA-API: can not create debugfs directory\n");
		return -ENOMEM;
	}

	global_disable_dent = debugfs_create_bool("disabled", 0444,
						  dma_debug_dent,
						  &global_disable);
	if (!global_disable_dent)
		goto out_err;

	error_count_dent = debugfs_create_u32("error_count", 0444,
					      dma_debug_dent, &error_count);
	if (!error_count_dent)
		goto out_err;

	show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
						  dma_debug_dent,
						  &show_all_errors);
	if (!show_all_errors_dent)
		goto out_err;

	show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
						  dma_debug_dent,
						  &show_num_errors);
	if (!show_num_errors_dent)
		goto out_err;

	num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
						   dma_debug_dent,
						   &num_free_entries);
	if (!num_free_entries_dent)
		goto out_err;

	min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
						   dma_debug_dent,
						   &min_free_entries);
	if (!min_free_entries_dent)
		goto out_err;

	filter_dent = debugfs_create_file("driver_filter", 0644,
					  dma_debug_dent, NULL, &filter_fops);
	if (!filter_dent)
		goto out_err;

	return 0;

out_err:
	debugfs_remove_recursive(dma_debug_dent);

	return -ENOMEM;
}

static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
{
	struct dma_debug_entry *entry;
	unsigned long flags;
	int count = 0, i;

	local_irq_save(flags);

	for (i = 0; i < HASH_SIZE; ++i) {
		spin_lock(&dma_entry_hash[i].lock);
		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
			if (entry->dev == dev) {
				count += 1;
				*out_entry = entry;
			}
		}
		spin_unlock(&dma_entry_hash[i].lock);
	}

	local_irq_restore(flags);

	return count;
}

static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;
	struct dma_debug_entry *uninitialized_var(entry);
	int count;

	if (dma_debug_disabled())
		return 0;

	switch (action) {
	case BUS_NOTIFY_UNBOUND_DRIVER:
		count = device_dma_allocations(dev, &entry);
		if (count == 0)
			break;
		err_printk(dev, entry, "DMA-API: device driver has pending "
				"DMA allocations while released from device "
				"[count=%d]\n"
				"One of leaked entries details: "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[mapped with %s] [mapped as %s]\n",
			count, entry->dev_addr, entry->size,
			dir2name[entry->direction], type2name[entry->type]);
		break;
	default:
		break;
	}

	return 0;
}

void dma_debug_add_bus(struct bus_type *bus)
{
	struct notifier_block *nb;

	if (dma_debug_disabled())
		return;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (nb == NULL) {
		pr_err("dma_debug_add_bus: out of memory\n");
		return;
	}

	nb->notifier_call = dma_debug_device_change;

	bus_register_notifier(bus, nb);
}

/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
	int i;

	/* Do not use dma_debug_initialized here, since we really want to be
	 * called to set dma_debug_initialized
	 */
	if (global_disable)
		return;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	if (dma_debug_fs_init() != 0) {
		pr_err("DMA-API: error creating debugfs entries - disabling\n");
		global_disable = true;

		return;
	}

	if (req_entries)
		num_entries = req_entries;

	if (prealloc_memory(num_entries) != 0) {
		pr_err("DMA-API: debugging out of memory error - disabled\n");
		global_disable = true;

		return;
	}

	nr_total_entries = num_free_entries;

	dma_debug_initialized = true;

	pr_info("DMA-API: debugging enabled by kernel config\n");
}

static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		pr_info("DMA-API: debugging disabled on kernel command line\n");
		global_disable = true;
	}

	return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	int res;

	if (!str)
		return -EINVAL;

	res = get_option(&str, &req_entries);

	if (!res)
		req_entries = 0;

	return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);

static void check_unmap(struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);
	entry = bucket_find_exact(bucket, ref);

	if (!entry) {
		/* must drop lock before calling dma_mapping_error */
		put_hash_bucket(bucket, &flags);

		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
			err_printk(ref->dev, NULL,
				   "DMA-API: device driver tries to free an "
				   "invalid DMA memory address\n");
		} else {
			err_printk(ref->dev, NULL,
				   "DMA-API: device driver tries to free DMA "
				   "memory it has not allocated [device "
				   "address=0x%016llx] [size=%llu bytes]\n",
				   ref->dev_addr, ref->size);
		}
		return;
	}

	if (ref->size != entry->size) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different size "
			   "[device address=0x%016llx] [map size=%llu bytes] "
			   "[unmap size=%llu bytes]\n",
			   ref->dev_addr, entry->size, ref->size);
	}

	if (ref->type != entry->type) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with wrong function "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s] [unmapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type], type2name[ref->type]);
	} else if ((entry->type == dma_debug_coherent) &&
		   (phys_addr(ref) != phys_addr(entry))) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different CPU address "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[cpu alloc address=0x%016llx] "
			   "[cpu free address=0x%016llx]",
			   ref->dev_addr, ref->size,
			   phys_addr(entry),
			   phys_addr(ref));
	}

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA sg list with different entry count "
			   "[map count=%d] [unmap count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

	/*
	 * This may be no bug in reality - but most implementations of the
	 * DMA API don't handle this properly, so check for it here
	 */
	if (ref->direction != entry->direction) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [unmapped with %s]\n",
			   ref->dev_addr, ref->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	/*
	 * Drivers should use dma_mapping_error() to check the returned
	 * addresses of dma_map_single() and dma_map_page().
	 * If not, print this warning message. See Documentation/DMA-API.txt.
	 */
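	/*
	 * The checked pattern looks like this on the driver side
	 * (illustrative sketch):
	 *
	 *	dma_addr_t addr = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
	 *	if (dma_mapping_error(dev, addr))
	 *		return -ENOMEM;
	 */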
	if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
		err_printk(ref->dev, entry,
			   "DMA-API: device driver failed to check map error "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s]",
			   ref->dev_addr, ref->size,
			   type2name[entry->type]);
	}

	hash_bucket_del(entry);
	dma_entry_free(entry);

	put_hash_bucket(bucket, &flags);
}

static void check_for_stack(struct device *dev,
			    struct page *page, size_t offset)
{
	void *addr;
	struct vm_struct *stack_vm_area = task_stack_vm_area(current);

	if (!stack_vm_area) {
		/* Stack is direct-mapped. */
		if (PageHighMem(page))
			return;
		addr = page_address(page) + offset;
		if (object_is_on_stack(addr))
			err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [addr=%p]\n", addr);
	} else {
		/* Stack is vmalloced. */
		int i;

		for (i = 0; i < stack_vm_area->nr_pages; i++) {
			if (page != stack_vm_area->pages[i])
				continue;

			addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
			err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [probable addr=%p]\n", addr);
			break;
		}
	}
}

static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
{
	unsigned long a1 = (unsigned long)addr;
	unsigned long b1 = a1 + len;
	unsigned long a2 = (unsigned long)start;
	unsigned long b2 = (unsigned long)end;

	return !(b1 <= a2 || a1 >= b2);
}

static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
{
	if (overlap(addr, len, _stext, _etext) ||
	    overlap(addr, len, __start_rodata, __end_rodata))
		err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
}

static void check_sync(struct device *dev,
		       struct dma_debug_entry *ref,
		       bool to_cpu)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);

	entry = bucket_find_contain(&bucket, ref, &flags);

	if (!entry) {
		err_printk(dev, NULL, "DMA-API: device driver tries "
			   "to sync DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   (unsigned long long)ref->dev_addr, ref->size);
		goto out;
	}

	if (ref->size > entry->size) {
		err_printk(dev, entry, "DMA-API: device driver syncs"
			   " DMA memory outside allocated range "
			   "[device address=0x%016llx] "
			   "[allocation size=%llu bytes] "
			   "[sync offset+size=%llu]\n",
			   entry->dev_addr, entry->size,
			   ref->size);
	}

	if (entry->direction == DMA_BIDIRECTIONAL)
		goto out;

	if (ref->direction != entry->direction) {
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
	    !(ref->direction == DMA_TO_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "device read-only DMA memory for cpu "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
	    !(ref->direction == DMA_FROM_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "device write-only DMA memory to device "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "DMA-API: device driver syncs "
			   "DMA sg list with different entry count "
			   "[map count=%d] [sync count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

out:
	put_hash_bucket(bucket, &flags);
}

void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr,
			bool map_single)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (dma_mapping_error(dev, dma_addr))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev = dev;
	entry->type = dma_debug_page;
	entry->pfn = page_to_pfn(page);
	entry->offset = offset;
	entry->dev_addr = dma_addr;
	entry->size = size;
	entry->direction = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	if (map_single)
		entry->type = dma_debug_single;

	check_for_stack(dev, page, offset);

	if (!PageHighMem(page)) {
		void *addr = page_address(page) + offset;

		check_for_illegal_area(dev, addr, size);
	}

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);

void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_debug_entry ref;
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.dev = dev;
	ref.dev_addr = dma_addr;
	bucket = get_hash_bucket(&ref, &flags);

	list_for_each_entry(entry, &bucket->list, list) {
		if (!exact_match(&ref, entry))
			continue;

		/*
		 * The same physical address can be mapped multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we mark the first
		 * entry from the hash which fits the reference value
		 * and is not currently listed as being checked.
		 */
		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
			entry->map_err_type = MAP_ERR_CHECKED;
			break;
		}
	}

	put_hash_bucket(bucket, &flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);

void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
			  size_t size, int direction, bool map_single)
{
	struct dma_debug_entry ref = {
		.type = dma_debug_page,
		.dev = dev,
		.dev_addr = addr,
		.size = size,
		.direction = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	if (map_single)
		ref.type = dma_debug_single;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);
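
/*
 * Driver-side sketch of the pairing that check_unmap() verifies
 * (illustrative only):
 *
 *	dma_addr_t addr = dma_map_page(dev, page, off, len, DMA_FROM_DEVICE);
 *	...
 *	dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
 *
 * Unmapping with a different size, direction or flavour (page vs.
 * single) triggers one of the err_printk() reports in check_unmap().
 */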

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, mapped_ents, i) {
		entry = dma_entry_alloc();
		if (!entry)
			return;

		entry->type = dma_debug_sg;
		entry->dev = dev;
		entry->pfn = page_to_pfn(sg_page(s));
		entry->offset = s->offset;
		entry->size = sg_dma_len(s);
		entry->dev_addr = sg_dma_address(s);
		entry->direction = direction;
		entry->sg_call_ents = nents;
		entry->sg_mapped_ents = mapped_ents;

		check_for_stack(dev, sg_page(s), s->offset);

		if (!PageHighMem(sg_page(s))) {
			check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
		}

		add_dma_entry(entry);
	}
}
EXPORT_SYMBOL(debug_dma_map_sg);
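
/*
 * Note the two entry counts tracked above. A driver calls
 *
 *	n = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *
 * and an IOMMU may coalesce scatterlist entries, so the returned n
 * (mapped_ents) can be smaller than nents (sg_call_ents).
 * debug_dma_unmap_sg() below walks only mapped_ents entries but checks
 * that the unmap call passes the original nents.
 */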

static int get_nr_mapped_entries(struct device *dev,
				 struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;
	int mapped_ents;

	bucket = get_hash_bucket(ref, &flags);
	entry = bucket_find_exact(bucket, ref);
	mapped_ents = 0;

	if (entry)
		mapped_ents = entry->sg_mapped_ents;
	put_hash_bucket(bucket, &flags);

	return mapped_ents;
}

void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sglist, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type = dma_debug_sg,
			.dev = dev,
			.pfn = page_to_pfn(sg_page(s)),
			.offset = s->offset,
			.dev_addr = sg_dma_address(s),
			.size = sg_dma_len(s),
			.direction = dir,
			.sg_call_ents = nelems,
		};

		if (mapped_ents && i >= mapped_ents)
			break;

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		check_unmap(&ref);
	}
}
EXPORT_SYMBOL(debug_dma_unmap_sg);

void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (unlikely(virt == NULL))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type = dma_debug_coherent;
	entry->dev = dev;
	entry->pfn = page_to_pfn(virt_to_page(virt));
	entry->offset = (size_t) virt & ~PAGE_MASK;
	entry->size = size;
	entry->dev_addr = dma_addr;
	entry->direction = DMA_BIDIRECTIONAL;

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);

void debug_dma_free_coherent(struct device *dev, size_t size,
			     void *virt, dma_addr_t addr)
{
	struct dma_debug_entry ref = {
		.type = dma_debug_coherent,
		.dev = dev,
		.pfn = page_to_pfn(virt_to_page(virt)),
		.offset = (size_t) virt & ~PAGE_MASK,
		.dev_addr = addr,
		.size = size,
		.direction = DMA_BIDIRECTIONAL,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);

void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
			    int direction, dma_addr_t dma_addr)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type = dma_debug_resource;
	entry->dev = dev;
	entry->pfn = PHYS_PFN(addr);
	entry->offset = offset_in_page(addr);
	entry->size = size;
	entry->dev_addr = dma_addr;
	entry->direction = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_resource);

void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	struct dma_debug_entry ref = {
		.type = dma_debug_resource,
		.dev = dev,
		.dev_addr = dma_addr,
		.size = size,
		.direction = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_resource);

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type = dma_debug_single;
	ref.dev = dev;
	ref.dev_addr = dma_handle;
	ref.size = size;
	ref.direction = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type = dma_debug_single;
	ref.dev = dev;
	ref.dev_addr = dma_handle;
	ref.size = size;
	ref.direction = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);

void debug_dma_sync_single_range_for_cpu(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type = dma_debug_single;
	ref.dev = dev;
	ref.dev_addr = dma_handle;
	ref.size = offset + size;
	ref.direction = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

void debug_dma_sync_single_range_for_device(struct device *dev,
					    dma_addr_t dma_handle,
					    unsigned long offset,
					    size_t size, int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type = dma_debug_single;
	ref.dev = dev;
	ref.dev_addr = dma_handle;
	ref.size = offset + size;
	ref.direction = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);

void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type = dma_debug_sg,
			.dev = dev,
			.pfn = page_to_pfn(sg_page(s)),
			.offset = s->offset,
			.dev_addr = sg_dma_address(s),
			.size = sg_dma_len(s),
			.direction = direction,
			.sg_call_ents = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, true);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type = dma_debug_sg,
			.dev = dev,
			.pfn = page_to_pfn(sg_page(s)),
			.offset = s->offset,
			.dev_addr = sg_dma_address(s),
			.size = sg_dma_len(s),
			.direction = direction,
			.sg_call_ents = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, false);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);

static int __init dma_debug_driver_setup(char *str)
{
	int i;

	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
		current_driver_name[i] = *str;
		if (*str == 0)
			break;
	}

	if (current_driver_name[0])
		pr_info("DMA-API: enable driver filter for driver [%s]\n",
			current_driver_name);

	return 1;
}
__setup("dma_debug_driver=", dma_debug_driver_setup);