/* lib/dma-debug.c */
/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>

#define HASH_SIZE	1024ULL
#define HASH_FN_SHIFT	13
#define HASH_FN_MASK	(HASH_SIZE - 1)

enum {
	dma_debug_single,
	dma_debug_page,
	dma_debug_sg,
	dma_debug_coherent,
};

enum map_err_types {
	MAP_ERR_CHECK_NOT_APPLICABLE,
	MAP_ERR_NOT_CHECKED,
	MAP_ERR_CHECKED,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

/**
 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
 * @list: node on pre-allocated free_entries list
 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
 * @type: single, page, sg, coherent
 * @pfn: page frame of the start address
 * @offset: offset of mapping relative to pfn
 * @dev_addr: device address of the mapping
 * @size: length of the mapping
 * @direction: enum dma_data_direction
 * @sg_call_ents: 'nents' from dma_map_sg
 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
 * @map_err_type: track whether dma_mapping_error() was checked
 * @stacktrace: support backtraces when a violation is detected
 */
struct dma_debug_entry {
	struct list_head list;
	struct device *dev;
	int type;
	unsigned long pfn;
	size_t offset;
	u64 dev_addr;
	u64 size;
	int direction;
	int sg_call_ents;
	int sg_mapped_ents;
	enum map_err_types map_err_type;
#ifdef CONFIG_STACKTRACE
	struct stack_trace stacktrace;
	unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Early initialization disable flag, set at the end of dma_debug_init */
static bool dma_debug_initialized __read_mostly;

static inline bool dma_debug_disabled(void)
{
	return global_disable || !dma_debug_initialized;
}

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/* debugfs dentries for the stuff above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;
static struct dentry *filter_dent           __read_mostly;

/* per-driver filter related state */

#define NAME_MAX_LEN	64

static char current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver  __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);

static const char *const maperr2str[] = {
	[MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
	[MAP_ERR_NOT_CHECKED] = "dma map error not checked",
	[MAP_ERR_CHECKED] = "dma map error checked",
};

static const char *type2name[4] = { "single", "page",
				    "scatter-gather", "coherent" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
				   "DMA_FROM_DEVICE", "DMA_NONE" };

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		pr_warning("Mapped at:\n");
		print_stack_trace(&entry->stacktrace, 0);
	}
#endif
}

static bool driver_filter(struct device *dev)
{
	struct device_driver *drv;
	unsigned long flags;
	bool ret;

	/* driver filter off */
	if (likely(!current_driver_name[0]))
		return true;

	/* driver filter on and initialized */
	if (current_driver && dev && dev->driver == current_driver)
		return true;

	/* driver filter on, but we can't filter on a NULL device... */
	if (!dev)
		return false;

	if (current_driver || !current_driver_name[0])
		return false;

	/* driver filter on but not yet initialized */
	drv = dev->driver;
	if (!drv)
		return false;

	/* lock to protect against change of current_driver_name */
	read_lock_irqsave(&driver_name_lock, flags);

	ret = false;
	if (drv->name &&
	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
		current_driver = drv;
		ret = true;
	}

	read_unlock_irqrestore(&driver_name_lock, flags);

	return ret;
}

#define err_printk(dev, entry, format, arg...) do {		\
		error_count += 1;				\
		if (driver_filter(dev) &&			\
		    (show_all_errors || show_num_errors > 0)) {	\
			WARN(1, "%s %s: " format,		\
			     dev ? dev_driver_string(dev) : "NULL", \
			     dev ? dev_name(dev) : "NULL", ## arg); \
			dump_entry_trace(entry);		\
		}						\
		if (!show_all_errors && show_num_errors > 0)	\
			show_num_errors -= 1;			\
	} while (0)

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * Hash function is based on the dma address.
	 * We use bits 13-22 here as the index into the hash
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

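/*
 * Example: a dev_addr of 0x12345678 hashes to
 * (0x12345678 >> 13) & 0x3ff = 0x91a2 & 0x3ff = 0x1a2, so two mappings
 * whose device addresses differ only in the low 13 bits share a bucket.
 */
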
/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
	__acquires(&dma_entry_hash[idx].lock)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long *flags)
	__releases(&bucket->lock)
{
	unsigned long __flags = *flags;

	spin_unlock_irqrestore(&bucket->lock, __flags);
}

static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
	return ((a->dev_addr == b->dev_addr) &&
		(a->dev == b->dev)) ? true : false;
}

static bool containing_match(struct dma_debug_entry *a,
			     struct dma_debug_entry *b)
{
	if (a->dev != b->dev)
		return false;

	if ((b->dev_addr <= a->dev_addr) &&
	    ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
		return true;

	return false;
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
						  struct dma_debug_entry *ref,
						  match_fn match)
{
	struct dma_debug_entry *entry, *ret = NULL;
	int matches = 0, match_lvl, last_lvl = -1;

	list_for_each_entry(entry, &bucket->list, list) {
		if (!match(ref, entry))
			continue;

		/*
		 * Some drivers map the same physical address multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which returns the entry from
		 * the hash which fits best to the reference value
		 * instead of the first-fit.
		 */
		matches += 1;
		match_lvl = 0;
		entry->size         == ref->size         ? ++match_lvl : 0;
		entry->type         == ref->type         ? ++match_lvl : 0;
		entry->direction    == ref->direction    ? ++match_lvl : 0;
		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;

		if (match_lvl == 4) {
			/* perfect-fit - return the result */
			return entry;
		} else if (match_lvl > last_lvl) {
			/*
			 * We found an entry that fits better than the
			 * previous one or it is the 1st match.
			 */
			last_lvl = match_lvl;
			ret = entry;
		}
	}

	/*
	 * If we have multiple matches but no perfect-fit, just return
	 * NULL.
	 */
	ret = (matches == 1) ? ret : NULL;

	return ret;
}

static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
						 struct dma_debug_entry *ref)
{
	return __hash_bucket_find(bucket, ref, exact_match);
}

static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
						   struct dma_debug_entry *ref,
						   unsigned long *flags)
{
	unsigned int max_range = dma_get_max_seg_size(ref->dev);
	struct dma_debug_entry *entry, index = *ref;
	unsigned int range = 0;

	while (range <= max_range) {
		entry = __hash_bucket_find(*bucket, ref, containing_match);

		if (entry)
			return entry;

		/*
		 * Nothing found, go back a hash bucket
		 */
		put_hash_bucket(*bucket, flags);
		range          += (1 << HASH_FN_SHIFT);
		index.dev_addr -= (1 << HASH_FN_SHIFT);
		*bucket = get_hash_bucket(&index, flags);
	}

	return NULL;
}

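/*
 * Walking backwards works because each hash bucket covers a 1 << 13 byte
 * slice of the device address space. Example: a mapping starting at
 * dev_addr 0x2000 with size 0x3000 lives in bucket 1, but a sync of
 * 0x4000 within it hashes to bucket 2; stepping index.dev_addr back by
 * 1 << HASH_FN_SHIFT lands in bucket 1 where containing_match succeeds.
 */
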
/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}

static unsigned long long phys_addr(struct dma_debug_entry *entry)
{
	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);

		list_for_each_entry(entry, &bucket->list, list) {
			if (!dev || dev == entry->dev) {
				dev_info(entry->dev,
					 "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
					 type2name[entry->type], idx,
					 phys_addr(entry), entry->pfn,
					 entry->dev_addr, entry->size,
					 dir2name[entry->direction],
					 maperr2str[entry->map_err_type]);
			}
		}

		spin_unlock_irqrestore(&bucket->lock, flags);
	}
}
EXPORT_SYMBOL(debug_dma_dump_mappings);

/*
 * For each mapping (initial cacheline in the case of
 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
 * scatterlist, or the cacheline specified in dma_map_single) insert
 * into this tree using the cacheline as the key. At
 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
 * the entry already exists at insertion time add a tag as a reference
 * count for the overlapping mappings. For now, the overlap tracking
 * just ensures that 'unmaps' balance 'maps' before marking the
 * cacheline idle, but we should also be flagging overlaps as an API
 * violation.
 *
 * Memory usage is mostly constrained by the maximum number of available
 * dma-debug entries in that we need a free dma_debug_entry before
 * inserting into the tree. In the case of dma_map_page and
 * dma_alloc_coherent there is only one dma_debug_entry and one
 * dma_active_cacheline entry to track per event. dma_map_sg(), on the
 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
 * entries into the tree.
 *
 * At any time debug_dma_assert_idle() can be called to trigger a
 * warning if any cachelines in the given page are in the active set.
 */
static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
static DEFINE_SPINLOCK(radix_lock);
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)

static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
{
	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
		(entry->offset >> L1_CACHE_SHIFT);
}

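/*
 * Example, assuming 4K pages and 64-byte L1 cachelines
 * (CACHELINE_PER_PAGE_SHIFT == 6): pfn 2 with offset 0x80 maps to
 * cacheline number (2 << 6) + (0x80 >> 6) = 128 + 2 = 130.
 */
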
static int active_cacheline_read_overlap(phys_addr_t cln)
{
	int overlap = 0, i;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
			overlap |= 1 << i;
	return overlap;
}

static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
	int i;

	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
		return overlap;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (overlap & 1 << i)
			radix_tree_tag_set(&dma_active_cacheline, cln, i);
		else
			radix_tree_tag_clear(&dma_active_cacheline, cln, i);

	return overlap;
}

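/*
 * The radix tree offers RADIX_TREE_MAX_TAGS tag bits per slot (3 in
 * kernels of this vintage), so the overlap count is effectively a 3-bit
 * saturating counter: values 0 through ACTIVE_CACHELINE_MAX_OVERLAP (7),
 * with set_overlap refusing to store anything outside that range.
 */
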
static void active_cacheline_inc_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	overlap = active_cacheline_set_overlap(cln, ++overlap);

	/* If we overflowed the overlap counter then we're potentially
	 * leaking dma-mappings.  Otherwise, if maps and unmaps are
	 * balanced then this overflow may cause false negatives in
	 * debug_dma_assert_idle() as the cacheline may be marked idle
	 * prematurely.
	 */
	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
		  "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}

static int active_cacheline_dec_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	return active_cacheline_set_overlap(cln, --overlap);
}

static int active_cacheline_insert(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;
	int rc;

	/* If the device is not writing memory then we don't have any
	 * concerns about the cpu consuming stale data.  This mitigates
	 * legitimate usages of overlapping mappings.
	 */
	if (entry->direction == DMA_TO_DEVICE)
		return 0;

	spin_lock_irqsave(&radix_lock, flags);
	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
	if (rc == -EEXIST)
		active_cacheline_inc_overlap(cln);
	spin_unlock_irqrestore(&radix_lock, flags);

	return rc;
}

static void active_cacheline_remove(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;

	/* ...mirror the insert case */
	if (entry->direction == DMA_TO_DEVICE)
		return;

	spin_lock_irqsave(&radix_lock, flags);
	/* since we are counting overlaps the final put of the
	 * cacheline will occur when the overlap count is 0.
	 * active_cacheline_dec_overlap() returns -1 in that case
	 */
	if (active_cacheline_dec_overlap(cln) < 0)
		radix_tree_delete(&dma_active_cacheline, cln);
	spin_unlock_irqrestore(&radix_lock, flags);
}

/**
 * debug_dma_assert_idle() - assert that a page is not undergoing dma
 * @page: page to lookup in the dma_active_cacheline tree
 *
 * Place a call to this routine in cases where the cpu touching the page
 * before the dma completes (page is dma_unmapped) will lead to data
 * corruption.
 */
void debug_dma_assert_idle(struct page *page)
{
	static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
	struct dma_debug_entry *entry = NULL;
	void **results = (void **) &ents;
	unsigned int nents, i;
	unsigned long flags;
	phys_addr_t cln;

	if (dma_debug_disabled())
		return;

	if (!page)
		return;

	cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
	spin_lock_irqsave(&radix_lock, flags);
	nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
				       CACHELINES_PER_PAGE);
	for (i = 0; i < nents; i++) {
		phys_addr_t ent_cln = to_cacheline_number(ents[i]);

		if (ent_cln == cln) {
			entry = ents[i];
			break;
		} else if (ent_cln >= cln + CACHELINES_PER_PAGE)
			break;
	}
	spin_unlock_irqrestore(&radix_lock, flags);

	if (!entry)
		return;

	cln = to_cacheline_number(entry);
	err_printk(entry->dev, entry,
		   "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
		   &cln);
}

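/*
 * The expected call site is code that is about to write to a page that
 * may still be under DMA, e.g. the copy-on-write fault path in
 * mm/memory.c, which is the use case this assertion was introduced for.
 */
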
/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
	struct hash_bucket *bucket;
	unsigned long flags;
	int rc;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, &flags);

	rc = active_cacheline_insert(entry);
	if (rc == -ENOMEM) {
		pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
		global_disable = true;
	}

	/* TODO: report -EEXIST errors here as overlapping mappings are
	 * not supported by the DMA API
	 */
}

static struct dma_debug_entry *__dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

	return entry;
}

/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&free_entries_lock, flags);

	if (list_empty(&free_entries)) {
		global_disable = true;
		spin_unlock_irqrestore(&free_entries_lock, flags);
		pr_err("DMA-API: debugging out of memory - disabling\n");
		return NULL;
	}

	entry = __dma_entry_alloc();

	spin_unlock_irqrestore(&free_entries_lock, flags);

#ifdef CONFIG_STACKTRACE
	entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
	entry->stacktrace.entries = entry->st_entries;
	entry->stacktrace.skip = 2;
	save_stack_trace(&entry->stacktrace);
#endif

	return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	active_cacheline_remove(entry);

	/*
	 * add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}

int dma_debug_resize_entries(u32 num_entries)
{
	int i, delta, ret = 0;
	unsigned long flags;
	struct dma_debug_entry *entry;
	LIST_HEAD(tmp);

	spin_lock_irqsave(&free_entries_lock, flags);

	if (nr_total_entries < num_entries) {
		delta = num_entries - nr_total_entries;

		spin_unlock_irqrestore(&free_entries_lock, flags);

		for (i = 0; i < delta; i++) {
			entry = kzalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry)
				break;

			list_add_tail(&entry->list, &tmp);
		}

		spin_lock_irqsave(&free_entries_lock, flags);

		list_splice(&tmp, &free_entries);
		nr_total_entries += i;
		num_free_entries += i;
	} else {
		delta = nr_total_entries - num_entries;

		for (i = 0; i < delta && !list_empty(&free_entries); i++) {
			entry = __dma_entry_alloc();
			kfree(entry);
		}

		nr_total_entries -= i;
	}

	if (nr_total_entries != num_entries)
		ret = 1;

	spin_unlock_irqrestore(&free_entries_lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_debug_resize_entries);

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static int prealloc_memory(u32 num_entries)
{
	struct dma_debug_entry *entry, *next_entry;
	int i;

	for (i = 0; i < num_entries; ++i) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			goto out_err;

		list_add_tail(&entry->list, &free_entries);
	}

	num_free_entries = num_entries;
	min_free_entries = num_entries;

	pr_info("DMA-API: preallocated %d debug entries\n", num_entries);

	return 0;

out_err:

	list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	return -ENOMEM;
}

static ssize_t filter_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN + 1];
	unsigned long flags;
	int len;

	if (!current_driver_name[0])
		return 0;

	/*
	 * We can't copy to userspace directly because current_driver_name can
	 * only be read under the driver_name_lock with irqs disabled. So
	 * create a temporary copy first.
	 */
	read_lock_irqsave(&driver_name_lock, flags);
	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
	read_unlock_irqrestore(&driver_name_lock, flags);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t filter_write(struct file *file, const char __user *userbuf,
			    size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN];
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * We can't copy from userspace directly. Access to
	 * current_driver_name is protected with a write_lock with irqs
	 * disabled. Since copy_from_user can fault and may sleep we
	 * need to copy to a temporary buffer first
	 */
	len = min(count, (size_t)(NAME_MAX_LEN - 1));
	if (copy_from_user(buf, userbuf, len))
		return -EFAULT;

	buf[len] = 0;

	write_lock_irqsave(&driver_name_lock, flags);

	/*
	 * Now handle the string we got from userspace very carefully.
	 * The rules are:
	 * - only use the first token we got
	 * - token delimiter is everything looking like a space
	 *   character (' ', '\n', '\t' ...)
	 */
	if (!isalnum(buf[0])) {
		/*
		 * If the first character userspace gave us is not
		 * alphanumerical then assume the filter should be
		 * switched off.
		 */
		if (current_driver_name[0])
			pr_info("DMA-API: switching off dma-debug driver filter\n");
		current_driver_name[0] = 0;
		current_driver = NULL;
		goto out_unlock;
	}

	/*
	 * Now parse out the first token and use it as the name for the
	 * driver to filter for.
	 */
	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
		current_driver_name[i] = buf[i];
		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
			break;
	}
	current_driver_name[i] = 0;
	current_driver = NULL;

	pr_info("DMA-API: enable driver filter for driver [%s]\n",
		current_driver_name);

out_unlock:
	write_unlock_irqrestore(&driver_name_lock, flags);

	return count;
}

static const struct file_operations filter_fops = {
	.read  = filter_read,
	.write = filter_write,
	.llseek = default_llseek,
};

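/*
 * The filter is driven from userspace through debugfs, e.g. (assuming
 * debugfs is mounted at /sys/kernel/debug and "e1000e" stands in for the
 * driver of interest):
 *
 *	echo "e1000e" > /sys/kernel/debug/dma-api/driver_filter
 *	cat /sys/kernel/debug/dma-api/driver_filter
 *
 * Writing a string that does not start with an alphanumeric character
 * switches the filter off again.
 */
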
static int dma_debug_fs_init(void)
{
	dma_debug_dent = debugfs_create_dir("dma-api", NULL);
	if (!dma_debug_dent) {
		pr_err("DMA-API: can not create debugfs directory\n");
		return -ENOMEM;
	}

	global_disable_dent = debugfs_create_bool("disabled", 0444,
						  dma_debug_dent,
						  &global_disable);
	if (!global_disable_dent)
		goto out_err;

	error_count_dent = debugfs_create_u32("error_count", 0444,
					      dma_debug_dent, &error_count);
	if (!error_count_dent)
		goto out_err;

	show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
						  dma_debug_dent,
						  &show_all_errors);
	if (!show_all_errors_dent)
		goto out_err;

	show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
						  dma_debug_dent,
						  &show_num_errors);
	if (!show_num_errors_dent)
		goto out_err;

	num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
						   dma_debug_dent,
						   &num_free_entries);
	if (!num_free_entries_dent)
		goto out_err;

	min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
						   dma_debug_dent,
						   &min_free_entries);
	if (!min_free_entries_dent)
		goto out_err;

	filter_dent = debugfs_create_file("driver_filter", 0644,
					  dma_debug_dent, NULL, &filter_fops);
	if (!filter_dent)
		goto out_err;

	return 0;

out_err:
	debugfs_remove_recursive(dma_debug_dent);

	return -ENOMEM;
}

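/*
 * On success the following files exist under /sys/kernel/debug/dma-api/:
 * disabled, error_count, all_errors, num_errors, num_free_entries,
 * min_free_entries and driver_filter.
 */
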
static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
{
	struct dma_debug_entry *entry;
	unsigned long flags;
	int count = 0, i;

	local_irq_save(flags);

	for (i = 0; i < HASH_SIZE; ++i) {
		spin_lock(&dma_entry_hash[i].lock);
		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
			if (entry->dev == dev) {
				count += 1;
				*out_entry = entry;
			}
		}
		spin_unlock(&dma_entry_hash[i].lock);
	}

	local_irq_restore(flags);

	return count;
}

static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;
	struct dma_debug_entry *uninitialized_var(entry);
	int count;

	if (dma_debug_disabled())
		return 0;

	switch (action) {
	case BUS_NOTIFY_UNBOUND_DRIVER:
		count = device_dma_allocations(dev, &entry);
		if (count == 0)
			break;
		err_printk(dev, entry, "DMA-API: device driver has pending "
				"DMA allocations while released from device "
				"[count=%d]\n"
				"One of leaked entries details: "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[mapped with %s] [mapped as %s]\n",
			count, entry->dev_addr, entry->size,
			dir2name[entry->direction], type2name[entry->type]);
		break;
	default:
		break;
	}

	return 0;
}

void dma_debug_add_bus(struct bus_type *bus)
{
	struct notifier_block *nb;

	if (dma_debug_disabled())
		return;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (nb == NULL) {
		pr_err("dma_debug_add_bus: out of memory\n");
		return;
	}

	nb->notifier_call = dma_debug_device_change;

	bus_register_notifier(bus, nb);
}

/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
	int i;

	/* Do not use dma_debug_initialized here, since we really want to be
	 * called to set dma_debug_initialized
	 */
	if (global_disable)
		return;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	if (dma_debug_fs_init() != 0) {
		pr_err("DMA-API: error creating debugfs entries - disabling\n");
		global_disable = true;

		return;
	}

	if (req_entries)
		num_entries = req_entries;

	if (prealloc_memory(num_entries) != 0) {
		pr_err("DMA-API: debugging out of memory error - disabled\n");
		global_disable = true;

		return;
	}

	nr_total_entries = num_free_entries;

	dma_debug_initialized = true;

	pr_info("DMA-API: debugging enabled by kernel config\n");
}

static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		pr_info("DMA-API: debugging disabled on kernel command line\n");
		global_disable = true;
	}

	return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	int res;

	if (!str)
		return -EINVAL;

	res = get_option(&str, &req_entries);

	if (!res)
		req_entries = 0;

	return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);

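/*
 * Example kernel command line usage:
 *
 *	dma_debug=off			disable all checks at boot
 *	dma_debug_entries=65536		preallocate 65536 entries instead
 *					of the architecture's default
 */
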
static void check_unmap(struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);
	entry = bucket_find_exact(bucket, ref);

	if (!entry) {
		/* must drop lock before calling dma_mapping_error */
		put_hash_bucket(bucket, &flags);

		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
			err_printk(ref->dev, NULL,
				   "DMA-API: device driver tries to free an "
				   "invalid DMA memory address\n");
		} else {
			err_printk(ref->dev, NULL,
				   "DMA-API: device driver tries to free DMA "
				   "memory it has not allocated [device "
				   "address=0x%016llx] [size=%llu bytes]\n",
				   ref->dev_addr, ref->size);
		}
		return;
	}

	if (ref->size != entry->size) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different size "
			   "[device address=0x%016llx] [map size=%llu bytes] "
			   "[unmap size=%llu bytes]\n",
			   ref->dev_addr, entry->size, ref->size);
	}

	if (ref->type != entry->type) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with wrong function "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s] [unmapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type], type2name[ref->type]);
	} else if ((entry->type == dma_debug_coherent) &&
		   (phys_addr(ref) != phys_addr(entry))) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different CPU address "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[cpu alloc address=0x%016llx] "
			   "[cpu free address=0x%016llx]",
			   ref->dev_addr, ref->size,
			   phys_addr(entry),
			   phys_addr(ref));
	}

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA sg list with different entry count "
			   "[map count=%d] [unmap count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

	/*
	 * This may be no bug in reality - but most implementations of the
	 * DMA API don't handle this properly, so check for it here
	 */
	if (ref->direction != entry->direction) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [unmapped with %s]\n",
			   ref->dev_addr, ref->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
		err_printk(ref->dev, entry,
			   "DMA-API: device driver failed to check map error "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s]",
			   ref->dev_addr, ref->size,
			   type2name[entry->type]);
	}

	hash_bucket_del(entry);
	dma_entry_free(entry);

	put_hash_bucket(bucket, &flags);
}

static void check_for_stack(struct device *dev, void *addr)
{
	if (object_is_on_stack(addr))
		err_printk(dev, NULL, "DMA-API: device driver maps memory from "
				"stack [addr=%p]\n", addr);
}

static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
{
	unsigned long a1 = (unsigned long)addr;
	unsigned long b1 = a1 + len;
	unsigned long a2 = (unsigned long)start;
	unsigned long b2 = (unsigned long)end;

	return !(b1 <= a2 || a1 >= b2);
}

static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
{
	if (overlap(addr, len, _stext, _etext) ||
	    overlap(addr, len, __start_rodata, __end_rodata))
		err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
}

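/*
 * overlap() treats both ranges as half-open intervals, [a1, b1) and
 * [a2, b2), which intersect unless one ends before the other begins.
 * Example: overlap((void *)0x1000, 0x100, (void *)0x1080, (void *)0x2000)
 * is true because [0x1000, 0x1100) crosses into [0x1080, 0x2000).
 */
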
static void check_sync(struct device *dev,
		       struct dma_debug_entry *ref,
		       bool to_cpu)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);

	entry = bucket_find_contain(&bucket, ref, &flags);

	if (!entry) {
		err_printk(dev, NULL, "DMA-API: device driver tries "
			   "to sync DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   (unsigned long long)ref->dev_addr, ref->size);
		goto out;
	}

	if (ref->size > entry->size) {
		err_printk(dev, entry, "DMA-API: device driver syncs"
			   " DMA memory outside allocated range "
			   "[device address=0x%016llx] "
			   "[allocation size=%llu bytes] "
			   "[sync offset+size=%llu]\n",
			   entry->dev_addr, entry->size,
			   ref->size);
	}

	if (entry->direction == DMA_BIDIRECTIONAL)
		goto out;

	if (ref->direction != entry->direction) {
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
	    !(ref->direction == DMA_TO_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "device read-only DMA memory for cpu "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
	    !(ref->direction == DMA_FROM_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "device write-only DMA memory to device "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "DMA-API: device driver syncs "
			   "DMA sg list with different entry count "
			   "[map count=%d] [sync count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

out:
	put_hash_bucket(bucket, &flags);
}

void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr,
			bool map_single)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (dma_mapping_error(dev, dma_addr))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev       = dev;
	entry->type      = dma_debug_page;
	entry->pfn       = page_to_pfn(page);
	entry->offset    = offset;
	entry->dev_addr  = dma_addr;
	entry->size      = size;
	entry->direction = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	if (map_single)
		entry->type = dma_debug_single;

	if (!PageHighMem(page)) {
		void *addr = page_address(page) + offset;

		check_for_stack(dev, addr);
		check_for_illegal_area(dev, addr, size);
	}

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);

void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_debug_entry ref;
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.dev = dev;
	ref.dev_addr = dma_addr;
	bucket = get_hash_bucket(&ref, &flags);

	list_for_each_entry(entry, &bucket->list, list) {
		if (!exact_match(&ref, entry))
			continue;

		/*
		 * The same physical address can be mapped multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which updates the first entry
		 * from the hash which fits the reference value and is
		 * not currently listed as being checked.
		 */
		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
			entry->map_err_type = MAP_ERR_CHECKED;
			break;
		}
	}

	put_hash_bucket(bucket, &flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);

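/*
 * debug_dma_mapping_error() is invoked via the dma_mapping_error()
 * wrapper, so the usual driver pattern below (a sketch; dev, buf and len
 * are placeholders) is what moves an entry from MAP_ERR_NOT_CHECKED to
 * MAP_ERR_CHECKED:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *
 * Drivers that omit the check are flagged later in check_unmap().
 */
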
void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
			  size_t size, int direction, bool map_single)
{
	struct dma_debug_entry ref = {
		.type      = dma_debug_page,
		.dev       = dev,
		.dev_addr  = addr,
		.size      = size,
		.direction = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	if (map_single)
		ref.type = dma_debug_single;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, mapped_ents, i) {
		entry = dma_entry_alloc();
		if (!entry)
			return;

		entry->type           = dma_debug_sg;
		entry->dev            = dev;
		entry->pfn            = page_to_pfn(sg_page(s));
		entry->offset         = s->offset;
		entry->size           = sg_dma_len(s);
		entry->dev_addr       = sg_dma_address(s);
		entry->direction      = direction;
		entry->sg_call_ents   = nents;
		entry->sg_mapped_ents = mapped_ents;

		if (!PageHighMem(sg_page(s))) {
			check_for_stack(dev, sg_virt(s));
			check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
		}

		add_dma_entry(entry);
	}
}
EXPORT_SYMBOL(debug_dma_map_sg);

static int get_nr_mapped_entries(struct device *dev,
				 struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;
	int mapped_ents;

	bucket      = get_hash_bucket(ref, &flags);
	entry       = bucket_find_exact(bucket, ref);
	mapped_ents = 0;

	if (entry)
		mapped_ents = entry->sg_mapped_ents;
	put_hash_bucket(bucket, &flags);

	return mapped_ents;
}

void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sglist, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type         = dma_debug_sg,
			.dev          = dev,
			.pfn          = page_to_pfn(sg_page(s)),
			.offset       = s->offset,
			.dev_addr     = sg_dma_address(s),
			.size         = sg_dma_len(s),
			.direction    = dir,
			.sg_call_ents = nelems,
		};

		if (mapped_ents && i >= mapped_ents)
			break;

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		check_unmap(&ref);
	}
}
EXPORT_SYMBOL(debug_dma_unmap_sg);

void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (unlikely(virt == NULL))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type      = dma_debug_coherent;
	entry->dev       = dev;
	entry->pfn       = page_to_pfn(virt_to_page(virt));
	entry->offset    = (size_t) virt & ~PAGE_MASK;
	entry->size      = size;
	entry->dev_addr  = dma_addr;
	entry->direction = DMA_BIDIRECTIONAL;

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);

void debug_dma_free_coherent(struct device *dev, size_t size,
			     void *virt, dma_addr_t addr)
{
	struct dma_debug_entry ref = {
		.type      = dma_debug_coherent,
		.dev       = dev,
		.pfn       = page_to_pfn(virt_to_page(virt)),
		.offset    = (size_t) virt & ~PAGE_MASK,
		.dev_addr  = addr,
		.size      = size,
		.direction = DMA_BIDIRECTIONAL,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);

void debug_dma_sync_single_range_for_cpu(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = offset + size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

void debug_dma_sync_single_range_for_device(struct device *dev,
					    dma_addr_t dma_handle,
					    unsigned long offset,
					    size_t size, int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = offset + size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);

void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type         = dma_debug_sg,
			.dev          = dev,
			.pfn          = page_to_pfn(sg_page(s)),
			.offset       = s->offset,
			.dev_addr     = sg_dma_address(s),
			.size         = sg_dma_len(s),
			.direction    = direction,
			.sg_call_ents = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, true);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type         = dma_debug_sg,
			.dev          = dev,
			.pfn          = page_to_pfn(sg_page(s)),
			.offset       = s->offset,
			.dev_addr     = sg_dma_address(s),
			.size         = sg_dma_len(s),
			.direction    = direction,
			.sg_call_ents = nelems,
		};
		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, false);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);

static int __init dma_debug_driver_setup(char *str)
{
	int i;

	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
		current_driver_name[i] = *str;
		if (*str == 0)
			break;
	}

	if (current_driver_name[0])
		pr_info("DMA-API: enable driver filter for driver [%s]\n",
			current_driver_name);

	return 1;
}
__setup("dma_debug_driver=", dma_debug_driver_setup);
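
/*
 * Example: booting with dma_debug_driver=e1000e arms the driver filter
 * before any device is probed, limiting error reports to mappings
 * created by the e1000e driver (assuming that driver name).
 */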