/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>

#define HASH_SIZE       1024ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

enum {
        dma_debug_single,
        dma_debug_page,
        dma_debug_sg,
        dma_debug_coherent,
};

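/*
 * Track whether the driver ever called dma_mapping_error() on a mapping:
 * entries start out as MAP_ERR_NOT_CHECKED (or MAP_ERR_CHECK_NOT_APPLICABLE
 * where the check makes no sense), debug_dma_mapping_error() promotes them
 * to MAP_ERR_CHECKED, and check_unmap() warns when a mapping is released
 * while still unchecked.
 */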
enum map_err_types {
        MAP_ERR_CHECK_NOT_APPLICABLE,
        MAP_ERR_NOT_CHECKED,
        MAP_ERR_CHECKED,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

/**
 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
 * @list: node on pre-allocated free_entries list
 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
 * @type: single, page, sg, coherent
 * @pfn: page frame of the start address
 * @offset: offset of mapping relative to pfn
 * @dev_addr: dma address returned to the driver
 * @size: length of the mapping
 * @direction: enum dma_data_direction
 * @sg_call_ents: 'nents' from dma_map_sg
 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
 * @map_err_type: track whether dma_mapping_error() was checked
 * @stacktrace: support backtraces when a violation is detected
 */
struct dma_debug_entry {
        struct list_head   list;
        struct device      *dev;
        int                type;
        unsigned long      pfn;
        size_t             offset;
        u64                dev_addr;
        u64                size;
        int                direction;
        int                sg_call_ents;
        int                sg_mapped_ents;
        enum map_err_types map_err_type;
#ifdef CONFIG_STACKTRACE
        struct stack_trace stacktrace;
        unsigned long      st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

struct hash_bucket {
        struct list_head list;
        spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Early initialization disable flag, set at the end of dma_debug_init */
static bool dma_debug_initialized __read_mostly;

static inline bool dma_debug_disabled(void)
{
        return global_disable || !dma_debug_initialized;
}

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/* debugfs dentry's for the stuff above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;
static struct dentry *filter_dent           __read_mostly;

/* per-driver filter related state */

#define NAME_MAX_LEN 64

static char current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);

static const char *const maperr2str[] = {
        [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
        [MAP_ERR_NOT_CHECKED] = "dma map error not checked",
        [MAP_ERR_CHECKED] = "dma map error checked",
};

static const char *type2name[4] = { "single", "page",
                                    "scatter-gather", "coherent" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
                                   "DMA_FROM_DEVICE", "DMA_NONE" };

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
        if (entry) {
                pr_warning("Mapped at:\n");
                print_stack_trace(&entry->stacktrace, 0);
        }
#endif
}

static bool driver_filter(struct device *dev)
{
        struct device_driver *drv;
        unsigned long flags;
        bool ret;

        /* driver filter off */
        if (likely(!current_driver_name[0]))
                return true;

        /* driver filter on and initialized */
        if (current_driver && dev && dev->driver == current_driver)
                return true;

        /* driver filter on, but we can't filter on a NULL device... */
        if (!dev)
                return false;

        if (current_driver || !current_driver_name[0])
                return false;

        /* driver filter on but not yet initialized */
        drv = dev->driver;
        if (!drv)
                return false;

        /* lock to protect against change of current_driver_name */
        read_lock_irqsave(&driver_name_lock, flags);

        ret = false;
        if (drv->name &&
            strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
                current_driver = drv;
                ret = true;
        }

        read_unlock_irqrestore(&driver_name_lock, flags);

        return ret;
}

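/*
 * err_printk() accounts every error in error_count, but only emits the
 * WARN (plus the stack trace of the original mapping, if available) when
 * the device passes the driver filter and the user still wants reports:
 * either all_errors is set or the num_errors budget is not yet used up.
 */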
#define err_printk(dev, entry, format, arg...) do {                    \
                error_count += 1;                                      \
                if (driver_filter(dev) &&                              \
                    (show_all_errors || show_num_errors > 0)) {        \
                        WARN(1, "%s %s: " format,                      \
                             dev ? dev_driver_string(dev) : "NULL",    \
                             dev ? dev_name(dev) : "NULL", ## arg);    \
                        dump_entry_trace(entry);                       \
                }                                                      \
                if (!show_all_errors && show_num_errors > 0)           \
                        show_num_errors -= 1;                          \
        } while (0)

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
        /*
         * Hash function is based on the dma address.
         * With HASH_FN_SHIFT == 13 and HASH_SIZE == 1024 we use bits
         * 13-22 of the dma address as the index into the hash.
         */
        return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
                                           unsigned long *flags)
{
        int idx = hash_fn(entry);
        unsigned long __flags;

        spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
        *flags = __flags;
        return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
                            unsigned long *flags)
{
        unsigned long __flags = *flags;

        spin_unlock_irqrestore(&bucket->lock, __flags);
}

static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
        return ((a->dev_addr == b->dev_addr) &&
                (a->dev == b->dev)) ? true : false;
}

static bool containing_match(struct dma_debug_entry *a,
                             struct dma_debug_entry *b)
{
        if (a->dev != b->dev)
                return false;

        if ((b->dev_addr <= a->dev_addr) &&
            ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
                return true;

        return false;
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
                                                  struct dma_debug_entry *ref,
                                                  match_fn match)
{
        struct dma_debug_entry *entry, *ret = NULL;
        int matches = 0, match_lvl, last_lvl = -1;

        list_for_each_entry(entry, &bucket->list, list) {
                if (!match(ref, entry))
                        continue;

                /*
                 * Some drivers map the same physical address multiple
                 * times. Without a hardware IOMMU this results in the
                 * same device addresses being put into the dma-debug
                 * hash multiple times too. This can result in false
                 * positives being reported. Therefore we implement a
                 * best-fit algorithm here which returns the entry from
                 * the hash which fits best to the reference value
                 * instead of the first-fit.
                 */
                matches += 1;
                match_lvl = 0;
                if (entry->size == ref->size)
                        ++match_lvl;
                if (entry->type == ref->type)
                        ++match_lvl;
                if (entry->direction == ref->direction)
                        ++match_lvl;
                if (entry->sg_call_ents == ref->sg_call_ents)
                        ++match_lvl;

                if (match_lvl == 4) {
                        /* perfect-fit - return the result */
                        return entry;
                } else if (match_lvl > last_lvl) {
                        /*
                         * We found an entry that fits better than the
                         * previous one or it is the 1st match.
                         */
                        last_lvl = match_lvl;
                        ret = entry;
                }
        }

        /*
         * If we have multiple matches but no perfect-fit, just return
         * NULL.
         */
        ret = (matches == 1) ? ret : NULL;

        return ret;
}

static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
                                                 struct dma_debug_entry *ref)
{
        return __hash_bucket_find(bucket, ref, exact_match);
}

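/*
 * A sync call may reference an address anywhere inside a mapped region,
 * so the matching entry can live in an earlier hash bucket than the
 * reference address itself. Walk backwards one bucket (1 << HASH_FN_SHIFT
 * bytes of dma address space) at a time, up to the device's maximum
 * segment size, dropping and re-taking the bucket lock as we go.
 */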
static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
                                                   struct dma_debug_entry *ref,
                                                   unsigned long *flags)
{
        unsigned int max_range = dma_get_max_seg_size(ref->dev);
        struct dma_debug_entry *entry, index = *ref;
        unsigned int range = 0;

        while (range <= max_range) {
                entry = __hash_bucket_find(*bucket, ref, containing_match);

                if (entry)
                        return entry;

                /*
                 * Nothing found, go back a hash bucket
                 */
                put_hash_bucket(*bucket, flags);
                range          += (1 << HASH_FN_SHIFT);
                index.dev_addr -= (1 << HASH_FN_SHIFT);
                *bucket = get_hash_bucket(&index, flags);
        }

        return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
                            struct dma_debug_entry *entry)
{
        list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
        list_del(&entry->list);
}

static unsigned long long phys_addr(struct dma_debug_entry *entry)
{
        return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
        int idx;

        for (idx = 0; idx < HASH_SIZE; idx++) {
                struct hash_bucket *bucket = &dma_entry_hash[idx];
                struct dma_debug_entry *entry;
                unsigned long flags;

                spin_lock_irqsave(&bucket->lock, flags);

                list_for_each_entry(entry, &bucket->list, list) {
                        if (!dev || dev == entry->dev) {
                                dev_info(entry->dev,
                                         "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
                                         type2name[entry->type], idx,
                                         phys_addr(entry), entry->pfn,
                                         entry->dev_addr, entry->size,
                                         dir2name[entry->direction],
                                         maperr2str[entry->map_err_type]);
                        }
                }

                spin_unlock_irqrestore(&bucket->lock, flags);
        }
}
EXPORT_SYMBOL(debug_dma_dump_mappings);

/*
 * For each mapping (initial cacheline in the case of
 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
 * scatterlist, or the cacheline specified in dma_map_single) insert
 * into this tree using the cacheline as the key. At
 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
 * the entry already exists at insertion time add a tag as a reference
 * count for the overlapping mappings. For now, the overlap tracking
 * just ensures that 'unmaps' balance 'maps' before marking the
 * cacheline idle, but we should also be flagging overlaps as an API
 * violation.
 *
 * Memory usage is mostly constrained by the maximum number of available
 * dma-debug entries in that we need a free dma_debug_entry before
 * inserting into the tree. In the case of dma_map_page and
 * dma_alloc_coherent there is only one dma_debug_entry and one
 * dma_active_cacheline entry to track per event. dma_map_sg(), on the
 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
 * entries into the tree.
 *
 * At any time debug_dma_assert_idle() can be called to trigger a
 * warning if any cachelines in the given page are in the active set.
 */
static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
static DEFINE_SPINLOCK(radix_lock);
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)

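/*
 * Worked example, assuming 4 KiB pages (PAGE_SHIFT == 12) and 64-byte
 * cachelines (L1_CACHE_SHIFT == 6): CACHELINE_PER_PAGE_SHIFT is 6, each
 * page holds 64 cachelines, and to_cacheline_number() maps a mapping
 * starting at pfn P with byte offset O to cacheline number
 * (P << 6) + (O >> 6).
 */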
static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
{
        return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
                (entry->offset >> L1_CACHE_SHIFT);
}

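/*
 * Overlapping mappings of one cacheline are counted in the radix tree
 * tags of its entry: tag i contributes bit i of a small reference count,
 * so up to ACTIVE_CACHELINE_MAX_OVERLAP overlaps can be tracked per
 * cacheline without allocating any extra memory.
 */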
static int active_cacheline_read_overlap(phys_addr_t cln)
{
        int overlap = 0, i;

        for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
                if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
                        overlap |= 1 << i;
        return overlap;
}

static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
        int i;

        if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
                return overlap;

        for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
                if (overlap & 1 << i)
                        radix_tree_tag_set(&dma_active_cacheline, cln, i);
                else
                        radix_tree_tag_clear(&dma_active_cacheline, cln, i);

        return overlap;
}

static void active_cacheline_inc_overlap(phys_addr_t cln)
{
        int overlap = active_cacheline_read_overlap(cln);

        overlap = active_cacheline_set_overlap(cln, ++overlap);

        /* If we overflowed the overlap counter then we're potentially
         * leaking dma-mappings.  Otherwise, if maps and unmaps are
         * balanced then this overflow may cause false negatives in
         * debug_dma_assert_idle() as the cacheline may be marked idle
         * prematurely.
         */
        WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
                  "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
                  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}

static int active_cacheline_dec_overlap(phys_addr_t cln)
{
        int overlap = active_cacheline_read_overlap(cln);

        return active_cacheline_set_overlap(cln, --overlap);
}

static int active_cacheline_insert(struct dma_debug_entry *entry)
{
        phys_addr_t cln = to_cacheline_number(entry);
        unsigned long flags;
        int rc;

        /* If the device is not writing memory then we don't have any
         * concerns about the cpu consuming stale data.  This mitigates
         * legitimate usages of overlapping mappings.
         */
        if (entry->direction == DMA_TO_DEVICE)
                return 0;

        spin_lock_irqsave(&radix_lock, flags);
        rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
        if (rc == -EEXIST)
                active_cacheline_inc_overlap(cln);
        spin_unlock_irqrestore(&radix_lock, flags);

        return rc;
}

static void active_cacheline_remove(struct dma_debug_entry *entry)
{
        phys_addr_t cln = to_cacheline_number(entry);
        unsigned long flags;

        /* ...mirror the insert case */
        if (entry->direction == DMA_TO_DEVICE)
                return;

        spin_lock_irqsave(&radix_lock, flags);
        /* since we are counting overlaps the final put of the
         * cacheline will occur when the overlap count is 0.
         * active_cacheline_dec_overlap() returns -1 in that case
         */
        if (active_cacheline_dec_overlap(cln) < 0)
                radix_tree_delete(&dma_active_cacheline, cln);
        spin_unlock_irqrestore(&radix_lock, flags);
}

/**
 * debug_dma_assert_idle() - assert that a page is not undergoing dma
 * @page: page to lookup in the dma_active_cacheline tree
 *
 * Place a call to this routine in cases where the cpu touching the page
 * before the dma completes (page is dma_unmapped) will lead to data
 * corruption.
 */
void debug_dma_assert_idle(struct page *page)
{
        static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
        struct dma_debug_entry *entry = NULL;
        void **results = (void **) &ents;
        unsigned int nents, i;
        unsigned long flags;
        phys_addr_t cln;

        if (dma_debug_disabled())
                return;

        if (!page)
                return;

        cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
        spin_lock_irqsave(&radix_lock, flags);
        nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
                                       CACHELINES_PER_PAGE);
        for (i = 0; i < nents; i++) {
                phys_addr_t ent_cln = to_cacheline_number(ents[i]);

                if (ent_cln == cln) {
                        entry = ents[i];
                        break;
                } else if (ent_cln >= cln + CACHELINES_PER_PAGE)
                        break;
        }
        spin_unlock_irqrestore(&radix_lock, flags);

        if (!entry)
                return;

        cln = to_cacheline_number(entry);
        err_printk(entry->dev, entry,
                   "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
                   &cln);
}

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
        struct hash_bucket *bucket;
        unsigned long flags;
        int rc;

        bucket = get_hash_bucket(entry, &flags);
        hash_bucket_add(bucket, entry);
        put_hash_bucket(bucket, &flags);

        rc = active_cacheline_insert(entry);
        if (rc == -ENOMEM) {
                pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
                global_disable = true;
        }

        /* TODO: report -EEXIST errors here as overlapping mappings are
         * not supported by the DMA API
         */
}

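/* Must be called with free_entries_lock held and free_entries non-empty. */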
static struct dma_debug_entry *__dma_entry_alloc(void)
{
        struct dma_debug_entry *entry;

        entry = list_entry(free_entries.next, struct dma_debug_entry, list);
        list_del(&entry->list);
        memset(entry, 0, sizeof(*entry));

        num_free_entries -= 1;
        if (num_free_entries < min_free_entries)
                min_free_entries = num_free_entries;

        return entry;
}

/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
        struct dma_debug_entry *entry;
        unsigned long flags;

        spin_lock_irqsave(&free_entries_lock, flags);

        if (list_empty(&free_entries)) {
                global_disable = true;
                spin_unlock_irqrestore(&free_entries_lock, flags);
                pr_err("DMA-API: debugging out of memory - disabling\n");
                return NULL;
        }

        entry = __dma_entry_alloc();

        spin_unlock_irqrestore(&free_entries_lock, flags);

#ifdef CONFIG_STACKTRACE
        entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
        entry->stacktrace.entries = entry->st_entries;
        entry->stacktrace.skip = 2;
        save_stack_trace(&entry->stacktrace);
#endif

        return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
        unsigned long flags;

        active_cacheline_remove(entry);

        /*
         * add to beginning of the list - this way the entries are
         * more likely cache hot when they are reallocated.
         */
        spin_lock_irqsave(&free_entries_lock, flags);
        list_add(&entry->list, &free_entries);
        num_free_entries += 1;
        spin_unlock_irqrestore(&free_entries_lock, flags);
}

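/*
 * dma_debug_resize_entries() lets architecture code grow or shrink the
 * preallocated entry pool after boot. It returns 0 when the pool ends up
 * with exactly num_entries entries and 1 when the resize could only be
 * carried out partially (e.g. an allocation failed half way through).
 */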
int dma_debug_resize_entries(u32 num_entries)
{
        int i, delta, ret = 0;
        unsigned long flags;
        struct dma_debug_entry *entry;
        LIST_HEAD(tmp);

        spin_lock_irqsave(&free_entries_lock, flags);

        if (nr_total_entries < num_entries) {
                delta = num_entries - nr_total_entries;

                spin_unlock_irqrestore(&free_entries_lock, flags);

                for (i = 0; i < delta; i++) {
                        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                        if (!entry)
                                break;

                        list_add_tail(&entry->list, &tmp);
                }

                spin_lock_irqsave(&free_entries_lock, flags);

                list_splice(&tmp, &free_entries);
                nr_total_entries += i;
                num_free_entries += i;
        } else {
                delta = nr_total_entries - num_entries;

                for (i = 0; i < delta && !list_empty(&free_entries); i++) {
                        entry = __dma_entry_alloc();
                        kfree(entry);
                }

                nr_total_entries -= i;
        }

        if (nr_total_entries != num_entries)
                ret = 1;

        spin_unlock_irqrestore(&free_entries_lock, flags);

        return ret;
}
EXPORT_SYMBOL(dma_debug_resize_entries);

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static int prealloc_memory(u32 num_entries)
{
        struct dma_debug_entry *entry, *next_entry;
        int i;

        for (i = 0; i < num_entries; ++i) {
                entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry)
                        goto out_err;

                list_add_tail(&entry->list, &free_entries);
        }

        num_free_entries = num_entries;
        min_free_entries = num_entries;

        pr_info("DMA-API: preallocated %d debug entries\n", num_entries);

        return 0;

out_err:

        list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
                list_del(&entry->list);
                kfree(entry);
        }

        return -ENOMEM;
}

static ssize_t filter_read(struct file *file, char __user *user_buf,
                           size_t count, loff_t *ppos)
{
        char buf[NAME_MAX_LEN + 1];
        unsigned long flags;
        int len;

        if (!current_driver_name[0])
                return 0;

        /*
         * We can't copy to userspace directly because current_driver_name can
         * only be read under the driver_name_lock with irqs disabled. So
         * create a temporary copy first.
         */
        read_lock_irqsave(&driver_name_lock, flags);
        len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
        read_unlock_irqrestore(&driver_name_lock, flags);

        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t filter_write(struct file *file, const char __user *userbuf,
                            size_t count, loff_t *ppos)
{
        char buf[NAME_MAX_LEN];
        unsigned long flags;
        size_t len;
        int i;

        /*
         * We can't copy from userspace directly. Access to
         * current_driver_name is protected with a write_lock with irqs
         * disabled. Since copy_from_user can fault and may sleep we
         * need to copy to temporary buffer first
         */
        len = min(count, (size_t)(NAME_MAX_LEN - 1));
        if (copy_from_user(buf, userbuf, len))
                return -EFAULT;

        buf[len] = 0;

        write_lock_irqsave(&driver_name_lock, flags);

        /*
         * Now handle the string we got from userspace very carefully.
         * The rules are:
         * - only use the first token we got
         * - token delimiter is everything looking like a space
         *   character (' ', '\n', '\t' ...)
         */
        if (!isalnum(buf[0])) {
                /*
                 * If the first character userspace gave us is not
                 * alphanumerical then assume the filter should be
                 * switched off.
                 */
                if (current_driver_name[0])
                        pr_info("DMA-API: switching off dma-debug driver filter\n");
                current_driver_name[0] = 0;
                current_driver = NULL;
                goto out_unlock;
        }

        /*
         * Now parse out the first token and use it as the name for the
         * driver to filter for.
         */
        for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
                current_driver_name[i] = buf[i];
                if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
                        break;
        }
        current_driver_name[i] = 0;
        current_driver = NULL;

        pr_info("DMA-API: enable driver filter for driver [%s]\n",
                current_driver_name);

out_unlock:
        write_unlock_irqrestore(&driver_name_lock, flags);

        return count;
}

static const struct file_operations filter_fops = {
        .read  = filter_read,
        .write = filter_write,
        .llseek = default_llseek,
};

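/*
 * Create the dma-api directory in debugfs together with the knobs used by
 * the machinery above: disabled, error_count, all_errors, num_errors,
 * num_free_entries, min_free_entries and driver_filter. On any failure
 * the whole directory is torn down again.
 */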
static int dma_debug_fs_init(void)
{
        dma_debug_dent = debugfs_create_dir("dma-api", NULL);
        if (!dma_debug_dent) {
                pr_err("DMA-API: can not create debugfs directory\n");
                return -ENOMEM;
        }

        global_disable_dent = debugfs_create_bool("disabled", 0444,
                                                  dma_debug_dent,
                                                  &global_disable);
        if (!global_disable_dent)
                goto out_err;

        error_count_dent = debugfs_create_u32("error_count", 0444,
                                              dma_debug_dent, &error_count);
        if (!error_count_dent)
                goto out_err;

        show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
                                                  dma_debug_dent,
                                                  &show_all_errors);
        if (!show_all_errors_dent)
                goto out_err;

        show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
                                                  dma_debug_dent,
                                                  &show_num_errors);
        if (!show_num_errors_dent)
                goto out_err;

        num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
                                                   dma_debug_dent,
                                                   &num_free_entries);
        if (!num_free_entries_dent)
                goto out_err;

        min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
                                                   dma_debug_dent,
                                                   &min_free_entries);
        if (!min_free_entries_dent)
                goto out_err;

        filter_dent = debugfs_create_file("driver_filter", 0644,
                                          dma_debug_dent, NULL, &filter_fops);
        if (!filter_dent)
                goto out_err;

        return 0;

out_err:
        debugfs_remove_recursive(dma_debug_dent);

        return -ENOMEM;
}

static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
{
        struct dma_debug_entry *entry;
        unsigned long flags;
        int count = 0, i;

        local_irq_save(flags);

        for (i = 0; i < HASH_SIZE; ++i) {
                spin_lock(&dma_entry_hash[i].lock);
                list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
                        if (entry->dev == dev) {
                                count += 1;
                                *out_entry = entry;
                        }
                }
                spin_unlock(&dma_entry_hash[i].lock);
        }

        local_irq_restore(flags);

        return count;
}

static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{
        struct device *dev = data;
        struct dma_debug_entry *uninitialized_var(entry);
        int count;

        if (dma_debug_disabled())
                return 0;

        switch (action) {
        case BUS_NOTIFY_UNBOUND_DRIVER:
                count = device_dma_allocations(dev, &entry);
                if (count == 0)
                        break;
                err_printk(dev, entry, "DMA-API: device driver has pending "
                                "DMA allocations while released from device "
                                "[count=%d]\n"
                                "One of leaked entries details: "
                                "[device address=0x%016llx] [size=%llu bytes] "
                                "[mapped with %s] [mapped as %s]\n",
                        count, entry->dev_addr, entry->size,
                        dir2name[entry->direction], type2name[entry->type]);
                break;
        default:
                break;
        }

        return 0;
}

void dma_debug_add_bus(struct bus_type *bus)
{
        struct notifier_block *nb;

        if (dma_debug_disabled())
                return;

        nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
        if (nb == NULL) {
                pr_err("dma_debug_add_bus: out of memory\n");
                return;
        }

        nb->notifier_call = dma_debug_device_change;

        bus_register_notifier(bus, nb);
}

/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
        int i;

        /* Do not use dma_debug_initialized here, since we really want to be
         * called to set dma_debug_initialized
         */
        if (global_disable)
                return;

        for (i = 0; i < HASH_SIZE; ++i) {
                INIT_LIST_HEAD(&dma_entry_hash[i].list);
                spin_lock_init(&dma_entry_hash[i].lock);
        }

        if (dma_debug_fs_init() != 0) {
                pr_err("DMA-API: error creating debugfs entries - disabling\n");
                global_disable = true;

                return;
        }

        if (req_entries)
                num_entries = req_entries;

        if (prealloc_memory(num_entries) != 0) {
                pr_err("DMA-API: debugging out of memory error - disabled\n");
                global_disable = true;

                return;
        }

        nr_total_entries = num_free_entries;

        dma_debug_initialized = true;

        pr_info("DMA-API: debugging enabled by kernel config\n");
}

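/*
 * Kernel command line handling: booting with "dma_debug=off" disables the
 * checks entirely, and "dma_debug_entries=<number>" overrides the number
 * of preallocated entries the architecture asked for in dma_debug_init().
 */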
static __init int dma_debug_cmdline(char *str)
{
        if (!str)
                return -EINVAL;

        if (strncmp(str, "off", 3) == 0) {
                pr_info("DMA-API: debugging disabled on kernel command line\n");
                global_disable = true;
        }

        return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
        int res;

        if (!str)
                return -EINVAL;

        res = get_option(&str, &req_entries);

        if (!res)
                req_entries = 0;

        return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);

static void check_unmap(struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(ref, &flags);
        entry = bucket_find_exact(bucket, ref);

        if (!entry) {
                /* must drop lock before calling dma_mapping_error */
                put_hash_bucket(bucket, &flags);

                if (dma_mapping_error(ref->dev, ref->dev_addr)) {
                        err_printk(ref->dev, NULL,
                                   "DMA-API: device driver tries to free an "
                                   "invalid DMA memory address\n");
                } else {
                        err_printk(ref->dev, NULL,
                                   "DMA-API: device driver tries to free DMA "
                                   "memory it has not allocated [device "
                                   "address=0x%016llx] [size=%llu bytes]\n",
                                   ref->dev_addr, ref->size);
                }
                return;
        }

        if (ref->size != entry->size) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different size "
                           "[device address=0x%016llx] [map size=%llu bytes] "
                           "[unmap size=%llu bytes]\n",
                           ref->dev_addr, entry->size, ref->size);
        }

        if (ref->type != entry->type) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with wrong function "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped as %s] [unmapped as %s]\n",
                           ref->dev_addr, ref->size,
                           type2name[entry->type], type2name[ref->type]);
        } else if ((entry->type == dma_debug_coherent) &&
                   (phys_addr(ref) != phys_addr(entry))) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different CPU address "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[cpu alloc address=0x%016llx] "
                           "[cpu free address=0x%016llx]\n",
                           ref->dev_addr, ref->size,
                           phys_addr(entry),
                           phys_addr(ref));
        }

        if (ref->sg_call_ents && ref->type == dma_debug_sg &&
            ref->sg_call_ents != entry->sg_call_ents) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA sg list with different entry count "
                           "[map count=%d] [unmap count=%d]\n",
                           entry->sg_call_ents, ref->sg_call_ents);
        }

        /*
         * This may be no bug in reality - but most implementations of the
         * DMA API don't handle this properly, so check for it here
         */
        if (ref->direction != entry->direction) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different direction "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [unmapped with %s]\n",
                           ref->dev_addr, ref->size,
                           dir2name[entry->direction],
                           dir2name[ref->direction]);
        }

        if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
                err_printk(ref->dev, entry,
                           "DMA-API: device driver failed to check map error "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped as %s]\n",
                           ref->dev_addr, ref->size,
                           type2name[entry->type]);
        }

        hash_bucket_del(entry);
        dma_entry_free(entry);

        put_hash_bucket(bucket, &flags);
}

static void check_for_stack(struct device *dev, void *addr)
{
        if (object_is_on_stack(addr))
                err_printk(dev, NULL, "DMA-API: device driver maps memory from "
                                "stack [addr=%p]\n", addr);
}

static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
{
        unsigned long a1 = (unsigned long)addr;
        unsigned long b1 = a1 + len;
        unsigned long a2 = (unsigned long)start;
        unsigned long b2 = (unsigned long)end;

        return !(b1 <= a2 || a1 >= b2);
}

static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
{
        if (overlap(addr, len, _stext, _etext) ||
            overlap(addr, len, __start_rodata, __end_rodata))
                err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
}

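/*
 * check_sync() validates a dma_sync_* call against the entry recorded at
 * map time: the region must be known, the synced range must not exceed
 * the mapped size, and the sync direction must be compatible with the
 * mapping direction (a DMA_BIDIRECTIONAL mapping allows any sync).
 */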
static void check_sync(struct device *dev,
                       struct dma_debug_entry *ref,
                       bool to_cpu)
{
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(ref, &flags);

        entry = bucket_find_contain(&bucket, ref, &flags);

        if (!entry) {
                err_printk(dev, NULL, "DMA-API: device driver tries "
                           "to sync DMA memory it has not allocated "
                           "[device address=0x%016llx] [size=%llu bytes]\n",
                           (unsigned long long)ref->dev_addr, ref->size);
                goto out;
        }

        if (ref->size > entry->size) {
                err_printk(dev, entry, "DMA-API: device driver syncs"
                           " DMA memory outside allocated range "
                           "[device address=0x%016llx] "
                           "[allocation size=%llu bytes] "
                           "[sync offset+size=%llu]\n",
                           entry->dev_addr, entry->size,
                           ref->size);
        }

        if (entry->direction == DMA_BIDIRECTIONAL)
                goto out;

        if (ref->direction != entry->direction) {
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "DMA memory with different direction "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           (unsigned long long)ref->dev_addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[ref->direction]);
        }

        if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
                      !(ref->direction == DMA_TO_DEVICE))
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "device read-only DMA memory for cpu "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           (unsigned long long)ref->dev_addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[ref->direction]);

        if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
                       !(ref->direction == DMA_FROM_DEVICE))
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "device write-only DMA memory to device "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           (unsigned long long)ref->dev_addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[ref->direction]);

        if (ref->sg_call_ents && ref->type == dma_debug_sg &&
            ref->sg_call_ents != entry->sg_call_ents) {
                err_printk(ref->dev, entry, "DMA-API: device driver syncs "
                           "DMA sg list with different entry count "
                           "[map count=%d] [sync count=%d]\n",
                           entry->sg_call_ents, ref->sg_call_ents);
        }

out:
        put_hash_bucket(bucket, &flags);
}

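/*
 * debug_dma_map_page() is the hook called after a successful
 * dma_map_page()/dma_map_single(): it records the new mapping in the hash
 * and sanity-checks the cpu address when the page is directly addressable
 * (i.e. not highmem).
 */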
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
                        size_t size, int direction, dma_addr_t dma_addr,
                        bool map_single)
{
        struct dma_debug_entry *entry;

        if (unlikely(dma_debug_disabled()))
                return;

        if (dma_mapping_error(dev, dma_addr))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->dev = dev;
        entry->type = dma_debug_page;
        entry->pfn = page_to_pfn(page);
        entry->offset = offset;
        entry->dev_addr = dma_addr;
        entry->size = size;
        entry->direction = direction;
        entry->map_err_type = MAP_ERR_NOT_CHECKED;

        if (map_single)
                entry->type = dma_debug_single;

        if (!PageHighMem(page)) {
                void *addr = page_address(page) + offset;

                check_for_stack(dev, addr);
                check_for_illegal_area(dev, addr, size);
        }

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);

void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        struct dma_debug_entry ref;
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        if (unlikely(dma_debug_disabled()))
                return;

        ref.dev = dev;
        ref.dev_addr = dma_addr;
        bucket = get_hash_bucket(&ref, &flags);

        list_for_each_entry(entry, &bucket->list, list) {
                if (!exact_match(&ref, entry))
                        continue;

                /*
                 * The same physical address can be mapped multiple
                 * times. Without a hardware IOMMU this results in the
                 * same device addresses being put into the dma-debug
                 * hash multiple times too. This can result in false
                 * positives being reported. Therefore we implement a
                 * best-fit algorithm here which updates the first entry
                 * from the hash which fits the reference value and is
                 * not currently listed as being checked.
                 */
                if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
                        entry->map_err_type = MAP_ERR_CHECKED;
                        break;
                }
        }

        put_hash_bucket(bucket, &flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);

void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
                          size_t size, int direction, bool map_single)
{
        struct dma_debug_entry ref = {
                .type = dma_debug_page,
                .dev = dev,
                .dev_addr = addr,
                .size = size,
                .direction = direction,
        };

        if (unlikely(dma_debug_disabled()))
                return;

        if (map_single)
                ref.type = dma_debug_single;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
                      int nents, int mapped_ents, int direction)
{
        struct dma_debug_entry *entry;
        struct scatterlist *s;
        int i;

        if (unlikely(dma_debug_disabled()))
                return;

        for_each_sg(sg, s, mapped_ents, i) {
                entry = dma_entry_alloc();
                if (!entry)
                        return;

                entry->type = dma_debug_sg;
                entry->dev = dev;
                entry->pfn = page_to_pfn(sg_page(s));
                entry->offset = s->offset;
                entry->size = sg_dma_len(s);
                entry->dev_addr = sg_dma_address(s);
                entry->direction = direction;
                entry->sg_call_ents = nents;
                entry->sg_mapped_ents = mapped_ents;

                if (!PageHighMem(sg_page(s))) {
                        check_for_stack(dev, sg_virt(s));
                        check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
                }

                add_dma_entry(entry);
        }
}
EXPORT_SYMBOL(debug_dma_map_sg);

static int get_nr_mapped_entries(struct device *dev,
                                 struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;
        int mapped_ents;

        bucket = get_hash_bucket(ref, &flags);
        entry = bucket_find_exact(bucket, ref);
        mapped_ents = 0;

        if (entry)
                mapped_ents = entry->sg_mapped_ents;
        put_hash_bucket(bucket, &flags);

        return mapped_ents;
}

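/*
 * dma_map_sg() may have coalesced entries, so fewer entries can be
 * recorded than the driver hands back in. The unmap and sync_sg paths
 * below look up the recorded sg_mapped_ents once (on the first element)
 * and stop iterating there.
 */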
void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                        int nelems, int dir)
{
        struct scatterlist *s;
        int mapped_ents = 0, i;

        if (unlikely(dma_debug_disabled()))
                return;

        for_each_sg(sglist, s, nelems, i) {

                struct dma_debug_entry ref = {
                        .type = dma_debug_sg,
                        .dev = dev,
                        .pfn = page_to_pfn(sg_page(s)),
                        .offset = s->offset,
                        .dev_addr = sg_dma_address(s),
                        .size = sg_dma_len(s),
                        .direction = dir,
                        .sg_call_ents = nelems,
                };

                if (mapped_ents && i >= mapped_ents)
                        break;

                if (!i)
                        mapped_ents = get_nr_mapped_entries(dev, &ref);

                check_unmap(&ref);
        }
}
EXPORT_SYMBOL(debug_dma_unmap_sg);

void debug_dma_alloc_coherent(struct device *dev, size_t size,
                              dma_addr_t dma_addr, void *virt)
{
        struct dma_debug_entry *entry;

        if (unlikely(dma_debug_disabled()))
                return;

        if (unlikely(virt == NULL))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->type = dma_debug_coherent;
        entry->dev = dev;
        entry->pfn = page_to_pfn(virt_to_page(virt));
        entry->offset = (size_t) virt & ~PAGE_MASK;
        entry->size = size;
        entry->dev_addr = dma_addr;
        entry->direction = DMA_BIDIRECTIONAL;

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);

void debug_dma_free_coherent(struct device *dev, size_t size,
                             void *virt, dma_addr_t addr)
{
        struct dma_debug_entry ref = {
                .type = dma_debug_coherent,
                .dev = dev,
                .pfn = page_to_pfn(virt_to_page(virt)),
                .offset = (size_t) virt & ~PAGE_MASK,
                .dev_addr = addr,
                .size = size,
                .direction = DMA_BIDIRECTIONAL,
        };

        if (unlikely(dma_debug_disabled()))
                return;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                   size_t size, int direction)
{
        struct dma_debug_entry ref;

        if (unlikely(dma_debug_disabled()))
                return;

        ref.type = dma_debug_single;
        ref.dev = dev;
        ref.dev_addr = dma_handle;
        ref.size = size;
        ref.direction = direction;
        ref.sg_call_ents = 0;

        check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
                                      dma_addr_t dma_handle, size_t size,
                                      int direction)
{
        struct dma_debug_entry ref;

        if (unlikely(dma_debug_disabled()))
                return;

        ref.type = dma_debug_single;
        ref.dev = dev;
        ref.dev_addr = dma_handle;
        ref.size = size;
        ref.direction = direction;
        ref.sg_call_ents = 0;

        check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);

void debug_dma_sync_single_range_for_cpu(struct device *dev,
                                         dma_addr_t dma_handle,
                                         unsigned long offset, size_t size,
                                         int direction)
{
        struct dma_debug_entry ref;

        if (unlikely(dma_debug_disabled()))
                return;

        ref.type = dma_debug_single;
        ref.dev = dev;
        ref.dev_addr = dma_handle;
        ref.size = offset + size;
        ref.direction = direction;
        ref.sg_call_ents = 0;

        check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

void debug_dma_sync_single_range_for_device(struct device *dev,
                                            dma_addr_t dma_handle,
                                            unsigned long offset,
                                            size_t size, int direction)
{
        struct dma_debug_entry ref;

        if (unlikely(dma_debug_disabled()))
                return;

        ref.type = dma_debug_single;
        ref.dev = dev;
        ref.dev_addr = dma_handle;
        ref.size = offset + size;
        ref.direction = direction;
        ref.sg_call_ents = 0;

        check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);

void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                               int nelems, int direction)
{
        struct scatterlist *s;
        int mapped_ents = 0, i;

        if (unlikely(dma_debug_disabled()))
                return;

        for_each_sg(sg, s, nelems, i) {

                struct dma_debug_entry ref = {
                        .type = dma_debug_sg,
                        .dev = dev,
                        .pfn = page_to_pfn(sg_page(s)),
                        .offset = s->offset,
                        .dev_addr = sg_dma_address(s),
                        .size = sg_dma_len(s),
                        .direction = direction,
                        .sg_call_ents = nelems,
                };

                if (!i)
                        mapped_ents = get_nr_mapped_entries(dev, &ref);

                if (i >= mapped_ents)
                        break;

                check_sync(dev, &ref, true);
        }
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                                  int nelems, int direction)
{
        struct scatterlist *s;
        int mapped_ents = 0, i;

        if (unlikely(dma_debug_disabled()))
                return;

        for_each_sg(sg, s, nelems, i) {

                struct dma_debug_entry ref = {
                        .type = dma_debug_sg,
                        .dev = dev,
                        .pfn = page_to_pfn(sg_page(s)),
                        .offset = s->offset,
                        .dev_addr = sg_dma_address(s),
                        .size = sg_dma_len(s),
                        .direction = direction,
                        .sg_call_ents = nelems,
                };

                if (!i)
                        mapped_ents = get_nr_mapped_entries(dev, &ref);

                if (i >= mapped_ents)
                        break;

                check_sync(dev, &ref, false);
        }
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);

static int __init dma_debug_driver_setup(char *str)
{
        int i;

        for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
                current_driver_name[i] = *str;
                if (*str == 0)
                        break;
        }

        if (current_driver_name[0])
                pr_info("DMA-API: enable driver filter for driver [%s]\n",
                        current_driver_name);

        return 1;
}
__setup("dma_debug_driver=", dma_debug_driver_setup);