/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>

#define HASH_SIZE       1024ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

/* mapping types tracked by dma-debug */
enum {
	dma_debug_single,
	dma_debug_page,
	dma_debug_sg,
	dma_debug_coherent,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

struct dma_debug_entry {
	struct list_head list;
	struct device    *dev;
	int              type;
	phys_addr_t      paddr;
	u64              dev_addr;
	u64              size;
	int              direction;
	int              sg_call_ents;
	int              sg_mapped_ents;
#ifdef CONFIG_STACKTRACE
	struct           stack_trace stacktrace;
	unsigned long    st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;

/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/* debugfs dentries for the stuff above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;
static struct dentry *filter_dent           __read_mostly;

/* per-driver filter related state */

#define NAME_MAX_LEN 64

static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver                    __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);

static const char *type2name[4] = { "single", "page",
				    "scatter-gather", "coherent" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
				   "DMA_FROM_DEVICE", "DMA_NONE" };

/* little merge helper - remove it after the merge window */
#ifndef BUS_NOTIFY_UNBOUND_DRIVER
#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
#endif

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writable via debugfs.
 */

static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		printk(KERN_WARNING "Mapped at:\n");
		print_stack_trace(&entry->stacktrace, 0);
	}
#endif
}

static bool driver_filter(struct device *dev)
{
	/* driver filter off */
	if (likely(!current_driver_name[0]))
		return true;

	/* driver filter on and initialized */
	if (current_driver && dev->driver == current_driver)
		return true;

	/* driver filter on but not yet initialized */
	if (!current_driver && current_driver_name[0]) {
		struct device_driver *drv = get_driver(dev->driver);
		unsigned long flags;
		bool ret = false;

		if (!drv)
			return false;

		/* lock to protect against change of current_driver_name */
		read_lock_irqsave(&driver_name_lock, flags);

		if (drv->name &&
		    strncmp(current_driver_name, drv->name,
			    NAME_MAX_LEN - 1) == 0) {
			current_driver = drv;
			ret = true;
		}

		read_unlock_irqrestore(&driver_name_lock, flags);
		put_driver(drv);

		return ret;
	}

	return false;
}

#define err_printk(dev, entry, format, arg...) do {		\
		error_count += 1;				\
		if (driver_filter(dev) &&			\
		    (show_all_errors || show_num_errors > 0)) {	\
			WARN(1, "%s %s: " format,		\
			     dev_driver_string(dev),		\
			     dev_name(dev) , ## arg);		\
			dump_entry_trace(entry);		\
		}						\
		if (!show_all_errors && show_num_errors > 0)	\
			show_num_errors -= 1;			\
	} while (0);

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * Hash function is based on the dma address.
	 * With HASH_FN_SHIFT == 13 and 1024 buckets we use bits 13-22 of
	 * the dma address as the index into the hash.
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

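/*
 * Illustrative example: a mapping with dev_addr 0x12345000 lands in
 * bucket (0x12345000 >> 13) & 0x3ff = 0x91a2 & 0x3ff = 0x1a2 (418), so
 * every lookup for that dma address walks the same, short bucket list.
 */
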
/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long *flags)
{
	unsigned long __flags = *flags;

	spin_unlock_irqrestore(&bucket->lock, __flags);
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
						struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry, *ret = NULL;
	int matches = 0, match_lvl, last_lvl = 0;

	list_for_each_entry(entry, &bucket->list, list) {
		if ((entry->dev_addr != ref->dev_addr) ||
		    (entry->dev != ref->dev))
			continue;

		/*
		 * Some drivers map the same physical address multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which returns the entry from
		 * the hash which fits best to the reference value
		 * instead of the first-fit.
		 */
		matches += 1;
		match_lvl = 0;
		entry->size      == ref->size      ? ++match_lvl : match_lvl;
		entry->type      == ref->type      ? ++match_lvl : match_lvl;
		entry->direction == ref->direction ? ++match_lvl : match_lvl;

		if (match_lvl == 3) {
			/* perfect-fit - return the result */
			return entry;
		} else if (match_lvl > last_lvl) {
			/*
			 * We found an entry that fits better than the
			 * previous one
			 */
			last_lvl = match_lvl;
			ret      = entry;
		}
	}

	/*
	 * If we have multiple matches but no perfect-fit, just return
	 * NULL.
	 */
	ret = (matches == 1) ? ret : NULL;

	return ret;
}

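/*
 * Illustrative example (hypothetical values): if dev_addr 0x1000 is in
 * the hash twice, once as a 4096 byte dma_debug_page entry mapped
 * DMA_TO_DEVICE and once as a 512 byte dma_debug_single entry mapped
 * DMA_FROM_DEVICE, an unmap reference of {size=512,
 * type=dma_debug_single, direction=DMA_FROM_DEVICE} scores
 * match_lvl == 3 on the second entry and returns it immediately. A
 * plain first-fit could return the page entry instead and provoke a
 * false "frees DMA memory with different size" warning.
 */
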
/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);

		list_for_each_entry(entry, &bucket->list, list) {
			if (!dev || dev == entry->dev) {
				dev_info(entry->dev,
					 "%s idx %d P=%Lx D=%Lx L=%Lx %s\n",
					 type2name[entry->type], idx,
					 (unsigned long long)entry->paddr,
					 entry->dev_addr, entry->size,
					 dir2name[entry->direction]);
			}
		}

		spin_unlock_irqrestore(&bucket->lock, flags);
	}
}
EXPORT_SYMBOL(debug_dma_dump_mappings);

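/*
 * Usage sketch: a driver suspecting a mapping leak may call
 * debug_dma_dump_mappings(dev) to list only its own active mappings,
 * or debug_dma_dump_mappings(NULL) to dump every entry in the hash.
 */
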
/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, &flags);
}

static struct dma_debug_entry *__dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

	return entry;
}

/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	struct dma_debug_entry *entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(&free_entries_lock, flags);

	if (list_empty(&free_entries)) {
		printk(KERN_ERR "DMA-API: debugging out of memory "
				"- disabling\n");
		global_disable = true;
		goto out;
	}

	entry = __dma_entry_alloc();

#ifdef CONFIG_STACKTRACE
	entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
	entry->stacktrace.entries = entry->st_entries;
	entry->stacktrace.skip = 2;
	save_stack_trace(&entry->stacktrace);
#endif

out:
	spin_unlock_irqrestore(&free_entries_lock, flags);

	return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	/*
	 * add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}

int dma_debug_resize_entries(u32 num_entries)
{
	int i, delta, ret = 0;
	unsigned long flags;
	struct dma_debug_entry *entry;
	LIST_HEAD(tmp);

	spin_lock_irqsave(&free_entries_lock, flags);

	if (nr_total_entries < num_entries) {
		delta = num_entries - nr_total_entries;

		spin_unlock_irqrestore(&free_entries_lock, flags);

		for (i = 0; i < delta; i++) {
			entry = kzalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry)
				break;

			list_add_tail(&entry->list, &tmp);
		}

		spin_lock_irqsave(&free_entries_lock, flags);

		list_splice(&tmp, &free_entries);
		nr_total_entries += i;
		num_free_entries += i;
	} else {
		delta = nr_total_entries - num_entries;

		for (i = 0; i < delta && !list_empty(&free_entries); i++) {
			entry = __dma_entry_alloc();
			kfree(entry);
		}

		nr_total_entries -= i;
	}

	if (nr_total_entries != num_entries)
		ret = 1;

	spin_unlock_irqrestore(&free_entries_lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_debug_resize_entries);

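/*
 * Usage sketch (hypothetical caller): an IOMMU driver expecting many
 * concurrent mappings can grow the pool after dma_debug_init() ran:
 *
 *	if (dma_debug_resize_entries(65536) != 0)
 *		printk(KERN_WARNING "could not resize dma-debug pool\n");
 *
 * A non-zero return value means the pool could not be brought to
 * exactly the requested size.
 */
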
/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static int prealloc_memory(u32 num_entries)
{
	struct dma_debug_entry *entry, *next_entry;
	int i;

	for (i = 0; i < num_entries; ++i) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			goto out_err;

		list_add_tail(&entry->list, &free_entries);
	}

	num_free_entries = num_entries;
	min_free_entries = num_entries;

	printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
	       num_entries);

	return 0;

out_err:

	list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	return -ENOMEM;
}

static ssize_t filter_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN + 1];
	unsigned long flags;
	int len;

	if (!current_driver_name[0])
		return 0;

	/*
	 * We can't copy to userspace directly because current_driver_name can
	 * only be read under the driver_name_lock with irqs disabled. So
	 * create a temporary copy first.
	 */
	read_lock_irqsave(&driver_name_lock, flags);
	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
	read_unlock_irqrestore(&driver_name_lock, flags);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t filter_write(struct file *file, const char __user *userbuf,
			    size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN];
	unsigned long flags;
	size_t len = NAME_MAX_LEN - 1;
	int i;

	/*
	 * We can't copy from userspace directly. Access to
	 * current_driver_name is protected with a write_lock with irqs
	 * disabled. Since copy_from_user can fault and may sleep we
	 * need to copy to a temporary buffer first.
	 */
	len = min(count, len);
	if (copy_from_user(buf, userbuf, len))
		return -EFAULT;

	buf[len] = 0;

	write_lock_irqsave(&driver_name_lock, flags);

	/*
	 * Now handle the string we got from userspace very carefully.
	 * The rules are:
	 *         - only use the first token we got
	 *         - token delimiter is everything looking like a space
	 *           character (' ', '\n', '\t' ...)
	 */
	if (!isalnum(buf[0])) {
		/*
		 * If the first character userspace gave us is not
		 * alphanumeric then assume the filter should be
		 * switched off.
		 */
		if (current_driver_name[0])
			printk(KERN_INFO "DMA-API: switching off dma-debug "
					 "driver filter\n");
		current_driver_name[0] = 0;
		current_driver = NULL;
		goto out_unlock;
	}

	/*
	 * Now parse out the first token and use it as the name for the
	 * driver to filter for.
	 */
	for (i = 0; i < NAME_MAX_LEN; ++i) {
		current_driver_name[i] = buf[i];
		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
			break;
	}
	current_driver_name[i] = 0;
	current_driver = NULL;

	printk(KERN_INFO "DMA-API: enable driver filter for driver [%s]\n",
	       current_driver_name);

out_unlock:
	write_unlock_irqrestore(&driver_name_lock, flags);

	return count;
}

static const struct file_operations filter_fops = {
	.read  = filter_read,
	.write = filter_write,
};

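/*
 * Userspace usage sketch (assumes debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	echo mydriver > /sys/kernel/debug/dma-api/driver_filter
 *	echo ""       > /sys/kernel/debug/dma-api/driver_filter
 *
 * The first write limits error reporting to the driver named
 * "mydriver"; the second starts with a non-alphanumeric character
 * ('\n') and therefore switches the filter off again.
 */
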
static int dma_debug_fs_init(void)
{
	dma_debug_dent = debugfs_create_dir("dma-api", NULL);
	if (!dma_debug_dent) {
		printk(KERN_ERR "DMA-API: can not create debugfs directory\n");
		return -ENOMEM;
	}

	global_disable_dent = debugfs_create_bool("disabled", 0444,
			dma_debug_dent,
			(u32 *)&global_disable);
	if (!global_disable_dent)
		goto out_err;

	error_count_dent = debugfs_create_u32("error_count", 0444,
			dma_debug_dent, &error_count);
	if (!error_count_dent)
		goto out_err;

	show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
			dma_debug_dent,
			&show_all_errors);
	if (!show_all_errors_dent)
		goto out_err;

	show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
			dma_debug_dent,
			&show_num_errors);
	if (!show_num_errors_dent)
		goto out_err;

	num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
			dma_debug_dent,
			&num_free_entries);
	if (!num_free_entries_dent)
		goto out_err;

	min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
			dma_debug_dent,
			&min_free_entries);
	if (!min_free_entries_dent)
		goto out_err;

	filter_dent = debugfs_create_file("driver_filter", 0644,
					  dma_debug_dent, NULL, &filter_fops);
	if (!filter_dent)
		goto out_err;

	return 0;

out_err:
	debugfs_remove_recursive(dma_debug_dent);

	return -ENOMEM;
}

static int device_dma_allocations(struct device *dev)
{
	struct dma_debug_entry *entry;
	unsigned long flags;
	int count = 0, i;

	for (i = 0; i < HASH_SIZE; ++i) {
		spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
			if (entry->dev == dev)
				count += 1;
		}
		spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
	}

	return count;
}

static int dma_debug_device_change(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct device *dev = data;
	int count;

	switch (action) {
	case BUS_NOTIFY_UNBOUND_DRIVER:
		count = device_dma_allocations(dev);
		if (count == 0)
			break;
		err_printk(dev, NULL, "DMA-API: device driver has pending "
				"DMA allocations while released from device "
				"[count=%d]\n", count);
		break;
	default:
		break;
	}

	return 0;
}

void dma_debug_add_bus(struct bus_type *bus)
{
	struct notifier_block *nb;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (nb == NULL) {
		printk(KERN_ERR "dma_debug_add_bus: out of memory\n");
		return;
	}

	nb->notifier_call = dma_debug_device_change;

	bus_register_notifier(bus, nb);
}

/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
	int i;

	if (global_disable)
		return;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	if (dma_debug_fs_init() != 0) {
		printk(KERN_ERR "DMA-API: error creating debugfs entries "
				"- disabling\n");
		global_disable = true;

		return;
	}

	if (req_entries)
		num_entries = req_entries;

	if (prealloc_memory(num_entries) != 0) {
		printk(KERN_ERR "DMA-API: debugging out of memory error "
				"- disabled\n");
		global_disable = true;

		return;
	}

	nr_total_entries = num_free_entries;

	printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
}

static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		printk(KERN_INFO "DMA-API: debugging disabled on kernel "
				 "command line\n");
		global_disable = true;
	}

	return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	int res;

	if (!str)
		return -EINVAL;

	res = get_option(&str, &req_entries);

	if (!res)
		req_entries = 0;

	return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);

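/*
 * Command line usage for the two handlers above:
 *
 *	dma_debug=off            - disable the checks at boot
 *	dma_debug_entries=<num>  - preallocate <num> debug entries
 */
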
static void check_unmap(struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	if (dma_mapping_error(ref->dev, ref->dev_addr)) {
		err_printk(ref->dev, NULL, "DMA-API: device driver tries "
			   "to free an invalid DMA memory address\n");
		return;
	}

	bucket = get_hash_bucket(ref, &flags);
	entry = hash_bucket_find(bucket, ref);

	if (!entry) {
		err_printk(ref->dev, NULL, "DMA-API: device driver tries "
			   "to free DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   ref->dev_addr, ref->size);
		goto out;
	}

	if (ref->size != entry->size) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different size "
			   "[device address=0x%016llx] [map size=%llu bytes] "
			   "[unmap size=%llu bytes]\n",
			   ref->dev_addr, entry->size, ref->size);
	}

	if (ref->type != entry->type) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with wrong function "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s] [unmapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type], type2name[ref->type]);
	} else if ((entry->type == dma_debug_coherent) &&
		   (ref->paddr != entry->paddr)) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different CPU address "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[cpu alloc address=%p] [cpu free address=%p]\n",
			   ref->dev_addr, ref->size,
			   (void *)entry->paddr, (void *)ref->paddr);
	}

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA sg list with different entry count "
			   "[map count=%d] [unmap count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

	/*
	 * This may be no bug in reality - but most implementations of the
	 * DMA API don't handle this properly, so check for it here
	 */
	if (ref->direction != entry->direction) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [unmapped with %s]\n",
			   ref->dev_addr, ref->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	hash_bucket_del(entry);
	dma_entry_free(entry);

out:
	put_hash_bucket(bucket, &flags);
}

static void check_for_stack(struct device *dev, void *addr)
{
	if (object_is_on_stack(addr))
		err_printk(dev, NULL, "DMA-API: device driver maps memory "
				"from stack [addr=%p]\n", addr);
}

static inline bool overlap(void *addr, u64 size, void *start, void *end)
{
	void *addr2 = (char *)addr + size;

	return ((addr >= start && addr < end) ||
		(addr2 >= start && addr2 < end) ||
		((addr < start) && (addr2 >= end)));
}

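/*
 * Illustrative example for overlap(), with start=0x1000 and end=0x2000:
 * overlap(0x0800, 0x100, ...) is false (the range ends at 0x0900),
 * overlap(0x1800, 0x100, ...) is true (addr lies inside the area), and
 * overlap(0x0800, 0x2000, ...) is true via the last clause because
 * [0x0800, 0x2800) completely covers the area.
 */
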
static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
{
	if (overlap(addr, size, _text, _etext) ||
	    overlap(addr, size, __start_rodata, __end_rodata))
		err_printk(dev, NULL, "DMA-API: device driver maps "
				"memory from kernel text or rodata "
				"[addr=%p] [size=%llu]\n", addr, size);
}

static void check_sync(struct device *dev, dma_addr_t addr,
		       u64 size, u64 offset, int direction, bool to_cpu)
{
	struct dma_debug_entry ref = {
		.dev            = dev,
		.dev_addr       = addr,
		.size           = size,
		.direction      = direction,
	};
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(&ref, &flags);

	entry = hash_bucket_find(bucket, &ref);

	if (!entry) {
		err_printk(dev, NULL, "DMA-API: device driver tries "
			   "to sync DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   (unsigned long long)addr, size);
		goto out;
	}

	if ((offset + size) > entry->size) {
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "DMA memory outside allocated range "
			   "[device address=0x%016llx] "
			   "[allocation size=%llu bytes] [sync offset=%llu] "
			   "[sync size=%llu]\n", entry->dev_addr, entry->size,
			   offset, size);
	}

	if (direction != entry->direction) {
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[direction]);
	}

	if (entry->direction == DMA_BIDIRECTIONAL)
		goto out;

	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
		      !(direction == DMA_TO_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "device read-only DMA memory for cpu "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[direction]);

	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
		       !(direction == DMA_FROM_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "device write-only DMA memory to device "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[direction]);

out:
	put_hash_bucket(bucket, &flags);
}

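/*
 * Illustrative example: a buffer mapped with DMA_TO_DEVICE is read-only
 * for the device. If a driver syncs it for the cpu with
 * DMA_FROM_DEVICE, check_sync() runs with to_cpu == true and emits both
 * the direction-mismatch warning and the "device read-only DMA memory
 * for cpu" warning above. A DMA_BIDIRECTIONAL mapping skips the
 * read-only/write-only checks entirely via the goto above.
 */
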
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr,
			bool map_single)
{
	struct dma_debug_entry *entry;

	if (unlikely(global_disable))
		return;

	if (unlikely(dma_mapping_error(dev, dma_addr)))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev       = dev;
	entry->type      = dma_debug_page;
	entry->paddr     = page_to_phys(page) + offset;
	entry->dev_addr  = dma_addr;
	entry->size      = size;
	entry->direction = direction;

	if (map_single)
		entry->type = dma_debug_single;

	if (!PageHighMem(page)) {
		void *addr = ((char *)page_address(page)) + offset;

		check_for_stack(dev, addr);
		check_for_illegal_area(dev, addr, size);
	}

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);

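/*
 * Call-path sketch (simplified, not taken verbatim from any
 * architecture): with CONFIG_DMA_API_DEBUG enabled the dma_map_page()
 * wrapper is expected to call this hook after the real mapping, roughly:
 *
 *	addr = ops->map_page(dev, page, offset, size, dir, NULL);
 *	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 *
 * dma_map_single() does the same with map_single == true, which lets
 * check_unmap() warn when a mapping created with dma_map_page() is torn
 * down with dma_unmap_single() or vice versa.
 */
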
void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
			  size_t size, int direction, bool map_single)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_page,
		.dev            = dev,
		.dev_addr       = addr,
		.size           = size,
		.direction      = direction,
	};

	if (unlikely(global_disable))
		return;

	if (map_single)
		ref.type = dma_debug_single;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, mapped_ents, i) {
		entry = dma_entry_alloc();
		if (!entry)
			return;

		entry->type           = dma_debug_sg;
		entry->dev            = dev;
		entry->paddr          = sg_phys(s);
		entry->size           = sg_dma_len(s);
		entry->dev_addr       = sg_dma_address(s);
		entry->direction      = direction;
		entry->sg_call_ents   = nents;
		entry->sg_mapped_ents = mapped_ents;

		if (!PageHighMem(sg_page(s))) {
			check_for_stack(dev, sg_virt(s));
			check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
		}

		add_dma_entry(entry);
	}
}
EXPORT_SYMBOL(debug_dma_map_sg);

static int get_nr_mapped_entries(struct device *dev, struct scatterlist *s)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;
	int mapped_ents = 0;
	struct dma_debug_entry ref;

	ref.dev      = dev;
	ref.dev_addr = sg_dma_address(s);
	ref.size     = sg_dma_len(s);

	bucket = get_hash_bucket(&ref, &flags);
	entry = hash_bucket_find(bucket, &ref);

	if (entry)
		mapped_ents = entry->sg_mapped_ents;
	put_hash_bucket(bucket, &flags);

	return mapped_ents;
}

void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sglist, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.paddr          = sg_phys(s),
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = dir,
			.sg_call_ents   = 0,
		};

		if (mapped_ents && i >= mapped_ents)
			break;

		if (!i) {
			ref.sg_call_ents = nelems;
			mapped_ents = get_nr_mapped_entries(dev, s);
		}

		check_unmap(&ref);
	}
}
EXPORT_SYMBOL(debug_dma_unmap_sg);

void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt)
{
	struct dma_debug_entry *entry;

	if (unlikely(global_disable))
		return;

	if (unlikely(virt == NULL))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type      = dma_debug_coherent;
	entry->dev       = dev;
	entry->paddr     = virt_to_phys(virt);
	entry->size      = size;
	entry->dev_addr  = dma_addr;
	entry->direction = DMA_BIDIRECTIONAL;

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);

void debug_dma_free_coherent(struct device *dev, size_t size,
			     void *virt, dma_addr_t addr)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_coherent,
		.dev            = dev,
		.paddr          = virt_to_phys(virt),
		.dev_addr       = addr,
		.size           = size,
		.direction      = DMA_BIDIRECTIONAL,
	};

	if (unlikely(global_disable))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, 0, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, 0, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);

void debug_dma_sync_single_range_for_cpu(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, offset, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

void debug_dma_sync_single_range_for_device(struct device *dev,
					    dma_addr_t dma_handle,
					    unsigned long offset,
					    size_t size, int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, offset, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);

void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, nelems, i) {

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, s);

		if (i >= mapped_ents)
			break;

		check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
			   direction, true);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, nelems, i) {

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, s);

		if (i >= mapped_ents)
			break;

		check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
			   direction, false);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);

static int __init dma_debug_driver_setup(char *str)
{
	int i;

	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
		current_driver_name[i] = *str;
		if (*str == 0)
			break;
	}

	if (current_driver_name[0])
		printk(KERN_INFO "DMA-API: enable driver filter for "
				 "driver [%s]\n", current_driver_name);

	return 1;
}
__setup("dma_debug_driver=", dma_debug_driver_setup);