2 * Copyright (C) 2008 Advanced Micro Devices, Inc.
4 * Author: Joerg Roedel <joerg.roedel@amd.com>
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include <linux/scatterlist.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/stacktrace.h>
23 #include <linux/dma-debug.h>
24 #include <linux/spinlock.h>
25 #include <linux/debugfs.h>
26 #include <linux/uaccess.h>
27 #include <linux/device.h>
28 #include <linux/types.h>
29 #include <linux/sched.h>
30 #include <linux/ctype.h>
31 #include <linux/list.h>
32 #include <linux/slab.h>
34 #include <asm/sections.h>
/* Size of the dma-address hash table (power of two, see HASH_FN_MASK) */
#define HASH_SIZE       1024ULL
/* Bits of the dma address used as hash index (see hash_fn()) */
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

/* Depth of the saved allocation stack trace (CONFIG_STACKTRACE only) */
#define DMA_DEBUG_STACKTRACE_ENTRIES 5
49 struct dma_debug_entry
{
50 struct list_head list
;
59 #ifdef CONFIG_STACKTRACE
60 struct stack_trace stacktrace
;
61 unsigned long st_entries
[DMA_DEBUG_STACKTRACE_ENTRIES
];
66 struct list_head list
;
68 } ____cacheline_aligned_in_smp
;
70 /* Hash list to save the allocated dma addresses */
71 static struct hash_bucket dma_entry_hash
[HASH_SIZE
];
72 /* List of pre-allocated dma_debug_entry's */
73 static LIST_HEAD(free_entries
);
74 /* Lock for the list above */
75 static DEFINE_SPINLOCK(free_entries_lock
);
77 /* Global disable flag - will be set in case of an error */
78 static bool global_disable __read_mostly
;
80 /* Global error count */
81 static u32 error_count
;
83 /* Global error show enable*/
84 static u32 show_all_errors __read_mostly
;
85 /* Number of errors to show */
86 static u32 show_num_errors
= 1;
88 static u32 num_free_entries
;
89 static u32 min_free_entries
;
90 static u32 nr_total_entries
;
92 /* number of preallocated entries requested by kernel cmdline */
93 static u32 req_entries
;
95 /* debugfs dentry's for the stuff above */
96 static struct dentry
*dma_debug_dent __read_mostly
;
97 static struct dentry
*global_disable_dent __read_mostly
;
98 static struct dentry
*error_count_dent __read_mostly
;
99 static struct dentry
*show_all_errors_dent __read_mostly
;
100 static struct dentry
*show_num_errors_dent __read_mostly
;
101 static struct dentry
*num_free_entries_dent __read_mostly
;
102 static struct dentry
*min_free_entries_dent __read_mostly
;
103 static struct dentry
*filter_dent __read_mostly
;
105 /* per-driver filter related state */
107 #define NAME_MAX_LEN 64
109 static char current_driver_name
[NAME_MAX_LEN
] __read_mostly
;
110 static struct device_driver
*current_driver __read_mostly
;
112 static DEFINE_RWLOCK(driver_name_lock
);
114 static const char *type2name
[4] = { "single", "page",
115 "scather-gather", "coherent" };
117 static const char *dir2name
[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
118 "DMA_FROM_DEVICE", "DMA_NONE" };
121 * The access to some variables in this macro is racy. We can't use atomic_t
122 * here because all these variables are exported to debugfs. Some of them even
123 * writeable. This is also the reason why a lock won't help much. But anyway,
124 * the races are no big deal. Here is why:
126 * error_count: the addition is racy, but the worst thing that can happen is
127 * that we don't count some errors
128 * show_num_errors: the subtraction is racy. Also no big deal because in
129 * worst case this will result in one warning more in the
130 * system log than the user configured. This variable is
131 * writeable via debugfs.
133 static inline void dump_entry_trace(struct dma_debug_entry
*entry
)
135 #ifdef CONFIG_STACKTRACE
137 printk(KERN_WARNING
"Mapped at:\n");
138 print_stack_trace(&entry
->stacktrace
, 0);
143 static bool driver_filter(struct device
*dev
)
145 /* driver filter off */
146 if (likely(!current_driver_name
[0]))
149 /* driver filter on and initialized */
150 if (current_driver
&& dev
->driver
== current_driver
)
153 /* driver filter on but not yet initialized */
154 if (!current_driver
&& current_driver_name
[0]) {
155 struct device_driver
*drv
= get_driver(dev
->driver
);
162 /* lock to protect against change of current_driver_name */
163 read_lock_irqsave(&driver_name_lock
, flags
);
166 strncmp(current_driver_name
, drv
->name
,
167 NAME_MAX_LEN
-1) == 0) {
168 current_driver
= drv
;
172 read_unlock_irqrestore(&driver_name_lock
, flags
);
181 #define err_printk(dev, entry, format, arg...) do { \
183 if (driver_filter(dev) && \
184 (show_all_errors || show_num_errors > 0)) { \
185 WARN(1, "%s %s: " format, \
186 dev_driver_string(dev), \
187 dev_name(dev) , ## arg); \
188 dump_entry_trace(entry); \
190 if (!show_all_errors && show_num_errors > 0) \
191 show_num_errors -= 1; \
195 * Hash related functions
197 * Every DMA-API request is saved into a struct dma_debug_entry. To
198 * have quick access to these structs they are stored into a hash.
200 static int hash_fn(struct dma_debug_entry
*entry
)
203 * Hash function is based on the dma address.
204 * We use bits 20-27 here as the index into the hash
206 return (entry
->dev_addr
>> HASH_FN_SHIFT
) & HASH_FN_MASK
;
210 * Request exclusive access to a hash bucket for a given dma_debug_entry.
212 static struct hash_bucket
*get_hash_bucket(struct dma_debug_entry
*entry
,
213 unsigned long *flags
)
215 int idx
= hash_fn(entry
);
216 unsigned long __flags
;
218 spin_lock_irqsave(&dma_entry_hash
[idx
].lock
, __flags
);
220 return &dma_entry_hash
[idx
];
224 * Give up exclusive access to the hash bucket
226 static void put_hash_bucket(struct hash_bucket
*bucket
,
227 unsigned long *flags
)
229 unsigned long __flags
= *flags
;
231 spin_unlock_irqrestore(&bucket
->lock
, __flags
);
235 * Search a given entry in the hash bucket list
237 static struct dma_debug_entry
*hash_bucket_find(struct hash_bucket
*bucket
,
238 struct dma_debug_entry
*ref
)
240 struct dma_debug_entry
*entry
;
242 list_for_each_entry(entry
, &bucket
->list
, list
) {
243 if ((entry
->dev_addr
== ref
->dev_addr
) &&
244 (entry
->dev
== ref
->dev
))
252 * Add an entry to a hash bucket
254 static void hash_bucket_add(struct hash_bucket
*bucket
,
255 struct dma_debug_entry
*entry
)
257 list_add_tail(&entry
->list
, &bucket
->list
);
261 * Remove entry from a hash bucket list
263 static void hash_bucket_del(struct dma_debug_entry
*entry
)
265 list_del(&entry
->list
);
269 * Dump mapping entries for debugging purposes
271 void debug_dma_dump_mappings(struct device
*dev
)
275 for (idx
= 0; idx
< HASH_SIZE
; idx
++) {
276 struct hash_bucket
*bucket
= &dma_entry_hash
[idx
];
277 struct dma_debug_entry
*entry
;
280 spin_lock_irqsave(&bucket
->lock
, flags
);
282 list_for_each_entry(entry
, &bucket
->list
, list
) {
283 if (!dev
|| dev
== entry
->dev
) {
285 "%s idx %d P=%Lx D=%Lx L=%Lx %s\n",
286 type2name
[entry
->type
], idx
,
287 (unsigned long long)entry
->paddr
,
288 entry
->dev_addr
, entry
->size
,
289 dir2name
[entry
->direction
]);
293 spin_unlock_irqrestore(&bucket
->lock
, flags
);
296 EXPORT_SYMBOL(debug_dma_dump_mappings
);
/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, &flags);
}
312 static struct dma_debug_entry
*__dma_entry_alloc(void)
314 struct dma_debug_entry
*entry
;
316 entry
= list_entry(free_entries
.next
, struct dma_debug_entry
, list
);
317 list_del(&entry
->list
);
318 memset(entry
, 0, sizeof(*entry
));
320 num_free_entries
-= 1;
321 if (num_free_entries
< min_free_entries
)
322 min_free_entries
= num_free_entries
;
327 /* struct dma_entry allocator
329 * The next two functions implement the allocator for
330 * struct dma_debug_entries.
332 static struct dma_debug_entry
*dma_entry_alloc(void)
334 struct dma_debug_entry
*entry
= NULL
;
337 spin_lock_irqsave(&free_entries_lock
, flags
);
339 if (list_empty(&free_entries
)) {
340 printk(KERN_ERR
"DMA-API: debugging out of memory "
342 global_disable
= true;
346 entry
= __dma_entry_alloc();
348 #ifdef CONFIG_STACKTRACE
349 entry
->stacktrace
.max_entries
= DMA_DEBUG_STACKTRACE_ENTRIES
;
350 entry
->stacktrace
.entries
= entry
->st_entries
;
351 entry
->stacktrace
.skip
= 2;
352 save_stack_trace(&entry
->stacktrace
);
356 spin_unlock_irqrestore(&free_entries_lock
, flags
);
361 static void dma_entry_free(struct dma_debug_entry
*entry
)
366 * add to beginning of the list - this way the entries are
367 * more likely cache hot when they are reallocated.
369 spin_lock_irqsave(&free_entries_lock
, flags
);
370 list_add(&entry
->list
, &free_entries
);
371 num_free_entries
+= 1;
372 spin_unlock_irqrestore(&free_entries_lock
, flags
);
375 int dma_debug_resize_entries(u32 num_entries
)
377 int i
, delta
, ret
= 0;
379 struct dma_debug_entry
*entry
;
382 spin_lock_irqsave(&free_entries_lock
, flags
);
384 if (nr_total_entries
< num_entries
) {
385 delta
= num_entries
- nr_total_entries
;
387 spin_unlock_irqrestore(&free_entries_lock
, flags
);
389 for (i
= 0; i
< delta
; i
++) {
390 entry
= kzalloc(sizeof(*entry
), GFP_KERNEL
);
394 list_add_tail(&entry
->list
, &tmp
);
397 spin_lock_irqsave(&free_entries_lock
, flags
);
399 list_splice(&tmp
, &free_entries
);
400 nr_total_entries
+= i
;
401 num_free_entries
+= i
;
403 delta
= nr_total_entries
- num_entries
;
405 for (i
= 0; i
< delta
&& !list_empty(&free_entries
); i
++) {
406 entry
= __dma_entry_alloc();
410 nr_total_entries
-= i
;
413 if (nr_total_entries
!= num_entries
)
416 spin_unlock_irqrestore(&free_entries_lock
, flags
);
420 EXPORT_SYMBOL(dma_debug_resize_entries
);
423 * DMA-API debugging init code
425 * The init code does two things:
426 * 1. Initialize core data structures
427 * 2. Preallocate a given number of dma_debug_entry structs
430 static int prealloc_memory(u32 num_entries
)
432 struct dma_debug_entry
*entry
, *next_entry
;
435 for (i
= 0; i
< num_entries
; ++i
) {
436 entry
= kzalloc(sizeof(*entry
), GFP_KERNEL
);
440 list_add_tail(&entry
->list
, &free_entries
);
443 num_free_entries
= num_entries
;
444 min_free_entries
= num_entries
;
446 printk(KERN_INFO
"DMA-API: preallocated %d debug entries\n",
453 list_for_each_entry_safe(entry
, next_entry
, &free_entries
, list
) {
454 list_del(&entry
->list
);
461 static ssize_t
filter_read(struct file
*file
, char __user
*user_buf
,
462 size_t count
, loff_t
*ppos
)
465 char buf
[NAME_MAX_LEN
+ 1];
468 if (!current_driver_name
[0])
472 * We can't copy to userspace directly because current_driver_name can
473 * only be read under the driver_name_lock with irqs disabled. So
474 * create a temporary copy first.
476 read_lock_irqsave(&driver_name_lock
, flags
);
477 len
= scnprintf(buf
, NAME_MAX_LEN
+ 1, "%s\n", current_driver_name
);
478 read_unlock_irqrestore(&driver_name_lock
, flags
);
480 return simple_read_from_buffer(user_buf
, count
, ppos
, buf
, len
);
483 static ssize_t
filter_write(struct file
*file
, const char __user
*userbuf
,
484 size_t count
, loff_t
*ppos
)
487 char buf
[NAME_MAX_LEN
];
488 size_t len
= NAME_MAX_LEN
- 1;
492 * We can't copy from userspace directly. Access to
493 * current_driver_name is protected with a write_lock with irqs
494 * disabled. Since copy_from_user can fault and may sleep we
495 * need to copy to temporary buffer first
497 len
= min(count
, len
);
498 if (copy_from_user(buf
, userbuf
, len
))
503 write_lock_irqsave(&driver_name_lock
, flags
);
505 /* Now handle the string we got from userspace very carefully.
507 * - only use the first token we got
508 * - token delimiter is everything looking like a space
509 * character (' ', '\n', '\t' ...)
512 if (!isalnum(buf
[0])) {
514 If the first character userspace gave us is not
515 * alphanumerical then assume the filter should be
518 if (current_driver_name
[0])
519 printk(KERN_INFO
"DMA-API: switching off dma-debug "
521 current_driver_name
[0] = 0;
522 current_driver
= NULL
;
527 * Now parse out the first token and use it as the name for the
528 * driver to filter for.
530 for (i
= 0; i
< NAME_MAX_LEN
; ++i
) {
531 current_driver_name
[i
] = buf
[i
];
532 if (isspace(buf
[i
]) || buf
[i
] == ' ' || buf
[i
] == 0)
535 current_driver_name
[i
] = 0;
536 current_driver
= NULL
;
538 printk(KERN_INFO
"DMA-API: enable driver filter for driver [%s]\n",
539 current_driver_name
);
542 write_unlock_irqrestore(&driver_name_lock
, flags
);
547 const struct file_operations filter_fops
= {
549 .write
= filter_write
,
552 static int dma_debug_fs_init(void)
554 dma_debug_dent
= debugfs_create_dir("dma-api", NULL
);
555 if (!dma_debug_dent
) {
556 printk(KERN_ERR
"DMA-API: can not create debugfs directory\n");
560 global_disable_dent
= debugfs_create_bool("disabled", 0444,
562 (u32
*)&global_disable
);
563 if (!global_disable_dent
)
566 error_count_dent
= debugfs_create_u32("error_count", 0444,
567 dma_debug_dent
, &error_count
);
568 if (!error_count_dent
)
571 show_all_errors_dent
= debugfs_create_u32("all_errors", 0644,
574 if (!show_all_errors_dent
)
577 show_num_errors_dent
= debugfs_create_u32("num_errors", 0644,
580 if (!show_num_errors_dent
)
583 num_free_entries_dent
= debugfs_create_u32("num_free_entries", 0444,
586 if (!num_free_entries_dent
)
589 min_free_entries_dent
= debugfs_create_u32("min_free_entries", 0444,
592 if (!min_free_entries_dent
)
595 filter_dent
= debugfs_create_file("driver_filter", 0644,
596 dma_debug_dent
, NULL
, &filter_fops
);
603 debugfs_remove_recursive(dma_debug_dent
);
void dma_debug_add_bus(struct bus_type *bus)
{
	/* FIXME: register notifier */
}
614 * Let the architectures decide how many entries should be preallocated.
616 void dma_debug_init(u32 num_entries
)
623 for (i
= 0; i
< HASH_SIZE
; ++i
) {
624 INIT_LIST_HEAD(&dma_entry_hash
[i
].list
);
625 dma_entry_hash
[i
].lock
= SPIN_LOCK_UNLOCKED
;
628 if (dma_debug_fs_init() != 0) {
629 printk(KERN_ERR
"DMA-API: error creating debugfs entries "
631 global_disable
= true;
637 num_entries
= req_entries
;
639 if (prealloc_memory(num_entries
) != 0) {
640 printk(KERN_ERR
"DMA-API: debugging out of memory error "
642 global_disable
= true;
647 nr_total_entries
= num_free_entries
;
649 printk(KERN_INFO
"DMA-API: debugging enabled by kernel config\n");
652 static __init
int dma_debug_cmdline(char *str
)
657 if (strncmp(str
, "off", 3) == 0) {
658 printk(KERN_INFO
"DMA-API: debugging disabled on kernel "
660 global_disable
= true;
666 static __init
int dma_debug_entries_cmdline(char *str
)
673 res
= get_option(&str
, &req_entries
);
681 __setup("dma_debug=", dma_debug_cmdline
);
682 __setup("dma_debug_entries=", dma_debug_entries_cmdline
);
684 static void check_unmap(struct dma_debug_entry
*ref
)
686 struct dma_debug_entry
*entry
;
687 struct hash_bucket
*bucket
;
690 if (dma_mapping_error(ref
->dev
, ref
->dev_addr
)) {
691 err_printk(ref
->dev
, NULL
, "DMA-API: device driver tries "
692 "to free an invalid DMA memory address\n");
696 bucket
= get_hash_bucket(ref
, &flags
);
697 entry
= hash_bucket_find(bucket
, ref
);
700 err_printk(ref
->dev
, NULL
, "DMA-API: device driver tries "
701 "to free DMA memory it has not allocated "
702 "[device address=0x%016llx] [size=%llu bytes]\n",
703 ref
->dev_addr
, ref
->size
);
707 if (ref
->size
!= entry
->size
) {
708 err_printk(ref
->dev
, entry
, "DMA-API: device driver frees "
709 "DMA memory with different size "
710 "[device address=0x%016llx] [map size=%llu bytes] "
711 "[unmap size=%llu bytes]\n",
712 ref
->dev_addr
, entry
->size
, ref
->size
);
715 if (ref
->type
!= entry
->type
) {
716 err_printk(ref
->dev
, entry
, "DMA-API: device driver frees "
717 "DMA memory with wrong function "
718 "[device address=0x%016llx] [size=%llu bytes] "
719 "[mapped as %s] [unmapped as %s]\n",
720 ref
->dev_addr
, ref
->size
,
721 type2name
[entry
->type
], type2name
[ref
->type
]);
722 } else if ((entry
->type
== dma_debug_coherent
) &&
723 (ref
->paddr
!= entry
->paddr
)) {
724 err_printk(ref
->dev
, entry
, "DMA-API: device driver frees "
725 "DMA memory with different CPU address "
726 "[device address=0x%016llx] [size=%llu bytes] "
727 "[cpu alloc address=%p] [cpu free address=%p]",
728 ref
->dev_addr
, ref
->size
,
729 (void *)entry
->paddr
, (void *)ref
->paddr
);
732 if (ref
->sg_call_ents
&& ref
->type
== dma_debug_sg
&&
733 ref
->sg_call_ents
!= entry
->sg_call_ents
) {
734 err_printk(ref
->dev
, entry
, "DMA-API: device driver frees "
735 "DMA sg list with different entry count "
736 "[map count=%d] [unmap count=%d]\n",
737 entry
->sg_call_ents
, ref
->sg_call_ents
);
741 * This may be no bug in reality - but most implementations of the
742 * DMA API don't handle this properly, so check for it here
744 if (ref
->direction
!= entry
->direction
) {
745 err_printk(ref
->dev
, entry
, "DMA-API: device driver frees "
746 "DMA memory with different direction "
747 "[device address=0x%016llx] [size=%llu bytes] "
748 "[mapped with %s] [unmapped with %s]\n",
749 ref
->dev_addr
, ref
->size
,
750 dir2name
[entry
->direction
],
751 dir2name
[ref
->direction
]);
754 hash_bucket_del(entry
);
755 dma_entry_free(entry
);
758 put_hash_bucket(bucket
, &flags
);
761 static void check_for_stack(struct device
*dev
, void *addr
)
763 if (object_is_on_stack(addr
))
764 err_printk(dev
, NULL
, "DMA-API: device driver maps memory from"
765 "stack [addr=%p]\n", addr
);
768 static inline bool overlap(void *addr
, u64 size
, void *start
, void *end
)
770 void *addr2
= (char *)addr
+ size
;
772 return ((addr
>= start
&& addr
< end
) ||
773 (addr2
>= start
&& addr2
< end
) ||
774 ((addr
< start
) && (addr2
>= end
)));
777 static void check_for_illegal_area(struct device
*dev
, void *addr
, u64 size
)
779 if (overlap(addr
, size
, _text
, _etext
) ||
780 overlap(addr
, size
, __start_rodata
, __end_rodata
))
781 err_printk(dev
, NULL
, "DMA-API: device driver maps "
782 "memory from kernel text or rodata "
783 "[addr=%p] [size=%llu]\n", addr
, size
);
786 static void check_sync(struct device
*dev
, dma_addr_t addr
,
787 u64 size
, u64 offset
, int direction
, bool to_cpu
)
789 struct dma_debug_entry ref
= {
793 .direction
= direction
,
795 struct dma_debug_entry
*entry
;
796 struct hash_bucket
*bucket
;
799 bucket
= get_hash_bucket(&ref
, &flags
);
801 entry
= hash_bucket_find(bucket
, &ref
);
804 err_printk(dev
, NULL
, "DMA-API: device driver tries "
805 "to sync DMA memory it has not allocated "
806 "[device address=0x%016llx] [size=%llu bytes]\n",
807 (unsigned long long)addr
, size
);
811 if ((offset
+ size
) > entry
->size
) {
812 err_printk(dev
, entry
, "DMA-API: device driver syncs"
813 " DMA memory outside allocated range "
814 "[device address=0x%016llx] "
815 "[allocation size=%llu bytes] [sync offset=%llu] "
816 "[sync size=%llu]\n", entry
->dev_addr
, entry
->size
,
820 if (direction
!= entry
->direction
) {
821 err_printk(dev
, entry
, "DMA-API: device driver syncs "
822 "DMA memory with different direction "
823 "[device address=0x%016llx] [size=%llu bytes] "
824 "[mapped with %s] [synced with %s]\n",
825 (unsigned long long)addr
, entry
->size
,
826 dir2name
[entry
->direction
],
827 dir2name
[direction
]);
830 if (entry
->direction
== DMA_BIDIRECTIONAL
)
833 if (to_cpu
&& !(entry
->direction
== DMA_FROM_DEVICE
) &&
834 !(direction
== DMA_TO_DEVICE
))
835 err_printk(dev
, entry
, "DMA-API: device driver syncs "
836 "device read-only DMA memory for cpu "
837 "[device address=0x%016llx] [size=%llu bytes] "
838 "[mapped with %s] [synced with %s]\n",
839 (unsigned long long)addr
, entry
->size
,
840 dir2name
[entry
->direction
],
841 dir2name
[direction
]);
843 if (!to_cpu
&& !(entry
->direction
== DMA_TO_DEVICE
) &&
844 !(direction
== DMA_FROM_DEVICE
))
845 err_printk(dev
, entry
, "DMA-API: device driver syncs "
846 "device write-only DMA memory to device "
847 "[device address=0x%016llx] [size=%llu bytes] "
848 "[mapped with %s] [synced with %s]\n",
849 (unsigned long long)addr
, entry
->size
,
850 dir2name
[entry
->direction
],
851 dir2name
[direction
]);
854 put_hash_bucket(bucket
, &flags
);
858 void debug_dma_map_page(struct device
*dev
, struct page
*page
, size_t offset
,
859 size_t size
, int direction
, dma_addr_t dma_addr
,
862 struct dma_debug_entry
*entry
;
864 if (unlikely(global_disable
))
867 if (unlikely(dma_mapping_error(dev
, dma_addr
)))
870 entry
= dma_entry_alloc();
875 entry
->type
= dma_debug_page
;
876 entry
->paddr
= page_to_phys(page
) + offset
;
877 entry
->dev_addr
= dma_addr
;
879 entry
->direction
= direction
;
882 entry
->type
= dma_debug_single
;
884 if (!PageHighMem(page
)) {
885 void *addr
= ((char *)page_address(page
)) + offset
;
886 check_for_stack(dev
, addr
);
887 check_for_illegal_area(dev
, addr
, size
);
890 add_dma_entry(entry
);
892 EXPORT_SYMBOL(debug_dma_map_page
);
894 void debug_dma_unmap_page(struct device
*dev
, dma_addr_t addr
,
895 size_t size
, int direction
, bool map_single
)
897 struct dma_debug_entry ref
= {
898 .type
= dma_debug_page
,
902 .direction
= direction
,
905 if (unlikely(global_disable
))
909 ref
.type
= dma_debug_single
;
913 EXPORT_SYMBOL(debug_dma_unmap_page
);
915 void debug_dma_map_sg(struct device
*dev
, struct scatterlist
*sg
,
916 int nents
, int mapped_ents
, int direction
)
918 struct dma_debug_entry
*entry
;
919 struct scatterlist
*s
;
922 if (unlikely(global_disable
))
925 for_each_sg(sg
, s
, mapped_ents
, i
) {
926 entry
= dma_entry_alloc();
930 entry
->type
= dma_debug_sg
;
932 entry
->paddr
= sg_phys(s
);
933 entry
->size
= s
->length
;
934 entry
->dev_addr
= s
->dma_address
;
935 entry
->direction
= direction
;
936 entry
->sg_call_ents
= nents
;
937 entry
->sg_mapped_ents
= mapped_ents
;
939 if (!PageHighMem(sg_page(s
))) {
940 check_for_stack(dev
, sg_virt(s
));
941 check_for_illegal_area(dev
, sg_virt(s
), s
->length
);
944 add_dma_entry(entry
);
947 EXPORT_SYMBOL(debug_dma_map_sg
);
949 void debug_dma_unmap_sg(struct device
*dev
, struct scatterlist
*sglist
,
952 struct dma_debug_entry
*entry
;
953 struct scatterlist
*s
;
954 int mapped_ents
= 0, i
;
957 if (unlikely(global_disable
))
960 for_each_sg(sglist
, s
, nelems
, i
) {
962 struct dma_debug_entry ref
= {
963 .type
= dma_debug_sg
,
966 .dev_addr
= s
->dma_address
,
972 if (mapped_ents
&& i
>= mapped_ents
)
975 if (mapped_ents
== 0) {
976 struct hash_bucket
*bucket
;
977 ref
.sg_call_ents
= nelems
;
978 bucket
= get_hash_bucket(&ref
, &flags
);
979 entry
= hash_bucket_find(bucket
, &ref
);
981 mapped_ents
= entry
->sg_mapped_ents
;
982 put_hash_bucket(bucket
, &flags
);
988 EXPORT_SYMBOL(debug_dma_unmap_sg
);
990 void debug_dma_alloc_coherent(struct device
*dev
, size_t size
,
991 dma_addr_t dma_addr
, void *virt
)
993 struct dma_debug_entry
*entry
;
995 if (unlikely(global_disable
))
998 if (unlikely(virt
== NULL
))
1001 entry
= dma_entry_alloc();
1005 entry
->type
= dma_debug_coherent
;
1007 entry
->paddr
= virt_to_phys(virt
);
1009 entry
->dev_addr
= dma_addr
;
1010 entry
->direction
= DMA_BIDIRECTIONAL
;
1012 add_dma_entry(entry
);
1014 EXPORT_SYMBOL(debug_dma_alloc_coherent
);
1016 void debug_dma_free_coherent(struct device
*dev
, size_t size
,
1017 void *virt
, dma_addr_t addr
)
1019 struct dma_debug_entry ref
= {
1020 .type
= dma_debug_coherent
,
1022 .paddr
= virt_to_phys(virt
),
1025 .direction
= DMA_BIDIRECTIONAL
,
1028 if (unlikely(global_disable
))
1033 EXPORT_SYMBOL(debug_dma_free_coherent
);
1035 void debug_dma_sync_single_for_cpu(struct device
*dev
, dma_addr_t dma_handle
,
1036 size_t size
, int direction
)
1038 if (unlikely(global_disable
))
1041 check_sync(dev
, dma_handle
, size
, 0, direction
, true);
1043 EXPORT_SYMBOL(debug_dma_sync_single_for_cpu
);
1045 void debug_dma_sync_single_for_device(struct device
*dev
,
1046 dma_addr_t dma_handle
, size_t size
,
1049 if (unlikely(global_disable
))
1052 check_sync(dev
, dma_handle
, size
, 0, direction
, false);
1054 EXPORT_SYMBOL(debug_dma_sync_single_for_device
);
1056 void debug_dma_sync_single_range_for_cpu(struct device
*dev
,
1057 dma_addr_t dma_handle
,
1058 unsigned long offset
, size_t size
,
1061 if (unlikely(global_disable
))
1064 check_sync(dev
, dma_handle
, size
, offset
, direction
, true);
1066 EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu
);
1068 void debug_dma_sync_single_range_for_device(struct device
*dev
,
1069 dma_addr_t dma_handle
,
1070 unsigned long offset
,
1071 size_t size
, int direction
)
1073 if (unlikely(global_disable
))
1076 check_sync(dev
, dma_handle
, size
, offset
, direction
, false);
1078 EXPORT_SYMBOL(debug_dma_sync_single_range_for_device
);
1080 void debug_dma_sync_sg_for_cpu(struct device
*dev
, struct scatterlist
*sg
,
1081 int nelems
, int direction
)
1083 struct scatterlist
*s
;
1086 if (unlikely(global_disable
))
1089 for_each_sg(sg
, s
, nelems
, i
) {
1090 check_sync(dev
, s
->dma_address
, s
->dma_length
, 0,
1094 EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu
);
1096 void debug_dma_sync_sg_for_device(struct device
*dev
, struct scatterlist
*sg
,
1097 int nelems
, int direction
)
1099 struct scatterlist
*s
;
1102 if (unlikely(global_disable
))
1105 for_each_sg(sg
, s
, nelems
, i
) {
1106 check_sync(dev
, s
->dma_address
, s
->dma_length
, 0,
1110 EXPORT_SYMBOL(debug_dma_sync_sg_for_device
);
1112 static int __init
dma_debug_driver_setup(char *str
)
1116 for (i
= 0; i
< NAME_MAX_LEN
- 1; ++i
, ++str
) {
1117 current_driver_name
[i
] = *str
;
1122 if (current_driver_name
[0])
1123 printk(KERN_INFO
"DMA-API: enable driver filter for "
1124 "driver [%s]\n", current_driver_name
);
1129 __setup("dma_debug_driver=", dma_debug_driver_setup
);