dma-debug: comment style fixes
lib/dma-debug.c
1 /*
2 * Copyright (C) 2008 Advanced Micro Devices, Inc.
3 *
4 * Author: Joerg Roedel <joerg.roedel@amd.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20 #include <linux/scatterlist.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/stacktrace.h>
23 #include <linux/dma-debug.h>
24 #include <linux/spinlock.h>
25 #include <linux/debugfs.h>
26 #include <linux/uaccess.h>
27 #include <linux/device.h>
28 #include <linux/types.h>
29 #include <linux/sched.h>
30 #include <linux/ctype.h>
31 #include <linux/list.h>
32 #include <linux/slab.h>
33
34 #include <asm/sections.h>
35
36 #define HASH_SIZE 1024ULL
37 #define HASH_FN_SHIFT 13
38 #define HASH_FN_MASK (HASH_SIZE - 1)
39
40 enum {
41 dma_debug_single,
42 dma_debug_page,
43 dma_debug_sg,
44 dma_debug_coherent,
45 };
46
47 #define DMA_DEBUG_STACKTRACE_ENTRIES 5
48
49 struct dma_debug_entry {
50 struct list_head list;
51 struct device *dev;
52 int type;
53 phys_addr_t paddr;
54 u64 dev_addr;
55 u64 size;
56 int direction;
57 int sg_call_ents;
58 int sg_mapped_ents;
59 #ifdef CONFIG_STACKTRACE
60 struct stack_trace stacktrace;
61 unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
62 #endif
63 };
64
65 struct hash_bucket {
66 struct list_head list;
67 spinlock_t lock;
68 } ____cacheline_aligned_in_smp;
69
70 /* Hash list to save the allocated dma addresses */
71 static struct hash_bucket dma_entry_hash[HASH_SIZE];
72 /* List of pre-allocated dma_debug_entry's */
73 static LIST_HEAD(free_entries);
74 /* Lock for the list above */
75 static DEFINE_SPINLOCK(free_entries_lock);
76
77 /* Global disable flag - will be set in case of an error */
78 static bool global_disable __read_mostly;
79
80 /* Global error count */
81 static u32 error_count;
82
83 /* Global flag to show all errors */
84 static u32 show_all_errors __read_mostly;
85 /* Number of errors to show */
86 static u32 show_num_errors = 1;
87
88 static u32 num_free_entries;
89 static u32 min_free_entries;
90 static u32 nr_total_entries;
91
92 /* number of preallocated entries requested by kernel cmdline */
93 static u32 req_entries;
94
95 /* debugfs dentries for the variables above */
96 static struct dentry *dma_debug_dent __read_mostly;
97 static struct dentry *global_disable_dent __read_mostly;
98 static struct dentry *error_count_dent __read_mostly;
99 static struct dentry *show_all_errors_dent __read_mostly;
100 static struct dentry *show_num_errors_dent __read_mostly;
101 static struct dentry *num_free_entries_dent __read_mostly;
102 static struct dentry *min_free_entries_dent __read_mostly;
103 static struct dentry *filter_dent __read_mostly;
104
105 /* per-driver filter related state */
106
107 #define NAME_MAX_LEN 64
108
109 static char current_driver_name[NAME_MAX_LEN] __read_mostly;
110 static struct device_driver *current_driver __read_mostly;
111
112 static DEFINE_RWLOCK(driver_name_lock);
113
114 static const char *type2name[4] = { "single", "page",
115 "scather-gather", "coherent" };
116
117 static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
118 "DMA_FROM_DEVICE", "DMA_NONE" };
119
120 /* little merge helper - remove it after the merge window */
121 #ifndef BUS_NOTIFY_UNBOUND_DRIVER
122 #define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
123 #endif
124
125 /*
126 * The access to some variables in this macro is racy. We can't use atomic_t
127 * here because all these variables are exported to debugfs. Some of them are
128 * even writeable. This is also why a lock won't help much. But anyway,
129 * the races are no big deal. Here is why:
130 *
131 * error_count: the addition is racy, but the worst thing that can happen is
132 * that we don't count some errors
133 * show_num_errors: the subtraction is racy. Also no big deal because in
134 * worst case this will result in one warning more in the
135 * system log than the user configured. This variable is
136 * writeable via debugfs.
137 */
138 static inline void dump_entry_trace(struct dma_debug_entry *entry)
139 {
140 #ifdef CONFIG_STACKTRACE
141 if (entry) {
142 printk(KERN_WARNING "Mapped at:\n");
143 print_stack_trace(&entry->stacktrace, 0);
144 }
145 #endif
146 }
147
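/*
 * Decide whether errors for a device should be reported. With no driver
 * filter set every device passes; otherwise only devices bound to the
 * filtered driver do.
 */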
148 static bool driver_filter(struct device *dev)
149 {
150 /* driver filter off */
151 if (likely(!current_driver_name[0]))
152 return true;
153
154 /* driver filter on and initialized */
155 if (current_driver && dev->driver == current_driver)
156 return true;
157
158 /* driver filter on but not yet initialized */
159 if (!current_driver && current_driver_name[0]) {
160 struct device_driver *drv = get_driver(dev->driver);
161 unsigned long flags;
162 bool ret = false;
163
164 if (!drv)
165 return false;
166
167 /* lock to protect against change of current_driver_name */
168 read_lock_irqsave(&driver_name_lock, flags);
169
170 if (drv->name &&
171 strncmp(current_driver_name, drv->name,
172 NAME_MAX_LEN-1) == 0) {
173 current_driver = drv;
174 ret = true;
175 }
176
177 read_unlock_irqrestore(&driver_name_lock, flags);
178 put_driver(drv);
179
180 return ret;
181 }
182
183 return false;
184 }
185
186 #define err_printk(dev, entry, format, arg...) do { \
187 error_count += 1; \
188 if (driver_filter(dev) && \
189 (show_all_errors || show_num_errors > 0)) { \
190 WARN(1, "%s %s: " format, \
191 dev_driver_string(dev), \
192 dev_name(dev) , ## arg); \
193 dump_entry_trace(entry); \
194 } \
195 if (!show_all_errors && show_num_errors > 0) \
196 show_num_errors -= 1; \
197 } while (0);
198
199 /*
200 * Hash related functions
201 *
202 * Every DMA-API request is saved into a struct dma_debug_entry. To
203 * have quick access to these structs they are stored in a hash table.
204 */
205 static int hash_fn(struct dma_debug_entry *entry)
206 {
207 /*
208 * Hash function is based on the dma address.
209 * We use bits 13-22 here as the index into the hash.
210 */
211 return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
212 }
213
214 /*
215 * Request exclusive access to a hash bucket for a given dma_debug_entry.
216 */
217 static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
218 unsigned long *flags)
219 {
220 int idx = hash_fn(entry);
221 unsigned long __flags;
222
223 spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
224 *flags = __flags;
225 return &dma_entry_hash[idx];
226 }
227
228 /*
229 * Give up exclusive access to the hash bucket
230 */
231 static void put_hash_bucket(struct hash_bucket *bucket,
232 unsigned long *flags)
233 {
234 unsigned long __flags = *flags;
235
236 spin_unlock_irqrestore(&bucket->lock, __flags);
237 }
238
239 /*
240 * Search a given entry in the hash bucket list
241 */
242 static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
243 struct dma_debug_entry *ref)
244 {
245 struct dma_debug_entry *entry, *ret = NULL;
246 int matches = 0, match_lvl, last_lvl = 0;
247
248 list_for_each_entry(entry, &bucket->list, list) {
249 if ((entry->dev_addr != ref->dev_addr) ||
250 (entry->dev != ref->dev))
251 continue;
252
253 /*
254 * Some drivers map the same physical address multiple
255 * times. Without a hardware IOMMU this results in the
256 * same device addresses being put into the dma-debug
257 * hash multiple times too. This can result in false
258 * positives being reported. Therefore we implement a
259 * best-fit algorithm here which returns the entry from
260 * the hash which fits best to the reference value
261 * instead of the first-fit.
262 */
263 matches += 1;
264 match_lvl = 0;
265 if (entry->size == ref->size) ++match_lvl;
266 if (entry->type == ref->type) ++match_lvl;
267 if (entry->direction == ref->direction) ++match_lvl;
268
269 if (match_lvl == 3) {
270 /* perfect-fit - return the result */
271 return entry;
272 } else if (match_lvl > last_lvl) {
273 /*
274 * We found an entry that fits better than the
275 * previous one
276 */
277 last_lvl = match_lvl;
278 ret = entry;
279 }
280 }
281
282 /*
283 * If we have multiple matches but no perfect-fit, just return
284 * NULL.
285 */
286 ret = (matches == 1) ? ret : NULL;
287
288 return ret;
289 }
290
291 /*
292 * Add an entry to a hash bucket
293 */
294 static void hash_bucket_add(struct hash_bucket *bucket,
295 struct dma_debug_entry *entry)
296 {
297 list_add_tail(&entry->list, &bucket->list);
298 }
299
300 /*
301 * Remove entry from a hash bucket list
302 */
303 static void hash_bucket_del(struct dma_debug_entry *entry)
304 {
305 list_del(&entry->list);
306 }
307
308 /*
309 * Dump mapping entries for debugging purposes
310 */
311 void debug_dma_dump_mappings(struct device *dev)
312 {
313 int idx;
314
315 for (idx = 0; idx < HASH_SIZE; idx++) {
316 struct hash_bucket *bucket = &dma_entry_hash[idx];
317 struct dma_debug_entry *entry;
318 unsigned long flags;
319
320 spin_lock_irqsave(&bucket->lock, flags);
321
322 list_for_each_entry(entry, &bucket->list, list) {
323 if (!dev || dev == entry->dev) {
324 dev_info(entry->dev,
325 "%s idx %d P=%Lx D=%Lx L=%Lx %s\n",
326 type2name[entry->type], idx,
327 (unsigned long long)entry->paddr,
328 entry->dev_addr, entry->size,
329 dir2name[entry->direction]);
330 }
331 }
332
333 spin_unlock_irqrestore(&bucket->lock, flags);
334 }
335 }
336 EXPORT_SYMBOL(debug_dma_dump_mappings);
337
338 /*
339 * Wrapper function for adding an entry to the hash.
340 * This function takes care of locking itself.
341 */
342 static void add_dma_entry(struct dma_debug_entry *entry)
343 {
344 struct hash_bucket *bucket;
345 unsigned long flags;
346
347 bucket = get_hash_bucket(entry, &flags);
348 hash_bucket_add(bucket, entry);
349 put_hash_bucket(bucket, &flags);
350 }
351
352 static struct dma_debug_entry *__dma_entry_alloc(void)
353 {
354 struct dma_debug_entry *entry;
355
356 entry = list_entry(free_entries.next, struct dma_debug_entry, list);
357 list_del(&entry->list);
358 memset(entry, 0, sizeof(*entry));
359
360 num_free_entries -= 1;
361 if (num_free_entries < min_free_entries)
362 min_free_entries = num_free_entries;
363
364 return entry;
365 }
366
367 /* struct dma_debug_entry allocator
368 *
369 * The next two functions implement the allocator for
370 * struct dma_debug_entry structs.
371 */
372 static struct dma_debug_entry *dma_entry_alloc(void)
373 {
374 struct dma_debug_entry *entry = NULL;
375 unsigned long flags;
376
377 spin_lock_irqsave(&free_entries_lock, flags);
378
379 if (list_empty(&free_entries)) {
380 printk(KERN_ERR "DMA-API: debugging out of memory "
381 "- disabling\n");
382 global_disable = true;
383 goto out;
384 }
385
386 entry = __dma_entry_alloc();
387
388 #ifdef CONFIG_STACKTRACE
389 entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
390 entry->stacktrace.entries = entry->st_entries;
391 entry->stacktrace.skip = 2;
392 save_stack_trace(&entry->stacktrace);
393 #endif
394
395 out:
396 spin_unlock_irqrestore(&free_entries_lock, flags);
397
398 return entry;
399 }
400
401 static void dma_entry_free(struct dma_debug_entry *entry)
402 {
403 unsigned long flags;
404
405 /*
406 * add to beginning of the list - this way the entries are
407 * more likely to be cache hot when they are reallocated.
408 */
409 spin_lock_irqsave(&free_entries_lock, flags);
410 list_add(&entry->list, &free_entries);
411 num_free_entries += 1;
412 spin_unlock_irqrestore(&free_entries_lock, flags);
413 }
414
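/*
 * Resize the pool of preallocated entries at runtime. Returns 0 when the
 * requested number of entries could be reached, 1 otherwise.
 */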
415 int dma_debug_resize_entries(u32 num_entries)
416 {
417 int i, delta, ret = 0;
418 unsigned long flags;
419 struct dma_debug_entry *entry;
420 LIST_HEAD(tmp);
421
422 spin_lock_irqsave(&free_entries_lock, flags);
423
424 if (nr_total_entries < num_entries) {
425 delta = num_entries - nr_total_entries;
426
427 spin_unlock_irqrestore(&free_entries_lock, flags);
428
429 for (i = 0; i < delta; i++) {
430 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
431 if (!entry)
432 break;
433
434 list_add_tail(&entry->list, &tmp);
435 }
436
437 spin_lock_irqsave(&free_entries_lock, flags);
438
439 list_splice(&tmp, &free_entries);
440 nr_total_entries += i;
441 num_free_entries += i;
442 } else {
443 delta = nr_total_entries - num_entries;
444
445 for (i = 0; i < delta && !list_empty(&free_entries); i++) {
446 entry = __dma_entry_alloc();
447 kfree(entry);
448 }
449
450 nr_total_entries -= i;
451 }
452
453 if (nr_total_entries != num_entries)
454 ret = 1;
455
456 spin_unlock_irqrestore(&free_entries_lock, flags);
457
458 return ret;
459 }
460 EXPORT_SYMBOL(dma_debug_resize_entries);
461
462 /*
463 * DMA-API debugging init code
464 *
465 * The init code does two things:
466 * 1. Initialize core data structures
467 * 2. Preallocate a given number of dma_debug_entry structs
468 */
469
470 static int prealloc_memory(u32 num_entries)
471 {
472 struct dma_debug_entry *entry, *next_entry;
473 int i;
474
475 for (i = 0; i < num_entries; ++i) {
476 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
477 if (!entry)
478 goto out_err;
479
480 list_add_tail(&entry->list, &free_entries);
481 }
482
483 num_free_entries = num_entries;
484 min_free_entries = num_entries;
485
486 printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
487 num_entries);
488
489 return 0;
490
491 out_err:
492
493 list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
494 list_del(&entry->list);
495 kfree(entry);
496 }
497
498 return -ENOMEM;
499 }
500
501 static ssize_t filter_read(struct file *file, char __user *user_buf,
502 size_t count, loff_t *ppos)
503 {
504 unsigned long flags;
505 char buf[NAME_MAX_LEN + 1];
506 int len;
507
508 if (!current_driver_name[0])
509 return 0;
510
511 /*
512 * We can't copy to userspace directly because current_driver_name can
513 * only be read under the driver_name_lock with irqs disabled. So
514 * create a temporary copy first.
515 */
516 read_lock_irqsave(&driver_name_lock, flags);
517 len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
518 read_unlock_irqrestore(&driver_name_lock, flags);
519
520 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
521 }
522
523 static ssize_t filter_write(struct file *file, const char __user *userbuf,
524 size_t count, loff_t *ppos)
525 {
526 unsigned long flags;
527 char buf[NAME_MAX_LEN];
528 size_t len = NAME_MAX_LEN - 1;
529 int i;
530
531 /*
532 * We can't copy from userspace directly. Access to
533 * current_driver_name is protected with a write_lock with irqs
534 * disabled. Since copy_from_user can fault and may sleep we
535 * need to copy to temporary buffer first
536 */
537 len = min(count, len);
538 if (copy_from_user(buf, userbuf, len))
539 return -EFAULT;
540
541 buf[len] = 0;
542
543 write_lock_irqsave(&driver_name_lock, flags);
544
545 /*
546 * Now handle the string we got from userspace very carefully.
547 * The rules are:
548 * - only use the first token we got
549 * - token delimiter is everything looking like a space
550 * character (' ', '\n', '\t' ...)
551 *
552 */
553 if (!isalnum(buf[0])) {
554 /*
555 * If the first character userspace gave us is not
556 * alphanumerical then assume the filter should be
557 * switched off.
558 */
559 if (current_driver_name[0])
560 printk(KERN_INFO "DMA-API: switching off dma-debug "
561 "driver filter\n");
562 current_driver_name[0] = 0;
563 current_driver = NULL;
564 goto out_unlock;
565 }
566
567 /*
568 * Now parse out the first token and use it as the name for the
569 * driver to filter for.
570 */
571 for (i = 0; i < NAME_MAX_LEN; ++i) {
572 current_driver_name[i] = buf[i];
573 if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
574 break;
575 }
576 current_driver_name[i] = 0;
577 current_driver = NULL;
578
579 printk(KERN_INFO "DMA-API: enable driver filter for driver [%s]\n",
580 current_driver_name);
581
582 out_unlock:
583 write_unlock_irqrestore(&driver_name_lock, flags);
584
585 return count;
586 }
587
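/* file operations for the dma-api/driver_filter debugfs file */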
588 static const struct file_operations filter_fops = {
589 .read = filter_read,
590 .write = filter_write,
591 };
592
593 static int dma_debug_fs_init(void)
594 {
595 dma_debug_dent = debugfs_create_dir("dma-api", NULL);
596 if (!dma_debug_dent) {
597 printk(KERN_ERR "DMA-API: can not create debugfs directory\n");
598 return -ENOMEM;
599 }
600
601 global_disable_dent = debugfs_create_bool("disabled", 0444,
602 dma_debug_dent,
603 (u32 *)&global_disable);
604 if (!global_disable_dent)
605 goto out_err;
606
607 error_count_dent = debugfs_create_u32("error_count", 0444,
608 dma_debug_dent, &error_count);
609 if (!error_count_dent)
610 goto out_err;
611
612 show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
613 dma_debug_dent,
614 &show_all_errors);
615 if (!show_all_errors_dent)
616 goto out_err;
617
618 show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
619 dma_debug_dent,
620 &show_num_errors);
621 if (!show_num_errors_dent)
622 goto out_err;
623
624 num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
625 dma_debug_dent,
626 &num_free_entries);
627 if (!num_free_entries_dent)
628 goto out_err;
629
630 min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
631 dma_debug_dent,
632 &min_free_entries);
633 if (!min_free_entries_dent)
634 goto out_err;
635
636 filter_dent = debugfs_create_file("driver_filter", 0644,
637 dma_debug_dent, NULL, &filter_fops);
638 if (!filter_dent)
639 goto out_err;
640
641 return 0;
642
643 out_err:
644 debugfs_remove_recursive(dma_debug_dent);
645
646 return -ENOMEM;
647 }
648
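/* Count the DMA mappings currently recorded for a device */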
649 static int device_dma_allocations(struct device *dev)
650 {
651 struct dma_debug_entry *entry;
652 unsigned long flags;
653 int count = 0, i;
654
655 for (i = 0; i < HASH_SIZE; ++i) {
656 spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
657 list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
658 if (entry->dev == dev)
659 count += 1;
660 }
661 spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
662 }
663
664 return count;
665 }
666
667 static int dma_debug_device_change(struct notifier_block *nb,
668 unsigned long action, void *data)
669 {
670 struct device *dev = data;
671 int count;
672
673
674 switch (action) {
675 case BUS_NOTIFY_UNBOUND_DRIVER:
676 count = device_dma_allocations(dev);
677 if (count == 0)
678 break;
679 err_printk(dev, NULL, "DMA-API: device driver has pending "
680 "DMA allocations while released from device "
681 "[count=%d]\n", count);
682 break;
683 default:
684 break;
685 }
686
687 return 0;
688 }
689
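/*
 * Register a notifier on the given bus so that pending DMA mappings can
 * be reported when a driver is unbound from one of its devices.
 */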
690 void dma_debug_add_bus(struct bus_type *bus)
691 {
692 struct notifier_block *nb;
693
694 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
695 if (nb == NULL) {
696 printk(KERN_ERR "dma_debug_add_bus: out of memory\n");
697 return;
698 }
699
700 nb->notifier_call = dma_debug_device_change;
701
702 bus_register_notifier(bus, nb);
703 }
704
705 /*
706 * Let the architectures decide how many entries should be preallocated.
707 */
708 void dma_debug_init(u32 num_entries)
709 {
710 int i;
711
712 if (global_disable)
713 return;
714
715 for (i = 0; i < HASH_SIZE; ++i) {
716 INIT_LIST_HEAD(&dma_entry_hash[i].list);
717 spin_lock_init(&dma_entry_hash[i].lock);
718 }
719
720 if (dma_debug_fs_init() != 0) {
721 printk(KERN_ERR "DMA-API: error creating debugfs entries "
722 "- disabling\n");
723 global_disable = true;
724
725 return;
726 }
727
728 if (req_entries)
729 num_entries = req_entries;
730
731 if (prealloc_memory(num_entries) != 0) {
732 printk(KERN_ERR "DMA-API: debugging out of memory error "
733 "- disabled\n");
734 global_disable = true;
735
736 return;
737 }
738
739 nr_total_entries = num_free_entries;
740
741 printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
742 }
743
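/*
 * Handle the dma_debug= kernel command line option;
 * dma_debug=off disables the checks completely.
 */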
744 static __init int dma_debug_cmdline(char *str)
745 {
746 if (!str)
747 return -EINVAL;
748
749 if (strncmp(str, "off", 3) == 0) {
750 printk(KERN_INFO "DMA-API: debugging disabled on kernel "
751 "command line\n");
752 global_disable = true;
753 }
754
755 return 0;
756 }
757
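/*
 * Handle the dma_debug_entries= kernel command line option which
 * overrides the number of preallocated entries.
 */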
758 static __init int dma_debug_entries_cmdline(char *str)
759 {
760 int res;
761
762 if (!str)
763 return -EINVAL;
764
765 res = get_option(&str, &req_entries);
766
767 if (!res)
768 req_entries = 0;
769
770 return 0;
771 }
772
773 __setup("dma_debug=", dma_debug_cmdline);
774 __setup("dma_debug_entries=", dma_debug_entries_cmdline);
775
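/*
 * Look up the mapping referenced by an unmap request and warn about
 * mismatches in size, mapping type, CPU address, scatterlist entry count
 * or DMA direction before the entry is released.
 */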
776 static void check_unmap(struct dma_debug_entry *ref)
777 {
778 struct dma_debug_entry *entry;
779 struct hash_bucket *bucket;
780 unsigned long flags;
781
782 if (dma_mapping_error(ref->dev, ref->dev_addr)) {
783 err_printk(ref->dev, NULL, "DMA-API: device driver tries "
784 "to free an invalid DMA memory address\n");
785 return;
786 }
787
788 bucket = get_hash_bucket(ref, &flags);
789 entry = hash_bucket_find(bucket, ref);
790
791 if (!entry) {
792 err_printk(ref->dev, NULL, "DMA-API: device driver tries "
793 "to free DMA memory it has not allocated "
794 "[device address=0x%016llx] [size=%llu bytes]\n",
795 ref->dev_addr, ref->size);
796 goto out;
797 }
798
799 if (ref->size != entry->size) {
800 err_printk(ref->dev, entry, "DMA-API: device driver frees "
801 "DMA memory with different size "
802 "[device address=0x%016llx] [map size=%llu bytes] "
803 "[unmap size=%llu bytes]\n",
804 ref->dev_addr, entry->size, ref->size);
805 }
806
807 if (ref->type != entry->type) {
808 err_printk(ref->dev, entry, "DMA-API: device driver frees "
809 "DMA memory with wrong function "
810 "[device address=0x%016llx] [size=%llu bytes] "
811 "[mapped as %s] [unmapped as %s]\n",
812 ref->dev_addr, ref->size,
813 type2name[entry->type], type2name[ref->type]);
814 } else if ((entry->type == dma_debug_coherent) &&
815 (ref->paddr != entry->paddr)) {
816 err_printk(ref->dev, entry, "DMA-API: device driver frees "
817 "DMA memory with different CPU address "
818 "[device address=0x%016llx] [size=%llu bytes] "
819 "[cpu alloc address=%p] [cpu free address=%p]",
820 ref->dev_addr, ref->size,
821 (void *)entry->paddr, (void *)ref->paddr);
822 }
823
824 if (ref->sg_call_ents && ref->type == dma_debug_sg &&
825 ref->sg_call_ents != entry->sg_call_ents) {
826 err_printk(ref->dev, entry, "DMA-API: device driver frees "
827 "DMA sg list with different entry count "
828 "[map count=%d] [unmap count=%d]\n",
829 entry->sg_call_ents, ref->sg_call_ents);
830 }
831
832 /*
833 * This may not be a bug in reality - but most implementations of the
834 * DMA API don't handle this properly, so check for it here
835 */
836 if (ref->direction != entry->direction) {
837 err_printk(ref->dev, entry, "DMA-API: device driver frees "
838 "DMA memory with different direction "
839 "[device address=0x%016llx] [size=%llu bytes] "
840 "[mapped with %s] [unmapped with %s]\n",
841 ref->dev_addr, ref->size,
842 dir2name[entry->direction],
843 dir2name[ref->direction]);
844 }
845
846 hash_bucket_del(entry);
847 dma_entry_free(entry);
848
849 out:
850 put_hash_bucket(bucket, &flags);
851 }
852
853 static void check_for_stack(struct device *dev, void *addr)
854 {
855 if (object_is_on_stack(addr))
856 err_printk(dev, NULL, "DMA-API: device driver maps memory from "
857 "stack [addr=%p]\n", addr);
858 }
859
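/* Check whether the area [addr, addr + size) overlaps [start, end) */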
860 static inline bool overlap(void *addr, u64 size, void *start, void *end)
861 {
862 void *addr2 = (char *)addr + size;
863
864 return ((addr >= start && addr < end) ||
865 (addr2 >= start && addr2 < end) ||
866 ((addr < start) && (addr2 >= end)));
867 }
868
869 static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
870 {
871 if (overlap(addr, size, _text, _etext) ||
872 overlap(addr, size, __start_rodata, __end_rodata))
873 err_printk(dev, NULL, "DMA-API: device driver maps "
874 "memory from kernel text or rodata "
875 "[addr=%p] [size=%llu]\n", addr, size);
876 }
877
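/*
 * Validate a dma_sync_* request: the region must have been mapped, the
 * sync must stay inside the mapped area and the direction must be
 * compatible with the direction used when mapping.
 */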
878 static void check_sync(struct device *dev, dma_addr_t addr,
879 u64 size, u64 offset, int direction, bool to_cpu)
880 {
881 struct dma_debug_entry ref = {
882 .dev = dev,
883 .dev_addr = addr,
884 .size = size,
885 .direction = direction,
886 };
887 struct dma_debug_entry *entry;
888 struct hash_bucket *bucket;
889 unsigned long flags;
890
891 bucket = get_hash_bucket(&ref, &flags);
892
893 entry = hash_bucket_find(bucket, &ref);
894
895 if (!entry) {
896 err_printk(dev, NULL, "DMA-API: device driver tries "
897 "to sync DMA memory it has not allocated "
898 "[device address=0x%016llx] [size=%llu bytes]\n",
899 (unsigned long long)addr, size);
900 goto out;
901 }
902
903 if ((offset + size) > entry->size) {
904 err_printk(dev, entry, "DMA-API: device driver syncs"
905 " DMA memory outside allocated range "
906 "[device address=0x%016llx] "
907 "[allocation size=%llu bytes] [sync offset=%llu] "
908 "[sync size=%llu]\n", entry->dev_addr, entry->size,
909 offset, size);
910 }
911
912 if (direction != entry->direction) {
913 err_printk(dev, entry, "DMA-API: device driver syncs "
914 "DMA memory with different direction "
915 "[device address=0x%016llx] [size=%llu bytes] "
916 "[mapped with %s] [synced with %s]\n",
917 (unsigned long long)addr, entry->size,
918 dir2name[entry->direction],
919 dir2name[direction]);
920 }
921
922 if (entry->direction == DMA_BIDIRECTIONAL)
923 goto out;
924
925 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
926 !(direction == DMA_TO_DEVICE))
927 err_printk(dev, entry, "DMA-API: device driver syncs "
928 "device read-only DMA memory for cpu "
929 "[device address=0x%016llx] [size=%llu bytes] "
930 "[mapped with %s] [synced with %s]\n",
931 (unsigned long long)addr, entry->size,
932 dir2name[entry->direction],
933 dir2name[direction]);
934
935 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
936 !(direction == DMA_FROM_DEVICE))
937 err_printk(dev, entry, "DMA-API: device driver syncs "
938 "device write-only DMA memory to device "
939 "[device address=0x%016llx] [size=%llu bytes] "
940 "[mapped with %s] [synced with %s]\n",
941 (unsigned long long)addr, entry->size,
942 dir2name[entry->direction],
943 dir2name[direction]);
944
945 out:
946 put_hash_bucket(bucket, &flags);
947
948 }
949
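/*
 * Record a new page/single mapping and, for lowmem pages, check that the
 * backing memory is neither on the stack nor in kernel text or rodata.
 */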
950 void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
951 size_t size, int direction, dma_addr_t dma_addr,
952 bool map_single)
953 {
954 struct dma_debug_entry *entry;
955
956 if (unlikely(global_disable))
957 return;
958
959 if (unlikely(dma_mapping_error(dev, dma_addr)))
960 return;
961
962 entry = dma_entry_alloc();
963 if (!entry)
964 return;
965
966 entry->dev = dev;
967 entry->type = dma_debug_page;
968 entry->paddr = page_to_phys(page) + offset;
969 entry->dev_addr = dma_addr;
970 entry->size = size;
971 entry->direction = direction;
972
973 if (map_single)
974 entry->type = dma_debug_single;
975
976 if (!PageHighMem(page)) {
977 void *addr = ((char *)page_address(page)) + offset;
978 check_for_stack(dev, addr);
979 check_for_illegal_area(dev, addr, size);
980 }
981
982 add_dma_entry(entry);
983 }
984 EXPORT_SYMBOL(debug_dma_map_page);
985
986 void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
987 size_t size, int direction, bool map_single)
988 {
989 struct dma_debug_entry ref = {
990 .type = dma_debug_page,
991 .dev = dev,
992 .dev_addr = addr,
993 .size = size,
994 .direction = direction,
995 };
996
997 if (unlikely(global_disable))
998 return;
999
1000 if (map_single)
1001 ref.type = dma_debug_single;
1002
1003 check_unmap(&ref);
1004 }
1005 EXPORT_SYMBOL(debug_dma_unmap_page);
1006
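/* Record one dma-debug entry per mapped scatterlist element */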
1007 void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
1008 int nents, int mapped_ents, int direction)
1009 {
1010 struct dma_debug_entry *entry;
1011 struct scatterlist *s;
1012 int i;
1013
1014 if (unlikely(global_disable))
1015 return;
1016
1017 for_each_sg(sg, s, mapped_ents, i) {
1018 entry = dma_entry_alloc();
1019 if (!entry)
1020 return;
1021
1022 entry->type = dma_debug_sg;
1023 entry->dev = dev;
1024 entry->paddr = sg_phys(s);
1025 entry->size = sg_dma_len(s);
1026 entry->dev_addr = sg_dma_address(s);
1027 entry->direction = direction;
1028 entry->sg_call_ents = nents;
1029 entry->sg_mapped_ents = mapped_ents;
1030
1031 if (!PageHighMem(sg_page(s))) {
1032 check_for_stack(dev, sg_virt(s));
1033 check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
1034 }
1035
1036 add_dma_entry(entry);
1037 }
1038 }
1039 EXPORT_SYMBOL(debug_dma_map_sg);
1040
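/*
 * Look up the dma-debug entry for a scatterlist element and return how
 * many entries of the list were actually mapped.
 */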
1041 static int get_nr_mapped_entries(struct device *dev, struct scatterlist *s)
1042 {
1043 struct dma_debug_entry *entry;
1044 struct hash_bucket *bucket;
1045 unsigned long flags;
1046 int mapped_ents = 0;
1047 struct dma_debug_entry ref;
1048
1049 ref.dev = dev;
1050 ref.dev_addr = sg_dma_address(s);
1051 ref.size = sg_dma_len(s);
1052
1053 bucket = get_hash_bucket(&ref, &flags);
1054 entry = hash_bucket_find(bucket, &ref);
1055 if (entry)
1056 mapped_ents = entry->sg_mapped_ents;
1057 put_hash_bucket(bucket, &flags);
1058
1059 return mapped_ents;
1060 }
1061
1062 void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
1063 int nelems, int dir)
1064 {
1065 struct scatterlist *s;
1066 int mapped_ents = 0, i;
1067
1068 if (unlikely(global_disable))
1069 return;
1070
1071 for_each_sg(sglist, s, nelems, i) {
1072
1073 struct dma_debug_entry ref = {
1074 .type = dma_debug_sg,
1075 .dev = dev,
1076 .paddr = sg_phys(s),
1077 .dev_addr = sg_dma_address(s),
1078 .size = sg_dma_len(s),
1079 .direction = dir,
1080 .sg_call_ents = 0,
1081 };
1082
1083 if (mapped_ents && i >= mapped_ents)
1084 break;
1085
1086 if (!i) {
1087 ref.sg_call_ents = nelems;
1088 mapped_ents = get_nr_mapped_entries(dev, s);
1089 }
1090
1091 check_unmap(&ref);
1092 }
1093 }
1094 EXPORT_SYMBOL(debug_dma_unmap_sg);
1095
1096 void debug_dma_alloc_coherent(struct device *dev, size_t size,
1097 dma_addr_t dma_addr, void *virt)
1098 {
1099 struct dma_debug_entry *entry;
1100
1101 if (unlikely(global_disable))
1102 return;
1103
1104 if (unlikely(virt == NULL))
1105 return;
1106
1107 entry = dma_entry_alloc();
1108 if (!entry)
1109 return;
1110
1111 entry->type = dma_debug_coherent;
1112 entry->dev = dev;
1113 entry->paddr = virt_to_phys(virt);
1114 entry->size = size;
1115 entry->dev_addr = dma_addr;
1116 entry->direction = DMA_BIDIRECTIONAL;
1117
1118 add_dma_entry(entry);
1119 }
1120 EXPORT_SYMBOL(debug_dma_alloc_coherent);
1121
1122 void debug_dma_free_coherent(struct device *dev, size_t size,
1123 void *virt, dma_addr_t addr)
1124 {
1125 struct dma_debug_entry ref = {
1126 .type = dma_debug_coherent,
1127 .dev = dev,
1128 .paddr = virt_to_phys(virt),
1129 .dev_addr = addr,
1130 .size = size,
1131 .direction = DMA_BIDIRECTIONAL,
1132 };
1133
1134 if (unlikely(global_disable))
1135 return;
1136
1137 check_unmap(&ref);
1138 }
1139 EXPORT_SYMBOL(debug_dma_free_coherent);
1140
1141 void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1142 size_t size, int direction)
1143 {
1144 if (unlikely(global_disable))
1145 return;
1146
1147 check_sync(dev, dma_handle, size, 0, direction, true);
1148 }
1149 EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
1150
1151 void debug_dma_sync_single_for_device(struct device *dev,
1152 dma_addr_t dma_handle, size_t size,
1153 int direction)
1154 {
1155 if (unlikely(global_disable))
1156 return;
1157
1158 check_sync(dev, dma_handle, size, 0, direction, false);
1159 }
1160 EXPORT_SYMBOL(debug_dma_sync_single_for_device);
1161
1162 void debug_dma_sync_single_range_for_cpu(struct device *dev,
1163 dma_addr_t dma_handle,
1164 unsigned long offset, size_t size,
1165 int direction)
1166 {
1167 if (unlikely(global_disable))
1168 return;
1169
1170 check_sync(dev, dma_handle, size, offset, direction, true);
1171 }
1172 EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
1173
1174 void debug_dma_sync_single_range_for_device(struct device *dev,
1175 dma_addr_t dma_handle,
1176 unsigned long offset,
1177 size_t size, int direction)
1178 {
1179 if (unlikely(global_disable))
1180 return;
1181
1182 check_sync(dev, dma_handle, size, offset, direction, false);
1183 }
1184 EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
1185
1186 void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1187 int nelems, int direction)
1188 {
1189 struct scatterlist *s;
1190 int mapped_ents = 0, i;
1191
1192 if (unlikely(global_disable))
1193 return;
1194
1195 for_each_sg(sg, s, nelems, i) {
1196 if (!i)
1197 mapped_ents = get_nr_mapped_entries(dev, s);
1198
1199 if (i >= mapped_ents)
1200 break;
1201
1202 check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
1203 direction, true);
1204 }
1205 }
1206 EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
1207
1208 void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1209 int nelems, int direction)
1210 {
1211 struct scatterlist *s;
1212 int mapped_ents = 0, i;
1213
1214 if (unlikely(global_disable))
1215 return;
1216
1217 for_each_sg(sg, s, nelems, i) {
1218 if (!i)
1219 mapped_ents = get_nr_mapped_entries(dev, s);
1220
1221 if (i >= mapped_ents)
1222 break;
1223
1224 check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
1225 direction, false);
1226 }
1227 }
1228 EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
1229
1230 static int __init dma_debug_driver_setup(char *str)
1231 {
1232 int i;
1233
1234 for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
1235 current_driver_name[i] = *str;
1236 if (*str == 0)
1237 break;
1238 }
1239
1240 if (current_driver_name[0])
1241 printk(KERN_INFO "DMA-API: enable driver filter for "
1242 "driver [%s]\n", current_driver_name);
1243
1244
1245 return 1;
1246 }
1247 __setup("dma_debug_driver=", dma_debug_driver_setup);