lib/dma-debug.c
/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/sched/task_stack.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/sched/task.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>

#define HASH_SIZE       1024ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

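/*
 * With HASH_FN_SHIFT of 13 and HASH_SIZE of 1024 the bucket index is
 * taken from bits 13-22 of the dma address, so all mappings starting
 * within the same 8 KiB (1 << 13 byte) window of dma address space
 * land in the same bucket.
 */
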
enum {
        dma_debug_single,
        dma_debug_page,
        dma_debug_sg,
        dma_debug_coherent,
        dma_debug_resource,
};

enum map_err_types {
        MAP_ERR_CHECK_NOT_APPLICABLE,
        MAP_ERR_NOT_CHECKED,
        MAP_ERR_CHECKED,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

/**
 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
 * @list: node on pre-allocated free_entries list
 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
 * @type: single, page, sg, coherent
 * @pfn: page frame of the start address
 * @offset: offset of mapping relative to pfn
 * @size: length of the mapping
 * @direction: enum dma_data_direction
 * @sg_call_ents: 'nents' from dma_map_sg
 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
 * @map_err_type: track whether dma_mapping_error() was checked
 * @stacktrace: support backtraces when a violation is detected
 */
struct dma_debug_entry {
        struct list_head list;
        struct device    *dev;
        int              type;
        unsigned long    pfn;
        size_t           offset;
        u64              dev_addr;
        u64              size;
        int              direction;
        int              sg_call_ents;
        int              sg_mapped_ents;
        enum map_err_types  map_err_type;
#ifdef CONFIG_STACKTRACE
        struct           stack_trace stacktrace;
        unsigned long    st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

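/*
 * Each hash bucket carries its own lock. ____cacheline_aligned_in_smp
 * below keeps neighbouring buckets on separate cachelines, so taking
 * one bucket's lock does not bounce the cacheline holding the locks of
 * adjacent buckets.
 */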
struct hash_bucket {
        struct list_head list;
        spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Early initialization disable flag, set at the end of dma_debug_init */
static bool dma_debug_initialized __read_mostly;

static inline bool dma_debug_disabled(void)
{
        return global_disable || !dma_debug_initialized;
}

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/* debugfs dentry's for the stuff above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;
static struct dentry *filter_dent           __read_mostly;

/* per-driver filter related state */

#define NAME_MAX_LEN    64

static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver                    __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);

static const char *const maperr2str[] = {
        [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
        [MAP_ERR_NOT_CHECKED] = "dma map error not checked",
        [MAP_ERR_CHECKED] = "dma map error checked",
};

static const char *type2name[5] = { "single", "page",
                                    "scatter-gather", "coherent",
                                    "resource" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
                                   "DMA_FROM_DEVICE", "DMA_NONE" };

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
        if (entry) {
                pr_warning("Mapped at:\n");
                print_stack_trace(&entry->stacktrace, 0);
        }
#endif
}

static bool driver_filter(struct device *dev)
{
        struct device_driver *drv;
        unsigned long flags;
        bool ret;

        /* driver filter off */
        if (likely(!current_driver_name[0]))
                return true;

        /* driver filter on and initialized */
        if (current_driver && dev && dev->driver == current_driver)
                return true;

        /* driver filter on, but we can't filter on a NULL device... */
        if (!dev)
                return false;

        if (current_driver || !current_driver_name[0])
                return false;

        /* driver filter on but not yet initialized */
        drv = dev->driver;
        if (!drv)
                return false;

        /* lock to protect against change of current_driver_name */
        read_lock_irqsave(&driver_name_lock, flags);

        ret = false;
        if (drv->name &&
            strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
                current_driver = drv;
                ret = true;
        }

        read_unlock_irqrestore(&driver_name_lock, flags);

        return ret;
}

#define err_printk(dev, entry, format, arg...) do {                     \
                error_count += 1;                                       \
                if (driver_filter(dev) &&                               \
                    (show_all_errors || show_num_errors > 0)) {         \
                        WARN(1, "%s %s: " format,                       \
                             dev ? dev_driver_string(dev) : "NULL",     \
                             dev ? dev_name(dev) : "NULL", ## arg);     \
                        dump_entry_trace(entry);                        \
                }                                                       \
                if (!show_all_errors && show_num_errors > 0)            \
                        show_num_errors -= 1;                           \
        } while (0)

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
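/*
 * Worked example for hash_fn() below: for dev_addr 0x12345678 the
 * bucket index is (0x12345678 >> 13) & 0x3ff = 0x91a2 & 0x3ff = 0x1a2,
 * i.e. bucket 418.
 */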
static int hash_fn(struct dma_debug_entry *entry)
{
        /*
         * Hash function is based on the dma address.
         * We use bits 13-22 here as the index into the hash
         */
        return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
                                           unsigned long *flags)
        __acquires(&dma_entry_hash[idx].lock)
{
        int idx = hash_fn(entry);
        unsigned long __flags;

        spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
        *flags = __flags;
        return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
                            unsigned long *flags)
        __releases(&bucket->lock)
{
        unsigned long __flags = *flags;

        spin_unlock_irqrestore(&bucket->lock, __flags);
}

static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
        return (a->dev_addr == b->dev_addr) &&
               (a->dev == b->dev);
}

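/*
 * A "containing" match succeeds when the reference range
 * [dev_addr, dev_addr + size) lies entirely within the stored entry's
 * range. The sync checks use this, since a partial sync may legally
 * address a sub-range of a mapping.
 */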
static bool containing_match(struct dma_debug_entry *a,
                             struct dma_debug_entry *b)
{
        if (a->dev != b->dev)
                return false;

        if ((b->dev_addr <= a->dev_addr) &&
            ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
                return true;

        return false;
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
                                                  struct dma_debug_entry *ref,
                                                  match_fn match)
{
        struct dma_debug_entry *entry, *ret = NULL;
        int matches = 0, match_lvl, last_lvl = -1;

        list_for_each_entry(entry, &bucket->list, list) {
                if (!match(ref, entry))
                        continue;

                /*
                 * Some drivers map the same physical address multiple
                 * times. Without a hardware IOMMU this results in the
                 * same device addresses being put into the dma-debug
                 * hash multiple times too. This can result in false
                 * positives being reported. Therefore we implement a
                 * best-fit algorithm here which returns the entry from
                 * the hash which fits best to the reference value
                 * instead of the first-fit.
                 */
                matches += 1;
                match_lvl = 0;
                entry->size         == ref->size         ? ++match_lvl : 0;
                entry->type         == ref->type         ? ++match_lvl : 0;
                entry->direction    == ref->direction    ? ++match_lvl : 0;
                entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;

                if (match_lvl == 4) {
                        /* perfect-fit - return the result */
                        return entry;
                } else if (match_lvl > last_lvl) {
                        /*
                         * We found an entry that fits better than the
                         * previous one or it is the 1st match.
                         */
                        last_lvl = match_lvl;
                        ret      = entry;
                }
        }

        /*
         * If we have multiple matches but no perfect-fit, just return
         * NULL.
         */
        ret = (matches == 1) ? ret : NULL;

        return ret;
}

static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
                                                 struct dma_debug_entry *ref)
{
        return __hash_bucket_find(bucket, ref, exact_match);
}

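/*
 * A sync may reference an address in the middle of a mapping. Such a
 * mapping starts at a lower dma address and can therefore live in an
 * earlier hash bucket, so on a miss step back one bucket - i.e. one
 * 1 << HASH_FN_SHIFT byte window of dma address space - at a time, up
 * to the device's maximum segment size.
 */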
static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
                                                   struct dma_debug_entry *ref,
                                                   unsigned long *flags)
{
        unsigned int max_range = dma_get_max_seg_size(ref->dev);
        struct dma_debug_entry *entry, index = *ref;
        unsigned int range = 0;

        while (range <= max_range) {
                entry = __hash_bucket_find(*bucket, ref, containing_match);

                if (entry)
                        return entry;

                /*
                 * Nothing found, go back a hash bucket
                 */
                put_hash_bucket(*bucket, flags);
                range          += (1 << HASH_FN_SHIFT);
                index.dev_addr -= (1 << HASH_FN_SHIFT);
                *bucket = get_hash_bucket(&index, flags);
        }

        return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
                            struct dma_debug_entry *entry)
{
        list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
        list_del(&entry->list);
}

static unsigned long long phys_addr(struct dma_debug_entry *entry)
{
        if (entry->type == dma_debug_resource)
                return __pfn_to_phys(entry->pfn) + entry->offset;

        return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
        int idx;

        for (idx = 0; idx < HASH_SIZE; idx++) {
                struct hash_bucket *bucket = &dma_entry_hash[idx];
                struct dma_debug_entry *entry;
                unsigned long flags;

                spin_lock_irqsave(&bucket->lock, flags);

                list_for_each_entry(entry, &bucket->list, list) {
                        if (!dev || dev == entry->dev) {
                                dev_info(entry->dev,
                                         "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
                                         type2name[entry->type], idx,
                                         phys_addr(entry), entry->pfn,
                                         entry->dev_addr, entry->size,
                                         dir2name[entry->direction],
                                         maperr2str[entry->map_err_type]);
                        }
                }

                spin_unlock_irqrestore(&bucket->lock, flags);
        }
}
EXPORT_SYMBOL(debug_dma_dump_mappings);

/*
 * For each mapping (initial cacheline in the case of
 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
 * scatterlist, or the cacheline specified in dma_map_single) insert
 * into this tree using the cacheline as the key. At
 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry.  If
 * the entry already exists at insertion time add a tag as a reference
 * count for the overlapping mappings.  For now, the overlap tracking
 * just ensures that 'unmaps' balance 'maps' before marking the
 * cacheline idle, but we should also be flagging overlaps as an API
 * violation.
 *
 * Memory usage is mostly constrained by the maximum number of available
 * dma-debug entries in that we need a free dma_debug_entry before
 * inserting into the tree.  In the case of dma_map_page and
 * dma_alloc_coherent there is only one dma_debug_entry and one
 * dma_active_cacheline entry to track per event.  dma_map_sg(), on the
 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
 * entries into the tree.
 *
 * At any time debug_dma_assert_idle() can be called to trigger a
 * warning if any cachelines in the given page are in the active set.
 */
static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
static DEFINE_SPINLOCK(radix_lock);
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
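
/*
 * The per-cacheline overlap count is encoded in the radix tree tags of
 * the entry: tag i set means bit i of the count is set. Assuming the
 * usual RADIX_TREE_MAX_TAGS of 3, the count saturates at 7 overlapping
 * mappings per cacheline.
 */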

static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
{
        return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
                (entry->offset >> L1_CACHE_SHIFT);
}

static int active_cacheline_read_overlap(phys_addr_t cln)
{
        int overlap = 0, i;

        for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
                if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
                        overlap |= 1 << i;
        return overlap;
}

static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
        int i;

        if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
                return overlap;

        for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
                if (overlap & 1 << i)
                        radix_tree_tag_set(&dma_active_cacheline, cln, i);
                else
                        radix_tree_tag_clear(&dma_active_cacheline, cln, i);

        return overlap;
}

static void active_cacheline_inc_overlap(phys_addr_t cln)
{
        int overlap = active_cacheline_read_overlap(cln);

        overlap = active_cacheline_set_overlap(cln, ++overlap);

        /* If we overflowed the overlap counter then we're potentially
         * leaking dma-mappings.  Otherwise, if maps and unmaps are
         * balanced then this overflow may cause false negatives in
         * debug_dma_assert_idle() as the cacheline may be marked idle
         * prematurely.
         */
        WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
                  "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
                  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}

static int active_cacheline_dec_overlap(phys_addr_t cln)
{
        int overlap = active_cacheline_read_overlap(cln);

        return active_cacheline_set_overlap(cln, --overlap);
}

static int active_cacheline_insert(struct dma_debug_entry *entry)
{
        phys_addr_t cln = to_cacheline_number(entry);
        unsigned long flags;
        int rc;

        /* If the device is not writing memory then we don't have any
         * concerns about the cpu consuming stale data.  This mitigates
         * legitimate usages of overlapping mappings.
         */
        if (entry->direction == DMA_TO_DEVICE)
                return 0;

        spin_lock_irqsave(&radix_lock, flags);
        rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
        if (rc == -EEXIST)
                active_cacheline_inc_overlap(cln);
        spin_unlock_irqrestore(&radix_lock, flags);

        return rc;
}

static void active_cacheline_remove(struct dma_debug_entry *entry)
{
        phys_addr_t cln = to_cacheline_number(entry);
        unsigned long flags;

        /* ...mirror the insert case */
        if (entry->direction == DMA_TO_DEVICE)
                return;

        spin_lock_irqsave(&radix_lock, flags);
        /* since we are counting overlaps the final put of the
         * cacheline will occur when the overlap count is 0.
         * active_cacheline_dec_overlap() returns -1 in that case
         */
        if (active_cacheline_dec_overlap(cln) < 0)
                radix_tree_delete(&dma_active_cacheline, cln);
        spin_unlock_irqrestore(&radix_lock, flags);
}

/**
 * debug_dma_assert_idle() - assert that a page is not undergoing dma
 * @page: page to lookup in the dma_active_cacheline tree
 *
 * Place a call to this routine in cases where the cpu touching the page
 * before the dma completes (page is dma_unmapped) will lead to data
 * corruption.
 */
void debug_dma_assert_idle(struct page *page)
{
        static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
        struct dma_debug_entry *entry = NULL;
        void **results = (void **) &ents;
        unsigned int nents, i;
        unsigned long flags;
        phys_addr_t cln;

        if (dma_debug_disabled())
                return;

        if (!page)
                return;

        cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
        spin_lock_irqsave(&radix_lock, flags);
        nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
                                       CACHELINES_PER_PAGE);
        for (i = 0; i < nents; i++) {
                phys_addr_t ent_cln = to_cacheline_number(ents[i]);

                if (ent_cln == cln) {
                        entry = ents[i];
                        break;
                } else if (ent_cln >= cln + CACHELINES_PER_PAGE)
                        break;
        }
        spin_unlock_irqrestore(&radix_lock, flags);

        if (!entry)
                return;

        cln = to_cacheline_number(entry);
        err_printk(entry->dev, entry,
                   "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
                   &cln);
}

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
        struct hash_bucket *bucket;
        unsigned long flags;
        int rc;

        bucket = get_hash_bucket(entry, &flags);
        hash_bucket_add(bucket, entry);
        put_hash_bucket(bucket, &flags);

        rc = active_cacheline_insert(entry);
        if (rc == -ENOMEM) {
                pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
                global_disable = true;
        }

        /* TODO: report -EEXIST errors here as overlapping mappings are
         * not supported by the DMA API
         */
}

static struct dma_debug_entry *__dma_entry_alloc(void)
{
        struct dma_debug_entry *entry;

        entry = list_entry(free_entries.next, struct dma_debug_entry, list);
        list_del(&entry->list);
        memset(entry, 0, sizeof(*entry));

        num_free_entries -= 1;
        if (num_free_entries < min_free_entries)
                min_free_entries = num_free_entries;

        return entry;
}

/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
        struct dma_debug_entry *entry;
        unsigned long flags;

        spin_lock_irqsave(&free_entries_lock, flags);

        if (list_empty(&free_entries)) {
                global_disable = true;
                spin_unlock_irqrestore(&free_entries_lock, flags);
                pr_err("DMA-API: debugging out of memory - disabling\n");
                return NULL;
        }

        entry = __dma_entry_alloc();

        spin_unlock_irqrestore(&free_entries_lock, flags);

#ifdef CONFIG_STACKTRACE
        entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
        entry->stacktrace.entries = entry->st_entries;
        entry->stacktrace.skip = 2;
        save_stack_trace(&entry->stacktrace);
#endif

        return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
        unsigned long flags;

        active_cacheline_remove(entry);

        /*
         * add to beginning of the list - this way the entries are
         * more likely cache hot when they are reallocated.
         */
        spin_lock_irqsave(&free_entries_lock, flags);
        list_add(&entry->list, &free_entries);
        num_free_entries += 1;
        spin_unlock_irqrestore(&free_entries_lock, flags);
}

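/*
 * Grow or shrink the pool of preallocated entries at runtime. Returns
 * 0 when the pool afterwards holds exactly num_entries entries, and 1
 * when the target could not be reached (e.g. an allocation failed
 * while growing the pool).
 */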
int dma_debug_resize_entries(u32 num_entries)
{
        int i, delta, ret = 0;
        unsigned long flags;
        struct dma_debug_entry *entry;
        LIST_HEAD(tmp);

        spin_lock_irqsave(&free_entries_lock, flags);

        if (nr_total_entries < num_entries) {
                delta = num_entries - nr_total_entries;

                spin_unlock_irqrestore(&free_entries_lock, flags);

                for (i = 0; i < delta; i++) {
                        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                        if (!entry)
                                break;

                        list_add_tail(&entry->list, &tmp);
                }

                spin_lock_irqsave(&free_entries_lock, flags);

                list_splice(&tmp, &free_entries);
                nr_total_entries += i;
                num_free_entries += i;
        } else {
                delta = nr_total_entries - num_entries;

                for (i = 0; i < delta && !list_empty(&free_entries); i++) {
                        entry = __dma_entry_alloc();
                        kfree(entry);
                }

                nr_total_entries -= i;
        }

        if (nr_total_entries != num_entries)
                ret = 1;

        spin_unlock_irqrestore(&free_entries_lock, flags);

        return ret;
}
EXPORT_SYMBOL(dma_debug_resize_entries);

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static int prealloc_memory(u32 num_entries)
{
        struct dma_debug_entry *entry, *next_entry;
        int i;

        for (i = 0; i < num_entries; ++i) {
                entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry)
                        goto out_err;

                list_add_tail(&entry->list, &free_entries);
        }

        num_free_entries = num_entries;
        min_free_entries = num_entries;

        pr_info("DMA-API: preallocated %d debug entries\n", num_entries);

        return 0;

out_err:

        list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
                list_del(&entry->list);
                kfree(entry);
        }

        return -ENOMEM;
}

static ssize_t filter_read(struct file *file, char __user *user_buf,
                           size_t count, loff_t *ppos)
{
        char buf[NAME_MAX_LEN + 1];
        unsigned long flags;
        int len;

        if (!current_driver_name[0])
                return 0;

        /*
         * We can't copy to userspace directly because current_driver_name can
         * only be read under the driver_name_lock with irqs disabled. So
         * create a temporary copy first.
         */
        read_lock_irqsave(&driver_name_lock, flags);
        len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
        read_unlock_irqrestore(&driver_name_lock, flags);

        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t filter_write(struct file *file, const char __user *userbuf,
                            size_t count, loff_t *ppos)
{
        char buf[NAME_MAX_LEN];
        unsigned long flags;
        size_t len;
        int i;

        /*
         * We can't copy from userspace directly. Access to
         * current_driver_name is protected with a write_lock with irqs
         * disabled. Since copy_from_user can fault and may sleep we
         * need to copy to a temporary buffer first
         */
        len = min(count, (size_t)(NAME_MAX_LEN - 1));
        if (copy_from_user(buf, userbuf, len))
                return -EFAULT;

        buf[len] = 0;

        write_lock_irqsave(&driver_name_lock, flags);

        /*
         * Now handle the string we got from userspace very carefully.
         * The rules are:
         *         - only use the first token we got
         *         - token delimiter is everything looking like a space
         *           character (' ', '\n', '\t' ...)
         */
        if (!isalnum(buf[0])) {
                /*
                 * If the first character userspace gave us is not
                 * alphanumerical then assume the filter should be
                 * switched off.
                 */
                if (current_driver_name[0])
                        pr_info("DMA-API: switching off dma-debug driver filter\n");
                current_driver_name[0] = 0;
                current_driver = NULL;
                goto out_unlock;
        }

        /*
         * Now parse out the first token and use it as the name for the
         * driver to filter for.
         */
        for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
                current_driver_name[i] = buf[i];
                if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
                        break;
        }
        current_driver_name[i] = 0;
        current_driver = NULL;

        pr_info("DMA-API: enable driver filter for driver [%s]\n",
                current_driver_name);

out_unlock:
        write_unlock_irqrestore(&driver_name_lock, flags);

        return count;
}
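
/*
 * The filter is driven through debugfs. Assuming debugfs is mounted at
 * /sys/kernel/debug (and with e1000e just an example driver name):
 *
 *        echo e1000e > /sys/kernel/debug/dma-api/driver_filter
 *        echo ""     > /sys/kernel/debug/dma-api/driver_filter
 *
 * The first command restricts error reports to one driver; the second
 * (any string not starting with an alphanumeric character) switches
 * the filter off again.
 */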

static const struct file_operations filter_fops = {
        .read  = filter_read,
        .write = filter_write,
        .llseek = default_llseek,
};

static int dma_debug_fs_init(void)
{
        dma_debug_dent = debugfs_create_dir("dma-api", NULL);
        if (!dma_debug_dent) {
                pr_err("DMA-API: can not create debugfs directory\n");
                return -ENOMEM;
        }

        global_disable_dent = debugfs_create_bool("disabled", 0444,
                        dma_debug_dent,
                        &global_disable);
        if (!global_disable_dent)
                goto out_err;

        error_count_dent = debugfs_create_u32("error_count", 0444,
                        dma_debug_dent, &error_count);
        if (!error_count_dent)
                goto out_err;

        show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
                        dma_debug_dent,
                        &show_all_errors);
        if (!show_all_errors_dent)
                goto out_err;

        show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
                        dma_debug_dent,
                        &show_num_errors);
        if (!show_num_errors_dent)
                goto out_err;

        num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
                        dma_debug_dent,
                        &num_free_entries);
        if (!num_free_entries_dent)
                goto out_err;

        min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
                        dma_debug_dent,
                        &min_free_entries);
        if (!min_free_entries_dent)
                goto out_err;

        filter_dent = debugfs_create_file("driver_filter", 0644,
                                          dma_debug_dent, NULL, &filter_fops);
        if (!filter_dent)
                goto out_err;

        return 0;

out_err:
        debugfs_remove_recursive(dma_debug_dent);

        return -ENOMEM;
}

static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
{
        struct dma_debug_entry *entry;
        unsigned long flags;
        int count = 0, i;

        local_irq_save(flags);

        for (i = 0; i < HASH_SIZE; ++i) {
                spin_lock(&dma_entry_hash[i].lock);
                list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
                        if (entry->dev == dev) {
                                count += 1;
                                *out_entry = entry;
                        }
                }
                spin_unlock(&dma_entry_hash[i].lock);
        }

        local_irq_restore(flags);

        return count;
}

static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{
        struct device *dev = data;
        struct dma_debug_entry *uninitialized_var(entry);
        int count;

        if (dma_debug_disabled())
                return 0;

        switch (action) {
        case BUS_NOTIFY_UNBOUND_DRIVER:
                count = device_dma_allocations(dev, &entry);
                if (count == 0)
                        break;
                err_printk(dev, entry, "DMA-API: device driver has pending "
                                "DMA allocations while released from device "
                                "[count=%d]\n"
                                "One of leaked entries details: "
                                "[device address=0x%016llx] [size=%llu bytes] "
                                "[mapped with %s] [mapped as %s]\n",
                        count, entry->dev_addr, entry->size,
                        dir2name[entry->direction], type2name[entry->type]);
                break;
        default:
                break;
        }

        return 0;
}

void dma_debug_add_bus(struct bus_type *bus)
{
        struct notifier_block *nb;

        if (dma_debug_disabled())
                return;

        nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
        if (nb == NULL) {
                pr_err("dma_debug_add_bus: out of memory\n");
                return;
        }

        nb->notifier_call = dma_debug_device_change;

        bus_register_notifier(bus, nb);
}

/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
        int i;

        /* Do not use dma_debug_initialized here, since we really want to be
         * called to set dma_debug_initialized
         */
        if (global_disable)
                return;

        for (i = 0; i < HASH_SIZE; ++i) {
                INIT_LIST_HEAD(&dma_entry_hash[i].list);
                spin_lock_init(&dma_entry_hash[i].lock);
        }

        if (dma_debug_fs_init() != 0) {
                pr_err("DMA-API: error creating debugfs entries - disabling\n");
                global_disable = true;

                return;
        }

        if (req_entries)
                num_entries = req_entries;

        if (prealloc_memory(num_entries) != 0) {
                pr_err("DMA-API: debugging out of memory error - disabled\n");
                global_disable = true;

                return;
        }

        nr_total_entries = num_free_entries;

        dma_debug_initialized = true;

        pr_info("DMA-API: debugging enabled by kernel config\n");
}

static __init int dma_debug_cmdline(char *str)
{
        if (!str)
                return -EINVAL;

        if (strncmp(str, "off", 3) == 0) {
                pr_info("DMA-API: debugging disabled on kernel command line\n");
                global_disable = true;
        }

        return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
        int res;

        if (!str)
                return -EINVAL;

        res = get_option(&str, &req_entries);

        if (!res)
                req_entries = 0;

        return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);
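
/*
 * Example kernel command line usage of the two options above:
 *
 *        dma_debug=off            disable dma-debug at boot
 *        dma_debug_entries=4096   preallocate 4096 entries instead of
 *                                 the architecture's default
 */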

static void check_unmap(struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(ref, &flags);
        entry = bucket_find_exact(bucket, ref);

        if (!entry) {
                /* must drop lock before calling dma_mapping_error */
                put_hash_bucket(bucket, &flags);

                if (dma_mapping_error(ref->dev, ref->dev_addr)) {
                        err_printk(ref->dev, NULL,
                                   "DMA-API: device driver tries to free an "
                                   "invalid DMA memory address\n");
                } else {
                        err_printk(ref->dev, NULL,
                                   "DMA-API: device driver tries to free DMA "
                                   "memory it has not allocated [device "
                                   "address=0x%016llx] [size=%llu bytes]\n",
                                   ref->dev_addr, ref->size);
                }
                return;
        }

        if (ref->size != entry->size) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different size "
                           "[device address=0x%016llx] [map size=%llu bytes] "
                           "[unmap size=%llu bytes]\n",
                           ref->dev_addr, entry->size, ref->size);
        }

        if (ref->type != entry->type) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with wrong function "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped as %s] [unmapped as %s]\n",
                           ref->dev_addr, ref->size,
                           type2name[entry->type], type2name[ref->type]);
        } else if ((entry->type == dma_debug_coherent) &&
                   (phys_addr(ref) != phys_addr(entry))) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different CPU address "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[cpu alloc address=0x%016llx] "
                           "[cpu free address=0x%016llx]",
                           ref->dev_addr, ref->size,
                           phys_addr(entry),
                           phys_addr(ref));
        }

        if (ref->sg_call_ents && ref->type == dma_debug_sg &&
            ref->sg_call_ents != entry->sg_call_ents) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA sg list with different entry count "
                           "[map count=%d] [unmap count=%d]\n",
                           entry->sg_call_ents, ref->sg_call_ents);
        }

        /*
         * This may be no bug in reality - but most implementations of the
         * DMA API don't handle this properly, so check for it here
         */
        if (ref->direction != entry->direction) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different direction "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [unmapped with %s]\n",
                           ref->dev_addr, ref->size,
                           dir2name[entry->direction],
                           dir2name[ref->direction]);
        }

        /*
         * Drivers should use dma_mapping_error() to check the returned
         * addresses of dma_map_single() and dma_map_page().
         * If not, print this warning message. See Documentation/DMA-API.txt.
         */
        if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
                err_printk(ref->dev, entry,
                           "DMA-API: device driver failed to check map error "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped as %s]",
                           ref->dev_addr, ref->size,
                           type2name[entry->type]);
        }

        hash_bucket_del(entry);
        dma_entry_free(entry);

        put_hash_bucket(bucket, &flags);
}

static void check_for_stack(struct device *dev,
                            struct page *page, size_t offset)
{
        void *addr;
        struct vm_struct *stack_vm_area = task_stack_vm_area(current);

        if (!stack_vm_area) {
                /* Stack is direct-mapped. */
                if (PageHighMem(page))
                        return;
                addr = page_address(page) + offset;
                if (object_is_on_stack(addr))
                        err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [addr=%p]\n", addr);
        } else {
                /* Stack is vmalloced. */
                int i;

                for (i = 0; i < stack_vm_area->nr_pages; i++) {
                        if (page != stack_vm_area->pages[i])
                                continue;

                        addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
                        err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [probable addr=%p]\n", addr);
                        break;
                }
        }
}

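/*
 * overlap() below is true iff the half-open intervals [addr, addr + len)
 * and [start, end) intersect. For example addr=0x1000, len=0x100
 * overlaps start=0x10f0, end=0x2000 because 0x1000 + 0x100 > 0x10f0
 * and 0x1000 < 0x2000.
 */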
static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
{
        unsigned long a1 = (unsigned long)addr;
        unsigned long b1 = a1 + len;
        unsigned long a2 = (unsigned long)start;
        unsigned long b2 = (unsigned long)end;

        return !(b1 <= a2 || a1 >= b2);
}

static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
{
        if (overlap(addr, len, _stext, _etext) ||
            overlap(addr, len, __start_rodata, __end_rodata))
                err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
}

static void check_sync(struct device *dev,
                       struct dma_debug_entry *ref,
                       bool to_cpu)
{
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(ref, &flags);

        entry = bucket_find_contain(&bucket, ref, &flags);

        if (!entry) {
                err_printk(dev, NULL, "DMA-API: device driver tries "
                                "to sync DMA memory it has not allocated "
                                "[device address=0x%016llx] [size=%llu bytes]\n",
                                (unsigned long long)ref->dev_addr, ref->size);
                goto out;
        }

        if (ref->size > entry->size) {
                err_printk(dev, entry, "DMA-API: device driver syncs"
                                " DMA memory outside allocated range "
                                "[device address=0x%016llx] "
                                "[allocation size=%llu bytes] "
                                "[sync offset+size=%llu]\n",
                                entry->dev_addr, entry->size,
                                ref->size);
        }

        if (entry->direction == DMA_BIDIRECTIONAL)
                goto out;

        if (ref->direction != entry->direction) {
                err_printk(dev, entry, "DMA-API: device driver syncs "
                                "DMA memory with different direction "
                                "[device address=0x%016llx] [size=%llu bytes] "
                                "[mapped with %s] [synced with %s]\n",
                                (unsigned long long)ref->dev_addr, entry->size,
                                dir2name[entry->direction],
                                dir2name[ref->direction]);
        }

        if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
                      !(ref->direction == DMA_TO_DEVICE))
                err_printk(dev, entry, "DMA-API: device driver syncs "
                                "device read-only DMA memory for cpu "
                                "[device address=0x%016llx] [size=%llu bytes] "
                                "[mapped with %s] [synced with %s]\n",
                                (unsigned long long)ref->dev_addr, entry->size,
                                dir2name[entry->direction],
                                dir2name[ref->direction]);

        if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
                       !(ref->direction == DMA_FROM_DEVICE))
                err_printk(dev, entry, "DMA-API: device driver syncs "
                                "device write-only DMA memory to device "
                                "[device address=0x%016llx] [size=%llu bytes] "
                                "[mapped with %s] [synced with %s]\n",
                                (unsigned long long)ref->dev_addr, entry->size,
                                dir2name[entry->direction],
                                dir2name[ref->direction]);

        if (ref->sg_call_ents && ref->type == dma_debug_sg &&
            ref->sg_call_ents != entry->sg_call_ents) {
                err_printk(ref->dev, entry, "DMA-API: device driver syncs "
                           "DMA sg list with different entry count "
                           "[map count=%d] [sync count=%d]\n",
                           entry->sg_call_ents, ref->sg_call_ents);
        }

out:
        put_hash_bucket(bucket, &flags);
}

void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
                        size_t size, int direction, dma_addr_t dma_addr,
                        bool map_single)
{
        struct dma_debug_entry *entry;

        if (unlikely(dma_debug_disabled()))
                return;

        if (dma_mapping_error(dev, dma_addr))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->dev       = dev;
        entry->type      = dma_debug_page;
        entry->pfn       = page_to_pfn(page);
        entry->offset    = offset;
        entry->dev_addr  = dma_addr;
        entry->size      = size;
        entry->direction = direction;
        entry->map_err_type = MAP_ERR_NOT_CHECKED;

        if (map_single)
                entry->type = dma_debug_single;

        check_for_stack(dev, page, offset);

        if (!PageHighMem(page)) {
                void *addr = page_address(page) + offset;

                check_for_illegal_area(dev, addr, size);
        }

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);

void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        struct dma_debug_entry ref;
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        if (unlikely(dma_debug_disabled()))
                return;

        ref.dev = dev;
        ref.dev_addr = dma_addr;
        bucket = get_hash_bucket(&ref, &flags);

        list_for_each_entry(entry, &bucket->list, list) {
                if (!exact_match(&ref, entry))
                        continue;

                /*
                 * The same physical address can be mapped multiple
                 * times. Without a hardware IOMMU this results in the
                 * same device addresses being put into the dma-debug
                 * hash multiple times too. This can result in false
                 * positives being reported. Therefore we implement a
                 * best-fit algorithm here which updates the first entry
                 * from the hash which fits the reference value and is
                 * not currently listed as being checked.
                 */
                if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
                        entry->map_err_type = MAP_ERR_CHECKED;
                        break;
                }
        }

        put_hash_bucket(bucket, &flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);
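
/*
 * A minimal sketch of the driver-side pattern that satisfies the check
 * above (buf, size and dir are placeholders for the driver's own
 * values):
 *
 *        dma_addr_t dma_addr = dma_map_single(dev, buf, size, dir);
 *        if (dma_mapping_error(dev, dma_addr))
 *                return -ENOMEM;
 *
 * With CONFIG_DMA_API_DEBUG enabled the dma_mapping_error() call ends
 * up here and marks the entry MAP_ERR_CHECKED.
 */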

void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
                          size_t size, int direction, bool map_single)
{
        struct dma_debug_entry ref = {
                .type           = dma_debug_page,
                .dev            = dev,
                .dev_addr       = addr,
                .size           = size,
                .direction      = direction,
        };

        if (unlikely(dma_debug_disabled()))
                return;

        if (map_single)
                ref.type = dma_debug_single;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
                      int nents, int mapped_ents, int direction)
{
        struct dma_debug_entry *entry;
        struct scatterlist *s;
        int i;

        if (unlikely(dma_debug_disabled()))
                return;

        for_each_sg(sg, s, mapped_ents, i) {
                entry = dma_entry_alloc();
                if (!entry)
                        return;

                entry->type           = dma_debug_sg;
                entry->dev            = dev;
                entry->pfn            = page_to_pfn(sg_page(s));
                entry->offset         = s->offset;
                entry->size           = sg_dma_len(s);
                entry->dev_addr       = sg_dma_address(s);
                entry->direction      = direction;
                entry->sg_call_ents   = nents;
                entry->sg_mapped_ents = mapped_ents;

                check_for_stack(dev, sg_page(s), s->offset);

                if (!PageHighMem(sg_page(s))) {
                        check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
                }

                add_dma_entry(entry);
        }
}
EXPORT_SYMBOL(debug_dma_map_sg);

static int get_nr_mapped_entries(struct device *dev,
                                 struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;
        int mapped_ents;

        bucket       = get_hash_bucket(ref, &flags);
        entry        = bucket_find_exact(bucket, ref);
        mapped_ents  = 0;

        if (entry)
                mapped_ents = entry->sg_mapped_ents;
        put_hash_bucket(bucket, &flags);

        return mapped_ents;
}

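/*
 * Only mapped_ents entries were added by debug_dma_map_sg(), so the
 * unmap path below fetches that count from the first scatterlist
 * element's hash entry and stops checking once it has been reached.
 */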
1454 void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
1455                         int nelems, int dir)
1456 {
1457         struct scatterlist *s;
1458         int mapped_ents = 0, i;
1459
1460         if (unlikely(dma_debug_disabled()))
1461                 return;
1462
1463         for_each_sg(sglist, s, nelems, i) {
1464
1465                 struct dma_debug_entry ref = {
1466                         .type           = dma_debug_sg,
1467                         .dev            = dev,
1468                         .pfn            = page_to_pfn(sg_page(s)),
1469                         .offset         = s->offset,
1470                         .dev_addr       = sg_dma_address(s),
1471                         .size           = sg_dma_len(s),
1472                         .direction      = dir,
1473                         .sg_call_ents   = nelems,
1474                 };
1475
1476                 if (mapped_ents && i >= mapped_ents)
1477                         break;
1478
1479                 if (!i)
1480                         mapped_ents = get_nr_mapped_entries(dev, &ref);
1481
1482                 check_unmap(&ref);
1483         }
1484 }
1485 EXPORT_SYMBOL(debug_dma_unmap_sg);

void debug_dma_alloc_coherent(struct device *dev, size_t size,
                              dma_addr_t dma_addr, void *virt)
{
        struct dma_debug_entry *entry;

        if (unlikely(dma_debug_disabled()))
                return;

        if (unlikely(virt == NULL))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->type      = dma_debug_coherent;
        entry->dev       = dev;
        entry->pfn       = page_to_pfn(virt_to_page(virt));
        entry->offset    = (size_t) virt & ~PAGE_MASK;
        entry->size      = size;
        entry->dev_addr  = dma_addr;
        entry->direction = DMA_BIDIRECTIONAL;

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);

void debug_dma_free_coherent(struct device *dev, size_t size,
                             void *virt, dma_addr_t addr)
{
        struct dma_debug_entry ref = {
                .type           = dma_debug_coherent,
                .dev            = dev,
                .pfn            = page_to_pfn(virt_to_page(virt)),
                .offset         = (size_t) virt & ~PAGE_MASK,
                .dev_addr       = addr,
                .size           = size,
                .direction      = DMA_BIDIRECTIONAL,
        };

        if (unlikely(dma_debug_disabled()))
                return;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);
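
/*
 * Illustrative sketch (not part of this file): the coherent hooks above
 * mirror a driver's coherent allocation.  Assuming a hypothetical driver
 * allocating one page:
 *
 *      dma_addr_t handle;
 *      void *cpu = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);
 *
 *      if (!cpu)
 *              return -ENOMEM;
 *       ... use the buffer ...
 *      dma_free_coherent(dev, SZ_4K, cpu, handle);
 *
 * The free is checked like an unmap: size, cpu address and dma address
 * are matched against the values recorded at allocation time.
 */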

void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
                            int direction, dma_addr_t dma_addr)
{
        struct dma_debug_entry *entry;

        if (unlikely(dma_debug_disabled()))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->type             = dma_debug_resource;
        entry->dev              = dev;
        entry->pfn              = PHYS_PFN(addr);
        entry->offset           = offset_in_page(addr);
        entry->size             = size;
        entry->dev_addr         = dma_addr;
        entry->direction        = direction;
        entry->map_err_type     = MAP_ERR_NOT_CHECKED;

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_resource);

void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
                              size_t size, int direction)
{
        struct dma_debug_entry ref = {
                .type           = dma_debug_resource,
                .dev            = dev,
                .dev_addr       = dma_addr,
                .size           = size,
                .direction      = direction,
        };

        if (unlikely(dma_debug_disabled()))
                return;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_resource);
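
/*
 * Illustrative sketch (not part of this file): resource mappings cover MMIO
 * ranges rather than RAM pages.  Assuming a hypothetical driver mapping a
 * peer device's BAR at physical address 'phys':
 *
 *      dma_addr_t handle = dma_map_resource(dev, phys, SZ_64K,
 *                                           DMA_BIDIRECTIONAL, 0);
 *      if (dma_mapping_error(dev, handle))
 *              return -EIO;
 *       ...
 *      dma_unmap_resource(dev, handle, SZ_64K, DMA_BIDIRECTIONAL, 0);
 *
 * Because map_err_type starts as MAP_ERR_NOT_CHECKED, dma-debug can warn
 * when a driver never calls dma_mapping_error() on the returned handle.
 */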

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                   size_t size, int direction)
{
        struct dma_debug_entry ref;

        if (unlikely(dma_debug_disabled()))
                return;

        ref.type         = dma_debug_single;
        ref.dev          = dev;
        ref.dev_addr     = dma_handle;
        ref.size         = size;
        ref.direction    = direction;
        ref.sg_call_ents = 0;

        check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
                                      dma_addr_t dma_handle, size_t size,
                                      int direction)
{
        struct dma_debug_entry ref;

        if (unlikely(dma_debug_disabled()))
                return;

        ref.type         = dma_debug_single;
        ref.dev          = dev;
        ref.dev_addr     = dma_handle;
        ref.size         = size;
        ref.direction    = direction;
        ref.sg_call_ents = 0;

        check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);
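
/*
 * Illustrative sketch (not part of this file): a driver reusing a streaming
 * mapping hands ownership back and forth around each transfer:
 *
 *      dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *       ... CPU reads the buffer ...
 *      dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *
 * check_sync() verifies that 'handle' refers to a live mapping and that the
 * sync direction is compatible with the direction used at map time.
 */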

void debug_dma_sync_single_range_for_cpu(struct device *dev,
                                         dma_addr_t dma_handle,
                                         unsigned long offset, size_t size,
                                         int direction)
{
        struct dma_debug_entry ref;

        if (unlikely(dma_debug_disabled()))
                return;

        ref.type         = dma_debug_single;
        ref.dev          = dev;
        ref.dev_addr     = dma_handle;
        /* The synced range ends offset + size bytes past the base handle. */
        ref.size         = offset + size;
        ref.direction    = direction;
        ref.sg_call_ents = 0;

        check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

void debug_dma_sync_single_range_for_device(struct device *dev,
                                            dma_addr_t dma_handle,
                                            unsigned long offset,
                                            size_t size, int direction)
{
        struct dma_debug_entry ref;

        if (unlikely(dma_debug_disabled()))
                return;

        ref.type         = dma_debug_single;
        ref.dev          = dev;
        ref.dev_addr     = dma_handle;
        /* The synced range ends offset + size bytes past the base handle. */
        ref.size         = offset + size;
        ref.direction    = direction;
        ref.sg_call_ents = 0;

        check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
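
/*
 * Illustrative sketch (not part of this file): partial syncs name an offset
 * into an existing mapping.  Assuming a hypothetical buffer holding a
 * header of 'hdr_len' bytes followed by a payload of 'payload_len' bytes:
 *
 *      dma_sync_single_range_for_cpu(dev, handle, hdr_len, payload_len,
 *                                    DMA_FROM_DEVICE);
 *
 * The checked extent is offset + size, so it must still fit inside the
 * region recorded when 'handle' was mapped.
 */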

void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                               int nelems, int direction)
{
        struct scatterlist *s;
        int mapped_ents = 0, i;

        if (unlikely(dma_debug_disabled()))
                return;

        for_each_sg(sg, s, nelems, i) {

                struct dma_debug_entry ref = {
                        .type           = dma_debug_sg,
                        .dev            = dev,
                        .pfn            = page_to_pfn(sg_page(s)),
                        .offset         = s->offset,
                        .dev_addr       = sg_dma_address(s),
                        .size           = sg_dma_len(s),
                        .direction      = direction,
                        .sg_call_ents   = nelems,
                };

                if (!i)
                        mapped_ents = get_nr_mapped_entries(dev, &ref);

                if (i >= mapped_ents)
                        break;

                check_sync(dev, &ref, true);
        }
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                                  int nelems, int direction)
{
        struct scatterlist *s;
        int mapped_ents = 0, i;

        if (unlikely(dma_debug_disabled()))
                return;

        for_each_sg(sg, s, nelems, i) {

                struct dma_debug_entry ref = {
                        .type           = dma_debug_sg,
                        .dev            = dev,
                        .pfn            = page_to_pfn(sg_page(s)),
                        .offset         = s->offset,
                        .dev_addr       = sg_dma_address(s),
                        .size           = sg_dma_len(s),
                        .direction      = direction,
                        .sg_call_ents   = nelems,
                };

                if (!i)
                        mapped_ents = get_nr_mapped_entries(dev, &ref);

                if (i >= mapped_ents)
                        break;

                check_sync(dev, &ref, false);
        }
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
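
/*
 * Illustrative sketch (not part of this file): scatterlist syncs take the
 * same 'nr_ents' the driver originally passed to dma_map_sg():
 *
 *      dma_sync_sg_for_cpu(dev, sglist, nr_ents, DMA_FROM_DEVICE);
 *       ... CPU touches the buffers ...
 *      dma_sync_sg_for_device(dev, sglist, nr_ents, DMA_FROM_DEVICE);
 *
 * As on unmap, the loop stops after the number of segments that were
 * actually mapped, looked up via the first segment's entry.
 */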

static int __init dma_debug_driver_setup(char *str)
{
        int i;

        for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
                current_driver_name[i] = *str;
                if (*str == 0)
                        break;
        }

        if (current_driver_name[0])
                pr_info("DMA-API: enable driver filter for driver [%s]\n",
                        current_driver_name);

        return 1;
}
__setup("dma_debug_driver=", dma_debug_driver_setup);
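
/*
 * Illustrative sketch (not part of this file): the filter above is set from
 * the kernel command line, restricting dma-debug error reports to a single
 * driver (the driver name here is an arbitrary example):
 *
 *      dma_debug_driver=e1000e
 */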