.. | ..
| 1 | +// SPDX-License-Identifier: GPL-2.0-only
1 | 2 | /*
2 | 3 | * Copyright (C) 2008 Advanced Micro Devices, Inc.
3 | 4 | *
4 | 5 | * Author: Joerg Roedel <joerg.roedel@amd.com>
5 | | - *
6 | | - * This program is free software; you can redistribute it and/or modify it
7 | | - * under the terms of the GNU General Public License version 2 as published
8 | | - * by the Free Software Foundation.
9 | | - *
10 | | - * This program is distributed in the hope that it will be useful,
11 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | | - * GNU General Public License for more details.
14 | | - *
15 | | - * You should have received a copy of the GNU General Public License
16 | | - * along with this program; if not, write to the Free Software
17 | | - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 | 6 | */
| 7 | +
| 8 | +#define pr_fmt(fmt) "DMA-API: " fmt
19 | 9 |
20 | 10 | #include <linux/sched/task_stack.h>
21 | 11 | #include <linux/scatterlist.h>
22 | | -#include <linux/dma-mapping.h>
| 12 | +#include <linux/dma-map-ops.h>
23 | 13 | #include <linux/sched/task.h>
24 | 14 | #include <linux/stacktrace.h>
25 | | -#include <linux/dma-debug.h>
26 | 15 | #include <linux/spinlock.h>
27 | 16 | #include <linux/vmalloc.h>
28 | 17 | #include <linux/debugfs.h>
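The pr_fmt() define placed ahead of the includes is what lets every literal "DMA-API: " prefix be dropped from the format strings in the rest of this patch: the pr_info()/pr_warn()/pr_err() helpers wrap their format in pr_fmt() at the call site. A minimal sketch of the mechanism, paraphrased from <linux/printk.h>:

    /* <linux/printk.h> supplies a default that a .c file may override
     * before pulling in anything that includes printk.h: */
    #ifndef pr_fmt
    #define pr_fmt(fmt) fmt
    #endif

    /* each pr_*() helper applies pr_fmt() to its format string: */
    #define pr_warn(fmt, ...) printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)

    pr_warn("Mapped at:\n");   /* now prints "DMA-API: Mapped at:" */

Note that WARN() does not apply pr_fmt() implicitly, which is why the err_printk() macro further down has to wrap its prefix explicitly as WARN(1, pr_fmt("%s %s: ") format, ...).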
.. | ..
34 | 23 | #include <linux/ctype.h>
35 | 24 | #include <linux/list.h>
36 | 25 | #include <linux/slab.h>
37 | | -
38 | 26 | #include <asm/sections.h>
| 27 | +#include "debug.h"
39 | 28 |
40 | | -#define HASH_SIZE 1024ULL
| 29 | +#define HASH_SIZE 16384ULL
41 | 30 | #define HASH_FN_SHIFT 13
42 | 31 | #define HASH_FN_MASK (HASH_SIZE - 1)
43 | 32 |
44 | | -/* allow architectures to override this if absolutely required */
45 | | -#ifndef PREALLOC_DMA_DEBUG_ENTRIES
46 | 33 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
47 | | -#endif
| 34 | +/* If the pool runs out, add this many new entries at once */
| 35 | +#define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry))
48 | 36 |
49 | 37 | enum {
50 | 38 | dma_debug_single,
51 | | - dma_debug_page,
52 | 39 | dma_debug_sg,
53 | 40 | dma_debug_coherent,
54 | 41 | dma_debug_resource,
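HASH_SIZE grows from 1024 to 16384 buckets while HASH_FN_SHIFT stays at 13, so each bucket still covers an 8 KiB window of bus addresses and the lookups that step between neighbouring windows (see bucket_find_contain() below, moving by 1 << HASH_FN_SHIFT) are unaffected. For context, the hash function itself is not touched by this patch; elsewhere in the file it looks roughly like this:

    static int hash_fn(struct dma_debug_entry *entry)
    {
    	/* the bucket index is taken from the dma address starting at
    	 * bit HASH_FN_SHIFT; HASH_FN_MASK keeps it within HASH_SIZE */
    	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
    }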
.. | ..
66 | 53 | * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
67 | 54 | * @list: node on pre-allocated free_entries list
68 | 55 | * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
69 | | - * @type: single, page, sg, coherent
70 | | - * @pfn: page frame of the start address
71 | | - * @offset: offset of mapping relative to pfn
72 | 56 | * @size: length of the mapping
| 57 | + * @type: single, page, sg, coherent
73 | 58 | * @direction: enum dma_data_direction
74 | 59 | * @sg_call_ents: 'nents' from dma_map_sg
75 | 60 | * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
| 61 | + * @pfn: page frame of the start address
| 62 | + * @offset: offset of mapping relative to pfn
76 | 63 | * @map_err_type: track whether dma_mapping_error() was checked
77 | 64 | * @stacktrace: support backtraces when a violation is detected
78 | 65 | */
79 | 66 | struct dma_debug_entry {
80 | 67 | struct list_head list;
81 | 68 | struct device *dev;
82 | | - int type;
83 | | - unsigned long pfn;
84 | | - size_t offset;
85 | 69 | u64 dev_addr;
86 | 70 | u64 size;
| 71 | + int type;
87 | 72 | int direction;
88 | 73 | int sg_call_ents;
89 | 74 | int sg_mapped_ents;
| 75 | + unsigned long pfn;
| 76 | + size_t offset;
90 | 77 | enum map_err_types map_err_type;
91 | 78 | #ifdef CONFIG_STACKTRACE
92 | | - struct stack_trace stacktrace;
93 | | - unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
| 79 | + unsigned int stack_len;
| 80 | + unsigned long stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
94 | 81 | #endif
95 | | -};
| 82 | +} ____cacheline_aligned_in_smp;
96 | 83 |
97 | 84 | typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
98 | 85 |
99 | 86 | struct hash_bucket {
100 | 87 | struct list_head list;
101 | 88 | spinlock_t lock;
102 | | -} ____cacheline_aligned_in_smp;
| 89 | +};
103 | 90 |
104 | 91 | /* Hash list to save the allocated dma addresses */
105 | 92 | static struct hash_bucket dma_entry_hash[HASH_SIZE];
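The members are reordered so the pointer and u64 fields come first, the ints pack together, and the pfn/offset bookkeeping pair sits just before the stack-trace storage. ____cacheline_aligned_in_smp also moves from the bucket to the entry itself: entries are now carved in bulk out of whole pages (see DMA_DEBUG_DYNAMIC_ENTRIES above) rather than kzalloc'ed one at a time, and the alignment keeps two entries from the same batch from sharing a cache line on SMP. A hypothetical compile-time check of that invariant (illustrative only, not part of the patch):

    #include <linux/cache.h>      /* SMP_CACHE_BYTES */
    #include <linux/build_bug.h>

    static void __maybe_unused dma_debug_entry_layout_check(void)
    {
    	/* on SMP the attribute pads sizeof() to a whole number of
    	 * cache lines, so page-sized batches tile cleanly */
    	BUILD_BUG_ON(IS_ENABLED(CONFIG_SMP) &&
    		     sizeof(struct dma_debug_entry) % SMP_CACHE_BYTES != 0);
    }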
.. | ..
134 | 121 | /* number of preallocated entries requested by kernel cmdline */
135 | 122 | static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
136 | 123 |
137 | | -/* debugfs dentry's for the stuff above */
138 | | -static struct dentry *dma_debug_dent __read_mostly;
139 | | -static struct dentry *global_disable_dent __read_mostly;
140 | | -static struct dentry *error_count_dent __read_mostly;
141 | | -static struct dentry *show_all_errors_dent __read_mostly;
142 | | -static struct dentry *show_num_errors_dent __read_mostly;
143 | | -static struct dentry *num_free_entries_dent __read_mostly;
144 | | -static struct dentry *min_free_entries_dent __read_mostly;
145 | | -static struct dentry *filter_dent __read_mostly;
146 | | -
147 | 124 | /* per-driver filter related state */
148 | 125 |
149 | 126 | #define NAME_MAX_LEN 64
.. | ..
159 | 136 | [MAP_ERR_CHECKED] = "dma map error checked",
160 | 137 | };
161 | 138 |
162 | | -static const char *type2name[5] = { "single", "page",
163 | | - "scather-gather", "coherent",
164 | | - "resource" };
| 139 | +static const char *type2name[] = {
| 140 | + [dma_debug_single] = "single",
| 141 | + [dma_debug_sg] = "scather-gather",
| 142 | + [dma_debug_coherent] = "coherent",
| 143 | + [dma_debug_resource] = "resource",
| 144 | +};
165 | 145 |
166 | | -static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
167 | | - "DMA_FROM_DEVICE", "DMA_NONE" };
| 146 | +static const char *dir2name[] = {
| 147 | + [DMA_BIDIRECTIONAL] = "DMA_BIDIRECTIONAL",
| 148 | + [DMA_TO_DEVICE] = "DMA_TO_DEVICE",
| 149 | + [DMA_FROM_DEVICE] = "DMA_FROM_DEVICE",
| 150 | + [DMA_NONE] = "DMA_NONE",
| 151 | +};
168 | 152 |
169 | 153 | /*
170 | 154 | * The access to some variables in this macro is racy. We can't use atomic_t
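Both tables switch to designated initializers, so each string is pinned to its index and the removal of dma_debug_page above cannot silently shift the remaining names; the explicit array sizes, which would now be stale, go away with it. The dir2name indices are the values of enum dma_data_direction from <linux/dma-direction.h>, for reference:

    enum dma_data_direction {
    	DMA_BIDIRECTIONAL = 0,
    	DMA_TO_DEVICE = 1,
    	DMA_FROM_DEVICE = 2,
    	DMA_NONE = 3,
    };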
.. | ..
183 | 167 | {
184 | 168 | #ifdef CONFIG_STACKTRACE
185 | 169 | if (entry) {
186 | | - pr_warning("Mapped at:\n");
187 | | - print_stack_trace(&entry->stacktrace, 0);
| 170 | + pr_warn("Mapped at:\n");
| 171 | + stack_trace_print(entry->stack_entries, entry->stack_len, 0);
188 | 172 | }
189 | 173 | #endif
190 | 174 | }
.. | ..
234 | 218 | error_count += 1; \
235 | 219 | if (driver_filter(dev) && \
236 | 220 | (show_all_errors || show_num_errors > 0)) { \
237 | | - WARN(1, "%s %s: " format, \
| 221 | + WARN(1, pr_fmt("%s %s: ") format, \
238 | 222 | dev ? dev_driver_string(dev) : "NULL", \
239 | 223 | dev ? dev_name(dev) : "NULL", ## arg); \
240 | 224 | dump_entry_trace(entry); \
.. | ..
277 | 261 | * Give up exclusive access to the hash bucket
278 | 262 | */
279 | 263 | static void put_hash_bucket(struct hash_bucket *bucket,
280 | | - unsigned long *flags)
| 264 | + unsigned long flags)
281 | 265 | __releases(&bucket->lock)
282 | 266 | {
283 | | - unsigned long __flags = *flags;
284 | | -
285 | | - spin_unlock_irqrestore(&bucket->lock, __flags);
| 267 | + spin_unlock_irqrestore(&bucket->lock, flags);
286 | 268 | }
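Passing flags by value is all spin_unlock_irqrestore() needs; only the acquire side has to write the saved flags back through a pointer, which is why every caller below changes from put_hash_bucket(bucket, &flags) to put_hash_bucket(bucket, flags). For context, the matching acquire helper, unchanged by this patch, looks roughly like this:

    static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
    					   unsigned long *flags)
    	__acquires(&bucket->lock)
    {
    	int idx = hash_fn(entry);
    	unsigned long __flags;

    	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
    	*flags = __flags;
    	return &dma_entry_hash[idx];
    }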
287 | 269 |
288 | 270 | static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
.. | ..
381 | 363 | /*
382 | 364 | * Nothing found, go back a hash bucket
383 | 365 | */
384 | | - put_hash_bucket(*bucket, flags);
| 366 | + put_hash_bucket(*bucket, *flags);
385 | 367 | range += (1 << HASH_FN_SHIFT);
386 | 368 | index.dev_addr -= (1 << HASH_FN_SHIFT);
387 | 369 | *bucket = get_hash_bucket(&index, flags);
.. | ..
465 | 447 | * dma_active_cacheline entry to track per event. dma_map_sg(), on the
466 | 448 | * other hand, consumes a single dma_debug_entry, but inserts 'nents'
467 | 449 | * entries into the tree.
468 | | - *
469 | | - * At any time debug_dma_assert_idle() can be called to trigger a
470 | | - * warning if any cachelines in the given page are in the active set.
471 | 450 | */
472 | | -static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
| 451 | +static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC);
473 | 452 | static DEFINE_SPINLOCK(radix_lock);
474 | 453 | #define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
475 | 454 | #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
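The radix tree is keyed by cacheline number rather than by bus address, which is also why the entry keeps its pfn/offset pair through the struct reshuffle above. The key derivation, unchanged elsewhere in this file, is roughly:

    static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
    {
    	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
    		(entry->offset >> L1_CACHE_SHIFT);
    }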
.. | ..
514 | 493 | overlap = active_cacheline_set_overlap(cln, ++overlap);
515 | 494 |
516 | 495 | /* If we overflowed the overlap counter then we're potentially
517 | | - * leaking dma-mappings. Otherwise, if maps and unmaps are
518 | | - * balanced then this overflow may cause false negatives in
519 | | - * debug_dma_assert_idle() as the cacheline may be marked idle
520 | | - * prematurely.
| 496 | + * leaking dma-mappings.
521 | 497 | */
522 | 498 | WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
523 | | - "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
| 499 | + pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
524 | 500 | ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
525 | 501 | }
526 | 502 |
.. | ..
572 | 548 | spin_unlock_irqrestore(&radix_lock, flags);
573 | 549 | }
574 | 550 |
575 | | -/**
576 | | - * debug_dma_assert_idle() - assert that a page is not undergoing dma
577 | | - * @page: page to lookup in the dma_active_cacheline tree
578 | | - *
579 | | - * Place a call to this routine in cases where the cpu touching the page
580 | | - * before the dma completes (page is dma_unmapped) will lead to data
581 | | - * corruption.
582 | | - */
583 | | -void debug_dma_assert_idle(struct page *page)
584 | | -{
585 | | - static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
586 | | - struct dma_debug_entry *entry = NULL;
587 | | - void **results = (void **) &ents;
588 | | - unsigned int nents, i;
589 | | - unsigned long flags;
590 | | - phys_addr_t cln;
591 | | -
592 | | - if (dma_debug_disabled())
593 | | - return;
594 | | -
595 | | - if (!page)
596 | | - return;
597 | | -
598 | | - cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
599 | | - spin_lock_irqsave(&radix_lock, flags);
600 | | - nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
601 | | - CACHELINES_PER_PAGE);
602 | | - for (i = 0; i < nents; i++) {
603 | | - phys_addr_t ent_cln = to_cacheline_number(ents[i]);
604 | | -
605 | | - if (ent_cln == cln) {
606 | | - entry = ents[i];
607 | | - break;
608 | | - } else if (ent_cln >= cln + CACHELINES_PER_PAGE)
609 | | - break;
610 | | - }
611 | | - spin_unlock_irqrestore(&radix_lock, flags);
612 | | -
613 | | - if (!entry)
614 | | - return;
615 | | -
616 | | - cln = to_cacheline_number(entry);
617 | | - err_printk(entry->dev, entry,
618 | | - "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
619 | | - &cln);
620 | | -}
621 | | -
622 | 551 | /*
623 | 552 | * Wrapper function for adding an entry to the hash.
624 | 553 | * This function takes care of locking itself.
.. | ..
631 | 560 |
632 | 561 | bucket = get_hash_bucket(entry, &flags);
633 | 562 | hash_bucket_add(bucket, entry);
634 | | - put_hash_bucket(bucket, &flags);
| 563 | + put_hash_bucket(bucket, flags);
635 | 564 |
636 | 565 | rc = active_cacheline_insert(entry);
637 | 566 | if (rc == -ENOMEM) {
638 | | - pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
| 567 | + pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
639 | 568 | global_disable = true;
640 | 569 | }
641 | 570 |
642 | 571 | /* TODO: report -EEXIST errors here as overlapping mappings are
643 | 572 | * not supported by the DMA API
644 | 573 | */
| 574 | +}
| 575 | +
| 576 | +static int dma_debug_create_entries(gfp_t gfp)
| 577 | +{
| 578 | + struct dma_debug_entry *entry;
| 579 | + int i;
| 580 | +
| 581 | + entry = (void *)get_zeroed_page(gfp);
| 582 | + if (!entry)
| 583 | + return -ENOMEM;
| 584 | +
| 585 | + for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++)
| 586 | + list_add_tail(&entry[i].list, &free_entries);
| 587 | +
| 588 | + num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
| 589 | + nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
| 590 | +
| 591 | + return 0;
645 | 592 | }
646 | 593 |
647 | 594 | static struct dma_debug_entry *__dma_entry_alloc(void)
.. | ..
659 | 606 | return entry;
660 | 607 | }
661 | 608 |
| 609 | +/*
| 610 | + * This should be called outside of free_entries_lock scope to avoid potential
| 611 | + * deadlocks with serial consoles that use DMA.
| 612 | + */
| 613 | +static void __dma_entry_alloc_check_leak(u32 nr_entries)
| 614 | +{
| 615 | + u32 tmp = nr_entries % nr_prealloc_entries;
| 616 | +
| 617 | + /* Shout each time we tick over some multiple of the initial pool */
| 618 | + if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
| 619 | + pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
| 620 | + nr_entries,
| 621 | + (nr_entries / nr_prealloc_entries));
| 622 | + }
| 623 | +}
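The modulo test fires once per crossing of a multiple of the initial pool size, because nr_total_entries only grows in DMA_DEBUG_DYNAMIC_ENTRIES-sized steps. A worked example with hypothetical sizes (65536 preallocated entries, 32 entries per page-sized refill):

    /*
     * nr_entries: 65568, 65600, ...  tmp >= 32         -> silent
     * nr_entries: 131072 (2 * 65536), tmp == 0 < 32    -> prints
     *     "dma_debug_entry pool grown to 131072 (200%)"
     * nr_entries: 131104, tmp == 32                    -> silent again
     * nr_entries: 196608 (3 * 65536), tmp == 0 < 32    -> prints
     *     "dma_debug_entry pool grown to 196608 (300%)"
     */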
| 624 | +
662 | 625 | /* struct dma_entry allocator
663 | 626 | *
664 | 627 | * The next two functions implement the allocator for
.. | ..
666 | 629 | */
667 | 630 | static struct dma_debug_entry *dma_entry_alloc(void)
668 | 631 | {
| 632 | + bool alloc_check_leak = false;
669 | 633 | struct dma_debug_entry *entry;
670 | 634 | unsigned long flags;
| 635 | + u32 nr_entries;
671 | 636 |
672 | 637 | spin_lock_irqsave(&free_entries_lock, flags);
673 | | -
674 | | - if (list_empty(&free_entries)) {
675 | | - global_disable = true;
676 | | - spin_unlock_irqrestore(&free_entries_lock, flags);
677 | | - pr_err("DMA-API: debugging out of memory - disabling\n");
678 | | - return NULL;
| 638 | + if (num_free_entries == 0) {
| 639 | + if (dma_debug_create_entries(GFP_ATOMIC)) {
| 640 | + global_disable = true;
| 641 | + spin_unlock_irqrestore(&free_entries_lock, flags);
| 642 | + pr_err("debugging out of memory - disabling\n");
| 643 | + return NULL;
| 644 | + }
| 645 | + alloc_check_leak = true;
| 646 | + nr_entries = nr_total_entries;
679 | 647 | }
680 | 648 |
681 | 649 | entry = __dma_entry_alloc();
682 | 650 |
683 | 651 | spin_unlock_irqrestore(&free_entries_lock, flags);
684 | 652 |
685 | | -#ifdef CONFIG_STACKTRACE
686 | | - entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
687 | | - entry->stacktrace.entries = entry->st_entries;
688 | | - entry->stacktrace.skip = 2;
689 | | - save_stack_trace(&entry->stacktrace);
690 | | -#endif
| 653 | + if (alloc_check_leak)
| 654 | + __dma_entry_alloc_check_leak(nr_entries);
691 | 655 |
| 656 | +#ifdef CONFIG_STACKTRACE
| 657 | + entry->stack_len = stack_trace_save(entry->stack_entries,
| 658 | + ARRAY_SIZE(entry->stack_entries),
| 659 | + 1);
| 660 | +#endif
692 | 661 | return entry;
693 | 662 | }
694 | 663 |
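The open-coded struct stack_trace setup is replaced by the newer stacktrace API, which returns the number of captured frames directly; dump_entry_trace() above consumes the pair via stack_trace_print(). The declarations involved, from <linux/stacktrace.h>, for reference:

    unsigned int stack_trace_save(unsigned long *store, unsigned int size,
    			      unsigned int skipnr);
    void stack_trace_print(const unsigned long *trace,
    		       unsigned int nr_entries, int spaces);

skipnr == 1 drops dma_entry_alloc() itself from the recorded trace (the old code used skip = 2 for the equivalent effect with the older interface). Note also that __dma_entry_alloc_check_leak() is deliberately called only after free_entries_lock is dropped, matching the deadlock warning in its comment.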
.. | ..
708 | 677 | spin_unlock_irqrestore(&free_entries_lock, flags);
709 | 678 | }
710 | 679 |
711 | | -int dma_debug_resize_entries(u32 num_entries)
712 | | -{
713 | | - int i, delta, ret = 0;
714 | | - unsigned long flags;
715 | | - struct dma_debug_entry *entry;
716 | | - LIST_HEAD(tmp);
717 | | -
718 | | - spin_lock_irqsave(&free_entries_lock, flags);
719 | | -
720 | | - if (nr_total_entries < num_entries) {
721 | | - delta = num_entries - nr_total_entries;
722 | | -
723 | | - spin_unlock_irqrestore(&free_entries_lock, flags);
724 | | -
725 | | - for (i = 0; i < delta; i++) {
726 | | - entry = kzalloc(sizeof(*entry), GFP_KERNEL);
727 | | - if (!entry)
728 | | - break;
729 | | -
730 | | - list_add_tail(&entry->list, &tmp);
731 | | - }
732 | | -
733 | | - spin_lock_irqsave(&free_entries_lock, flags);
734 | | -
735 | | - list_splice(&tmp, &free_entries);
736 | | - nr_total_entries += i;
737 | | - num_free_entries += i;
738 | | - } else {
739 | | - delta = nr_total_entries - num_entries;
740 | | -
741 | | - for (i = 0; i < delta && !list_empty(&free_entries); i++) {
742 | | - entry = __dma_entry_alloc();
743 | | - kfree(entry);
744 | | - }
745 | | -
746 | | - nr_total_entries -= i;
747 | | - }
748 | | -
749 | | - if (nr_total_entries != num_entries)
750 | | - ret = 1;
751 | | -
752 | | - spin_unlock_irqrestore(&free_entries_lock, flags);
753 | | -
754 | | - return ret;
755 | | -}
756 | | -
757 | 680 | /*
758 | 681 | * DMA-API debugging init code
759 | 682 | *
.. | ..
761 | 684 | * 1. Initialize core data structures
762 | 685 | * 2. Preallocate a given number of dma_debug_entry structs
763 | 686 | */
764 | | -
765 | | -static int prealloc_memory(u32 num_entries)
766 | | -{
767 | | - struct dma_debug_entry *entry, *next_entry;
768 | | - int i;
769 | | -
770 | | - for (i = 0; i < num_entries; ++i) {
771 | | - entry = kzalloc(sizeof(*entry), GFP_KERNEL);
772 | | - if (!entry)
773 | | - goto out_err;
774 | | -
775 | | - list_add_tail(&entry->list, &free_entries);
776 | | - }
777 | | -
778 | | - num_free_entries = num_entries;
779 | | - min_free_entries = num_entries;
780 | | -
781 | | - pr_info("DMA-API: preallocated %d debug entries\n", num_entries);
782 | | -
783 | | - return 0;
784 | | -
785 | | -out_err:
786 | | -
787 | | - list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
788 | | - list_del(&entry->list);
789 | | - kfree(entry);
790 | | - }
791 | | -
792 | | - return -ENOMEM;
793 | | -}
794 | 687 |
795 | 688 | static ssize_t filter_read(struct file *file, char __user *user_buf,
796 | 689 | size_t count, loff_t *ppos)
.. | ..
851 | 744 | * switched off.
852 | 745 | */
853 | 746 | if (current_driver_name[0])
854 | | - pr_info("DMA-API: switching off dma-debug driver filter\n");
| 747 | + pr_info("switching off dma-debug driver filter\n");
855 | 748 | current_driver_name[0] = 0;
856 | 749 | current_driver = NULL;
857 | 750 | goto out_unlock;
.. | ..
869 | 762 | current_driver_name[i] = 0;
870 | 763 | current_driver = NULL;
871 | 764 |
872 | | - pr_info("DMA-API: enable driver filter for driver [%s]\n",
| 765 | + pr_info("enable driver filter for driver [%s]\n",
873 | 766 | current_driver_name);
874 | 767 |
875 | 768 | out_unlock:
.. | ..
884 | 777 | .llseek = default_llseek,
885 | 778 | };
886 | 779 |
887 | | -static int dma_debug_fs_init(void)
| 780 | +static int dump_show(struct seq_file *seq, void *v)
888 | 781 | {
889 | | - dma_debug_dent = debugfs_create_dir("dma-api", NULL);
890 | | - if (!dma_debug_dent) {
891 | | - pr_err("DMA-API: can not create debugfs directory\n");
892 | | - return -ENOMEM;
| 782 | + int idx;
| 783 | +
| 784 | + for (idx = 0; idx < HASH_SIZE; idx++) {
| 785 | + struct hash_bucket *bucket = &dma_entry_hash[idx];
| 786 | + struct dma_debug_entry *entry;
| 787 | + unsigned long flags;
| 788 | +
| 789 | + spin_lock_irqsave(&bucket->lock, flags);
| 790 | + list_for_each_entry(entry, &bucket->list, list) {
| 791 | + seq_printf(seq,
| 792 | + "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx %s %s\n",
| 793 | + dev_name(entry->dev),
| 794 | + dev_driver_string(entry->dev),
| 795 | + type2name[entry->type], idx,
| 796 | + phys_addr(entry), entry->pfn,
| 797 | + entry->dev_addr, entry->size,
| 798 | + dir2name[entry->direction],
| 799 | + maperr2str[entry->map_err_type]);
| 800 | + }
| 801 | + spin_unlock_irqrestore(&bucket->lock, flags);
893 | 802 | }
| 803 | + return 0;
| 804 | +}
| 805 | +DEFINE_SHOW_ATTRIBUTE(dump);
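DEFINE_SHOW_ATTRIBUTE(dump) is what manufactures the dump_fops referenced in dma_debug_fs_init() below; its expansion from <linux/seq_file.h> is roughly:

    static int dump_open(struct inode *inode, struct file *file)
    {
    	return single_open(file, dump_show, inode->i_private);
    }

    static const struct file_operations dump_fops = {
    	.owner	 = THIS_MODULE,
    	.open	 = dump_open,
    	.read	 = seq_read,
    	.llseek	 = seq_lseek,
    	.release = single_release,
    };

single_open() arranges for dump_show() to run on each read, so reading /sys/kernel/debug/dma-api/dump walks all 16384 buckets under their locks and prints one line per live mapping.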
894 | 806 |
895 | | - global_disable_dent = debugfs_create_bool("disabled", 0444,
896 | | - dma_debug_dent,
897 | | - &global_disable);
898 | | - if (!global_disable_dent)
899 | | - goto out_err;
| 807 | +static int __init dma_debug_fs_init(void)
| 808 | +{
| 809 | + struct dentry *dentry = debugfs_create_dir("dma-api", NULL);
900 | 810 |
901 | | - error_count_dent = debugfs_create_u32("error_count", 0444,
902 | | - dma_debug_dent, &error_count);
903 | | - if (!error_count_dent)
904 | | - goto out_err;
905 | | -
906 | | - show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
907 | | - dma_debug_dent,
908 | | - &show_all_errors);
909 | | - if (!show_all_errors_dent)
910 | | - goto out_err;
911 | | -
912 | | - show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
913 | | - dma_debug_dent,
914 | | - &show_num_errors);
915 | | - if (!show_num_errors_dent)
916 | | - goto out_err;
917 | | -
918 | | - num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
919 | | - dma_debug_dent,
920 | | - &num_free_entries);
921 | | - if (!num_free_entries_dent)
922 | | - goto out_err;
923 | | -
924 | | - min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
925 | | - dma_debug_dent,
926 | | - &min_free_entries);
927 | | - if (!min_free_entries_dent)
928 | | - goto out_err;
929 | | -
930 | | - filter_dent = debugfs_create_file("driver_filter", 0644,
931 | | - dma_debug_dent, NULL, &filter_fops);
932 | | - if (!filter_dent)
933 | | - goto out_err;
| 811 | + debugfs_create_bool("disabled", 0444, dentry, &global_disable);
| 812 | + debugfs_create_u32("error_count", 0444, dentry, &error_count);
| 813 | + debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors);
| 814 | + debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors);
| 815 | + debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries);
| 816 | + debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries);
| 817 | + debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
| 818 | + debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
| 819 | + debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);
934 | 820 |
935 | 821 | return 0;
936 | | -
937 | | -out_err:
938 | | - debugfs_remove_recursive(dma_debug_dent);
939 | | -
940 | | - return -ENOMEM;
941 | 822 | }
| 823 | +core_initcall_sync(dma_debug_fs_init);
942 | 824 |
943 | 825 | static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
944 | 826 | {
.. | ..
963 | 845 | static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
964 | 846 | {
965 | 847 | struct device *dev = data;
966 | | - struct dma_debug_entry *uninitialized_var(entry);
| 848 | + struct dma_debug_entry *entry;
967 | 849 | int count;
968 | 850 |
969 | 851 | if (dma_debug_disabled())
.. | ..
974 | 856 | count = device_dma_allocations(dev, &entry);
975 | 857 | if (count == 0)
976 | 858 | break;
977 | | - err_printk(dev, entry, "DMA-API: device driver has pending "
| 859 | + err_printk(dev, entry, "device driver has pending "
978 | 860 | "DMA allocations while released from device "
979 | 861 | "[count=%d]\n"
980 | 862 | "One of leaked entries details: "
.. | ..
1010 | 892 |
1011 | 893 | static int dma_debug_init(void)
1012 | 894 | {
1013 | | - int i;
| 895 | + int i, nr_pages;
1014 | 896 |
1015 | 897 | /* Do not use dma_debug_initialized here, since we really want to be
1016 | 898 | * called to set dma_debug_initialized
.. | ..
1023 | 905 | spin_lock_init(&dma_entry_hash[i].lock);
1024 | 906 | }
1025 | 907 |
1026 | | - if (dma_debug_fs_init() != 0) {
1027 | | - pr_err("DMA-API: error creating debugfs entries - disabling\n");
| 908 | + nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
| 909 | + for (i = 0; i < nr_pages; ++i)
| 910 | + dma_debug_create_entries(GFP_KERNEL);
| 911 | + if (num_free_entries >= nr_prealloc_entries) {
| 912 | + pr_info("preallocated %d debug entries\n", nr_total_entries);
| 913 | + } else if (num_free_entries > 0) {
| 914 | + pr_warn("%d debug entries requested but only %d allocated\n",
| 915 | + nr_prealloc_entries, nr_total_entries);
| 916 | + } else {
| 917 | + pr_err("debugging out of memory error - disabled\n");
1028 | 918 | global_disable = true;
1029 | 919 |
1030 | 920 | return 0;
1031 | 921 | }
1032 | | -
1033 | | - if (prealloc_memory(nr_prealloc_entries) != 0) {
1034 | | - pr_err("DMA-API: debugging out of memory error - disabled\n");
1035 | | - global_disable = true;
1036 | | -
1037 | | - return 0;
1038 | | - }
1039 | | -
1040 | | - nr_total_entries = num_free_entries;
| 922 | + min_free_entries = num_free_entries;
1041 | 923 |
1042 | 924 | dma_debug_initialized = true;
1043 | 925 |
1044 | | - pr_info("DMA-API: debugging enabled by kernel config\n");
| 926 | + pr_info("debugging enabled by kernel config\n");
1045 | 927 | return 0;
1046 | 928 | }
1047 | 929 | core_initcall(dma_debug_init);
.. | ..
1052 | 934 | return -EINVAL;
1053 | 935 |
1054 | 936 | if (strncmp(str, "off", 3) == 0) {
1055 | | - pr_info("DMA-API: debugging disabled on kernel command line\n");
| 937 | + pr_info("debugging disabled on kernel command line\n");
1056 | 938 | global_disable = true;
1057 | 939 | }
1058 | 940 |
1059 | | - return 0;
| 941 | + return 1;
1060 | 942 | }
1061 | 943 |
1062 | 944 | static __init int dma_debug_entries_cmdline(char *str)
.. | ..
1065 | 947 | return -EINVAL;
1066 | 948 | if (!get_option(&str, &nr_prealloc_entries))
1067 | 949 | nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
1068 | | - return 0;
| 950 | + return 1;
1069 | 951 | }
1070 | 952 |
1071 | 953 | __setup("dma_debug=", dma_debug_cmdline);
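Returning 1 from a __setup() handler marks the option as consumed; the old `return 0` told the early-param parser these perfectly valid options were unhandled, so they leaked into init's argument or environment list. Typical usage on the kernel command line (illustrative values):

    dma_debug=off dma_debug_entries=131072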
.. | ..
1082 | 964 |
1083 | 965 | if (!entry) {
1084 | 966 | /* must drop lock before calling dma_mapping_error */
1085 | | - put_hash_bucket(bucket, &flags);
| 967 | + put_hash_bucket(bucket, flags);
1086 | 968 |
1087 | 969 | if (dma_mapping_error(ref->dev, ref->dev_addr)) {
1088 | 970 | err_printk(ref->dev, NULL,
1089 | | - "DMA-API: device driver tries to free an "
| 971 | + "device driver tries to free an "
1090 | 972 | "invalid DMA memory address\n");
1091 | 973 | } else {
1092 | 974 | err_printk(ref->dev, NULL,
1093 | | - "DMA-API: device driver tries to free DMA "
| 975 | + "device driver tries to free DMA "
1094 | 976 | "memory it has not allocated [device "
1095 | 977 | "address=0x%016llx] [size=%llu bytes]\n",
1096 | 978 | ref->dev_addr, ref->size);
.. | ..
1099 | 981 | }
1100 | 982 |
1101 | 983 | if (ref->size != entry->size) {
1102 | | - err_printk(ref->dev, entry, "DMA-API: device driver frees "
| 984 | + err_printk(ref->dev, entry, "device driver frees "
1103 | 985 | "DMA memory with different size "
1104 | 986 | "[device address=0x%016llx] [map size=%llu bytes] "
1105 | 987 | "[unmap size=%llu bytes]\n",
.. | ..
1107 | 989 | }
1108 | 990 |
1109 | 991 | if (ref->type != entry->type) {
1110 | | - err_printk(ref->dev, entry, "DMA-API: device driver frees "
| 992 | + err_printk(ref->dev, entry, "device driver frees "
1111 | 993 | "DMA memory with wrong function "
1112 | 994 | "[device address=0x%016llx] [size=%llu bytes] "
1113 | 995 | "[mapped as %s] [unmapped as %s]\n",
.. | ..
1115 | 997 | type2name[entry->type], type2name[ref->type]);
1116 | 998 | } else if ((entry->type == dma_debug_coherent) &&
1117 | 999 | (phys_addr(ref) != phys_addr(entry))) {
1118 | | - err_printk(ref->dev, entry, "DMA-API: device driver frees "
| 1000 | + err_printk(ref->dev, entry, "device driver frees "
1119 | 1001 | "DMA memory with different CPU address "
1120 | 1002 | "[device address=0x%016llx] [size=%llu bytes] "
1121 | 1003 | "[cpu alloc address=0x%016llx] "
.. | ..
1127 | 1009 |
1128 | 1010 | if (ref->sg_call_ents && ref->type == dma_debug_sg &&
1129 | 1011 | ref->sg_call_ents != entry->sg_call_ents) {
1130 | | - err_printk(ref->dev, entry, "DMA-API: device driver frees "
| 1012 | + err_printk(ref->dev, entry, "device driver frees "
1131 | 1013 | "DMA sg list with different entry count "
1132 | 1014 | "[map count=%d] [unmap count=%d]\n",
1133 | 1015 | entry->sg_call_ents, ref->sg_call_ents);
.. | ..
1138 | 1020 | * DMA API don't handle this properly, so check for it here
1139 | 1021 | */
1140 | 1022 | if (ref->direction != entry->direction) {
1141 | | - err_printk(ref->dev, entry, "DMA-API: device driver frees "
| 1023 | + err_printk(ref->dev, entry, "device driver frees "
1142 | 1024 | "DMA memory with different direction "
1143 | 1025 | "[device address=0x%016llx] [size=%llu bytes] "
1144 | 1026 | "[mapped with %s] [unmapped with %s]\n",
.. | ..
1150 | 1032 | /*
1151 | 1033 | * Drivers should use dma_mapping_error() to check the returned
1152 | 1034 | * addresses of dma_map_single() and dma_map_page().
1153 | | - * If not, print this warning message. See Documentation/DMA-API.txt.
| 1035 | + * If not, print this warning message. See Documentation/core-api/dma-api.rst.
1154 | 1036 | */
1155 | 1037 | if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1156 | 1038 | err_printk(ref->dev, entry,
1157 | | - "DMA-API: device driver failed to check map error"
| 1039 | + "device driver failed to check map error"
1158 | 1040 | "[device address=0x%016llx] [size=%llu bytes] "
1159 | 1041 | "[mapped as %s]",
1160 | 1042 | ref->dev_addr, ref->size,
.. | ..
1164 | 1046 | hash_bucket_del(entry);
1165 | 1047 | dma_entry_free(entry);
1166 | 1048 |
1167 | | - put_hash_bucket(bucket, &flags);
| 1049 | + put_hash_bucket(bucket, flags);
1168 | 1050 | }
1169 | 1051 |
1170 | 1052 | static void check_for_stack(struct device *dev,
.. | ..
1179 | 1061 | return;
1180 | 1062 | addr = page_address(page) + offset;
1181 | 1063 | if (object_is_on_stack(addr))
1182 | | - err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [addr=%p]\n", addr);
| 1064 | + err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
1183 | 1065 | } else {
1184 | 1066 | /* Stack is vmalloced. */
1185 | 1067 | int i;
.. | ..
1189 | 1071 | continue;
1190 | 1072 |
1191 | 1073 | addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
1192 | | - err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [probable addr=%p]\n", addr);
| 1074 | + err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
1193 | 1075 | break;
1194 | 1076 | }
1195 | 1077 | }
.. | ..
1209 | 1091 | {
1210 | 1092 | if (overlap(addr, len, _stext, _etext) ||
1211 | 1093 | overlap(addr, len, __start_rodata, __end_rodata))
1212 | | - err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
| 1094 | + err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
1213 | 1095 | }
1214 | 1096 |
1215 | 1097 | static void check_sync(struct device *dev,
.. | ..
1225 | 1107 | entry = bucket_find_contain(&bucket, ref, &flags);
1226 | 1108 |
1227 | 1109 | if (!entry) {
1228 | | - err_printk(dev, NULL, "DMA-API: device driver tries "
| 1110 | + err_printk(dev, NULL, "device driver tries "
1229 | 1111 | "to sync DMA memory it has not allocated "
1230 | 1112 | "[device address=0x%016llx] [size=%llu bytes]\n",
1231 | 1113 | (unsigned long long)ref->dev_addr, ref->size);
.. | ..
1233 | 1115 | }
1234 | 1116 |
1235 | 1117 | if (ref->size > entry->size) {
1236 | | - err_printk(dev, entry, "DMA-API: device driver syncs"
| 1118 | + err_printk(dev, entry, "device driver syncs"
1237 | 1119 | " DMA memory outside allocated range "
1238 | 1120 | "[device address=0x%016llx] "
1239 | 1121 | "[allocation size=%llu bytes] "
.. | ..
1246 | 1128 | goto out;
1247 | 1129 |
1248 | 1130 | if (ref->direction != entry->direction) {
1249 | | - err_printk(dev, entry, "DMA-API: device driver syncs "
| 1131 | + err_printk(dev, entry, "device driver syncs "
1250 | 1132 | "DMA memory with different direction "
1251 | 1133 | "[device address=0x%016llx] [size=%llu bytes] "
1252 | 1134 | "[mapped with %s] [synced with %s]\n",
.. | ..
1257 | 1139 |
1258 | 1140 | if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
1259 | 1141 | !(ref->direction == DMA_TO_DEVICE))
1260 | | - err_printk(dev, entry, "DMA-API: device driver syncs "
| 1142 | + err_printk(dev, entry, "device driver syncs "
1261 | 1143 | "device read-only DMA memory for cpu "
1262 | 1144 | "[device address=0x%016llx] [size=%llu bytes] "
1263 | 1145 | "[mapped with %s] [synced with %s]\n",
.. | ..
1267 | 1149 |
1268 | 1150 | if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
1269 | 1151 | !(ref->direction == DMA_FROM_DEVICE))
1270 | | - err_printk(dev, entry, "DMA-API: device driver syncs "
| 1152 | + err_printk(dev, entry, "device driver syncs "
1271 | 1153 | "device write-only DMA memory to device "
1272 | 1154 | "[device address=0x%016llx] [size=%llu bytes] "
1273 | 1155 | "[mapped with %s] [synced with %s]\n",
.. | ..
1275 | 1157 | dir2name[entry->direction],
1276 | 1158 | dir2name[ref->direction]);
1277 | 1159 |
| 1160 | + /* sg list count can be less than map count when partial cache sync */
1278 | 1161 | if (ref->sg_call_ents && ref->type == dma_debug_sg &&
1279 | | - ref->sg_call_ents != entry->sg_call_ents) {
1280 | | - err_printk(ref->dev, entry, "DMA-API: device driver syncs "
1281 | | - "DMA sg list with different entry count "
| 1162 | + ref->sg_call_ents > entry->sg_call_ents) {
| 1163 | + err_printk(ref->dev, entry, "device driver syncs "
| 1164 | + "DMA sg list count larger than map count "
1282 | 1165 | "[map count=%d] [sync count=%d]\n",
1283 | 1166 | entry->sg_call_ents, ref->sg_call_ents);
1284 | 1167 | }
1285 | 1168 |
1286 | 1169 | out:
1287 | | - put_hash_bucket(bucket, &flags);
| 1170 | + put_hash_bucket(bucket, flags);
1288 | 1171 | }
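Relaxing the comparison from != to > reflects that syncing only the leading part of a mapped scatterlist is legitimate; only a sync that claims more entries than were mapped is flagged. A pattern the check now accepts (sketch; dev and sgl are assumed to be set up elsewhere):

    int nents = dma_map_sg(dev, sgl, 8, DMA_FROM_DEVICE);

    /* device filled only the first half; a partial sync passes the check */
    dma_sync_sg_for_cpu(dev, sgl, 4, DMA_FROM_DEVICE);

    /* syncing with nelems greater than 8 would still trigger the error */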
1289 | 1172 |
1290 | 1173 | static void check_sg_segment(struct device *dev, struct scatterlist *sg)
.. | ..
1298 | 1181 | * whoever generated the list forgot to check them.
1299 | 1182 | */
1300 | 1183 | if (sg->length > max_seg)
1301 | | - err_printk(dev, NULL, "DMA-API: mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
| 1184 | + err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
1302 | 1185 | sg->length, max_seg);
1303 | 1186 | /*
1304 | 1187 | * In some cases this could potentially be the DMA API
.. | ..
1308 | 1191 | start = sg_dma_address(sg);
1309 | 1192 | end = start + sg_dma_len(sg) - 1;
1310 | 1193 | if ((start ^ end) & ~boundary)
1311 | | - err_printk(dev, NULL, "DMA-API: mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
| 1194 | + err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
1312 | 1195 | start, end, boundary);
1313 | 1196 | #endif
1314 | 1197 | }
1315 | 1198 |
| 1199 | +void debug_dma_map_single(struct device *dev, const void *addr,
| 1200 | + unsigned long len)
| 1201 | +{
| 1202 | + if (unlikely(dma_debug_disabled()))
| 1203 | + return;
| 1204 | +
| 1205 | + if (!virt_addr_valid(addr))
| 1206 | + err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
| 1207 | + addr, len);
| 1208 | +
| 1209 | + if (is_vmalloc_addr(addr))
| 1210 | + err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
| 1211 | + addr, len);
| 1212 | +}
| 1213 | +EXPORT_SYMBOL(debug_dma_map_single);
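debug_dma_map_single() is exported because its caller lives in a header: dma_map_single() is an inline wrapper that decomposes the virtual address into page + offset, which would defeat the virt_addr_valid()/is_vmalloc_addr() screening if it were done any later. A sketch of the call site, paraphrased from <linux/dma-mapping.h>:

    static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
    					      size_t size,
    					      enum dma_data_direction dir,
    					      unsigned long attrs)
    {
    	/* screen the CPU address before it is reduced to page + offset */
    	debug_dma_map_single(dev, ptr, size);
    	return dma_map_page_attrs(dev, virt_to_page(ptr),
    				  offset_in_page(ptr), size, dir, attrs);
    }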
| 1214 | +
1316 | 1215 | void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
1317 | | - size_t size, int direction, dma_addr_t dma_addr,
1318 | | - bool map_single)
| 1216 | + size_t size, int direction, dma_addr_t dma_addr)
1319 | 1217 | {
1320 | 1218 | struct dma_debug_entry *entry;
1321 | 1219 |
.. | ..
1330 | 1228 | return;
1331 | 1229 |
1332 | 1230 | entry->dev = dev;
1333 | | - entry->type = dma_debug_page;
| 1231 | + entry->type = dma_debug_single;
1334 | 1232 | entry->pfn = page_to_pfn(page);
1335 | | - entry->offset = offset,
| 1233 | + entry->offset = offset;
1336 | 1234 | entry->dev_addr = dma_addr;
1337 | 1235 | entry->size = size;
1338 | 1236 | entry->direction = direction;
1339 | 1237 | entry->map_err_type = MAP_ERR_NOT_CHECKED;
1340 | | -
1341 | | - if (map_single)
1342 | | - entry->type = dma_debug_single;
1343 | 1238 |
1344 | 1239 | check_for_stack(dev, page, offset);
1345 | 1240 |
.. | ..
1351 | 1246 |
1352 | 1247 | add_dma_entry(entry);
1353 | 1248 | }
1354 | | -EXPORT_SYMBOL(debug_dma_map_page);
1355 | 1249 |
1356 | 1250 | void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
1357 | 1251 | {
.. | ..
1387 | 1281 | }
1388 | 1282 | }
1389 | 1283 |
1390 | | - put_hash_bucket(bucket, &flags);
| 1284 | + put_hash_bucket(bucket, flags);
1391 | 1285 | }
1392 | 1286 | EXPORT_SYMBOL(debug_dma_mapping_error);
1393 | 1287 |
1394 | 1288 | void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
1395 | | - size_t size, int direction, bool map_single)
| 1289 | + size_t size, int direction)
1396 | 1290 | {
1397 | 1291 | struct dma_debug_entry ref = {
1398 | | - .type = dma_debug_page,
| 1292 | + .type = dma_debug_single,
1399 | 1293 | .dev = dev,
1400 | 1294 | .dev_addr = addr,
1401 | 1295 | .size = size,
.. | ..
1404 | 1298 |
1405 | 1299 | if (unlikely(dma_debug_disabled()))
1406 | 1300 | return;
1407 | | -
1408 | | - if (map_single)
1409 | | - ref.type = dma_debug_single;
1410 | | -
1411 | 1301 | check_unmap(&ref);
1412 | 1302 | }
1413 | | -EXPORT_SYMBOL(debug_dma_unmap_page);
1414 | 1303 |
1415 | 1304 | void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
1416 | 1305 | int nents, int mapped_ents, int direction)
.. | ..
1436 | 1325 | entry->type = dma_debug_sg;
1437 | 1326 | entry->dev = dev;
1438 | 1327 | entry->pfn = page_to_pfn(sg_page(s));
1439 | | - entry->offset = s->offset,
| 1328 | + entry->offset = s->offset;
1440 | 1329 | entry->size = sg_dma_len(s);
1441 | 1330 | entry->dev_addr = sg_dma_address(s);
1442 | 1331 | entry->direction = direction;
.. | ..
1448 | 1337 | add_dma_entry(entry);
1449 | 1338 | }
1450 | 1339 | }
1451 | | -EXPORT_SYMBOL(debug_dma_map_sg);
1452 | 1340 |
1453 | 1341 | static int get_nr_mapped_entries(struct device *dev,
1454 | 1342 | struct dma_debug_entry *ref)
.. | ..
1464 | 1352 |
1465 | 1353 | if (entry)
1466 | 1354 | mapped_ents = entry->sg_mapped_ents;
1467 | | - put_hash_bucket(bucket, &flags);
| 1355 | + put_hash_bucket(bucket, flags);
1468 | 1356 |
1469 | 1357 | return mapped_ents;
1470 | 1358 | }
.. | ..
1500 | 1388 | check_unmap(&ref);
1501 | 1389 | }
1502 | 1390 | }
1503 | | -EXPORT_SYMBOL(debug_dma_unmap_sg);
1504 | 1391 |
1505 | 1392 | void debug_dma_alloc_coherent(struct device *dev, size_t size,
1506 | 1393 | dma_addr_t dma_addr, void *virt)
.. | ..
1535 | 1422 |
1536 | 1423 | add_dma_entry(entry);
1537 | 1424 | }
1538 | | -EXPORT_SYMBOL(debug_dma_alloc_coherent);
1539 | 1425 |
1540 | 1426 | void debug_dma_free_coherent(struct device *dev, size_t size,
1541 | 1427 | void *virt, dma_addr_t addr)
.. | ..
1563 | 1449 |
1564 | 1450 | check_unmap(&ref);
1565 | 1451 | }
1566 | | -EXPORT_SYMBOL(debug_dma_free_coherent);
1567 | 1452 |
1568 | 1453 | void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
1569 | 1454 | int direction, dma_addr_t dma_addr)
.. | ..
1588 | 1473 |
1589 | 1474 | add_dma_entry(entry);
1590 | 1475 | }
1591 | | -EXPORT_SYMBOL(debug_dma_map_resource);
1592 | 1476 |
1593 | 1477 | void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
1594 | 1478 | size_t size, int direction)
.. | ..
1606 | 1490 |
1607 | 1491 | check_unmap(&ref);
1608 | 1492 | }
1609 | | -EXPORT_SYMBOL(debug_dma_unmap_resource);
1610 | 1493 |
1611 | 1494 | void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1612 | 1495 | size_t size, int direction)
.. | ..
1625 | 1508 |
1626 | 1509 | check_sync(dev, &ref, true);
1627 | 1510 | }
1628 | | -EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
1629 | 1511 |
1630 | 1512 | void debug_dma_sync_single_for_device(struct device *dev,
1631 | 1513 | dma_addr_t dma_handle, size_t size,
.. | ..
1645 | 1527 |
1646 | 1528 | check_sync(dev, &ref, false);
1647 | 1529 | }
1648 | | -EXPORT_SYMBOL(debug_dma_sync_single_for_device);
1649 | | -
1650 | | -void debug_dma_sync_single_range_for_cpu(struct device *dev,
1651 | | - dma_addr_t dma_handle,
1652 | | - unsigned long offset, size_t size,
1653 | | - int direction)
1654 | | -{
1655 | | - struct dma_debug_entry ref;
1656 | | -
1657 | | - if (unlikely(dma_debug_disabled()))
1658 | | - return;
1659 | | -
1660 | | - ref.type = dma_debug_single;
1661 | | - ref.dev = dev;
1662 | | - ref.dev_addr = dma_handle;
1663 | | - ref.size = offset + size;
1664 | | - ref.direction = direction;
1665 | | - ref.sg_call_ents = 0;
1666 | | -
1667 | | - check_sync(dev, &ref, true);
1668 | | -}
1669 | | -EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
1670 | | -
1671 | | -void debug_dma_sync_single_range_for_device(struct device *dev,
1672 | | - dma_addr_t dma_handle,
1673 | | - unsigned long offset,
1674 | | - size_t size, int direction)
1675 | | -{
1676 | | - struct dma_debug_entry ref;
1677 | | -
1678 | | - if (unlikely(dma_debug_disabled()))
1679 | | - return;
1680 | | -
1681 | | - ref.type = dma_debug_single;
1682 | | - ref.dev = dev;
1683 | | - ref.dev_addr = dma_handle;
1684 | | - ref.size = offset + size;
1685 | | - ref.direction = direction;
1686 | | - ref.sg_call_ents = 0;
1687 | | -
1688 | | - check_sync(dev, &ref, false);
1689 | | -}
1690 | | -EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
1691 | 1530 |
1692 | 1531 | void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1693 | 1532 | int nelems, int direction)
.. | ..
1720 | 1559 | check_sync(dev, &ref, true);
1721 | 1560 | }
1722 | 1561 | }
1723 | | -EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
1724 | 1562 |
1725 | 1563 | void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1726 | 1564 | int nelems, int direction)
.. | ..
1752 | 1590 | check_sync(dev, &ref, false);
1753 | 1591 | }
1754 | 1592 | }
1755 | | -EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
1756 | 1593 |
1757 | 1594 | static int __init dma_debug_driver_setup(char *str)
1758 | 1595 | {
.. | ..
1765 | 1602 | }
1766 | 1603 |
1767 | 1604 | if (current_driver_name[0])
1768 | | - pr_info("DMA-API: enable driver filter for driver [%s]\n",
| 1605 | + pr_info("enable driver filter for driver [%s]\n",
1769 | 1606 | current_driver_name);
1770 | 1607 |
1771 | 1608 |