| .. | .. |
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
| 1 | 2 | /* |
| 2 | 3 | * Dynamic DMA mapping support for AMD Hammer. |
| 3 | 4 | * |
| .. | .. |
| 5 | 6 | * This allows to use PCI devices that only support 32bit addresses on systems |
| 6 | 7 | * with more than 4GB. |
| 7 | 8 | * |
| 8 | | - * See Documentation/DMA-API-HOWTO.txt for the interface specification. |
| 9 | + * See Documentation/core-api/dma-api-howto.rst for the interface specification. |
| 9 | 10 | * |
| 10 | 11 | * Copyright 2002 Andi Kleen, SuSE Labs. |
| 11 | | - * Subject to the GNU General Public License v2 only. |
| 12 | 12 | */ |
| 13 | 13 | |
| 14 | 14 | #include <linux/types.h> |
| .. | .. |
| 32 | 32 | #include <linux/gfp.h> |
| 33 | 33 | #include <linux/atomic.h> |
| 34 | 34 | #include <linux/dma-direct.h> |
| 35 | +#include <linux/dma-map-ops.h> |
| 35 | 36 | #include <asm/mtrr.h> |
| 36 | | -#include <asm/pgtable.h> |
| 37 | 37 | #include <asm/proto.h> |
| 38 | 38 | #include <asm/iommu.h> |
| 39 | 39 | #include <asm/gart.h> |
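The new `<linux/dma-map-ops.h>` include reflects the split (around v5.10, as far as I can tell) of the implementer-facing DMA declarations out of `<linux/dma-mapping.h>`: ordinary drivers keep using the public header, while code that provides a `struct dma_map_ops`, like this GART driver, now pulls in the new one. A minimal sketch of the two sides:

```c
/* Consumers of the DMA API: only the public header is needed. */
#include <linux/dma-mapping.h>	/* dma_map_page(), dma_mapping_error(), ... */

/* Providers of the ops (IOMMU implementations such as this file): */
#include <linux/dma-map-ops.h>	/* struct dma_map_ops and related helpers */
```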
| .. | .. |
| 49 | 49 | static unsigned long iommu_pages; /* .. and in pages */ |
| 50 | 50 | |
| 51 | 51 | static u32 *iommu_gatt_base; /* Remapping table */ |
| 52 | | - |
| 53 | | -static dma_addr_t bad_dma_addr; |
| 54 | 52 | |
| 55 | 53 | /* |
| 56 | 54 | * If this is disabled the IOMMU will use an optimized flushing strategy |
| .. | .. |
| 73 | 71 | #define GPTE_ENCODE(x) \ |
| 74 | 72 | (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT) |
| 75 | 73 | #define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28)) |
| 76 | | - |
| 77 | | -#define EMERGENCY_PAGES 32 /* = 128KB */ |
| 78 | 74 | |
| 79 | 75 | #ifdef CONFIG_AGP |
| 80 | 76 | #define AGPEXTERN extern |
| .. | .. |
| 101 | 97 | |
| 102 | 98 | base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev), |
| 103 | 99 | PAGE_SIZE) >> PAGE_SHIFT; |
| 104 | | - boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1, |
| 105 | | - PAGE_SIZE) >> PAGE_SHIFT; |
| 100 | + boundary_size = dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT); |
| 106 | 101 | |
| 107 | 102 | spin_lock_irqsave(&iommu_bitmap_lock, flags); |
| 108 | 103 | offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit, |
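The open-coded boundary arithmetic is replaced by `dma_get_seg_boundary_nr_pages()`, which returns the device's DMA segment boundary expressed as a number of pages. As far as I can tell the helper boils down to the following (a sketch of its intent, not the verbatim kernel implementation), which absorbs the `ALIGN(... + 1, PAGE_SIZE) >> PAGE_SHIFT` dance the old code did by hand:

```c
/* Sketch: convert the segment-boundary mask (e.g. 0xffffffff) into a page count. */
static unsigned long seg_boundary_nr_pages(struct device *dev, unsigned int page_shift)
{
	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}
```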
| .. | .. |
| 155 | 150 | |
| 156 | 151 | #ifdef CONFIG_IOMMU_LEAK |
| 157 | 152 | /* Debugging aid for drivers that don't free their IOMMU tables */ |
| 158 | | -static int leak_trace; |
| 159 | | -static int iommu_leak_pages = 20; |
| 160 | | - |
| 161 | 153 | static void dump_leak(void) |
| 162 | 154 | { |
| 163 | 155 | static int dump; |
| .. | .. |
| 166 | 158 | return; |
| 167 | 159 | dump = 1; |
| 168 | 160 | |
| 169 | | - show_stack(NULL, NULL); |
| 161 | + show_stack(NULL, NULL, KERN_ERR); |
| 170 | 162 | debug_dma_dump_mappings(NULL); |
| 171 | 163 | } |
| 172 | 164 | #endif |
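`show_stack()` grew a third argument, a printk severity prefix, around v5.8, so the backtrace from `dump_leak()` now comes out at `KERN_ERR` rather than at the default console level; the two NULLs still mean "current task, current stack pointer". The signature these calls assume:

```c
/* Since ~v5.8: loglvl selects the printk severity of the stack dump. */
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl);
```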
| .. | .. |
| 184 | 176 | */ |
| 185 | 177 | |
| 186 | 178 | dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size); |
| 187 | | - |
| 188 | | - if (size > PAGE_SIZE*EMERGENCY_PAGES) { |
| 189 | | - if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL) |
| 190 | | - panic("PCI-DMA: Memory would be corrupted\n"); |
| 191 | | - if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL) |
| 192 | | - panic(KERN_ERR |
| 193 | | - "PCI-DMA: Random memory would be DMAed\n"); |
| 194 | | - } |
| 195 | 179 | #ifdef CONFIG_IOMMU_LEAK |
| 196 | 180 | dump_leak(); |
| 197 | 181 | #endif |
| .. | .. |
| 200 | 184 | static inline int |
| 201 | 185 | need_iommu(struct device *dev, unsigned long addr, size_t size) |
| 202 | 186 | { |
| 203 | | - return force_iommu || !dma_capable(dev, addr, size); |
| 187 | + return force_iommu || !dma_capable(dev, addr, size, true); |
| 204 | 188 | } |
| 205 | 189 | |
| 206 | 190 | static inline int |
| 207 | 191 | nonforced_iommu(struct device *dev, unsigned long addr, size_t size) |
| 208 | 192 | { |
| 209 | | - return !dma_capable(dev, addr, size); |
| 193 | + return !dma_capable(dev, addr, size, true); |
| 210 | 194 | } |
| 211 | 195 | |
| 212 | 196 | /* Map a single continuous physical area into the IOMMU. |
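`dma_capable()` gained a fourth parameter (around v5.5, as best I can tell): a flag saying whether the address refers to page-backed RAM, which lets the helper apply an extra low-address sanity check on configurations with a 32-bit `dma_addr_t`. Since the GART only ever maps system memory, both call sites pass `true`. The shape of the static inline in `<linux/dma-direct.h>` that these calls assume:

```c
#include <linux/dma-direct.h>

/* Can the device reach [addr, addr + size) without any IOMMU help? */
bool dma_capable(struct device *dev, dma_addr_t addr, size_t size, bool is_ram);
```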
| .. | .. |
| 220 | 204 | int i; |
| 221 | 205 | |
| 222 | 206 | if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR)) |
| 223 | | - return bad_dma_addr; |
| 207 | + return DMA_MAPPING_ERROR; |
| 224 | 208 | |
| 225 | 209 | iommu_page = alloc_iommu(dev, npages, align_mask); |
| 226 | 210 | if (iommu_page == -1) { |
| .. | .. |
| 229 | 213 | if (panic_on_overflow) |
| 230 | 214 | panic("dma_map_area overflow %lu bytes\n", size); |
| 231 | 215 | iommu_full(dev, size, dir); |
| 232 | | - return bad_dma_addr; |
| 216 | + return DMA_MAPPING_ERROR; |
| 233 | 217 | } |
| 234 | 218 | |
| 235 | 219 | for (i = 0; i < npages; i++) { |
| .. | .. |
| 247 | 231 | { |
| 248 | 232 | unsigned long bus; |
| 249 | 233 | phys_addr_t paddr = page_to_phys(page) + offset; |
| 250 | | - |
| 251 | | - if (!dev) |
| 252 | | - dev = &x86_dma_fallback_dev; |
| 253 | 234 | |
| 254 | 235 | if (!need_iommu(dev, paddr, size)) |
| 255 | 236 | return paddr; |
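The `x86_dma_fallback_dev` crutch for mapping requests that arrived without a device is gone; the fallback device was removed from the x86 DMA code around v5.4, and the DMA API has required a real `struct device` ever since. Together with the `DMA_MAPPING_ERROR` returns introduced above, the caller-side pattern this driver now serves looks roughly like this (`pdev`, `page` and `len` are illustrative placeholders, not names from this patch):

```c
#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Hypothetical caller: map one page for CPU-to-device DMA and check the result. */
static int example_map_one(struct pci_dev *pdev, struct page *page, size_t len)
{
	dma_addr_t addr;

	addr = dma_map_page(&pdev->dev, page, 0, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, addr))
		return -ENOMEM;		/* gart_map_page() returned DMA_MAPPING_ERROR */

	/* ... run the transfer ... */

	dma_unmap_page(&pdev->dev, addr, len, DMA_TO_DEVICE);
	return 0;
}
```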
| .. | .. |
| 271 | 252 | int npages; |
| 272 | 253 | int i; |
| 273 | 254 | |
| 274 | | - if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE || |
| 255 | + if (WARN_ON_ONCE(dma_addr == DMA_MAPPING_ERROR)) |
| 256 | + return; |
| 257 | + |
| 258 | + /* |
| 259 | + * This driver will not always use a GART mapping, but might have |
| 260 | + * created a direct mapping instead. If that is the case there is |
| 261 | + * nothing to unmap here. |
| 262 | + */ |
| 263 | + if (dma_addr < iommu_bus_base || |
| 275 | 264 | dma_addr >= iommu_bus_base + iommu_size) |
| 276 | 265 | return; |
| 277 | 266 | |
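Because the ops fall back to the direct mapping whenever the device can already reach the buffer, `gart_unmap_page()` may now be handed either kind of address: the new code first catches the error cookie, then silently ignores anything that does not fall inside the remapping window. The test amounts to this little predicate (a hypothetical helper name, just to spell out the range check against the file's statics):

```c
/* Hypothetical helper: is this bus address one of ours (i.e. inside the GART)? */
static bool gart_addr_mapped(dma_addr_t dma_addr)
{
	if (dma_addr == DMA_MAPPING_ERROR)
		return false;		/* the mapping failed, nothing was allocated */
	return dma_addr >= iommu_bus_base &&
	       dma_addr < iommu_bus_base + iommu_size;
}
```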
| .. | .. |
| 315 | 304 | |
| 316 | 305 | if (nonforced_iommu(dev, addr, s->length)) { |
| 317 | 306 | addr = dma_map_area(dev, addr, s->length, dir, 0); |
| 318 | | - if (addr == bad_dma_addr) { |
| 307 | + if (addr == DMA_MAPPING_ERROR) { |
| 319 | 308 | if (i > 0) |
| 320 | 309 | gart_unmap_sg(dev, sg, i, dir, 0); |
| 321 | 310 | nents = 0; |
| .. | .. |
| 399 | 388 | if (nents == 0) |
| 400 | 389 | return 0; |
| 401 | 390 | |
| 402 | | - if (!dev) |
| 403 | | - dev = &x86_dma_fallback_dev; |
| 404 | | - |
| 405 | 391 | out = 0; |
| 406 | 392 | start = 0; |
| 407 | 393 | start_sg = sg; |
| .. | .. |
| 471 | 457 | |
| 472 | 458 | iommu_full(dev, pages << PAGE_SHIFT, dir); |
| 473 | 459 | for_each_sg(sg, s, nents, i) |
| 474 | | - s->dma_address = bad_dma_addr; |
| 460 | + s->dma_address = DMA_MAPPING_ERROR; |
| 475 | 461 | return 0; |
| 476 | 462 | } |
| 477 | 463 | |
| .. | .. |
| 490 | 476 | *dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size, |
| 491 | 477 | DMA_BIDIRECTIONAL, (1UL << get_order(size)) - 1); |
| 492 | 478 | flush_gart(); |
| 493 | | - if (unlikely(*dma_addr == bad_dma_addr)) |
| 479 | + if (unlikely(*dma_addr == DMA_MAPPING_ERROR)) |
| 494 | 480 | goto out_free; |
| 495 | 481 | return vaddr; |
| 496 | 482 | out_free: |
| .. | .. |
| 505 | 491 | { |
| 506 | 492 | gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0); |
| 507 | 493 | dma_direct_free(dev, size, vaddr, dma_addr, attrs); |
| 508 | | -} |
| 509 | | - |
| 510 | | -static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr) |
| 511 | | -{ |
| 512 | | - return (dma_addr == bad_dma_addr); |
| 513 | 494 | } |
| 514 | 495 | |
| 515 | 496 | static int no_agp; |
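`gart_mapping_error()` can go because the `.mapping_error` callback was removed from `struct dma_map_ops` (in v5.0, if I remember correctly): instead of asking each implementation, the core now compares the returned handle against the shared `DMA_MAPPING_ERROR` cookie. The generic check is roughly:

```c
/* Rough shape of the generic helper in <linux/dma-mapping.h> (not verbatim). */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	if (unlikely(dma_addr == DMA_MAPPING_ERROR))
		return -ENOMEM;
	return 0;
}
```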
| .. | .. |
| 528 | 509 | iommu_size -= round_up(a, PMD_PAGE_SIZE) - a; |
| 529 | 510 | |
| 530 | 511 | if (iommu_size < 64*1024*1024) { |
| 531 | | - pr_warning( |
| 532 | | - "PCI-DMA: Warning: Small IOMMU %luMB." |
| 512 | + pr_warn("PCI-DMA: Warning: Small IOMMU %luMB." |
| 533 | 513 | " Consider increasing the AGP aperture in BIOS\n", |
| 534 | | - iommu_size >> 20); |
| 514 | + iommu_size >> 20); |
| 535 | 515 | } |
| 536 | 516 | |
| 537 | 517 | return iommu_size; |
| .. | .. |
| 683 | 663 | |
| 684 | 664 | nommu: |
| 685 | 665 | /* Should not happen anymore */ |
| 686 | | - pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU\n" |
| 687 | | - "falling back to iommu=soft.\n"); |
| 666 | + pr_warn("PCI-DMA: More than 4GB of RAM and no IOMMU - falling back to iommu=soft.\n"); |
| 688 | 667 | return -1; |
| 689 | 668 | } |
| 690 | 669 | |
| .. | .. |
| 695 | 674 | .unmap_page = gart_unmap_page, |
| 696 | 675 | .alloc = gart_alloc_coherent, |
| 697 | 676 | .free = gart_free_coherent, |
| 698 | | - .mapping_error = gart_mapping_error, |
| 677 | + .mmap = dma_common_mmap, |
| 678 | + .get_sgtable = dma_common_get_sgtable, |
| 699 | 679 | .dma_supported = dma_direct_supported, |
| 680 | + .get_required_mask = dma_direct_get_required_mask, |
| 681 | + .alloc_pages = dma_direct_alloc_pages, |
| 682 | + .free_pages = dma_direct_free_pages, |
| 700 | 683 | }; |
| 701 | 684 | |
| 702 | 685 | static void gart_iommu_shutdown(void) |
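Besides dropping `.mapping_error`, the ops table picks up several entries that newer kernels call into (mmap/get_sgtable support, the required-mask query, and the page-allocation hooks), and each of them can be wired straight to the generic dma-direct/common helpers, as the hunk does. Reduced to just the borrowed entries, the pattern looks like this (a sketch, not a complete ops table):

```c
#include <linux/dma-map-ops.h>

/* Sketch: an ops table that delegates the non-IOMMU-specific pieces. */
static const struct dma_map_ops example_dma_ops = {
	.mmap			= dma_common_mmap,
	.get_sgtable		= dma_common_get_sgtable,
	.dma_supported		= dma_direct_supported,
	.get_required_mask	= dma_direct_get_required_mask,
	.alloc_pages		= dma_direct_alloc_pages,
	.free_pages		= dma_direct_free_pages,
};
```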
| .. | .. |
| 730 | 713 | unsigned long aper_base, aper_size; |
| 731 | 714 | unsigned long start_pfn, end_pfn; |
| 732 | 715 | unsigned long scratch; |
| 733 | | - long i; |
| 734 | 716 | |
| 735 | 717 | if (!amd_nb_has_feature(AMD_NB_GART)) |
| 736 | 718 | return 0; |
| .. | .. |
| 750 | 732 | !gart_iommu_aperture || |
| 751 | 733 | (no_agp && init_amd_gatt(&info) < 0)) { |
| 752 | 734 | if (max_pfn > MAX_DMA32_PFN) { |
| 753 | | - pr_warning("More than 4GB of memory but GART IOMMU not available.\n"); |
| 754 | | - pr_warning("falling back to iommu=soft.\n"); |
| 735 | + pr_warn("More than 4GB of memory but GART IOMMU not available.\n"); |
| 736 | + pr_warn("falling back to iommu=soft.\n"); |
| 755 | 737 | } |
| 756 | 738 | return 0; |
| 757 | 739 | } |
| .. | .. |
| 763 | 745 | |
| 764 | 746 | start_pfn = PFN_DOWN(aper_base); |
| 765 | 747 | if (!pfn_range_is_mapped(start_pfn, end_pfn)) |
| 766 | | - init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT); |
| 748 | + init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT, |
| 749 | + PAGE_KERNEL); |
| 767 | 750 | |
| 768 | 751 | pr_info("PCI-DMA: using GART IOMMU.\n"); |
| 769 | 752 | iommu_size = check_iommu_size(info.aper_base, aper_size); |
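`init_memory_mapping()` on x86 now takes a `pgprot_t` for the pages it adds to the direct map (the parameter appeared around v5.7, as far as I can tell), and passing `PAGE_KERNEL` keeps the previous behaviour for the aperture range:

```c
/* Map the aperture's backing memory into the kernel direct map as normal RAM. */
init_memory_mapping(start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT, PAGE_KERNEL);
```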
| .. | .. |
| 774 | 757 | if (!iommu_gart_bitmap) |
| 775 | 758 | panic("Cannot allocate iommu bitmap\n"); |
| 776 | 759 | |
| 777 | | -#ifdef CONFIG_IOMMU_LEAK |
| 778 | | - if (leak_trace) { |
| 779 | | - int ret; |
| 780 | | - |
| 781 | | - ret = dma_debug_resize_entries(iommu_pages); |
| 782 | | - if (ret) |
| 783 | | - pr_debug("PCI-DMA: Cannot trace all the entries\n"); |
| 784 | | - } |
| 785 | | -#endif |
| 786 | | - |
| 787 | | - /* |
| 788 | | - * Out of IOMMU space handling. |
| 789 | | - * Reserve some invalid pages at the beginning of the GART. |
| 790 | | - */ |
| 791 | | - bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES); |
| 792 | | - |
| 793 | 760 | pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n", |
| 794 | 761 | iommu_size >> 20); |
| 795 | 762 | |
| 796 | 763 | agp_memory_reserved = iommu_size; |
| 797 | 764 | iommu_start = aper_size - iommu_size; |
| 798 | 765 | iommu_bus_base = info.aper_base + iommu_start; |
| 799 | | - bad_dma_addr = iommu_bus_base; |
| 800 | 766 | iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT); |
| 801 | 767 | |
| 802 | 768 | /* |
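All of the setup that existed only to service the old error-reporting scheme disappears here: the `dma_debug_resize_entries()` call behind the removed `leak_trace` option, the `EMERGENCY_PAGES` reserved at the start of the aperture, and the `bad_dma_addr = iommu_bus_base` trick. None of it is needed once failures are reported with the subsystem-wide cookie, because that value can never collide with a real bus address inside the aperture:

```c
/*
 * From <linux/dma-mapping.h>: the error cookie is all-ones, so no GART pages
 * have to be kept permanently "invalid" just to have a safe value to return.
 */
#define DMA_MAPPING_ERROR	(~(dma_addr_t)0)
```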
| .. | .. |
| 838 | 804 | if (!scratch) |
| 839 | 805 | panic("Cannot allocate iommu scratch page"); |
| 840 | 806 | gart_unmapped_entry = GPTE_ENCODE(__pa(scratch)); |
| 841 | | - for (i = EMERGENCY_PAGES; i < iommu_pages; i++) |
| 842 | | - iommu_gatt_base[i] = gart_unmapped_entry; |
| 843 | 807 | |
| 844 | 808 | flush_gart(); |
| 845 | 809 | dma_ops = &gart_dma_ops; |
| .. | .. |
| 853 | 817 | { |
| 854 | 818 | int arg; |
| 855 | 819 | |
| 856 | | -#ifdef CONFIG_IOMMU_LEAK |
| 857 | | - if (!strncmp(p, "leak", 4)) { |
| 858 | | - leak_trace = 1; |
| 859 | | - p += 4; |
| 860 | | - if (*p == '=') |
| 861 | | - ++p; |
| 862 | | - if (isdigit(*p) && get_option(&p, &arg)) |
| 863 | | - iommu_leak_pages = arg; |
| 864 | | - } |
| 865 | | -#endif |
| 866 | 820 | if (isdigit(*p) && get_option(&p, &arg)) |
| 867 | 821 | iommu_size = arg; |
| 868 | 822 | if (!strncmp(p, "fullflush", 9)) |