@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
  *
@@ -6,20 +7,6 @@
  * and Ben. Herrenschmidt, IBM Corporation
  *
  * Dynamic DMA mapping support, bus-independent parts.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
 
@@ -47,6 +34,7 @@
 #include <asm/fadump.h>
 #include <asm/vio.h>
 #include <asm/tce.h>
+#include <asm/mmu_context.h>
 
 #define DBG(...)
 
@@ -184,7 +172,6 @@
 	int largealloc = npages > 15;
 	int pass = 0;
 	unsigned long align_mask;
-	unsigned long boundary_size;
 	unsigned long flags;
 	unsigned int pool_nr;
 	struct iommu_pool *pool;
@@ -197,11 +184,11 @@
 	if (unlikely(npages == 0)) {
 		if (printk_ratelimit())
 			WARN_ON(1);
-		return IOMMU_MAPPING_ERROR;
+		return DMA_MAPPING_ERROR;
 	}
 
 	if (should_fail_iommu(dev))
-		return IOMMU_MAPPING_ERROR;
+		return DMA_MAPPING_ERROR;
 
 	/*
 	 * We don't need to disable preemption here because any CPU can
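Reviewer note: IOMMU_MAPPING_ERROR was a powerpc-private failure sentinel; this hunk (and the ones that follow) switch to the generic DMA_MAPPING_ERROR from <linux/dma-mapping.h>, so failures propagate in a form the common DMA API already understands. A minimal caller-side sketch, assuming nothing beyond the generic API (example_map is hypothetical):

```c
#include <linux/dma-mapping.h>

/* Hypothetical caller: with the generic DMA_MAPPING_ERROR sentinel,
 * drivers test mapping failures via dma_mapping_error() instead of
 * comparing against an arch-private constant. */
static int example_map(struct device *dev, void *buf, size_t len,
		       dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;	/* mapping failed */
	return 0;
}
```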
@@ -248,15 +235,9 @@
 		}
 	}
 
-	if (dev)
-		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
-				      1 << tbl->it_page_shift);
-	else
-		boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
-	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
-
 	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
-			boundary_size >> tbl->it_page_shift, align_mask);
+			dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift),
+			align_mask);
 	if (n == -1) {
 		if (likely(pass == 0)) {
 			/* First try the pool from the start */
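For reference, the open-coded boundary computation above is replaced by a generic helper; its shape (as added to include/linux/dma-mapping.h, quoted from memory, so treat as a sketch) is:

```c
/* Sketch of the generic helper: returns the segment boundary expressed
 * in IOMMU pages. With no device it falls back to a 4 GiB boundary,
 * matching the removed open-coded default. */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
		unsigned int page_shift)
{
	if (!dev)
		return (U32_MAX >> page_shift) + 1;
	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}
```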
@@ -277,7 +258,7 @@
 		} else {
 			/* Give up */
 			spin_unlock_irqrestore(&(pool->lock), flags);
-			return IOMMU_MAPPING_ERROR;
+			return DMA_MAPPING_ERROR;
 		}
 	}
 
@@ -309,13 +290,13 @@
 			      unsigned long attrs)
 {
 	unsigned long entry;
-	dma_addr_t ret = IOMMU_MAPPING_ERROR;
+	dma_addr_t ret = DMA_MAPPING_ERROR;
 	int build_fail;
 
 	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
 
-	if (unlikely(entry == IOMMU_MAPPING_ERROR))
-		return IOMMU_MAPPING_ERROR;
+	if (unlikely(entry == DMA_MAPPING_ERROR))
+		return DMA_MAPPING_ERROR;
 
 	entry += tbl->it_offset;	/* Offset into real TCE table */
 	ret = entry << tbl->it_page_shift;	/* Set the return dma address */
@@ -327,12 +308,12 @@
 
 	/* tbl->it_ops->set() only returns non-zero for transient errors.
 	 * Clean up the table bitmap in this case and return
-	 * IOMMU_MAPPING_ERROR. For all other errors the functionality is
+	 * DMA_MAPPING_ERROR. For all other errors the functionality is
 	 * not altered.
 	 */
 	if (unlikely(build_fail)) {
 		__iommu_free(tbl, ret, npages);
-		return IOMMU_MAPPING_ERROR;
+		return DMA_MAPPING_ERROR;
 	}
 
 	/* Flush/invalidate TLB caches if necessary */
@@ -477,7 +458,7 @@
 		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
 
 		/* Handle failure */
-		if (unlikely(entry == IOMMU_MAPPING_ERROR)) {
+		if (unlikely(entry == DMA_MAPPING_ERROR)) {
 			if (!(attrs & DMA_ATTR_NO_WARN) &&
 			    printk_ratelimit())
 				dev_info(dev, "iommu_alloc failed, tbl %p "
@@ -544,7 +525,7 @@
 	 */
 	if (outcount < incount) {
 		outs = sg_next(outs);
-		outs->dma_address = IOMMU_MAPPING_ERROR;
+		outs->dma_address = DMA_MAPPING_ERROR;
 		outs->dma_length = 0;
 	}
 
@@ -562,7 +543,7 @@
 			npages = iommu_num_pages(s->dma_address, s->dma_length,
 						 IOMMU_PAGE_SIZE(tbl));
 			__iommu_free(tbl, vaddr, npages);
-			s->dma_address = IOMMU_MAPPING_ERROR;
+			s->dma_address = DMA_MAPPING_ERROR;
 			s->dma_length = 0;
 		}
 		if (s == outs)
@@ -645,11 +626,54 @@
 #endif
 }
 
+static void iommu_table_reserve_pages(struct iommu_table *tbl,
+		unsigned long res_start, unsigned long res_end)
+{
+	int i;
+
+	WARN_ON_ONCE(res_end < res_start);
+	/*
+	 * Reserve page 0 so it will not be used for any mappings.
+	 * This avoids buggy drivers that consider page 0 to be invalid
+	 * to crash the machine or even lose data.
+	 */
+	if (tbl->it_offset == 0)
+		set_bit(0, tbl->it_map);
+
+	tbl->it_reserved_start = res_start;
+	tbl->it_reserved_end = res_end;
+
+	/* Bail if the (non-empty) range lies entirely outside the table */
+	if (res_start && res_end &&
+			(tbl->it_offset + tbl->it_size < res_start ||
+			 res_end < tbl->it_offset))
+		return;
+
+	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
+		set_bit(i - tbl->it_offset, tbl->it_map);
+}
+
+static void iommu_table_release_pages(struct iommu_table *tbl)
+{
+	int i;
+
+	/*
+	 * In case we have reserved the first bit, we should not emit
+	 * the warning below.
+	 */
+	if (tbl->it_offset == 0)
+		clear_bit(0, tbl->it_map);
+
+	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
+		clear_bit(i - tbl->it_offset, tbl->it_map);
+}
+
 /*
  * Build an iommu_table structure. This contains a bit map which
  * is used to manage allocation of the tce space.
  */
-struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
+struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
+		unsigned long res_start, unsigned long res_end)
 {
 	unsigned long sz;
 	static int welcomed = 0;
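A hedged sketch of how a platform might call the extended iommu_init_table(); the wrapper and its parameters are illustrative, not taken from this patch:

```c
/* Hypothetical call site: reserve a firmware-owned window up front so
 * iommu_range_alloc() never hands out those TCEs. res_start/res_end are
 * in IOMMU pages, in the same number space as tbl->it_offset; passing
 * 0, 0 keeps the old behaviour. */
static struct iommu_table *example_setup(struct iommu_table *tbl, int nid,
		unsigned long res_start, unsigned long res_end)
{
	if (res_end <= res_start)
		return iommu_init_table(tbl, nid, 0, 0);
	return iommu_init_table(tbl, nid, res_start, res_end);
}
```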
@@ -668,13 +692,7 @@
 	tbl->it_map = page_address(page);
 	memset(tbl->it_map, 0, sz);
 
-	/*
-	 * Reserve page 0 so it will not be used for any mappings.
-	 * This avoids buggy drivers that consider page 0 to be invalid
-	 * to crash the machine or even lose data.
-	 */
-	if (tbl->it_offset == 0)
-		set_bit(0, tbl->it_map);
+	iommu_table_reserve_pages(tbl, res_start, res_end);
 
 	/* We only split the IOMMU table if we have 1GB or more of space */
 	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
@@ -726,12 +744,7 @@
 		return;
 	}
 
-	/*
-	 * In case we have reserved the first bit, we should not emit
-	 * the warning below.
-	 */
-	if (tbl->it_offset == 0)
-		clear_bit(0, tbl->it_map);
+	iommu_table_release_pages(tbl);
 
 	/* verify that table contains no entries */
 	if (!bitmap_empty(tbl->it_map, tbl->it_size))
@@ -776,7 +789,7 @@
 		unsigned long mask, enum dma_data_direction direction,
 		unsigned long attrs)
 {
-	dma_addr_t dma_handle = IOMMU_MAPPING_ERROR;
+	dma_addr_t dma_handle = DMA_MAPPING_ERROR;
 	void *vaddr;
 	unsigned long uaddr;
 	unsigned int npages, align;
@@ -796,7 +809,7 @@
 		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
 					 mask >> tbl->it_page_shift, align,
 					 attrs);
-		if (dma_handle == IOMMU_MAPPING_ERROR) {
+		if (dma_handle == DMA_MAPPING_ERROR) {
 			if (!(attrs & DMA_ATTR_NO_WARN) &&
 			    printk_ratelimit()) {
 				dev_info(dev, "iommu_alloc failed, tbl %p "
@@ -868,7 +881,7 @@
 	io_order = get_iommu_order(size, tbl);
 	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
 			      mask >> tbl->it_page_shift, io_order, 0);
-	if (mapping == IOMMU_MAPPING_ERROR) {
+	if (mapping == DMA_MAPPING_ERROR) {
 		free_pages((unsigned long)ret, order);
 		return NULL;
 	}
@@ -993,25 +1006,32 @@
 }
 EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
 
-long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
-		unsigned long *hpa, enum dma_data_direction *direction)
+long iommu_tce_xchg_no_kill(struct mm_struct *mm,
+		struct iommu_table *tbl,
+		unsigned long entry, unsigned long *hpa,
+		enum dma_data_direction *direction)
 {
 	long ret;
+	unsigned long size = 0;
 
-	ret = tbl->it_ops->exchange(tbl, entry, hpa, direction);
-
+	ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, false);
 	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
-			(*direction == DMA_BIDIRECTIONAL)))
+			(*direction == DMA_BIDIRECTIONAL)) &&
+			!mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
+					&size))
 		SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));
-
-	/* if (unlikely(ret))
-		pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
-			__func__, hwaddr, entry << tbl->it_page_shift,
-				hwaddr, ret); */
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(iommu_tce_xchg);
+EXPORT_SYMBOL_GPL(iommu_tce_xchg_no_kill);
+
+void iommu_tce_kill(struct iommu_table *tbl,
+		unsigned long entry, unsigned long pages)
+{
+	if (tbl->it_ops->tce_kill)
+		tbl->it_ops->tce_kill(tbl, entry, pages, false);
+}
+EXPORT_SYMBOL_GPL(iommu_tce_kill);
 
 int iommu_take_ownership(struct iommu_table *tbl)
 {
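The point of splitting the old iommu_tce_xchg() into iommu_tce_xchg_no_kill() plus iommu_tce_kill() is batching: a caller can update a run of TCEs and pay for a single TCE invalidation at the end rather than one per entry. A sketch of the intended calling pattern (update_range and its arrays are hypothetical):

```c
/* Hypothetical caller: exchange each entry without killing, then issue
 * one TCE-kill covering the whole range. */
static long update_range(struct mm_struct *mm, struct iommu_table *tbl,
		unsigned long entry, unsigned long pages,
		unsigned long *hpas, enum dma_data_direction *dirs)
{
	unsigned long i;
	long ret = 0;

	for (i = 0; i < pages && !ret; ++i)
		ret = iommu_tce_xchg_no_kill(mm, tbl, entry + i,
					     &hpas[i], &dirs[i]);
	iommu_tce_kill(tbl, entry, pages);	/* one flush for the batch */
	return ret;
}
```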
@@ -1025,22 +1045,21 @@
 	 * requires exchange() callback defined so if it is not
 	 * implemented, we disallow taking ownership over the table.
 	 */
-	if (!tbl->it_ops->exchange)
+	if (!tbl->it_ops->xchg_no_kill)
 		return -EINVAL;
 
 	spin_lock_irqsave(&tbl->large_pool.lock, flags);
 	for (i = 0; i < tbl->nr_pools; i++)
 		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
 
-	if (tbl->it_offset == 0)
-		clear_bit(0, tbl->it_map);
+	iommu_table_release_pages(tbl);
 
 	if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
 		pr_err("iommu_tce: it_map is not empty");
 		ret = -EBUSY;
-		/* Restore bit#0 set by iommu_init_table() */
-		if (tbl->it_offset == 0)
-			set_bit(0, tbl->it_map);
+		/* Undo iommu_table_release_pages, i.e. restore bit#0, etc */
+		iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
+				tbl->it_reserved_end);
 	} else {
 		memset(tbl->it_map, 0xff, sz);
 	}
@@ -1063,9 +1082,8 @@
 
 	memset(tbl->it_map, 0, sz);
 
-	/* Restore bit#0 set by iommu_init_table() */
-	if (tbl->it_offset == 0)
-		set_bit(0, tbl->it_map);
+	iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
+			tbl->it_reserved_end);
 
 	for (i = 0; i < tbl->nr_pools; i++)
 		spin_unlock(&tbl->pools[i].lock);
@@ -1073,11 +1091,8 @@
 }
 EXPORT_SYMBOL_GPL(iommu_release_ownership);
 
-int iommu_add_device(struct device *dev)
+int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
 {
-	struct iommu_table *tbl;
-	struct iommu_table_group_link *tgl;
-
 	/*
 	 * The sysfs entries should be populated before
 	 * binding IOMMU group. If sysfs entries aren't
@@ -1086,39 +1101,17 @@
 	if (!device_is_registered(dev))
 		return -ENOENT;
 
-	if (dev->iommu_group) {
+	if (device_iommu_mapped(dev)) {
 		pr_debug("%s: Skipping device %s with iommu group %d\n",
 			 __func__, dev_name(dev),
 			 iommu_group_id(dev->iommu_group));
 		return -EBUSY;
 	}
 
-	tbl = get_iommu_table_base(dev);
-	if (!tbl) {
-		pr_debug("%s: Skipping device %s with no tbl\n",
-			 __func__, dev_name(dev));
-		return 0;
-	}
-
-	tgl = list_first_entry_or_null(&tbl->it_group_list,
-			struct iommu_table_group_link, next);
-	if (!tgl) {
-		pr_debug("%s: Skipping device %s with no group\n",
-			 __func__, dev_name(dev));
-		return 0;
-	}
 	pr_debug("%s: Adding %s to iommu group %d\n",
-		 __func__, dev_name(dev),
-		 iommu_group_id(tgl->table_group->group));
+		 __func__, dev_name(dev), iommu_group_id(table_group->group));
 
-	if (PAGE_SIZE < IOMMU_PAGE_SIZE(tbl)) {
-		pr_err("%s: Invalid IOMMU page size %lx (%lx) on %s\n",
-		       __func__, IOMMU_PAGE_SIZE(tbl),
-		       PAGE_SIZE, dev_name(dev));
-		return -EINVAL;
-	}
-
-	return iommu_group_add_device(tgl->table_group->group, dev);
+	return iommu_group_add_device(table_group->group, dev);
 }
 EXPORT_SYMBOL_GPL(iommu_add_device);
 
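For context, device_iommu_mapped() is the driver-core accessor that replaces open-coded dev->iommu_group tests; at the time of this change it is, roughly (per include/linux/device.h):

```c
/* Paraphrased from include/linux/device.h of this era. */
static inline bool device_iommu_mapped(struct device *dev)
{
	return (dev->iommu_group != NULL);
}
```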
@@ -1129,7 +1122,7 @@
 	 * and we needn't detach them from the associated
 	 * IOMMU groups
 	 */
-	if (!dev->iommu_group) {
+	if (!device_iommu_mapped(dev)) {
 		pr_debug("iommu_tce: skipping device %s with no tbl\n",
 			 dev_name(dev));
 		return;
@@ -1138,31 +1131,4 @@
 	iommu_group_remove_device(dev);
 }
 EXPORT_SYMBOL_GPL(iommu_del_device);
-
-static int tce_iommu_bus_notifier(struct notifier_block *nb,
-		unsigned long action, void *data)
-{
-	struct device *dev = data;
-
-	switch (action) {
-	case BUS_NOTIFY_ADD_DEVICE:
-		return iommu_add_device(dev);
-	case BUS_NOTIFY_DEL_DEVICE:
-		if (dev->iommu_group)
-			iommu_del_device(dev);
-		return 0;
-	default:
-		return 0;
-	}
-}
-
-static struct notifier_block tce_iommu_bus_nb = {
-	.notifier_call = tce_iommu_bus_notifier,
-};
-
-int __init tce_iommu_bus_notifier_init(void)
-{
-	bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
-	return 0;
-}
 #endif /* CONFIG_IOMMU_API */