```diff
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * IOMMU implementation for Cell Broadband Processor Architecture
  *
  * (C) Copyright IBM Corporation 2006-2008
  *
  * Author: Jeremy Kerr <jk@ozlabs.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

 #undef DEBUG
```
|---|
```diff
@@ -499,7 +486,7 @@
         window->table.it_size = size >> window->table.it_page_shift;
         window->table.it_ops = &cell_iommu_ops;

-        iommu_init_table(&window->table, iommu->nid);
+        iommu_init_table(&window->table, iommu->nid, 0, 0);

         pr_debug("\tioid %d\n", window->ioid);
         pr_debug("\tblocksize %ld\n", window->table.it_blocksize);
```
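The two extra `0, 0` arguments correspond to a reserved range that the updated `iommu_init_table()` carves out of the table's allocation bitmap before any mapping is handed out; Cell has nothing to protect, so it passes an empty range. A minimal userspace sketch of that mechanism, assuming hypothetical `res_start`/`res_end` page-index parameters (this is not the kernel implementation):

```c
/* Model of reserving a page range in an IOVA allocation bitmap,
 * as a table-init step might do. All names here are hypothetical. */
#include <assert.h>
#include <limits.h>
#include <stdio.h>

#define TABLE_PAGES 64
#define BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)

static unsigned long bitmap[TABLE_PAGES / BITS_PER_WORD + 1];

static void set_busy(unsigned long page)
{
        bitmap[page / BITS_PER_WORD] |= 1UL << (page % BITS_PER_WORD);
}

static int is_busy(unsigned long page)
{
        return !!(bitmap[page / BITS_PER_WORD] & (1UL << (page % BITS_PER_WORD)));
}

/* An empty range reserves nothing, which is what passing 0, 0 expresses. */
static void table_init(unsigned long res_start, unsigned long res_end)
{
        for (unsigned long p = res_start; p < res_end; p++)
                set_busy(p);
}

int main(void)
{
        table_init(8, 12);      /* carve out pages 8..11 */
        assert(!is_busy(7) && is_busy(8) && is_busy(11) && !is_busy(12));
        printf("pages 8-11 reserved before any DMA mapping is allocated\n");
        return 0;
}
```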
|---|
```diff
@@ -544,9 +531,10 @@
 static unsigned long cell_dma_nommu_offset;

 static unsigned long dma_iommu_fixed_base;
+static bool cell_iommu_enabled;

 /* iommu_fixed_is_weak is set if booted with iommu_fixed=weak */
-static int iommu_fixed_is_weak;
+bool iommu_fixed_is_weak;

 static struct iommu_table *cell_get_iommu_table(struct device *dev)
 {
```
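The int-to-bool change matters because the old code stored the `DMA_ATTR_WEAK_ORDERING` bit itself in `iommu_fixed_is_weak`, so the later test `iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)` was a same-value comparison rather than a truth test. With the custom ops gone, a plain `bool` states the intent. A compact demonstration of the retired trick, with the attribute bit value made up for the demo:

```c
/* Demonstrates the old int-flag trick this patch retires. */
#include <assert.h>
#include <stdbool.h>

#define DMA_ATTR_WEAK_ORDERING (1UL << 1)   /* value invented for the demo */

int main(void)
{
        unsigned long fixed_is_weak = DMA_ATTR_WEAK_ORDERING; /* old style */
        unsigned long attrs;

        /* Matching cases: weak mapping on a weak table... */
        attrs = DMA_ATTR_WEAK_ORDERING;
        assert(fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING));

        /* ...and strong on strong. */
        fixed_is_weak = 0;
        attrs = 0;
        assert(fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING));

        /* A mismatch fell through to the IOMMU path in the old code. */
        attrs = DMA_ATTR_WEAK_ORDERING;
        assert(fixed_is_weak != (attrs & DMA_ATTR_WEAK_ORDERING));

        /* The new code only ever needs a yes/no answer. */
        bool weak = true;
        assert(weak);
        return 0;
}
```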
|---|
```diff
@@ -568,103 +556,19 @@
         return &window->table;
 }

-/* A coherent allocation implies strong ordering */
-
-static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
-                                      dma_addr_t *dma_handle, gfp_t flag,
-                                      unsigned long attrs)
-{
-        if (iommu_fixed_is_weak)
-                return iommu_alloc_coherent(dev, cell_get_iommu_table(dev),
-                                            size, dma_handle,
-                                            device_to_mask(dev), flag,
-                                            dev_to_node(dev));
-        else
-                return dma_nommu_ops.alloc(dev, size, dma_handle, flag,
-                                           attrs);
-}
-
-static void dma_fixed_free_coherent(struct device *dev, size_t size,
-                                    void *vaddr, dma_addr_t dma_handle,
-                                    unsigned long attrs)
-{
-        if (iommu_fixed_is_weak)
-                iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr,
-                                    dma_handle);
-        else
-                dma_nommu_ops.free(dev, size, vaddr, dma_handle, attrs);
-}
-
-static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
-                                     unsigned long offset, size_t size,
-                                     enum dma_data_direction direction,
-                                     unsigned long attrs)
-{
-        if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
-                return dma_nommu_ops.map_page(dev, page, offset, size,
-                                              direction, attrs);
-        else
-                return iommu_map_page(dev, cell_get_iommu_table(dev), page,
-                                      offset, size, device_to_mask(dev),
-                                      direction, attrs);
-}
-
-static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr,
-                                 size_t size, enum dma_data_direction direction,
-                                 unsigned long attrs)
-{
-        if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
-                dma_nommu_ops.unmap_page(dev, dma_addr, size, direction,
-                                         attrs);
-        else
-                iommu_unmap_page(cell_get_iommu_table(dev), dma_addr, size,
-                                 direction, attrs);
-}
-
-static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg,
-                            int nents, enum dma_data_direction direction,
-                            unsigned long attrs)
-{
-        if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
-                return dma_nommu_ops.map_sg(dev, sg, nents, direction, attrs);
-        else
-                return ppc_iommu_map_sg(dev, cell_get_iommu_table(dev), sg,
-                                        nents, device_to_mask(dev),
-                                        direction, attrs);
-}
-
-static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
-                               int nents, enum dma_data_direction direction,
-                               unsigned long attrs)
-{
-        if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
-                dma_nommu_ops.unmap_sg(dev, sg, nents, direction, attrs);
-        else
-                ppc_iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents,
-                                   direction, attrs);
-}
-
-static int dma_suported_and_switch(struct device *dev, u64 dma_mask);
-
-static const struct dma_map_ops dma_iommu_fixed_ops = {
-        .alloc = dma_fixed_alloc_coherent,
-        .free = dma_fixed_free_coherent,
-        .map_sg = dma_fixed_map_sg,
-        .unmap_sg = dma_fixed_unmap_sg,
-        .dma_supported = dma_suported_and_switch,
-        .map_page = dma_fixed_map_page,
-        .unmap_page = dma_fixed_unmap_page,
-        .mapping_error = dma_iommu_mapping_error,
-};
+static u64 cell_iommu_get_fixed_address(struct device *dev);

 static void cell_dma_dev_setup(struct device *dev)
 {
-        if (get_pci_dma_ops() == &dma_iommu_ops)
+        if (cell_iommu_enabled) {
+                u64 addr = cell_iommu_get_fixed_address(dev);
+
+                if (addr != OF_BAD_ADDR)
+                        dev->archdata.dma_offset = addr + dma_iommu_fixed_base;
                 set_iommu_table_base(dev, cell_get_iommu_table(dev));
-        else if (get_pci_dma_ops() == &dma_nommu_ops)
-                set_dma_offset(dev, cell_dma_nommu_offset);
-        else
-                BUG();
+        } else {
+                dev->archdata.dma_offset = cell_dma_nommu_offset;
+        }
 }

 static void cell_pci_dma_dev_setup(struct pci_dev *dev)
```
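This is the core of the conversion: the whole hand-rolled `dma_iommu_fixed_ops` table disappears, and `cell_dma_dev_setup()` instead records, per device, both the translated window (the iommu table) and the direct 64-bit offset, leaving the generic `dma_iommu_ops` to pick a path later based on the device's DMA mask. A userspace model of the two addressing modes being prepared here — names, numbers, and the fake IOVA encoding are all illustrative, not the kernel's:

```c
/* Model of the two addressing modes cell_dma_dev_setup prepares:
 * a translated IOMMU window and a direct "fixed" linear offset. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define OF_BAD_ADDR     ((uint64_t)-1)
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

struct dev_model {
        uint64_t dma_mask;
        uint64_t fixed_base;    /* OF_BAD_ADDR if no fixed window exists */
        uint64_t dma_offset;    /* recorded at device-setup time */
};

/* Bypass is possible only for fully 64-bit-capable devices that
 * have a fixed window described in the device tree. */
static int bypass_supported(const struct dev_model *dev)
{
        return dev->dma_mask == DMA_BIT_MASK(64) &&
               dev->fixed_base != OF_BAD_ADDR;
}

static uint64_t map_page(const struct dev_model *dev, uint64_t paddr)
{
        if (bypass_supported(dev))
                return paddr + dev->dma_offset;         /* direct: add offset */
        return 0x80000000ULL | (paddr & 0xfffff);       /* pretend IOMMU IOVA */
}

int main(void)
{
        struct dev_model fast = { DMA_BIT_MASK(64), 0x800000000ULL, 0x800000000ULL };
        struct dev_model slow = { DMA_BIT_MASK(32), OF_BAD_ADDR, 0 };

        assert(map_page(&fast, 0x1000) == 0x800000000ULL + 0x1000);
        assert(map_page(&slow, 0x1000) != 0x1000);
        printf("64-bit device bypasses, 32-bit device is translated\n");
        return 0;
}
```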
|---|
```diff
@@ -681,11 +585,9 @@
         if (action != BUS_NOTIFY_ADD_DEVICE)
                 return 0;

-        /* We use the PCI DMA ops */
-        dev->dma_ops = get_pci_dma_ops();
-
+        if (cell_iommu_enabled)
+                dev->dma_ops = &dma_iommu_ops;
         cell_dma_dev_setup(dev);
-
         return 0;
 }

```
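With the PCI-versus-nommu ops indirection gone, the bus notifier can assign `dma_iommu_ops` directly whenever a device-add event fires. A minimal userspace sketch of the notifier pattern in play here, assuming invented stand-ins for the ops table and device structure:

```c
/* Model of a bus notifier: a callback invoked on device-add events,
 * which is where per-device DMA ops get wired up. Names mirror the
 * kernel pattern loosely; nothing here is the real API. */
#include <stdio.h>

#define BUS_NOTIFY_ADD_DEVICE 1

struct device { const char *name; const void *dma_ops; };

static const int dma_iommu_ops = 42;    /* stand-in for the real ops table */
static int cell_iommu_enabled = 1;

static int of_bus_notify(unsigned long action, struct device *dev)
{
        if (action != BUS_NOTIFY_ADD_DEVICE)
                return 0;

        if (cell_iommu_enabled)
                dev->dma_ops = &dma_iommu_ops;  /* one ops table for all */
        printf("%s: dma_ops %s\n", dev->name,
               dev->dma_ops ? "assigned" : "left unset");
        return 0;
}

int main(void)
{
        struct device d = { "axon-dev", NULL };
        of_bus_notify(BUS_NOTIFY_ADD_DEVICE, &d);
        return 0;
}
```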
|---|
```diff
@@ -810,7 +712,6 @@
         unsigned long base = 0, size;

         /* When no iommu is present, we use direct DMA ops */
-        set_pci_dma_ops(&dma_nommu_ops);

         /* First make sure all IOC translation is turned off */
         cell_disable_iommus();
```
|---|
```diff
@@ -895,7 +796,11 @@
         const u32 *ranges = NULL;
         int i, len, best, naddr, nsize, pna, range_size;

+        /* We can be called for platform devices that have no of_node */
         np = of_node_get(dev->of_node);
+        if (!np)
+                goto out;
+
         while (1) {
                 naddr = of_n_addr_cells(np);
                 nsize = of_n_size_cells(np);
```
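The new guard is needed because this lookup now runs for every device, and platform devices may have no OF node at all; the function walks up the device-tree parents looking for a usable DMA window. A trimmed model of that upward walk with the NULL-node guard up front — the node structure and window field are invented for the example:

```c
/* Sketch of walking up a device-tree path looking for a DMA window,
 * with the new NULL-node guard. Structure fields are invented. */
#include <stdint.h>
#include <stdio.h>

#define OF_BAD_ADDR ((uint64_t)-1)

struct node {
        struct node *parent;
        uint64_t dma_window;    /* OF_BAD_ADDR if this level has none */
};

static uint64_t get_fixed_address(struct node *np)
{
        uint64_t dev_addr = OF_BAD_ADDR;

        if (!np)                /* platform devices may have no of_node */
                goto out;

        for (; np; np = np->parent)
                if (np->dma_window != OF_BAD_ADDR) {
                        dev_addr = np->dma_window;
                        break;
                }
out:
        return dev_addr;
}

int main(void)
{
        struct node root = { NULL, 0x800000000ULL };
        struct node leaf = { &root, OF_BAD_ADDR };

        printf("leaf window: %#llx\n",
               (unsigned long long)get_fixed_address(&leaf));
        printf("no node:     %#llx\n",
               (unsigned long long)get_fixed_address(NULL));
        return 0;
}
```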
|---|
```diff
@@ -946,27 +851,10 @@
         return dev_addr;
 }

-static int dma_suported_and_switch(struct device *dev, u64 dma_mask)
+static bool cell_pci_iommu_bypass_supported(struct pci_dev *pdev, u64 mask)
 {
-        if (dma_mask == DMA_BIT_MASK(64) &&
-            cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) {
-                u64 addr = cell_iommu_get_fixed_address(dev) +
-                        dma_iommu_fixed_base;
-                dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
-                dev_dbg(dev, "iommu: fixed addr = %llx\n", addr);
-                set_dma_ops(dev, &dma_iommu_fixed_ops);
-                set_dma_offset(dev, addr);
-                return 1;
-        }
-
-        if (dma_iommu_dma_supported(dev, dma_mask)) {
-                dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
-                set_dma_ops(dev, get_pci_dma_ops());
-                cell_dma_dev_setup(dev);
-                return 1;
-        }
-
-        return 0;
+        return mask == DMA_BIT_MASK(64) &&
+                cell_iommu_get_fixed_address(&pdev->dev) != OF_BAD_ADDR;
 }

 static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
```
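The twenty-line ops-switching function collapses into a pure predicate because the generic iommu DMA code now owns the decision: when a driver sets its DMA mask, the core consults the controller's `iommu_bypass_supported` hook (wired up later in this patch) and flips between the direct and translated paths itself. A model of that dispatch under those assumptions — only the hook name mirrors the patch, the rest is illustrative:

```c
/* Model of the generic bypass decision that replaces the old
 * "switch the ops" function: the platform supplies only a yes/no
 * hook, the core picks the path. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
#define OF_BAD_ADDR     ((uint64_t)-1)

struct pci_controller_ops {
        bool (*iommu_bypass_supported)(uint64_t mask, uint64_t fixed_addr);
};

static bool cell_bypass(uint64_t mask, uint64_t fixed_addr)
{
        return mask == DMA_BIT_MASK(64) && fixed_addr != OF_BAD_ADDR;
}

static struct pci_controller_ops cell_ops = {
        .iommu_bypass_supported = cell_bypass,
};

/* What the generic dma_set_mask() path effectively does. */
static const char *set_mask(uint64_t mask, uint64_t fixed_addr)
{
        if (cell_ops.iommu_bypass_supported &&
            cell_ops.iommu_bypass_supported(mask, fixed_addr))
                return "direct (bypass)";
        return "iommu (translated)";
}

int main(void)
{
        assert(set_mask(DMA_BIT_MASK(64), 0x800000000ULL)[0] == 'd');
        assert(set_mask(DMA_BIT_MASK(32), 0x800000000ULL)[0] == 'i');
        assert(set_mask(DMA_BIT_MASK(64), OF_BAD_ADDR)[0] == 'i');
        return 0;
}
```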
|---|
```diff
@@ -1055,7 +943,7 @@
                 fbase = max(fbase, dbase + dsize);
         }

-        fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT);
+        fbase = ALIGN(fbase, 1 << IO_SEGMENT_SHIFT);
         fsize = memblock_phys_mem_size();

         if ((fbase + fsize) <= 0x800000000ul)
@@ -1075,8 +963,8 @@
         hend = hbase + htab_size_bytes;

         /* The window must start and end on a segment boundary */
-        if ((hbase != _ALIGN_UP(hbase, 1 << IO_SEGMENT_SHIFT)) ||
-            (hend != _ALIGN_UP(hend, 1 << IO_SEGMENT_SHIFT))) {
+        if ((hbase != ALIGN(hbase, 1 << IO_SEGMENT_SHIFT)) ||
+            (hend != ALIGN(hend, 1 << IO_SEGMENT_SHIFT))) {
                 pr_debug("iommu: hash window not segment aligned\n");
                 return -1;
         }
```
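`_ALIGN_UP` was a powerpc-private spelling of the generic `ALIGN(x, a)`, which rounds `x` up to the next multiple of the power-of-two `a`; the substitution is behavior-preserving. A quick check of the arithmetic, with the macro reimplemented locally and the segment size assumed:

```c
/* ALIGN(x, a) for power-of-two a rounds x up to a multiple of a;
 * reimplemented here just to demonstrate the arithmetic. */
#include <assert.h>

#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))
#define IO_SEGMENT_SHIFT 28     /* 256MB segments, assumed for the demo */

int main(void)
{
        unsigned long seg = 1UL << IO_SEGMENT_SHIFT;

        assert(ALIGN(0UL, seg) == 0);           /* already aligned */
        assert(ALIGN(1UL, seg) == seg);         /* rounds up */
        assert(ALIGN(seg, seg) == seg);         /* exact multiple kept */
        assert(ALIGN(seg + 1, seg) == 2 * seg);

        /* The segment-boundary test in the patch is "x == ALIGN(x, seg)". */
        assert(seg == ALIGN(seg, seg));
        return 0;
}
```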
|---|
```diff
@@ -1121,9 +1009,8 @@
                 cell_iommu_setup_window(iommu, np, dbase, dsize, 0);
         }

-        dma_iommu_ops.dma_supported = dma_suported_and_switch;
-        set_pci_dma_ops(&dma_iommu_ops);
-
+        cell_pci_controller_ops.iommu_bypass_supported =
+                cell_pci_iommu_bypass_supported;
         return 0;
 }

```
|---|
```diff
@@ -1144,33 +1031,13 @@
         pciep = of_find_node_by_type(NULL, "pcie-endpoint");

         if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0))
-                iommu_fixed_is_weak = DMA_ATTR_WEAK_ORDERING;
+                iommu_fixed_is_weak = true;

         of_node_put(pciep);

         return 1;
 }
 __setup("iommu_fixed=", setup_iommu_fixed);
-
-static u64 cell_dma_get_required_mask(struct device *dev)
-{
-        const struct dma_map_ops *dma_ops;
-
-        if (!dev->dma_mask)
-                return 0;
-
-        if (!iommu_fixed_disabled &&
-            cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR)
-                return DMA_BIT_MASK(64);
-
-        dma_ops = get_dma_ops(dev);
-        if (dma_ops->get_required_mask)
-                return dma_ops->get_required_mask(dev);
-
-        WARN_ONCE(1, "no get_required_mask in %p ops", dma_ops);
-
-        return DMA_BIT_MASK(64);
-}

 static int __init cell_iommu_init(void)
 {
```
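`setup_iommu_fixed()` is an early `__setup` handler: `iommu_fixed=weak` forces weak ordering, and weak is also the default whenever a `pcie-endpoint` node is present, unless `strong` was requested explicitly. A userspace rendering of that decision table, using only the logic visible in the hunk above:

```c
/* Userspace rendering of the iommu_fixed= boot-option logic. */
#include <assert.h>
#include <stdbool.h>
#include <string.h>

static bool parse_iommu_fixed(const char *str, bool have_pcie_endpoint)
{
        if (strcmp(str, "weak") == 0 ||
            (have_pcie_endpoint && strcmp(str, "strong") != 0))
                return true;    /* iommu_fixed_is_weak = true */
        return false;
}

int main(void)
{
        assert(parse_iommu_fixed("weak", false));       /* explicit weak */
        assert(parse_iommu_fixed("", true));            /* endpoint default */
        assert(!parse_iommu_fixed("strong", true));     /* explicit strong wins */
        assert(!parse_iommu_fixed("", false));
        return 0;
}
```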
|---|
```diff
@@ -1188,10 +1055,9 @@

         /* Setup various callbacks */
         cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;
-        ppc_md.dma_get_required_mask = cell_dma_get_required_mask;

         if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0)
-                goto bail;
+                goto done;

         /* Create an iommu for each /axon node. */
         for_each_node_by_name(np, "axon") {
@@ -1208,10 +1074,10 @@
                         continue;
                 cell_iommu_init_one(np, SPIDER_DMA_OFFSET);
         }
-
+ done:
         /* Setup default PCI iommu ops */
         set_pci_dma_ops(&dma_iommu_ops);
-
+        cell_iommu_enabled = true;
 bail:
         /* Register callbacks on OF platform device addition/removal
          * to handle linking them to the right DMA operations
```
|---|