2024-05-11 297b60346df8beafee954a0fd7c2d64f33f3b9bc
kernel/arch/powerpc/platforms/cell/iommu.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * IOMMU implementation for Cell Broadband Processor Architecture
  *
  * (C) Copyright IBM Corporation 2006-2008
  *
  * Author: Jeremy Kerr <jk@ozlabs.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

 #undef DEBUG
@@ -499,7 +486,7 @@
         window->table.it_size = size >> window->table.it_page_shift;
         window->table.it_ops = &cell_iommu_ops;

-        iommu_init_table(&window->table, iommu->nid);
+        iommu_init_table(&window->table, iommu->nid, 0, 0);

         pr_debug("\tioid      %d\n", window->ioid);
         pr_debug("\tblocksize %ld\n", window->table.it_blocksize);
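
The extra `0, 0` arguments come from the updated iommu_init_table() signature, which now takes a reserved range of table entries (res_start, res_end) that the DMA allocator must never hand out; Cell reserves nothing, hence `0, 0`. A minimal user-space sketch of what such a reservation means for the table's allocation bitmap (helper names here are illustrative, not kernel API):

    #include <stdio.h>

    #define TABLE_ENTRIES 16

    static unsigned char entry_in_use[TABLE_ENTRIES];

    /* Pre-mark [res_start, res_end) as used, as iommu_init_table() is
     * assumed to do for its internal bitmap when the range is non-empty. */
    static void reserve_range(unsigned long res_start, unsigned long res_end)
    {
            unsigned long i;

            for (i = res_start; i < res_end && i < TABLE_ENTRIES; i++)
                    entry_in_use[i] = 1;
    }

    int main(void)
    {
            int i;

            reserve_range(0, 0);    /* the Cell case: nothing reserved */
            reserve_range(2, 5);    /* what a platform with a hole would pass */
            for (i = 0; i < TABLE_ENTRIES; i++)
                    printf("%d", entry_in_use[i]);
            printf("\n");
            return 0;
    }
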
@@ -544,9 +531,10 @@
 static unsigned long cell_dma_nommu_offset;

 static unsigned long dma_iommu_fixed_base;
+static bool cell_iommu_enabled;

 /* iommu_fixed_is_weak is set if booted with iommu_fixed=weak */
-static int iommu_fixed_is_weak;
+bool iommu_fixed_is_weak;

 static struct iommu_table *cell_get_iommu_table(struct device *dev)
 {
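
Turning iommu_fixed_is_weak into a bool is safe only because the fixed ops deleted below no longer compare it against an attribute bitmask. Under the old scheme the variable held the DMA_ATTR_WEAK_ORDERING bit itself, so that `iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)` matched the boot setting against per-mapping attributes. A stand-alone sketch of that trick (the attribute value mirrors the kernel's bit-flag definition):

    #include <stdbool.h>
    #include <stdio.h>

    #define DMA_ATTR_WEAK_ORDERING (1UL << 1)   /* bit flag, as in linux/dma-mapping.h */

    int main(void)
    {
            /* Old scheme: an int holding either 0 or the attribute bit, so a
             * direct comparison against (attrs & DMA_ATTR_WEAK_ORDERING)
             * asked "does this mapping's ordering match the boot setting?" */
            unsigned long fixed_is_weak_old = DMA_ATTR_WEAK_ORDERING;
            unsigned long attrs = DMA_ATTR_WEAK_ORDERING;

            printf("old match: %d\n",
                   fixed_is_weak_old == (attrs & DMA_ATTR_WEAK_ORDERING));

            /* New scheme: a plain bool; the per-mapping comparison is gone
             * along with dma_iommu_fixed_ops. */
            bool iommu_fixed_is_weak = true;
            printf("new flag: %d\n", iommu_fixed_is_weak);
            return 0;
    }
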
@@ -568,103 +556,19 @@
         return &window->table;
 }

-/* A coherent allocation implies strong ordering */
-
-static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
-                                      dma_addr_t *dma_handle, gfp_t flag,
-                                      unsigned long attrs)
-{
-        if (iommu_fixed_is_weak)
-                return iommu_alloc_coherent(dev, cell_get_iommu_table(dev),
-                                            size, dma_handle,
-                                            device_to_mask(dev), flag,
-                                            dev_to_node(dev));
-        else
-                return dma_nommu_ops.alloc(dev, size, dma_handle, flag,
-                                           attrs);
-}
-
-static void dma_fixed_free_coherent(struct device *dev, size_t size,
-                                    void *vaddr, dma_addr_t dma_handle,
-                                    unsigned long attrs)
-{
-        if (iommu_fixed_is_weak)
-                iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr,
-                                    dma_handle);
-        else
-                dma_nommu_ops.free(dev, size, vaddr, dma_handle, attrs);
-}
-
-static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
-                                     unsigned long offset, size_t size,
-                                     enum dma_data_direction direction,
-                                     unsigned long attrs)
-{
-        if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
-                return dma_nommu_ops.map_page(dev, page, offset, size,
-                                              direction, attrs);
-        else
-                return iommu_map_page(dev, cell_get_iommu_table(dev), page,
-                                      offset, size, device_to_mask(dev),
-                                      direction, attrs);
-}
-
-static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr,
-                                 size_t size, enum dma_data_direction direction,
-                                 unsigned long attrs)
-{
-        if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
-                dma_nommu_ops.unmap_page(dev, dma_addr, size, direction,
-                                         attrs);
-        else
-                iommu_unmap_page(cell_get_iommu_table(dev), dma_addr, size,
-                                 direction, attrs);
-}
-
-static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg,
-                            int nents, enum dma_data_direction direction,
-                            unsigned long attrs)
-{
-        if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
-                return dma_nommu_ops.map_sg(dev, sg, nents, direction, attrs);
-        else
-                return ppc_iommu_map_sg(dev, cell_get_iommu_table(dev), sg,
-                                        nents, device_to_mask(dev),
-                                        direction, attrs);
-}
-
-static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
-                               int nents, enum dma_data_direction direction,
-                               unsigned long attrs)
-{
-        if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
-                dma_nommu_ops.unmap_sg(dev, sg, nents, direction, attrs);
-        else
-                ppc_iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents,
-                                   direction, attrs);
-}
-
-static int dma_suported_and_switch(struct device *dev, u64 dma_mask);
-
-static const struct dma_map_ops dma_iommu_fixed_ops = {
-        .alloc          = dma_fixed_alloc_coherent,
-        .free           = dma_fixed_free_coherent,
-        .map_sg         = dma_fixed_map_sg,
-        .unmap_sg       = dma_fixed_unmap_sg,
-        .dma_supported  = dma_suported_and_switch,
-        .map_page       = dma_fixed_map_page,
-        .unmap_page     = dma_fixed_unmap_page,
-        .mapping_error  = dma_iommu_mapping_error,
-};
+static u64 cell_iommu_get_fixed_address(struct device *dev);

 static void cell_dma_dev_setup(struct device *dev)
 {
-        if (get_pci_dma_ops() == &dma_iommu_ops)
+        if (cell_iommu_enabled) {
+                u64 addr = cell_iommu_get_fixed_address(dev);
+
+                if (addr != OF_BAD_ADDR)
+                        dev->archdata.dma_offset = addr + dma_iommu_fixed_base;
                 set_iommu_table_base(dev, cell_get_iommu_table(dev));
-        else if (get_pci_dma_ops() == &dma_nommu_ops)
-                set_dma_offset(dev, cell_dma_nommu_offset);
-        else
-                BUG();
+        } else {
+                dev->archdata.dma_offset = cell_dma_nommu_offset;
+        }
 }

 static void cell_pci_dma_dev_setup(struct pci_dev *dev)
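
With dma_iommu_fixed_ops gone, the 64-bit "fixed" window is expressed purely as a device DMA offset: the generic direct-mapping path is expected to add dev->archdata.dma_offset to the physical address whenever the iommu is bypassed. A minimal model of that address math (assumed behaviour of the generic path; values and helper names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define OF_BAD_ADDR ((uint64_t)-1)

    /* Stand-ins for dma_iommu_fixed_base and the device-tree lookup. */
    static const uint64_t fixed_base = 0x80000000ull;
    static const uint64_t fixed_addr = 0x10000000ull;

    /* Assumed generic bypass rule: dma address = physical + dma_offset. */
    static uint64_t direct_map(uint64_t phys, uint64_t dma_offset)
    {
            return phys + dma_offset;
    }

    int main(void)
    {
            uint64_t dma_offset = 0;

            if (fixed_addr != OF_BAD_ADDR)  /* mirrors cell_dma_dev_setup() */
                    dma_offset = fixed_addr + fixed_base;

            printf("dma addr = 0x%llx\n",
                   (unsigned long long)direct_map(0x1000, dma_offset));
            return 0;
    }
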
@@ -681,11 +585,9 @@
         if (action != BUS_NOTIFY_ADD_DEVICE)
                 return 0;

-        /* We use the PCI DMA ops */
-        dev->dma_ops = get_pci_dma_ops();
-
+        if (cell_iommu_enabled)
+                dev->dma_ops = &dma_iommu_ops;
         cell_dma_dev_setup(dev);
-
         return 0;
 }

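
The bus notifier no longer copies whatever get_pci_dma_ops() returns; it assigns dma_iommu_ops only when the Cell iommu actually initialised, and otherwise leaves the default direct ops in place (the offset set in cell_dma_dev_setup() covers the no-iommu case). A toy model of that dispatch, with the ops reduced to strings:

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_dev { const char *dma_ops; };

    static bool cell_iommu_enabled;

    static void bus_notify_add_device(struct toy_dev *dev)
    {
            if (cell_iommu_enabled)
                    dev->dma_ops = "dma_iommu_ops";
            /* else: keep the default direct ops already on the device */
    }

    int main(void)
    {
            struct toy_dev dev = { .dma_ops = "dma_direct_ops" };

            bus_notify_add_device(&dev);            /* iommu disabled */
            printf("without iommu: %s\n", dev.dma_ops);

            cell_iommu_enabled = true;
            bus_notify_add_device(&dev);            /* iommu enabled */
            printf("with iommu:    %s\n", dev.dma_ops);
            return 0;
    }
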
@@ -810,7 +712,6 @@
         unsigned long base = 0, size;

         /* When no iommu is present, we use direct DMA ops */
-        set_pci_dma_ops(&dma_nommu_ops);

         /* First make sure all IOC translation is turned off */
         cell_disable_iommus();
@@ -895,7 +796,11 @@
         const u32 *ranges = NULL;
         int i, len, best, naddr, nsize, pna, range_size;

+        /* We can be called for platform devices that have no of_node */
         np = of_node_get(dev->of_node);
+        if (!np)
+                goto out;
+
         while (1) {
                 naddr = of_n_addr_cells(np);
                 nsize = of_n_size_cells(np);
@@ -946,27 +851,10 @@
         return dev_addr;
 }

-static int dma_suported_and_switch(struct device *dev, u64 dma_mask)
+static bool cell_pci_iommu_bypass_supported(struct pci_dev *pdev, u64 mask)
 {
-        if (dma_mask == DMA_BIT_MASK(64) &&
-            cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) {
-                u64 addr = cell_iommu_get_fixed_address(dev) +
-                        dma_iommu_fixed_base;
-                dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
-                dev_dbg(dev, "iommu: fixed addr = %llx\n", addr);
-                set_dma_ops(dev, &dma_iommu_fixed_ops);
-                set_dma_offset(dev, addr);
-                return 1;
-        }
-
-        if (dma_iommu_dma_supported(dev, dma_mask)) {
-                dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
-                set_dma_ops(dev, get_pci_dma_ops());
-                cell_dma_dev_setup(dev);
-                return 1;
-        }
-
-        return 0;
+        return mask == DMA_BIT_MASK(64) &&
+                cell_iommu_get_fixed_address(&pdev->dev) != OF_BAD_ADDR;
 }

 static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
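
The old dma_suported_and_switch() collapses into a pure predicate: instead of the platform swapping DMA ops behind the core's back, the generic iommu layer now asks the controller whether a device with a given mask may bypass translation. A stand-alone model of the predicate (DMA_BIT_MASK mirrors the kernel macro; the device-tree lookup is reduced to a constant):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
    #define OF_BAD_ADDR ((uint64_t)-1)

    /* Stand-in for cell_iommu_get_fixed_address(&pdev->dev). */
    static const uint64_t fixed_address = 0x10000000ull;

    static bool bypass_supported(uint64_t mask)
    {
            return mask == DMA_BIT_MASK(64) && fixed_address != OF_BAD_ADDR;
    }

    int main(void)
    {
            /* A fully 64-bit capable device may skip translation entirely... */
            printf("64-bit mask: bypass=%d\n", bypass_supported(DMA_BIT_MASK(64)));
            /* ...a 32-bit device must keep going through the iommu window. */
            printf("32-bit mask: bypass=%d\n", bypass_supported(DMA_BIT_MASK(32)));
            return 0;
    }
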
@@ -1055,7 +943,7 @@
                 fbase = max(fbase, dbase + dsize);
         }

-        fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT);
+        fbase = ALIGN(fbase, 1 << IO_SEGMENT_SHIFT);
         fsize = memblock_phys_mem_size();

         if ((fbase + fsize) <= 0x800000000ul)
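
_ALIGN_UP was a powerpc-private macro; ALIGN is the generic kernel equivalent, rounding up to the next multiple of a power-of-two boundary. A stand-alone check of the arithmetic (the IO_SEGMENT_SHIFT value is assumed for illustration):

    #include <stdio.h>

    /* Same shape as the generic kernel ALIGN() for power-of-two 'a'. */
    #define ALIGN(x, a) (((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))
    #define IO_SEGMENT_SHIFT 28     /* assumed: 256MB iommu segments */

    int main(void)
    {
            unsigned long fbase = 0x12345678ul;

            /* 0x12345678 rounds up to the 0x10000000-aligned 0x20000000. */
            printf("aligned: 0x%lx\n", ALIGN(fbase, 1ul << IO_SEGMENT_SHIFT));
            return 0;
    }
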
@@ -1075,8 +963,8 @@
         hend  = hbase + htab_size_bytes;

         /* The window must start and end on a segment boundary */
-        if ((hbase != _ALIGN_UP(hbase, 1 << IO_SEGMENT_SHIFT)) ||
-            (hend != _ALIGN_UP(hend, 1 << IO_SEGMENT_SHIFT))) {
+        if ((hbase != ALIGN(hbase, 1 << IO_SEGMENT_SHIFT)) ||
+            (hend != ALIGN(hend, 1 << IO_SEGMENT_SHIFT))) {
                 pr_debug("iommu: hash window not segment aligned\n");
                 return -1;
         }
@@ -1121,9 +1009,8 @@
                 cell_iommu_setup_window(iommu, np, dbase, dsize, 0);
         }

-        dma_iommu_ops.dma_supported = dma_suported_and_switch;
-        set_pci_dma_ops(&dma_iommu_ops);
-
+        cell_pci_controller_ops.iommu_bypass_supported =
+                cell_pci_iommu_bypass_supported;
         return 0;
 }

@@ -1144,33 +1031,13 @@
         pciep = of_find_node_by_type(NULL, "pcie-endpoint");

         if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0))
-                iommu_fixed_is_weak = DMA_ATTR_WEAK_ORDERING;
+                iommu_fixed_is_weak = true;

         of_node_put(pciep);

         return 1;
 }
 __setup("iommu_fixed=", setup_iommu_fixed);
-
-static u64 cell_dma_get_required_mask(struct device *dev)
-{
-        const struct dma_map_ops *dma_ops;
-
-        if (!dev->dma_mask)
-                return 0;
-
-        if (!iommu_fixed_disabled &&
-            cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR)
-                return DMA_BIT_MASK(64);
-
-        dma_ops = get_dma_ops(dev);
-        if (dma_ops->get_required_mask)
-                return dma_ops->get_required_mask(dev);
-
-        WARN_ONCE(1, "no get_required_mask in %p ops", dma_ops);
-
-        return DMA_BIT_MASK(64);
-}

 static int __init cell_iommu_init(void)
 {
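
setup_iommu_fixed() keeps its logic: iommu_fixed=weak forces weak ordering, and on machines with a pcie-endpoint node anything other than an explicit "strong" also ends up weak; only the stored value changes from the attribute bit to true. A stand-alone model of the parse (the device-tree check is reduced to a bool):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool iommu_fixed_is_weak;

    static int setup_iommu_fixed(const char *str, bool has_pcie_endpoint)
    {
            if (strcmp(str, "weak") == 0 ||
                (has_pcie_endpoint && strcmp(str, "strong") != 0))
                    iommu_fixed_is_weak = true;
            return 1;
    }

    int main(void)
    {
            setup_iommu_fixed("weak", false);
            printf("iommu_fixed=weak  -> weak: %d\n", iommu_fixed_is_weak);

            iommu_fixed_is_weak = false;
            setup_iommu_fixed("strong", true);
            printf("iommu_fixed=strong, pcie-endpoint -> weak: %d\n",
                   iommu_fixed_is_weak);
            return 0;
    }
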
@@ -1188,10 +1055,9 @@

         /* Setup various callbacks */
         cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;
-        ppc_md.dma_get_required_mask = cell_dma_get_required_mask;

         if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0)
-                goto bail;
+                goto done;

         /* Create an iommu for each /axon node. */
         for_each_node_by_name(np, "axon") {
@@ -1208,10 +1074,10 @@
                         continue;
                 cell_iommu_init_one(np, SPIDER_DMA_OFFSET);
         }
-
+ done:
         /* Setup default PCI iommu ops */
         set_pci_dma_ops(&dma_iommu_ops);
-
+        cell_iommu_enabled = true;
  bail:
         /* Register callbacks on OF platform device addition/removal
          * to handle linking them to the right DMA operations