.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
---|
1 | 2 | /* |
---|
2 | 3 | ** IA64 System Bus Adapter (SBA) I/O MMU manager |
---|
3 | 4 | ** |
---|
.. | .. |
---|
8 | 9 | ** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code) |
---|
9 | 10 | ** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code) |
---|
10 | 11 | ** |
---|
11 | | -** This program is free software; you can redistribute it and/or modify |
---|
12 | | -** it under the terms of the GNU General Public License as published by |
---|
13 | | -** the Free Software Foundation; either version 2 of the License, or |
---|
14 | | -** (at your option) any later version. |
---|
15 | 12 | ** |
---|
16 | 13 | ** |
---|
17 | 14 | ** This module initializes the IOC (I/O Controller) found on HP |
---|
.. | .. |
---|
36 | 33 | #include <linux/bitops.h> /* hweight64() */ |
---|
37 | 34 | #include <linux/crash_dump.h> |
---|
38 | 35 | #include <linux/iommu-helper.h> |
---|
39 | | -#include <linux/dma-mapping.h> |
---|
| 36 | +#include <linux/dma-map-ops.h> |
---|
40 | 37 | #include <linux/prefetch.h> |
---|
| 38 | +#include <linux/swiotlb.h> |
---|
41 | 39 | |
---|
42 | 40 | #include <asm/delay.h> /* ia64_get_itc() */ |
---|
43 | 41 | #include <asm/io.h> |
---|
.. | .. |
---|
45 | 43 | #include <asm/dma.h> |
---|
46 | 44 | |
---|
47 | 45 | #include <asm/acpi-ext.h> |
---|
48 | | - |
---|
49 | | -extern int swiotlb_late_init_with_default_size (size_t size); |
---|
50 | 46 | |
---|
51 | 47 | #define PFX "IOC: " |
---|
52 | 48 | |
---|
.. | .. |
---|
254 | 250 | static u64 prefetch_spill_page; |
---|
255 | 251 | #endif |
---|
256 | 252 | |
---|
257 | | -#ifdef CONFIG_PCI |
---|
258 | | -# define GET_IOC(dev) ((dev_is_pci(dev)) \ |
---|
/*
 * Look up the struct ioc backing @dev.  Only PCI devices sit behind an
 * SBA IOMMU (the pointer lives in the PCI controller's ->iommu field);
 * any other device yields NULL and must not be mapped through the SBA.
 */
#define GET_IOC(dev) ((dev_is_pci(dev)) \
	? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
---|
260 | | -#else |
---|
261 | | -# define GET_IOC(dev) NULL |
---|
262 | | -#endif |
---|
263 | 255 | |
---|
264 | 256 | /* |
---|
265 | 257 | ** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up |
---|
.. | .. |
---|
493 | 485 | ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0); |
---|
494 | 486 | ASSERT(res_ptr < res_end); |
---|
495 | 487 | |
---|
496 | | - boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1; |
---|
497 | | - boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift; |
---|
| 488 | + boundary_size = dma_get_seg_boundary_nr_pages(dev, iovp_shift); |
---|
498 | 489 | |
---|
499 | 490 | BUG_ON(ioc->ibase & ~iovp_mask); |
---|
500 | 491 | shift = ioc->ibase >> iovp_shift; |
---|
.. | .. |
---|
907 | 898 | } |
---|
908 | 899 | |
---|
909 | 900 | /** |
---|
910 | | - * sba_map_single_attrs - map one buffer and return IOVA for DMA |
---|
| 901 | + * sba_map_page - map one buffer and return IOVA for DMA |
---|
911 | 902 | * @dev: instance of PCI owned by the driver that's asking. |
---|
912 | | - * @addr: driver buffer to map. |
---|
913 | | - * @size: number of bytes to map in driver buffer. |
---|
914 | | - * @dir: R/W or both. |
---|
| 903 | + * @page: page to map |
---|
| 904 | + * @poff: offset into page |
---|
| 905 | + * @size: number of bytes to map |
---|
| 906 | + * @dir: dma direction |
---|
915 | 907 | * @attrs: optional dma attributes |
---|
916 | 908 | * |
---|
917 | | - * See Documentation/DMA-API-HOWTO.txt |
---|
| 909 | + * See Documentation/core-api/dma-api-howto.rst |
---|
918 | 910 | */ |
---|
919 | 911 | static dma_addr_t sba_map_page(struct device *dev, struct page *page, |
---|
920 | 912 | unsigned long poff, size_t size, |
---|
.. | .. |
---|
944 | 936 | ** Device is bit capable of DMA'ing to the buffer... |
---|
945 | 937 | ** just return the PCI address of ptr |
---|
946 | 938 | */ |
---|
947 | | - DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: " |
---|
| 939 | + DBG_BYPASS("sba_map_page() bypass mask/addr: " |
---|
948 | 940 | "0x%lx/0x%lx\n", |
---|
949 | 941 | to_pci_dev(dev)->dma_mask, pci_addr); |
---|
950 | 942 | return pci_addr; |
---|
.. | .. |
---|
966 | 958 | |
---|
967 | 959 | #ifdef ASSERT_PDIR_SANITY |
---|
968 | 960 | spin_lock_irqsave(&ioc->res_lock, flags); |
---|
969 | | - if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()")) |
---|
| 961 | + if (sba_check_pdir(ioc,"Check before sba_map_page()")) |
---|
970 | 962 | panic("Sanity check failed"); |
---|
971 | 963 | spin_unlock_irqrestore(&ioc->res_lock, flags); |
---|
972 | 964 | #endif |
---|
973 | 965 | |
---|
974 | 966 | pide = sba_alloc_range(ioc, dev, size); |
---|
975 | 967 | if (pide < 0) |
---|
976 | | - return 0; |
---|
| 968 | + return DMA_MAPPING_ERROR; |
---|
977 | 969 | |
---|
978 | 970 | iovp = (dma_addr_t) pide << iovp_shift; |
---|
979 | 971 | |
---|
.. | .. |
---|
997 | 989 | /* form complete address */ |
---|
998 | 990 | #ifdef ASSERT_PDIR_SANITY |
---|
999 | 991 | spin_lock_irqsave(&ioc->res_lock, flags); |
---|
1000 | | - sba_check_pdir(ioc,"Check after sba_map_single_attrs()"); |
---|
| 992 | + sba_check_pdir(ioc,"Check after sba_map_page()"); |
---|
1001 | 993 | spin_unlock_irqrestore(&ioc->res_lock, flags); |
---|
1002 | 994 | #endif |
---|
1003 | 995 | return SBA_IOVA(ioc, iovp, offset); |
---|
1004 | | -} |
---|
1005 | | - |
---|
1006 | | -static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr, |
---|
1007 | | - size_t size, enum dma_data_direction dir, |
---|
1008 | | - unsigned long attrs) |
---|
1009 | | -{ |
---|
1010 | | - return sba_map_page(dev, virt_to_page(addr), |
---|
1011 | | - (unsigned long)addr & ~PAGE_MASK, size, dir, attrs); |
---|
1012 | 996 | } |
---|
1013 | 997 | |
---|
1014 | 998 | #ifdef ENABLE_MARK_CLEAN |
---|
.. | .. |
---|
1036 | 1020 | #endif |
---|
1037 | 1021 | |
---|
1038 | 1022 | /** |
---|
1039 | | - * sba_unmap_single_attrs - unmap one IOVA and free resources |
---|
| 1023 | + * sba_unmap_page - unmap one IOVA and free resources |
---|
1040 | 1024 | * @dev: instance of PCI owned by the driver that's asking. |
---|
1041 | 1025 | * @iova: IOVA of driver buffer previously mapped. |
---|
1042 | 1026 | * @size: number of bytes mapped in driver buffer. |
---|
1043 | 1027 | * @dir: R/W or both. |
---|
1044 | 1028 | * @attrs: optional dma attributes |
---|
1045 | 1029 | * |
---|
1046 | | - * See Documentation/DMA-API-HOWTO.txt |
---|
| 1030 | + * See Documentation/core-api/dma-api-howto.rst |
---|
1047 | 1031 | */ |
---|
1048 | 1032 | static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, |
---|
1049 | 1033 | enum dma_data_direction dir, unsigned long attrs) |
---|
.. | .. |
---|
1063 | 1047 | /* |
---|
1064 | 1048 | ** Address does not fall w/in IOVA, must be bypassing |
---|
1065 | 1049 | */ |
---|
1066 | | - DBG_BYPASS("sba_unmap_single_attrs() bypass addr: 0x%lx\n", |
---|
| 1050 | + DBG_BYPASS("sba_unmap_page() bypass addr: 0x%lx\n", |
---|
1067 | 1051 | iova); |
---|
1068 | 1052 | |
---|
1069 | 1053 | #ifdef ENABLE_MARK_CLEAN |
---|
.. | .. |
---|
1114 | 1098 | #endif /* DELAYED_RESOURCE_CNT == 0 */ |
---|
1115 | 1099 | } |
---|
1116 | 1100 | |
---|
1117 | | -void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, |
---|
1118 | | - enum dma_data_direction dir, unsigned long attrs) |
---|
1119 | | -{ |
---|
1120 | | - sba_unmap_page(dev, iova, size, dir, attrs); |
---|
1121 | | -} |
---|
1122 | | - |
---|
1123 | 1101 | /** |
---|
1124 | 1102 | * sba_alloc_coherent - allocate/map shared mem for DMA |
---|
1125 | 1103 | * @dev: instance of PCI owned by the driver that's asking. |
---|
1126 | 1104 | * @size: number of bytes mapped in driver buffer. |
---|
1127 | 1105 | * @dma_handle: IOVA of new buffer. |
---|
1128 | 1106 | * |
---|
1129 | | - * See Documentation/DMA-API-HOWTO.txt |
---|
| 1107 | + * See Documentation/core-api/dma-api-howto.rst |
---|
1130 | 1108 | */ |
---|
1131 | 1109 | static void * |
---|
1132 | 1110 | sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, |
---|
1133 | 1111 | gfp_t flags, unsigned long attrs) |
---|
1134 | 1112 | { |
---|
| 1113 | + struct page *page; |
---|
1135 | 1114 | struct ioc *ioc; |
---|
| 1115 | + int node = -1; |
---|
1136 | 1116 | void *addr; |
---|
1137 | 1117 | |
---|
1138 | 1118 | ioc = GET_IOC(dev); |
---|
1139 | 1119 | ASSERT(ioc); |
---|
1140 | | - |
---|
1141 | 1120 | #ifdef CONFIG_NUMA |
---|
1142 | | - { |
---|
1143 | | - struct page *page; |
---|
1144 | | - |
---|
1145 | | - page = alloc_pages_node(ioc->node, flags, get_order(size)); |
---|
1146 | | - if (unlikely(!page)) |
---|
1147 | | - return NULL; |
---|
1148 | | - |
---|
1149 | | - addr = page_address(page); |
---|
1150 | | - } |
---|
1151 | | -#else |
---|
1152 | | - addr = (void *) __get_free_pages(flags, get_order(size)); |
---|
| 1121 | + node = ioc->node; |
---|
1153 | 1122 | #endif |
---|
1154 | | - if (unlikely(!addr)) |
---|
| 1123 | + |
---|
| 1124 | + page = alloc_pages_node(node, flags, get_order(size)); |
---|
| 1125 | + if (unlikely(!page)) |
---|
1155 | 1126 | return NULL; |
---|
1156 | 1127 | |
---|
| 1128 | + addr = page_address(page); |
---|
1157 | 1129 | memset(addr, 0, size); |
---|
1158 | | - *dma_handle = virt_to_phys(addr); |
---|
| 1130 | + *dma_handle = page_to_phys(page); |
---|
1159 | 1131 | |
---|
1160 | 1132 | #ifdef ALLOW_IOV_BYPASS |
---|
1161 | 1133 | ASSERT(dev->coherent_dma_mask); |
---|
.. | .. |
---|
1174 | 1146 | * If device can't bypass or bypass is disabled, pass the 32bit fake |
---|
1175 | 1147 | * device to map single to get an iova mapping. |
---|
1176 | 1148 | */ |
---|
1177 | | - *dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr, |
---|
1178 | | - size, 0, 0); |
---|
1179 | | - |
---|
| 1149 | + *dma_handle = sba_map_page(&ioc->sac_only_dev->dev, page, 0, size, |
---|
| 1150 | + DMA_BIDIRECTIONAL, 0); |
---|
| 1151 | + if (dma_mapping_error(dev, *dma_handle)) |
---|
| 1152 | + return NULL; |
---|
1180 | 1153 | return addr; |
---|
1181 | 1154 | } |
---|
1182 | 1155 | |
---|
.. | .. |
---|
 * @vaddr: kernel virtual address of the "consistent" buffer.
 * @dma_handle: IO virtual address of the "consistent" buffer.
---|
1190 | 1163 | * |
---|
1191 | | - * See Documentation/DMA-API-HOWTO.txt |
---|
| 1164 | + * See Documentation/core-api/dma-api-howto.rst |
---|
1192 | 1165 | */ |
---|
1193 | 1166 | static void sba_free_coherent(struct device *dev, size_t size, void *vaddr, |
---|
1194 | 1167 | dma_addr_t dma_handle, unsigned long attrs) |
---|
1195 | 1168 | { |
---|
1196 | | - sba_unmap_single_attrs(dev, dma_handle, size, 0, 0); |
---|
| 1169 | + sba_unmap_page(dev, dma_handle, size, 0, 0); |
---|
1197 | 1170 | free_pages((unsigned long) vaddr, get_order(size)); |
---|
1198 | 1171 | } |
---|
1199 | 1172 | |
---|
.. | .. |
---|
1451 | 1424 | * @dir: R/W or both. |
---|
1452 | 1425 | * @attrs: optional dma attributes |
---|
1453 | 1426 | * |
---|
1454 | | - * See Documentation/DMA-API-HOWTO.txt |
---|
| 1427 | + * See Documentation/core-api/dma-api-howto.rst |
---|
1455 | 1428 | */ |
---|
1456 | 1429 | static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, |
---|
1457 | 1430 | int nents, enum dma_data_direction dir, |
---|
.. | .. |
---|
1483 | 1456 | /* Fast path single entry scatterlists. */ |
---|
1484 | 1457 | if (nents == 1) { |
---|
1485 | 1458 | sglist->dma_length = sglist->length; |
---|
1486 | | - sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist), sglist->length, dir, attrs); |
---|
| 1459 | + sglist->dma_address = sba_map_page(dev, sg_page(sglist), |
---|
| 1460 | + sglist->offset, sglist->length, dir, attrs); |
---|
| 1461 | + if (dma_mapping_error(dev, sglist->dma_address)) |
---|
| 1462 | + return 0; |
---|
1487 | 1463 | return 1; |
---|
1488 | 1464 | } |
---|
1489 | 1465 | |
---|
.. | .. |
---|
1547 | 1523 | * @dir: R/W or both. |
---|
1548 | 1524 | * @attrs: optional dma attributes |
---|
1549 | 1525 | * |
---|
1550 | | - * See Documentation/DMA-API-HOWTO.txt |
---|
| 1526 | + * See Documentation/core-api/dma-api-howto.rst |
---|
1551 | 1527 | */ |
---|
1552 | 1528 | static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, |
---|
1553 | 1529 | int nents, enum dma_data_direction dir, |
---|
.. | .. |
---|
1572 | 1548 | |
---|
1573 | 1549 | while (nents && sglist->dma_length) { |
---|
1574 | 1550 | |
---|
1575 | | - sba_unmap_single_attrs(dev, sglist->dma_address, |
---|
1576 | | - sglist->dma_length, dir, attrs); |
---|
| 1551 | + sba_unmap_page(dev, sglist->dma_address, sglist->dma_length, |
---|
| 1552 | + dir, attrs); |
---|
1577 | 1553 | sglist = sg_next(sglist); |
---|
1578 | 1554 | nents--; |
---|
1579 | 1555 | } |
---|
.. | .. |
---|
1759 | 1735 | controller->iommu = ioc; |
---|
1760 | 1736 | sac->sysdata = controller; |
---|
1761 | 1737 | sac->dma_mask = 0xFFFFFFFFUL; |
---|
1762 | | -#ifdef CONFIG_PCI |
---|
1763 | 1738 | sac->dev.bus = &pci_bus_type; |
---|
1764 | | -#endif |
---|
1765 | 1739 | ioc->sac_only_dev = sac; |
---|
1766 | 1740 | } |
---|
1767 | 1741 | |
---|
.. | .. |
---|
2080 | 2054 | /* This has to run before acpi_scan_init(). */ |
---|
2081 | 2055 | arch_initcall(acpi_sba_ioc_init_acpi); |
---|
2082 | 2056 | |
---|
2083 | | -extern const struct dma_map_ops swiotlb_dma_ops; |
---|
| 2057 | +static int sba_dma_supported (struct device *dev, u64 mask) |
---|
| 2058 | +{ |
---|
| 2059 | + /* make sure it's at least 32bit capable */ |
---|
| 2060 | + return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL); |
---|
| 2061 | +} |
---|
| 2062 | + |
---|
/*
 * DMA operations installed (via dma_ops in sba_init()) for devices that
 * sit behind an SBA IOMMU.  Coherent allocation and single/sg mappings
 * go through the IOC pdir code above; mmap, get_sgtable and the page
 * allocators delegate to the generic dma-mapping helpers.
 */
static const struct dma_map_ops sba_dma_ops = {
	.alloc = sba_alloc_coherent,
	.free = sba_free_coherent,
	.map_page = sba_map_page,
	.unmap_page = sba_unmap_page,
	.map_sg = sba_map_sg_attrs,
	.unmap_sg = sba_unmap_sg_attrs,
	.dma_supported = sba_dma_supported,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.alloc_pages = dma_common_alloc_pages,
	.free_pages = dma_common_free_pages,
};
---|
2084 | 2076 | |
---|
2085 | 2077 | static int __init |
---|
2086 | 2078 | sba_init(void) |
---|
2087 | 2079 | { |
---|
2088 | | - if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb")) |
---|
2089 | | - return 0; |
---|
2090 | | - |
---|
2091 | | -#if defined(CONFIG_IA64_GENERIC) |
---|
2092 | | - /* If we are booting a kdump kernel, the sba_iommu will |
---|
2093 | | - * cause devices that were not shutdown properly to MCA |
---|
2094 | | - * as soon as they are turned back on. Our only option for |
---|
2095 | | - * a successful kdump kernel boot is to use the swiotlb. |
---|
| 2080 | + /* |
---|
| 2081 | + * If we are booting a kdump kernel, the sba_iommu will cause devices |
---|
| 2082 | + * that were not shutdown properly to MCA as soon as they are turned |
---|
| 2083 | + * back on. Our only option for a successful kdump kernel boot is to |
---|
| 2084 | + * use swiotlb. |
---|
2096 | 2085 | */ |
---|
2097 | | - if (is_kdump_kernel()) { |
---|
2098 | | - dma_ops = &swiotlb_dma_ops; |
---|
2099 | | - if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0) |
---|
2100 | | - panic("Unable to initialize software I/O TLB:" |
---|
2101 | | - " Try machvec=dig boot option"); |
---|
2102 | | - machvec_init("dig"); |
---|
| 2086 | + if (is_kdump_kernel()) |
---|
2103 | 2087 | return 0; |
---|
2104 | | - } |
---|
2105 | | -#endif |
---|
2106 | 2088 | |
---|
2107 | 2089 | /* |
---|
2108 | 2090 | * ioc_found should be populated by the acpi_sba_ioc_handler's .attach() |
---|
.. | .. |
---|
2111 | 2093 | while (ioc_found) |
---|
2112 | 2094 | acpi_sba_ioc_add(ioc_found); |
---|
2113 | 2095 | |
---|
2114 | | - if (!ioc_list) { |
---|
2115 | | -#ifdef CONFIG_IA64_GENERIC |
---|
2116 | | - /* |
---|
2117 | | - * If we didn't find something sba_iommu can claim, we |
---|
2118 | | - * need to setup the swiotlb and switch to the dig machvec. |
---|
2119 | | - */ |
---|
2120 | | - dma_ops = &swiotlb_dma_ops; |
---|
2121 | | - if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0) |
---|
2122 | | - panic("Unable to find SBA IOMMU or initialize " |
---|
2123 | | - "software I/O TLB: Try machvec=dig boot option"); |
---|
2124 | | - machvec_init("dig"); |
---|
2125 | | -#else |
---|
2126 | | - panic("Unable to find SBA IOMMU: Try a generic or DIG kernel"); |
---|
2127 | | -#endif |
---|
| 2096 | + if (!ioc_list) |
---|
2128 | 2097 | return 0; |
---|
2129 | | - } |
---|
2130 | 2098 | |
---|
2131 | | -#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB) |
---|
2132 | | - /* |
---|
2133 | | - * hpzx1_swiotlb needs to have a fairly small swiotlb bounce |
---|
2134 | | - * buffer setup to support devices with smaller DMA masks than |
---|
2135 | | - * sba_iommu can handle. |
---|
2136 | | - */ |
---|
2137 | | - if (ia64_platform_is("hpzx1_swiotlb")) { |
---|
2138 | | - extern void hwsw_init(void); |
---|
2139 | | - |
---|
2140 | | - hwsw_init(); |
---|
2141 | | - } |
---|
2142 | | -#endif |
---|
2143 | | - |
---|
2144 | | -#ifdef CONFIG_PCI |
---|
2145 | 2099 | { |
---|
2146 | 2100 | struct pci_bus *b = NULL; |
---|
2147 | 2101 | while ((b = pci_find_next_bus(b)) != NULL) |
---|
2148 | 2102 | sba_connect_bus(b); |
---|
2149 | 2103 | } |
---|
2150 | | -#endif |
---|
| 2104 | + |
---|
| 2105 | + /* no need for swiotlb with the iommu */ |
---|
| 2106 | + swiotlb_exit(); |
---|
| 2107 | + dma_ops = &sba_dma_ops; |
---|
2151 | 2108 | |
---|
2152 | 2109 | #ifdef CONFIG_PROC_FS |
---|
2153 | 2110 | ioc_proc_init(); |
---|
.. | .. |
---|
2162 | 2119 | { |
---|
2163 | 2120 | reserve_sba_gart = 0; |
---|
2164 | 2121 | return 1; |
---|
2165 | | -} |
---|
2166 | | - |
---|
2167 | | -static int sba_dma_supported (struct device *dev, u64 mask) |
---|
2168 | | -{ |
---|
2169 | | - /* make sure it's at least 32bit capable */ |
---|
2170 | | - return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL); |
---|
2171 | | -} |
---|
2172 | | - |
---|
2173 | | -static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
---|
2174 | | -{ |
---|
2175 | | - return 0; |
---|
2176 | 2122 | } |
---|
2177 | 2123 | |
---|
2178 | 2124 | __setup("nosbagart", nosbagart); |
---|
.. | .. |
---|
2199 | 2145 | } |
---|
2200 | 2146 | |
---|
2201 | 2147 | __setup("sbapagesize=",sba_page_override); |
---|
2202 | | - |
---|
2203 | | -const struct dma_map_ops sba_dma_ops = { |
---|
2204 | | - .alloc = sba_alloc_coherent, |
---|
2205 | | - .free = sba_free_coherent, |
---|
2206 | | - .map_page = sba_map_page, |
---|
2207 | | - .unmap_page = sba_unmap_page, |
---|
2208 | | - .map_sg = sba_map_sg_attrs, |
---|
2209 | | - .unmap_sg = sba_unmap_sg_attrs, |
---|
2210 | | - .sync_single_for_cpu = machvec_dma_sync_single, |
---|
2211 | | - .sync_sg_for_cpu = machvec_dma_sync_sg, |
---|
2212 | | - .sync_single_for_device = machvec_dma_sync_single, |
---|
2213 | | - .sync_sg_for_device = machvec_dma_sync_sg, |
---|
2214 | | - .dma_supported = sba_dma_supported, |
---|
2215 | | - .mapping_error = sba_dma_mapping_error, |
---|
2216 | | -}; |
---|
2217 | | - |
---|
2218 | | -void sba_dma_init(void) |
---|
2219 | | -{ |
---|
2220 | | - dma_ops = &sba_dma_ops; |
---|
2221 | | -} |
---|