--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -1,76 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * IOMMU API for GART in Tegra20
+ * IOMMU API for Graphics Address Relocation Table on Tegra20
  *
  * Copyright (c) 2010-2012, NVIDIA CORPORATION.  All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * Author: Hiroshi DOYU <hdoyu@nvidia.com>
  */
 
-#define pr_fmt(fmt)	"%s(): " fmt, __func__
+#define dev_fmt(fmt)	"gart: " fmt
 
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
-#include <linux/list.h>
-#include <linux/device.h>
 #include <linux/io.h>
 #include <linux/iommu.h>
-#include <linux/of.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
 
-#include <asm/cacheflush.h>
-
-/* bitmap of the page sizes currently supported */
-#define GART_IOMMU_PGSIZES	(SZ_4K)
+#include <soc/tegra/mc.h>
 
 #define GART_REG_BASE		0x24
 #define GART_CONFIG		(0x24 - GART_REG_BASE)
 #define GART_ENTRY_ADDR		(0x28 - GART_REG_BASE)
 #define GART_ENTRY_DATA		(0x2c - GART_REG_BASE)
-#define GART_ENTRY_PHYS_ADDR_VALID	(1 << 31)
+
+#define GART_ENTRY_PHYS_ADDR_VALID	BIT(31)
 
 #define GART_PAGE_SHIFT		12
 #define GART_PAGE_SIZE		(1 << GART_PAGE_SHIFT)
-#define GART_PAGE_MASK \
-	(~(GART_PAGE_SIZE - 1) & ~GART_ENTRY_PHYS_ADDR_VALID)
+#define GART_PAGE_MASK		GENMASK(30, GART_PAGE_SHIFT)
 
-struct gart_client {
-	struct device *dev;
-	struct list_head list;
-};
+/* bitmap of the page sizes currently supported */
+#define GART_IOMMU_PGSIZES	(GART_PAGE_SIZE)
 
 struct gart_device {
 	void __iomem *regs;
 	u32 *savedata;
-	u32 page_count;		/* total remappable size */
-	dma_addr_t iovmm_base;	/* offset to vmm_area */
+	unsigned long iovmm_base;	/* offset to vmm_area start */
+	unsigned long iovmm_end;	/* offset to vmm_area end */
	spinlock_t pte_lock;		/* for pagetable */
-	struct list_head client;
-	spinlock_t client_lock;	/* for client list */
-	struct device *dev;
-
+	spinlock_t dom_lock;		/* for active domain */
+	unsigned int active_devices;	/* number of active devices */
+	struct iommu_domain *active_domain;	/* current active domain */
 	struct iommu_device iommu;	/* IOMMU Core handle */
-};
-
-struct gart_domain {
-	struct iommu_domain domain;	/* generic domain handle */
-	struct gart_device *gart;	/* link to gart device */
+	struct device *dev;
 };
 
 static struct gart_device *gart_handle; /* unique for a system */
 
 static bool gart_debug;
 
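The new BIT()/GENMASK() definitions spell out the PTE layout explicitly: bit 31 is the valid flag and bits 30:12 carry the 4KiB-aligned physical page address (GENMASK(30, 12) == 0x7ffff000). A minimal sketch of how an entry encodes and decodes under that layout; the example_* helpers are illustrative only and are not part of the patch:

    /* Illustrative helpers built on the macros defined above. */
    static inline u32 example_make_pte(u32 phys)
    {
            return GART_ENTRY_PHYS_ADDR_VALID | (phys & GART_PAGE_MASK);
    }

    static inline u32 example_pte_to_phys(u32 pte)
    {
            return pte & GART_PAGE_MASK;    /* strips the valid bit */
    }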
@@ -77,40 +53,29 @@
-#define GART_PTE(_pfn) \
-	(GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))
-
-static struct gart_domain *to_gart_domain(struct iommu_domain *dom)
-{
-	return container_of(dom, struct gart_domain, domain);
-}
-
 /*
  * Any interaction between any block on PPSB and a block on APB or AHB
  * must have these read-back to ensure the APB/AHB bus transaction is
  * complete before initiating activity on the PPSB block.
  */
-#define FLUSH_GART_REGS(gart)	((void)readl((gart)->regs + GART_CONFIG))
+#define FLUSH_GART_REGS(gart)	readl_relaxed((gart)->regs + GART_CONFIG)
 
 #define for_each_gart_pte(gart, iova)					\
 	for (iova = gart->iovmm_base;					\
-	     iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \
+	     iova < gart->iovmm_end;					\
 	     iova += GART_PAGE_SIZE)
 
 static inline void gart_set_pte(struct gart_device *gart,
-				unsigned long offs, u32 pte)
+				unsigned long iova, unsigned long pte)
 {
-	writel(offs, gart->regs + GART_ENTRY_ADDR);
-	writel(pte, gart->regs + GART_ENTRY_DATA);
-
-	dev_dbg(gart->dev, "%s %08lx:%08x\n",
-		 pte ? "map" : "unmap", offs, pte & GART_PAGE_MASK);
+	writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
+	writel_relaxed(pte, gart->regs + GART_ENTRY_DATA);
 }
 
 static inline unsigned long gart_read_pte(struct gart_device *gart,
-					  unsigned long offs)
+					  unsigned long iova)
 {
 	unsigned long pte;
 
-	writel(offs, gart->regs + GART_ENTRY_ADDR);
-	pte = readl(gart->regs + GART_ENTRY_DATA);
+	writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
+	pte = readl_relaxed(gart->regs + GART_ENTRY_DATA);
 
 	return pte;
 }
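gart_set_pte() and gart_read_pte() go through an address/data register pair rather than a memory-mapped page table: the IOVA is written to GART_ENTRY_ADDR, then the PTE is written to (or read from) GART_ENTRY_DATA. The switch to writel_relaxed()/readl_relaxed() is safe because callers batch their updates and order the whole batch with a single read-back, per the PPSB comment above. A hedged sketch of that pattern, written as if it lived in this file:

    /* Sketch: program a run of PTEs, then flush once at the end. */
    static void example_program_range(struct gart_device *gart,
                                      unsigned long iova, const u32 *ptes,
                                      unsigned int count)
    {
            unsigned int i;

            for (i = 0; i < count; i++)
                    gart_set_pte(gart, iova + i * GART_PAGE_SIZE, ptes[i]);

            FLUSH_GART_REGS(gart);  /* one read-back covers the batch */
    }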
@@ -122,48 +87,19 @@
 	for_each_gart_pte(gart, iova)
 		gart_set_pte(gart, iova, data ? *(data++) : 0);
 
-	writel(1, gart->regs + GART_CONFIG);
+	writel_relaxed(1, gart->regs + GART_CONFIG);
 	FLUSH_GART_REGS(gart);
 }
 
-#ifdef DEBUG
-static void gart_dump_table(struct gart_device *gart)
+static inline bool gart_iova_range_invalid(struct gart_device *gart,
+					   unsigned long iova, size_t bytes)
 {
-	unsigned long iova;
-	unsigned long flags;
-
-	spin_lock_irqsave(&gart->pte_lock, flags);
-	for_each_gart_pte(gart, iova) {
-		unsigned long pte;
-
-		pte = gart_read_pte(gart, iova);
-
-		dev_dbg(gart->dev, "%s %08lx:%08lx\n",
-			(GART_ENTRY_PHYS_ADDR_VALID & pte) ? "v" : " ",
-			iova, pte & GART_PAGE_MASK);
-	}
-	spin_unlock_irqrestore(&gart->pte_lock, flags);
+	return unlikely(iova < gart->iovmm_base || bytes != GART_PAGE_SIZE ||
+			iova + bytes > gart->iovmm_end);
 }
-#else
-static inline void gart_dump_table(struct gart_device *gart)
+
+static inline bool gart_pte_valid(struct gart_device *gart, unsigned long iova)
 {
-}
-#endif
-
-static inline bool gart_iova_range_valid(struct gart_device *gart,
-					 unsigned long iova, size_t bytes)
-{
-	unsigned long iova_start, iova_end, gart_start, gart_end;
-
-	iova_start = iova;
-	iova_end = iova_start + bytes - 1;
-	gart_start = gart->iovmm_base;
-	gart_end = gart_start + gart->page_count * GART_PAGE_SIZE - 1;
-
-	if (iova_start < gart_start)
-		return false;
-	if (iova_end > gart_end)
-		return false;
-	return true;
+	return !!(gart_read_pte(gart, iova) & GART_ENTRY_PHYS_ADDR_VALID);
 }
 
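gart_iova_range_invalid() folds the old range check and an implicit page-size check into one predicate: the request must be exactly one 4KiB page, fully inside the aperture. Worked through with hypothetical aperture values (the numbers are for illustration only, not taken from the patch):

    /* Assume iovmm_base = 0x58000000 and iovmm_end = 0x5a000000 (32MiB). */
    gart_iova_range_invalid(gart, 0x58000000, SZ_4K); /* false - accepted     */
    gart_iova_range_invalid(gart, 0x58000000, SZ_8K); /* true  - not one page */
    gart_iova_range_invalid(gart, 0x59fff000, SZ_4K); /* false - last page    */
    gart_iova_range_invalid(gart, 0x5a000000, SZ_4K); /* true  - past the end */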
@@ -170,55 +106,38 @@
 static int gart_iommu_attach_dev(struct iommu_domain *domain,
 				 struct device *dev)
 {
-	struct gart_domain *gart_domain = to_gart_domain(domain);
-	struct gart_device *gart = gart_domain->gart;
-	struct gart_client *client, *c;
-	int err = 0;
+	struct gart_device *gart = gart_handle;
+	int ret = 0;
 
-	client = devm_kzalloc(gart->dev, sizeof(*c), GFP_KERNEL);
-	if (!client)
-		return -ENOMEM;
-	client->dev = dev;
+	spin_lock(&gart->dom_lock);
 
-	spin_lock(&gart->client_lock);
-	list_for_each_entry(c, &gart->client, list) {
-		if (c->dev == dev) {
-			dev_err(gart->dev,
-				"%s is already attached\n", dev_name(dev));
-			err = -EINVAL;
-			goto fail;
-		}
+	if (gart->active_domain && gart->active_domain != domain) {
+		ret = -EBUSY;
+	} else if (dev_iommu_priv_get(dev) != domain) {
+		dev_iommu_priv_set(dev, domain);
+		gart->active_domain = domain;
+		gart->active_devices++;
 	}
-	list_add(&client->list, &gart->client);
-	spin_unlock(&gart->client_lock);
-	dev_dbg(gart->dev, "Attached %s\n", dev_name(dev));
-	return 0;
 
-fail:
-	devm_kfree(gart->dev, client);
-	spin_unlock(&gart->client_lock);
-	return err;
+	spin_unlock(&gart->dom_lock);
+
+	return ret;
 }
 
 static void gart_iommu_detach_dev(struct iommu_domain *domain,
 				  struct device *dev)
 {
-	struct gart_domain *gart_domain = to_gart_domain(domain);
-	struct gart_device *gart = gart_domain->gart;
-	struct gart_client *c;
+	struct gart_device *gart = gart_handle;
 
-	spin_lock(&gart->client_lock);
+	spin_lock(&gart->dom_lock);
 
-	list_for_each_entry(c, &gart->client, list) {
-		if (c->dev == dev) {
-			list_del(&c->list);
-			devm_kfree(gart->dev, c);
-			dev_dbg(gart->dev, "Detached %s\n", dev_name(dev));
-			goto out;
-		}
+	if (dev_iommu_priv_get(dev) == domain) {
+		dev_iommu_priv_set(dev, NULL);
+
+		if (--gart->active_devices == 0)
+			gart->active_domain = NULL;
 	}
-	dev_err(gart->dev, "Couldn't find\n");
-out:
-	spin_unlock(&gart->client_lock);
+
+	spin_unlock(&gart->dom_lock);
 }
 
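The per-client list bookkeeping is gone: the GART has a single translation aperture, so only one domain can be live at a time, with active_devices counting how many devices share it. A hedged sketch of the resulting behaviour from the consumer side; iommu_domain_alloc() and iommu_attach_device() are the generic IOMMU API of this era, the device names are made up, and error propagation through the core may add its own constraints:

    struct iommu_domain *dom_a = iommu_domain_alloc(&platform_bus_type);
    struct iommu_domain *dom_b = iommu_domain_alloc(&platform_bus_type);
    int err;

    err = iommu_attach_device(dom_a, dev0); /* 0: dom_a becomes active    */
    err = iommu_attach_device(dom_a, dev1); /* 0: same domain, count -> 2 */
    err = iommu_attach_device(dom_b, dev2); /* -EBUSY: dom_a still active */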
@@ -225,44 +144,36 @@
 static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
 {
-	struct gart_domain *gart_domain;
-	struct gart_device *gart;
+	struct iommu_domain *domain;
 
 	if (type != IOMMU_DOMAIN_UNMANAGED)
 		return NULL;
 
-	gart = gart_handle;
-	if (!gart)
-		return NULL;
+	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+	if (domain) {
+		domain->geometry.aperture_start = gart_handle->iovmm_base;
+		domain->geometry.aperture_end = gart_handle->iovmm_end - 1;
+		domain->geometry.force_aperture = true;
+	}
 
-	gart_domain = kzalloc(sizeof(*gart_domain), GFP_KERNEL);
-	if (!gart_domain)
-		return NULL;
-
-	gart_domain->gart = gart;
-	gart_domain->domain.geometry.aperture_start = gart->iovmm_base;
-	gart_domain->domain.geometry.aperture_end = gart->iovmm_base +
-					gart->page_count * GART_PAGE_SIZE - 1;
-	gart_domain->domain.geometry.force_aperture = true;
-
-	return &gart_domain->domain;
+	return domain;
 }
 
 static void gart_iommu_domain_free(struct iommu_domain *domain)
 {
-	struct gart_domain *gart_domain = to_gart_domain(domain);
-	struct gart_device *gart = gart_domain->gart;
+	WARN_ON(gart_handle->active_domain == domain);
+	kfree(domain);
+}
 
-	if (gart) {
-		spin_lock(&gart->client_lock);
-		if (!list_empty(&gart->client)) {
-			struct gart_client *c;
-
-			list_for_each_entry(c, &gart->client, list)
-				gart_iommu_detach_dev(domain, c->dev);
-		}
-		spin_unlock(&gart->client_lock);
+static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova,
+				   unsigned long pa)
+{
+	if (unlikely(gart_debug && gart_pte_valid(gart, iova))) {
+		dev_err(gart->dev, "Page entry is in-use\n");
+		return -EINVAL;
 	}
 
-	kfree(gart_domain);
+	gart_set_pte(gart, iova, GART_ENTRY_PHYS_ADDR_VALID | pa);
+
+	return 0;
 }
 
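With the gart_domain wrapper removed, a plain struct iommu_domain is allocated and its aperture is filled in from the (single, global) gart_handle, so users can still discover the remappable window through the generic geometry fields. A short sketch, assuming an already allocated domain:

    /* The GART only accepts IOVAs inside [aperture_start, aperture_end]. */
    dma_addr_t first_iova = domain->geometry.aperture_start;
    dma_addr_t last_iova  = domain->geometry.aperture_end;

    /* force_aperture tells IOVA allocators not to stray outside it. */
    WARN_ON(!domain->geometry.force_aperture);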
@@ -269,33 +180,29 @@
 static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
-			  phys_addr_t pa, size_t bytes, int prot)
+			  phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
 {
-	struct gart_domain *gart_domain = to_gart_domain(domain);
-	struct gart_device *gart = gart_domain->gart;
-	unsigned long flags;
-	unsigned long pfn;
-	unsigned long pte;
+	struct gart_device *gart = gart_handle;
+	int ret;
 
-	if (!gart_iova_range_valid(gart, iova, bytes))
+	if (gart_iova_range_invalid(gart, iova, bytes))
 		return -EINVAL;
 
-	spin_lock_irqsave(&gart->pte_lock, flags);
-	pfn = __phys_to_pfn(pa);
-	if (!pfn_valid(pfn)) {
-		dev_err(gart->dev, "Invalid page: %pa\n", &pa);
-		spin_unlock_irqrestore(&gart->pte_lock, flags);
+	spin_lock(&gart->pte_lock);
+	ret = __gart_iommu_map(gart, iova, (unsigned long)pa);
+	spin_unlock(&gart->pte_lock);
+
+	return ret;
+}
+
+static inline int __gart_iommu_unmap(struct gart_device *gart,
+				     unsigned long iova)
+{
+	if (unlikely(gart_debug && !gart_pte_valid(gart, iova))) {
+		dev_err(gart->dev, "Page entry is invalid\n");
 		return -EINVAL;
 	}
-	if (gart_debug) {
-		pte = gart_read_pte(gart, iova);
-		if (pte & GART_ENTRY_PHYS_ADDR_VALID) {
-			spin_unlock_irqrestore(&gart->pte_lock, flags);
-			dev_err(gart->dev, "Page entry is in-use\n");
-			return -EBUSY;
-		}
-	}
-	gart_set_pte(gart, iova, GART_PTE(pfn));
-	FLUSH_GART_REGS(gart);
-	spin_unlock_irqrestore(&gart->pte_lock, flags);
+
+	gart_set_pte(gart, iova, 0);
+
 	return 0;
 }
 
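gart_iommu_map() now only validates the range and takes the spinlock; the PTE write (and the optional gart_debug collision check) lives in __gart_iommu_map(). Note there is no FLUSH_GART_REGS() here any more: flushing is deferred to the iotlb_sync_map() callback added further down in this patch. Since pgsize_bitmap only advertises 4KiB pages, the IOMMU core splits larger requests itself. A hedged usage sketch, with the iommu_map() signature of this kernel generation:

    /* One call; the core issues size / GART_PAGE_SIZE map ops,
     * followed by a single flush. */
    err = iommu_map(domain, iova, phys, 16 * GART_PAGE_SIZE,
                    IOMMU_READ | IOMMU_WRITE);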
@@ -302,17 +209,16 @@
 static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
-			       size_t bytes)
+			       size_t bytes, struct iommu_iotlb_gather *gather)
 {
-	struct gart_domain *gart_domain = to_gart_domain(domain);
-	struct gart_device *gart = gart_domain->gart;
-	unsigned long flags;
+	struct gart_device *gart = gart_handle;
+	int err;
 
-	if (!gart_iova_range_valid(gart, iova, bytes))
+	if (gart_iova_range_invalid(gart, iova, bytes))
 		return 0;
 
-	spin_lock_irqsave(&gart->pte_lock, flags);
-	gart_set_pte(gart, iova, 0);
-	FLUSH_GART_REGS(gart);
-	spin_unlock_irqrestore(&gart->pte_lock, flags);
-	return bytes;
+	spin_lock(&gart->pte_lock);
+	err = __gart_iommu_unmap(gart, iova);
+	spin_unlock(&gart->pte_lock);
+
+	return err ? 0 : bytes;
 }
 
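The unmap side mirrors the map path: a failed debug check makes gart_iommu_unmap() report 0 bytes, which the core treats as a failed unmap, while success reports the full page. A hedged caller's-eye sketch:

    size_t unmapped;

    unmapped = iommu_unmap(domain, iova, GART_PAGE_SIZE);
    if (unmapped != GART_PAGE_SIZE)
            pr_warn("unmap failed\n"); /* with gart_debug set, this means
                                          the PTE was already invalid */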
@@ -319,27 +225,17 @@
 static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
 					   dma_addr_t iova)
 {
-	struct gart_domain *gart_domain = to_gart_domain(domain);
-	struct gart_device *gart = gart_domain->gart;
+	struct gart_device *gart = gart_handle;
 	unsigned long pte;
-	phys_addr_t pa;
-	unsigned long flags;
 
-	if (!gart_iova_range_valid(gart, iova, 0))
+	if (gart_iova_range_invalid(gart, iova, GART_PAGE_SIZE))
 		return -EINVAL;
 
-	spin_lock_irqsave(&gart->pte_lock, flags);
+	spin_lock(&gart->pte_lock);
 	pte = gart_read_pte(gart, iova);
-	spin_unlock_irqrestore(&gart->pte_lock, flags);
+	spin_unlock(&gart->pte_lock);
 
-	pa = (pte & GART_PAGE_MASK);
-	if (!pfn_valid(__phys_to_pfn(pa))) {
-		dev_err(gart->dev, "No entry for %08llx:%pa\n",
-			(unsigned long long)iova, &pa);
-		gart_dump_table(gart);
-		return -EINVAL;
-	}
-	return pa;
+	return pte & GART_PAGE_MASK;
 }
 
 static bool gart_iommu_capable(enum iommu_cap cap)
@@ -347,17 +243,21 @@
 	return false;
 }
 
-static int gart_iommu_add_device(struct device *dev)
+static struct iommu_device *gart_iommu_probe_device(struct device *dev)
 {
-	struct iommu_group *group = iommu_group_get_for_dev(dev);
+	if (!dev_iommu_fwspec_get(dev))
+		return ERR_PTR(-ENODEV);
 
-	if (IS_ERR(group))
-		return PTR_ERR(group);
+	return &gart_handle->iommu;
+}
 
-	iommu_group_put(group);
+static void gart_iommu_release_device(struct device *dev)
+{
+}
 
-	iommu_device_link(&gart_handle->iommu, dev);
-
+static int gart_iommu_of_xlate(struct device *dev,
+			       struct of_phandle_args *args)
+{
 	return 0;
 }
 
@@ -364,7 +264,15 @@
-static void gart_iommu_remove_device(struct device *dev)
+static void gart_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
+			       size_t size)
 {
-	iommu_group_remove_device(dev);
-	iommu_device_unlink(&gart_handle->iommu, dev);
+	FLUSH_GART_REGS(gart_handle);
+}
+
+static void gart_iommu_sync(struct iommu_domain *domain,
+			    struct iommu_iotlb_gather *gather)
+{
+	size_t length = gather->end - gather->start;
+
+	gart_iommu_sync_map(domain, gather->start, length);
 }
 
 static const struct iommu_ops gart_iommu_ops = {
@@ -373,12 +281,15 @@
 	.domain_free	= gart_iommu_domain_free,
 	.attach_dev	= gart_iommu_attach_dev,
 	.detach_dev	= gart_iommu_detach_dev,
-	.add_device	= gart_iommu_add_device,
-	.remove_device	= gart_iommu_remove_device,
+	.probe_device	= gart_iommu_probe_device,
+	.release_device	= gart_iommu_release_device,
 	.device_group	= generic_device_group,
 	.map		= gart_iommu_map,
 	.unmap		= gart_iommu_unmap,
 	.iova_to_phys	= gart_iommu_iova_to_phys,
 	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
+	.of_xlate	= gart_iommu_of_xlate,
+	.iotlb_sync_map	= gart_iommu_sync_map,
+	.iotlb_sync	= gart_iommu_sync,
 };
 
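The ops table is where the deferred-flush design comes together: map and unmap no longer touch the hardware flush themselves; the core calls iotlb_sync_map() once after a mapping and iotlb_sync() once per gathered unmap range. A hedged sketch of the effect (the counts are illustrative):

    /* Before: N pages mapped  => N register read-backs.
     * After:  N pages mapped  => N PTE writes + 1 read-back. */
    iommu_map(domain, iova, phys, 16 * GART_PAGE_SIZE, prot);
            /* -> 16x gart_iommu_map(), then 1x gart_iommu_sync_map() */
    iommu_unmap(domain, iova, 16 * GART_PAGE_SIZE);
            /* -> 16x gart_iommu_unmap(), then 1x gart_iommu_sync() */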
@@ -385,25 +296,26 @@
-static int tegra_gart_suspend(struct device *dev)
+int tegra_gart_suspend(struct gart_device *gart)
 {
-	struct gart_device *gart = dev_get_drvdata(dev);
-	unsigned long iova;
 	u32 *data = gart->savedata;
-	unsigned long flags;
+	unsigned long iova;
 
-	spin_lock_irqsave(&gart->pte_lock, flags);
+	/*
+	 * All GART users shall be suspended at this point. Disable
+	 * address translation to trap all GART accesses as invalid
+	 * memory accesses.
+	 */
+	writel_relaxed(0, gart->regs + GART_CONFIG);
+	FLUSH_GART_REGS(gart);
+
 	for_each_gart_pte(gart, iova)
 		*(data++) = gart_read_pte(gart, iova);
-	spin_unlock_irqrestore(&gart->pte_lock, flags);
+
 	return 0;
 }
 
-static int tegra_gart_resume(struct device *dev)
+int tegra_gart_resume(struct gart_device *gart)
 {
-	struct gart_device *gart = dev_get_drvdata(dev);
-	unsigned long flags;
-
-	spin_lock_irqsave(&gart->pte_lock, flags);
 	do_gart_setup(gart, gart->savedata);
-	spin_unlock_irqrestore(&gart->pte_lock, flags);
+
 	return 0;
 }
 
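tegra_gart_suspend()/tegra_gart_resume() are no longer dev_pm_ops callbacks; they become plain helpers for the memory-controller driver, and the locking can go because all GART users are suspended first. A hedged sketch of the call site, assuming the tegra-mc driver's PM hooks (names approximate that driver, not this patch):

    /* In the memory-controller driver's suspend path: */
    static int tegra_mc_suspend(struct device *dev)
    {
            struct tegra_mc *mc = dev_get_drvdata(dev);

            if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && mc->gart)
                    return tegra_gart_suspend(mc->gart);

            return 0;
    }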
410 | | -static int tegra_gart_probe(struct platform_device *pdev) |
---|
| 322 | +struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc) |
---|
411 | 323 | { |
---|
412 | 324 | struct gart_device *gart; |
---|
413 | | - struct resource *res, *res_remap; |
---|
414 | | - void __iomem *gart_regs; |
---|
415 | | - struct device *dev = &pdev->dev; |
---|
416 | | - int ret; |
---|
417 | | - |
---|
418 | | - if (gart_handle) |
---|
419 | | - return -EIO; |
---|
| 325 | + struct resource *res; |
---|
| 326 | + int err; |
---|
420 | 327 | |
---|
421 | 328 | BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT); |
---|
422 | 329 | |
---|
423 | 330 | /* the GART memory aperture is required */ |
---|
424 | | - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
---|
425 | | - res_remap = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
---|
426 | | - if (!res || !res_remap) { |
---|
427 | | - dev_err(dev, "GART memory aperture expected\n"); |
---|
428 | | - return -ENXIO; |
---|
| 331 | + res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 1); |
---|
| 332 | + if (!res) { |
---|
| 333 | + dev_err(dev, "Memory aperture resource unavailable\n"); |
---|
| 334 | + return ERR_PTR(-ENXIO); |
---|
429 | 335 | } |
---|
430 | 336 | |
---|
431 | | - gart = devm_kzalloc(dev, sizeof(*gart), GFP_KERNEL); |
---|
432 | | - if (!gart) { |
---|
433 | | - dev_err(dev, "failed to allocate gart_device\n"); |
---|
434 | | - return -ENOMEM; |
---|
435 | | - } |
---|
436 | | - |
---|
437 | | - gart_regs = devm_ioremap(dev, res->start, resource_size(res)); |
---|
438 | | - if (!gart_regs) { |
---|
439 | | - dev_err(dev, "failed to remap GART registers\n"); |
---|
440 | | - return -ENXIO; |
---|
441 | | - } |
---|
442 | | - |
---|
443 | | - ret = iommu_device_sysfs_add(&gart->iommu, &pdev->dev, NULL, |
---|
444 | | - dev_name(&pdev->dev)); |
---|
445 | | - if (ret) { |
---|
446 | | - dev_err(dev, "Failed to register IOMMU in sysfs\n"); |
---|
447 | | - return ret; |
---|
448 | | - } |
---|
449 | | - |
---|
450 | | - iommu_device_set_ops(&gart->iommu, &gart_iommu_ops); |
---|
451 | | - |
---|
452 | | - ret = iommu_device_register(&gart->iommu); |
---|
453 | | - if (ret) { |
---|
454 | | - dev_err(dev, "Failed to register IOMMU\n"); |
---|
455 | | - iommu_device_sysfs_remove(&gart->iommu); |
---|
456 | | - return ret; |
---|
457 | | - } |
---|
458 | | - |
---|
459 | | - gart->dev = &pdev->dev; |
---|
460 | | - spin_lock_init(&gart->pte_lock); |
---|
461 | | - spin_lock_init(&gart->client_lock); |
---|
462 | | - INIT_LIST_HEAD(&gart->client); |
---|
463 | | - gart->regs = gart_regs; |
---|
464 | | - gart->iovmm_base = (dma_addr_t)res_remap->start; |
---|
465 | | - gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT); |
---|
466 | | - |
---|
467 | | - gart->savedata = vmalloc(array_size(sizeof(u32), gart->page_count)); |
---|
468 | | - if (!gart->savedata) { |
---|
469 | | - dev_err(dev, "failed to allocate context save area\n"); |
---|
470 | | - return -ENOMEM; |
---|
471 | | - } |
---|
472 | | - |
---|
473 | | - platform_set_drvdata(pdev, gart); |
---|
474 | | - do_gart_setup(gart, NULL); |
---|
| 337 | + gart = kzalloc(sizeof(*gart), GFP_KERNEL); |
---|
| 338 | + if (!gart) |
---|
| 339 | + return ERR_PTR(-ENOMEM); |
---|
475 | 340 | |
---|
476 | 341 | gart_handle = gart; |
---|
477 | 342 | |
---|
478 | | - return 0; |
---|
479 | | -} |
---|
| 343 | + gart->dev = dev; |
---|
| 344 | + gart->regs = mc->regs + GART_REG_BASE; |
---|
| 345 | + gart->iovmm_base = res->start; |
---|
| 346 | + gart->iovmm_end = res->end + 1; |
---|
| 347 | + spin_lock_init(&gart->pte_lock); |
---|
| 348 | + spin_lock_init(&gart->dom_lock); |
---|
480 | 349 | |
---|
481 | | -static int tegra_gart_remove(struct platform_device *pdev) |
---|
482 | | -{ |
---|
483 | | - struct gart_device *gart = platform_get_drvdata(pdev); |
---|
| 350 | + do_gart_setup(gart, NULL); |
---|
484 | 351 | |
---|
| 352 | + err = iommu_device_sysfs_add(&gart->iommu, dev, NULL, "gart"); |
---|
| 353 | + if (err) |
---|
| 354 | + goto free_gart; |
---|
| 355 | + |
---|
| 356 | + iommu_device_set_ops(&gart->iommu, &gart_iommu_ops); |
---|
| 357 | + iommu_device_set_fwnode(&gart->iommu, dev->fwnode); |
---|
| 358 | + |
---|
| 359 | + err = iommu_device_register(&gart->iommu); |
---|
| 360 | + if (err) |
---|
| 361 | + goto remove_sysfs; |
---|
| 362 | + |
---|
| 363 | + gart->savedata = vmalloc(resource_size(res) / GART_PAGE_SIZE * |
---|
| 364 | + sizeof(u32)); |
---|
| 365 | + if (!gart->savedata) { |
---|
| 366 | + err = -ENOMEM; |
---|
| 367 | + goto unregister_iommu; |
---|
| 368 | + } |
---|
| 369 | + |
---|
| 370 | + return gart; |
---|
| 371 | + |
---|
| 372 | +unregister_iommu: |
---|
485 | 373 | iommu_device_unregister(&gart->iommu); |
---|
| 374 | +remove_sysfs: |
---|
486 | 375 | iommu_device_sysfs_remove(&gart->iommu); |
---|
| 376 | +free_gart: |
---|
| 377 | + kfree(gart); |
---|
487 | 378 | |
---|
488 | | - writel(0, gart->regs + GART_CONFIG); |
---|
489 | | - if (gart->savedata) |
---|
490 | | - vfree(gart->savedata); |
---|
491 | | - gart_handle = NULL; |
---|
492 | | - return 0; |
---|
| 379 | + return ERR_PTR(err); |
---|
493 | 380 | } |
---|
494 | 381 | |
---|
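tegra_gart_probe() now takes the memory controller's device and register mapping instead of being a platform driver of its own: the GART register window lives inside the MC aperture (hence GART_REG_BASE = 0x24 applied to mc->regs), and only the remapping aperture is fetched as a separate resource. A hedged sketch of the instantiation from the tegra-mc probe path (error handling abbreviated, names approximate that driver):

    /* In tegra-mc's probe, after mc->regs has been mapped: */
    mc->gart = tegra_gart_probe(&pdev->dev, mc);
    if (IS_ERR(mc->gart)) {
            dev_err(&pdev->dev, "failed to probe GART: %ld\n",
                    PTR_ERR(mc->gart));
            mc->gart = NULL;
    }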
@@ -495,40 +382,2 @@
-static const struct dev_pm_ops tegra_gart_pm_ops = {
-	.suspend	= tegra_gart_suspend,
-	.resume		= tegra_gart_resume,
-};
-
-static const struct of_device_id tegra_gart_of_match[] = {
-	{ .compatible = "nvidia,tegra20-gart", },
-	{ },
-};
-MODULE_DEVICE_TABLE(of, tegra_gart_of_match);
-
-static struct platform_driver tegra_gart_driver = {
-	.probe		= tegra_gart_probe,
-	.remove		= tegra_gart_remove,
-	.driver = {
-		.name	= "tegra-gart",
-		.pm	= &tegra_gart_pm_ops,
-		.of_match_table = tegra_gart_of_match,
-	},
-};
-
-static int tegra_gart_init(void)
-{
-	return platform_driver_register(&tegra_gart_driver);
-}
-
-static void __exit tegra_gart_exit(void)
-{
-	platform_driver_unregister(&tegra_gart_driver);
-}
-
-subsys_initcall(tegra_gart_init);
-module_exit(tegra_gart_exit);
 module_param(gart_debug, bool, 0644);
-
 MODULE_PARM_DESC(gart_debug, "Enable GART debugging");
-MODULE_DESCRIPTION("IOMMU API for GART in Tegra20");
-MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
-MODULE_ALIAS("platform:tegra-gart");
-MODULE_LICENSE("GPL v2");
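With the platform-driver boilerplate and the MODULE_*() macros gone, the code is always built in, but gart_debug survives as a parameter. It should still be adjustable at runtime through sysfs, or settable on the kernel command line, for example:

    tegra-gart.gart_debug=1

(dashes and underscores are interchangeable in kernel parameter names, so the exact prefix spelling is a detail; treat the line above as illustrative).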
---|