+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Tegra host1x driver
  *
  * Copyright (c) 2010-2013, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/clk.h>
@@ ... @@
 #include <trace/events/host1x.h>
 #undef CREATE_TRACE_POINTS
 
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+#endif
+
 #include "bus.h"
 #include "channel.h"
 #include "debug.h"
@@ ... @@
 #include "hw/host1x04.h"
 #include "hw/host1x05.h"
 #include "hw/host1x06.h"
+#include "hw/host1x07.h"
 
 void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
 {
@@ ... @@
 	.init = host1x01_init,
 	.sync_offset = 0x3000,
 	.dma_mask = DMA_BIT_MASK(32),
+	.has_wide_gather = false,
+	.has_hypervisor = false,
+	.num_sid_entries = 0,
+	.sid_table = NULL,
 };
 
 static const struct host1x_info host1x02_info = {
@@ ... @@
 	.init = host1x02_init,
 	.sync_offset = 0x3000,
 	.dma_mask = DMA_BIT_MASK(32),
+	.has_wide_gather = false,
+	.has_hypervisor = false,
+	.num_sid_entries = 0,
+	.sid_table = NULL,
 };
 
 static const struct host1x_info host1x04_info = {
@@ ... @@
 	.init = host1x04_init,
 	.sync_offset = 0x2100,
 	.dma_mask = DMA_BIT_MASK(34),
+	.has_wide_gather = false,
+	.has_hypervisor = false,
+	.num_sid_entries = 0,
+	.sid_table = NULL,
 };
 
 static const struct host1x_info host1x05_info = {
@@ ... @@
 	.init = host1x05_init,
 	.sync_offset = 0x2100,
 	.dma_mask = DMA_BIT_MASK(34),
+	.has_wide_gather = false,
+	.has_hypervisor = false,
+	.num_sid_entries = 0,
+	.sid_table = NULL,
+};
+
+static const struct host1x_sid_entry tegra186_sid_table[] = {
+	{
+		/* VIC */
+		.base = 0x1af0,
+		.offset = 0x30,
+		.limit = 0x34
+	},
 };
 
 static const struct host1x_info host1x06_info = {
@@ ... @@
 	.nb_bases = 16,
 	.init = host1x06_init,
 	.sync_offset = 0x0,
-	.dma_mask = DMA_BIT_MASK(34),
+	.dma_mask = DMA_BIT_MASK(40),
+	.has_wide_gather = true,
 	.has_hypervisor = true,
+	.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
+	.sid_table = tegra186_sid_table,
+};
+
+static const struct host1x_sid_entry tegra194_sid_table[] = {
+	{
+		/* VIC */
+		.base = 0x1af0,
+		.offset = 0x30,
+		.limit = 0x34
+	},
+};
+
+static const struct host1x_info host1x07_info = {
+	.nb_channels = 63,
+	.nb_pts = 704,
+	.nb_mlocks = 32,
+	.nb_bases = 0,
+	.init = host1x07_init,
+	.sync_offset = 0x0,
+	.dma_mask = DMA_BIT_MASK(40),
+	.has_wide_gather = true,
+	.has_hypervisor = true,
+	.num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
+	.sid_table = tegra194_sid_table,
 };
 
 static const struct of_device_id host1x_of_match[] = {
+	{ .compatible = "nvidia,tegra194-host1x", .data = &host1x07_info, },
 	{ .compatible = "nvidia,tegra186-host1x", .data = &host1x06_info, },
 	{ .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
 	{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
@@ ... @@
 	{ },
 };
 MODULE_DEVICE_TABLE(of, host1x_of_match);
+
+static void host1x_setup_sid_table(struct host1x *host)
+{
+	const struct host1x_info *info = host->info;
+	unsigned int i;
+
+	for (i = 0; i < info->num_sid_entries; i++) {
+		const struct host1x_sid_entry *entry = &info->sid_table[i];
+
+		host1x_hypervisor_writel(host, entry->offset, entry->base);
+		host1x_hypervisor_writel(host, entry->limit, entry->base + 4);
+	}
+}
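
/*
 * A worked example (a sketch, not part of the patch): for the Tegra186
 * VIC entry above (.base = 0x1af0, .offset = 0x30, .limit = 0x34), the
 * loop in host1x_setup_sid_table() reduces to two writes through the
 * hypervisor register aperture:
 *
 *	host1x_hypervisor_writel(host, 0x30, 0x1af0);     // entry->offset at base
 *	host1x_hypervisor_writel(host, 0x34, 0x1af0 + 4); // entry->limit at base + 4
 */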
+
+static bool host1x_wants_iommu(struct host1x *host1x)
+{
+	/* Our IOMMU usage policy doesn't currently play well with GART */
+	if (of_machine_is_compatible("nvidia,tegra20"))
+		return false;
+
+	/*
+	 * If we support addressing a maximum of 32 bits of physical memory
+	 * and if the host1x firewall is enabled, there's no need to enable
+	 * IOMMU support. This can happen for example on Tegra20, Tegra30
+	 * and Tegra114.
+	 *
+	 * Tegra124 and later can address up to 34 bits of physical memory and
+	 * many platforms come equipped with more than 2 GiB of system memory,
+	 * which requires crossing the 4 GiB boundary. But there's a catch: on
+	 * SoCs before Tegra186 (i.e. Tegra124 and Tegra210), the host1x can
+	 * only address up to 32 bits of memory in GATHER opcodes, which means
+	 * that command buffers need to either be in the first 2 GiB of system
+	 * memory (which could quickly lead to memory exhaustion), or command
+	 * buffers need to be treated differently from other buffers (which is
+	 * not possible with the current ABI).
+	 *
+	 * A third option is to use the IOMMU in these cases to make sure all
+	 * buffers will be mapped into a 32-bit IOVA space that host1x can
+	 * address. This allows all of the system memory to be used and works
+	 * within the limitations of the host1x on these SoCs.
+	 *
+	 * In summary, default to enable IOMMU on Tegra124 and later. For any
+	 * of the earlier SoCs, only use the IOMMU for additional safety when
+	 * the host1x firewall is disabled.
+	 */
+	if (host1x->info->dma_mask <= DMA_BIT_MASK(32)) {
+		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+			return false;
+	}
+
+	return true;
+}
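
/*
 * Spot checks of the policy above (a sketch, not part of the patch),
 * using the chip tables earlier in this file: Tegra30 and Tegra114 carry
 * 32-bit DMA masks, so with CONFIG_TEGRA_HOST1X_FIREWALL enabled this
 * returns false and the IOMMU is skipped; Tegra124 and Tegra210 carry
 * 34-bit masks, so it returns true regardless of the firewall; Tegra20
 * always hits the GART check at the top and returns false.
 */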
+
+static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
+{
+	struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
+	int err;
+
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+	if (host->dev->archdata.mapping) {
+		struct dma_iommu_mapping *mapping =
+			to_dma_iommu_mapping(host->dev);
+		arm_iommu_detach_device(host->dev);
+		arm_iommu_release_mapping(mapping);
+
+		domain = iommu_get_domain_for_dev(host->dev);
+	}
+#endif
+
+	/*
+	 * We may not always want to enable IOMMU support (for example if the
+	 * host1x firewall is already enabled and we don't support addressing
+	 * more than 32 bits of physical memory), so check for that first.
+	 *
+	 * Similarly, if host1x is already attached to an IOMMU (via the DMA
+	 * API), don't try to attach again.
+	 */
+	if (!host1x_wants_iommu(host) || domain)
+		return domain;
+
+	host->group = iommu_group_get(host->dev);
+	if (host->group) {
+		struct iommu_domain_geometry *geometry;
+		dma_addr_t start, end;
+		unsigned long order;
+
+		err = iova_cache_get();
+		if (err < 0)
+			goto put_group;
+
+		host->domain = iommu_domain_alloc(&platform_bus_type);
+		if (!host->domain) {
+			err = -ENOMEM;
+			goto put_cache;
+		}
+
+		err = iommu_attach_group(host->domain, host->group);
+		if (err) {
+			if (err == -ENODEV)
+				err = 0;
+
+			goto free_domain;
+		}
+
+		geometry = &host->domain->geometry;
+		start = geometry->aperture_start & host->info->dma_mask;
+		end = geometry->aperture_end & host->info->dma_mask;
+
+		order = __ffs(host->domain->pgsize_bitmap);
+		init_iova_domain(&host->iova, 1UL << order, start >> order);
+		host->iova_end = end;
+
+		domain = host->domain;
+	}
+
+	return domain;
+
+free_domain:
+	iommu_domain_free(host->domain);
+	host->domain = NULL;
+put_cache:
+	iova_cache_put();
+put_group:
+	iommu_group_put(host->group);
+	host->group = NULL;
+
+	return ERR_PTR(err);
+}
+
+static int host1x_iommu_init(struct host1x *host)
+{
+	u64 mask = host->info->dma_mask;
+	struct iommu_domain *domain;
+	int err;
+
+	domain = host1x_iommu_attach(host);
+	if (IS_ERR(domain)) {
+		err = PTR_ERR(domain);
+		dev_err(host->dev, "failed to attach to IOMMU: %d\n", err);
+		return err;
+	}
+
+	/*
+	 * If we're not behind an IOMMU make sure we don't get push buffers
+	 * that are allocated outside of the range addressable by the GATHER
+	 * opcode.
+	 *
+	 * Newer generations of Tegra (Tegra186 and later) support a wide
+	 * variant of the GATHER opcode that allows addressing more bits.
+	 */
+	if (!domain && !host->info->has_wide_gather)
+		mask = DMA_BIT_MASK(32);
+
+	err = dma_coerce_mask_and_coherent(host->dev, mask);
+	if (err < 0) {
+		dev_err(host->dev, "failed to set DMA mask: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static void host1x_iommu_exit(struct host1x *host)
+{
+	if (host->domain) {
+		put_iova_domain(&host->iova);
+		iommu_detach_group(host->domain, host->group);
+
+		iommu_domain_free(host->domain);
+		host->domain = NULL;
+
+		iova_cache_put();
+
+		iommu_group_put(host->group);
+		host->group = NULL;
+	}
+}
 
 static int host1x_probe(struct platform_device *pdev)
 {
@@ ... @@
 	}
 
 	syncpt_irq = platform_get_irq(pdev, 0);
-	if (syncpt_irq < 0) {
-		dev_err(&pdev->dev, "failed to get IRQ: %d\n", syncpt_irq);
+	if (syncpt_irq < 0)
 		return syncpt_irq;
-	}
 
 	mutex_init(&host->devices_lock);
 	INIT_LIST_HEAD(&host->devices);
@@ ... @@
 			return PTR_ERR(host->hv_regs);
 	}
 
-	dma_set_mask_and_coherent(host->dev, host->info->dma_mask);
+	host->dev->dma_parms = &host->dma_parms;
+	dma_set_max_seg_size(host->dev, UINT_MAX);
 
 	if (host->info->init) {
 		err = host->info->init(host);
@@ ... @@
 
 	host->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(host->clk)) {
-		dev_err(&pdev->dev, "failed to get clock\n");
 		err = PTR_ERR(host->clk);
+
+		if (err != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "failed to get clock: %d\n", err);
+
 		return err;
 	}
 
@@ ... @@
 		return err;
 	}
 
-	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
-		goto skip_iommu;
-
-	host->group = iommu_group_get(&pdev->dev);
-	if (host->group) {
-		struct iommu_domain_geometry *geometry;
-		unsigned long order;
-
-		err = iova_cache_get();
-		if (err < 0)
-			goto put_group;
-
-		host->domain = iommu_domain_alloc(&platform_bus_type);
-		if (!host->domain) {
-			err = -ENOMEM;
-			goto put_cache;
-		}
-
-		err = iommu_attach_group(host->domain, host->group);
-		if (err) {
-			if (err == -ENODEV) {
-				iommu_domain_free(host->domain);
-				host->domain = NULL;
-				iova_cache_put();
-				iommu_group_put(host->group);
-				host->group = NULL;
-				goto skip_iommu;
-			}
-
-			goto fail_free_domain;
-		}
-
-		geometry = &host->domain->geometry;
-
-		order = __ffs(host->domain->pgsize_bitmap);
-		init_iova_domain(&host->iova, 1UL << order,
-				 geometry->aperture_start >> order);
-		host->iova_end = geometry->aperture_end;
+	err = host1x_iommu_init(host);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to setup IOMMU: %d\n", err);
+		return err;
 	}
 
-skip_iommu:
 	err = host1x_channel_list_init(&host->channel_list,
 				       host->info->nb_channels);
 	if (err) {
 		dev_err(&pdev->dev, "failed to initialize channel list\n");
-		goto fail_detach_device;
+		goto iommu_exit;
 	}
 
 	err = clk_prepare_enable(host->clk);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to enable clock\n");
-		goto fail_free_channels;
+		goto free_channels;
 	}
 
 	err = reset_control_deassert(host->rst);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to deassert reset: %d\n", err);
-		goto fail_unprepare_disable;
+		goto unprepare_disable;
 	}
 
 	err = host1x_syncpt_init(host);
 	if (err) {
 		dev_err(&pdev->dev, "failed to initialize syncpts\n");
-		goto fail_reset_assert;
+		goto reset_assert;
 	}
 
 	err = host1x_intr_init(host, syncpt_irq);
 	if (err) {
 		dev_err(&pdev->dev, "failed to initialize interrupts\n");
-		goto fail_deinit_syncpt;
+		goto deinit_syncpt;
 	}
 
 	host1x_debug_init(host);
 
+	if (host->info->has_hypervisor)
+		host1x_setup_sid_table(host);
+
 	err = host1x_register(host);
 	if (err < 0)
-		goto fail_deinit_intr;
+		goto deinit_debugfs;
+
+	err = devm_of_platform_populate(&pdev->dev);
+	if (err < 0)
+		goto unregister;
 
 	return 0;
 
-fail_deinit_intr:
+unregister:
+	host1x_unregister(host);
+deinit_debugfs:
+	host1x_debug_deinit(host);
 	host1x_intr_deinit(host);
-fail_deinit_syncpt:
+deinit_syncpt:
 	host1x_syncpt_deinit(host);
-fail_reset_assert:
+reset_assert:
 	reset_control_assert(host->rst);
-fail_unprepare_disable:
+unprepare_disable:
 	clk_disable_unprepare(host->clk);
-fail_free_channels:
+free_channels:
 	host1x_channel_list_free(&host->channel_list);
-fail_detach_device:
-	if (host->group && host->domain) {
-		put_iova_domain(&host->iova);
-		iommu_detach_group(host->domain, host->group);
-	}
-fail_free_domain:
-	if (host->domain)
-		iommu_domain_free(host->domain);
-put_cache:
-	if (host->group)
-		iova_cache_put();
-put_group:
-	iommu_group_put(host->group);
+iommu_exit:
+	host1x_iommu_exit(host);
 
 	return err;
 }
@@ ... @@
 	struct host1x *host = platform_get_drvdata(pdev);
 
 	host1x_unregister(host);
+	host1x_debug_deinit(host);
 	host1x_intr_deinit(host);
 	host1x_syncpt_deinit(host);
 	reset_control_assert(host->rst);
 	clk_disable_unprepare(host->clk);
-
-	if (host->domain) {
-		put_iova_domain(&host->iova);
-		iommu_detach_group(host->domain, host->group);
-		iommu_domain_free(host->domain);
-		iova_cache_put();
-		iommu_group_put(host->group);
-	}
+	host1x_channel_list_free(&host->channel_list);
+	host1x_iommu_exit(host);
 
 	return 0;
 }
@@ ... @@
 }
 module_exit(tegra_host1x_exit);
 
+/**
+ * host1x_get_dma_mask() - query the supported DMA mask for host1x
+ * @host1x: host1x instance
+ *
+ * Note that this returns the supported DMA mask for host1x, which can be
+ * different from the applicable DMA mask under certain circumstances.
+ */
+u64 host1x_get_dma_mask(struct host1x *host1x)
+{
+	return host1x->info->dma_mask;
+}
+EXPORT_SYMBOL(host1x_get_dma_mask);
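
/*
 * A hypothetical caller (a sketch, not part of the patch): a host1x client
 * driver clamping its own DMA mask to what host1x can address. The way the
 * struct host1x pointer is looked up here is an assumption.
 */
static int example_client_set_dma_mask(struct device *dev)
{
	struct host1x *host1x = dev_get_drvdata(dev->parent); /* assumed layout */
	u64 mask = host1x_get_dma_mask(host1x);

	return dma_coerce_mask_and_coherent(dev, mask);
}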
+
 MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
 MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
 MODULE_DESCRIPTION("Host1x driver for Tegra products");