.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /* |
---|
2 | 3 | * A fairly generic DMA-API to IOMMU-API glue layer. |
---|
3 | 4 | * |
---|
.. | .. |
---|
5 | 6 | * |
---|
6 | 7 | * based in part on arch/arm/mm/dma-mapping.c: |
---|
7 | 8 | * Copyright (C) 2000-2004 Russell King |
---|
8 | | - * |
---|
9 | | - * This program is free software; you can redistribute it and/or modify |
---|
10 | | - * it under the terms of the GNU General Public License version 2 as |
---|
11 | | - * published by the Free Software Foundation. |
---|
12 | | - * |
---|
13 | | - * This program is distributed in the hope that it will be useful, |
---|
14 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
---|
15 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
---|
16 | | - * GNU General Public License for more details. |
---|
17 | | - * |
---|
18 | | - * You should have received a copy of the GNU General Public License |
---|
19 | | - * along with this program. If not, see <http://www.gnu.org/licenses/>. |
---|
20 | 9 | */ |
---|
21 | 10 | |
---|
22 | 11 | #include <linux/acpi_iort.h> |
---|
23 | 12 | #include <linux/device.h> |
---|
| 13 | +#include <linux/dma-map-ops.h> |
---|
24 | 14 | #include <linux/dma-iommu.h> |
---|
25 | 15 | #include <linux/gfp.h> |
---|
26 | 16 | #include <linux/huge_mm.h> |
---|
.. | .. |
---|
28 | 18 | #include <linux/iova.h> |
---|
29 | 19 | #include <linux/irq.h> |
---|
30 | 20 | #include <linux/mm.h> |
---|
| 21 | +#include <linux/mutex.h> |
---|
31 | 22 | #include <linux/pci.h> |
---|
32 | 23 | #include <linux/scatterlist.h> |
---|
33 | 24 | #include <linux/vmalloc.h> |
---|
34 | | - |
---|
35 | | -#define IOMMU_MAPPING_ERROR 0 |
---|
| 25 | +#include <linux/crash_dump.h> |
---|
| 26 | +#include <trace/hooks/iommu.h> |
---|
36 | 27 | |
---|
37 | 28 | struct iommu_dma_msi_page { |
---|
38 | 29 | struct list_head list; |
---|
.. | .. |
---|
54 | 45 | dma_addr_t msi_iova; |
---|
55 | 46 | }; |
---|
56 | 47 | struct list_head msi_page_list; |
---|
57 | | - spinlock_t msi_lock; |
---|
| 48 | + |
---|
| 49 | + /* Domain for flush queue callback; NULL if flush queue not in use */ |
---|
| 50 | + struct iommu_domain *fq_domain; |
---|
| 51 | +}; |
---|
| 52 | + |
---|
| 53 | +struct iommu_dma_cookie_ext { |
---|
| 54 | + struct iommu_dma_cookie cookie; |
---|
| 55 | + struct mutex mutex; |
---|
58 | 56 | }; |
---|
59 | 57 | |
---|
60 | 58 | static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie) |
---|
.. | .. |
---|
66 | 64 | |
---|
67 | 65 | static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type) |
---|
68 | 66 | { |
---|
69 | | - struct iommu_dma_cookie *cookie; |
---|
| 67 | + struct iommu_dma_cookie_ext *cookie; |
---|
70 | 68 | |
---|
71 | 69 | cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); |
---|
72 | 70 | if (cookie) { |
---|
73 | | - spin_lock_init(&cookie->msi_lock); |
---|
74 | | - INIT_LIST_HEAD(&cookie->msi_page_list); |
---|
75 | | - cookie->type = type; |
---|
| 71 | + INIT_LIST_HEAD(&cookie->cookie.msi_page_list); |
---|
| 72 | + cookie->cookie.type = type; |
---|
| 73 | + mutex_init(&cookie->mutex); |
---|
76 | 74 | } |
---|
77 | | - return cookie; |
---|
78 | | -} |
---|
79 | | - |
---|
80 | | -int iommu_dma_init(void) |
---|
81 | | -{ |
---|
82 | | - return iova_cache_get(); |
---|
| 75 | + return &cookie->cookie; |
---|
83 | 76 | } |
---|
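Note on the cookie rework above: the per-cookie `msi_lock` spinlock goes away (MSI page setup is now serialised by a static mutex in `iommu_dma_prepare_msi()` further down), and the extra state needed by `iommu_dma_init_domain()` lives in a private wrapper, `struct iommu_dma_cookie_ext`, so the layout seen through a `struct iommu_dma_cookie *` is unchanged; `cookie_alloc()` hands back the embedded member and the wrapper is recovered later with `container_of()`. A minimal, self-contained illustration of that idiom (userspace stand-ins; nothing here is kernel API except the shape of `container_of()`):

```c
#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct cookie {			/* "public" structure handed to callers */
	int type;
};

struct cookie_ext {		/* private wrapper adding extra state */
	struct cookie cookie;
	int private_flag;
};

static struct cookie *cookie_alloc(void)
{
	static struct cookie_ext ext = { .cookie.type = 1, .private_flag = 42 };

	return &ext.cookie;	/* callers only ever see the embedded member */
}

int main(void)
{
	struct cookie *c = cookie_alloc();
	/* Internal code recovers the wrapper exactly as init_domain does. */
	struct cookie_ext *ext = container_of(c, struct cookie_ext, cookie);

	printf("type=%d private=%d\n", c->type, ext->private_flag);
	return 0;
}
```

Because the offset of the embedded member is known at compile time, recovering the wrapper costs a single pointer adjustment and adds nothing to the public structure.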
84 | 77 | |
---|
85 | 78 | /** |
---|
.. | .. |
---|
174 | 167 | void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list) |
---|
175 | 168 | { |
---|
176 | 169 | |
---|
177 | | - if (!is_of_node(dev->iommu_fwspec->iommu_fwnode)) |
---|
| 170 | + if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode)) |
---|
178 | 171 | iort_iommu_msi_get_resv_regions(dev, list); |
---|
179 | 172 | |
---|
180 | 173 | } |
---|
.. | .. |
---|
205 | 198 | return 0; |
---|
206 | 199 | } |
---|
207 | 200 | |
---|
208 | | -static void iova_reserve_pci_windows(struct pci_dev *dev, |
---|
| 201 | +static int iova_reserve_pci_windows(struct pci_dev *dev, |
---|
209 | 202 | struct iova_domain *iovad) |
---|
210 | 203 | { |
---|
211 | 204 | struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); |
---|
212 | 205 | struct resource_entry *window; |
---|
213 | 206 | unsigned long lo, hi; |
---|
| 207 | + phys_addr_t start = 0, end; |
---|
214 | 208 | |
---|
215 | 209 | resource_list_for_each_entry(window, &bridge->windows) { |
---|
216 | 210 | if (resource_type(window->res) != IORESOURCE_MEM) |
---|
.. | .. |
---|
220 | 214 | hi = iova_pfn(iovad, window->res->end - window->offset); |
---|
221 | 215 | reserve_iova(iovad, lo, hi); |
---|
222 | 216 | } |
---|
| 217 | + |
---|
| 218 | + /* Get reserved DMA windows from host bridge */ |
---|
| 219 | + resource_list_for_each_entry(window, &bridge->dma_ranges) { |
---|
| 220 | + end = window->res->start - window->offset; |
---|
| 221 | +resv_iova: |
---|
| 222 | + if (end > start) { |
---|
| 223 | + lo = iova_pfn(iovad, start); |
---|
| 224 | + hi = iova_pfn(iovad, end); |
---|
| 225 | + reserve_iova(iovad, lo, hi); |
---|
| 226 | + } else if (end < start) { |
---|
| 227 | + /* dma_ranges list should be sorted */ |
---|
| 228 | + dev_err(&dev->dev, |
---|
| 229 | + "Failed to reserve IOVA [%pa-%pa]\n", |
---|
| 230 | + &start, &end); |
---|
| 231 | + return -EINVAL; |
---|
| 232 | + } |
---|
| 233 | + |
---|
| 234 | + start = window->res->end - window->offset + 1; |
---|
| 235 | + /* If window is last entry */ |
---|
| 236 | + if (window->node.next == &bridge->dma_ranges && |
---|
| 237 | + end != ~(phys_addr_t)0) { |
---|
| 238 | + end = ~(phys_addr_t)0; |
---|
| 239 | + goto resv_iova; |
---|
| 240 | + } |
---|
| 241 | + } |
---|
| 242 | + |
---|
| 243 | + return 0; |
---|
223 | 244 | } |
---|
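The new `bridge->dma_ranges` walk reserves every IOVA range that is *not* covered by an inbound DMA window: the gap below each (sorted) window, and finally everything above the last one; the `resv_iova:` label plus `goto` lets the tail case reuse the same reservation code. The same walk written as a straight loop over a sorted array (standalone sketch; it ignores the resource offset translation and the inclusive/exclusive boundary details of the real code):

```c
#include <stdint.h>
#include <stdio.h>

struct window { uint64_t start, end; };	/* inclusive, sorted by start */

static void reserve(uint64_t lo, uint64_t hi)
{
	printf("reserve [%#llx - %#llx]\n",
	       (unsigned long long)lo, (unsigned long long)hi);
}

/* Reserve everything that is NOT covered by a DMA window. */
static int reserve_gaps(const struct window *w, int n)
{
	uint64_t start = 0;
	int i;

	for (i = 0; i < n; i++) {
		uint64_t end = w[i].start;	/* gap ends where the window begins */

		if (end > start)
			reserve(start, end);
		else if (end < start)
			return -1;		/* list was not sorted */

		start = w[i].end + 1;		/* next gap starts above the window */
	}
	if (start != 0)				/* tail gap above the last window */
		reserve(start, UINT64_MAX);
	return 0;
}

int main(void)
{
	const struct window w[] = { { 0x80000000, 0xbfffffff } };

	return reserve_gaps(w, 1);
}
```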
224 | 245 | |
---|
225 | 246 | static int iova_reserve_iommu_regions(struct device *dev, |
---|
.. | .. |
---|
231 | 252 | LIST_HEAD(resv_regions); |
---|
232 | 253 | int ret = 0; |
---|
233 | 254 | |
---|
234 | | - if (dev_is_pci(dev)) |
---|
235 | | - iova_reserve_pci_windows(to_pci_dev(dev), iovad); |
---|
| 255 | + if (dev_is_pci(dev)) { |
---|
| 256 | + ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad); |
---|
| 257 | + if (ret) |
---|
| 258 | + return ret; |
---|
| 259 | + } |
---|
236 | 260 | |
---|
237 | 261 | iommu_get_resv_regions(dev, &resv_regions); |
---|
238 | 262 | list_for_each_entry(region, &resv_regions, list) { |
---|
.. | .. |
---|
257 | 281 | return ret; |
---|
258 | 282 | } |
---|
259 | 283 | |
---|
| 284 | +static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad) |
---|
| 285 | +{ |
---|
| 286 | + struct iommu_dma_cookie *cookie; |
---|
| 287 | + struct iommu_domain *domain; |
---|
| 288 | + |
---|
| 289 | + cookie = container_of(iovad, struct iommu_dma_cookie, iovad); |
---|
| 290 | + domain = cookie->fq_domain; |
---|
| 291 | + /* |
---|
| 292 | + * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE |
---|
| 293 | + * implies that ops->flush_iotlb_all must be non-NULL. |
---|
| 294 | + */ |
---|
| 295 | + domain->ops->flush_iotlb_all(domain); |
---|
| 296 | +} |
---|
| 297 | + |
---|
260 | 298 | /** |
---|
261 | 299 | * iommu_dma_init_domain - Initialise a DMA mapping domain |
---|
262 | 300 | * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() |
---|
.. | .. |
---|
269 | 307 | * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but |
---|
270 | 308 | * any change which could make prior IOVAs invalid will fail. |
---|
271 | 309 | */ |
---|
272 | | -int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, |
---|
| 310 | +static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, |
---|
273 | 311 | u64 size, struct device *dev) |
---|
274 | 312 | { |
---|
275 | 313 | struct iommu_dma_cookie *cookie = domain->iova_cookie; |
---|
276 | | - struct iova_domain *iovad = &cookie->iovad; |
---|
277 | | - unsigned long order, base_pfn, end_pfn; |
---|
| 314 | + struct iommu_dma_cookie_ext *cookie_ext; |
---|
| 315 | + unsigned long order, base_pfn; |
---|
| 316 | + struct iova_domain *iovad; |
---|
| 317 | + int attr; |
---|
| 318 | + int ret; |
---|
278 | 319 | |
---|
279 | 320 | if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE) |
---|
280 | 321 | return -EINVAL; |
---|
281 | 322 | |
---|
| 323 | + iovad = &cookie->iovad; |
---|
| 324 | + |
---|
282 | 325 | /* Use the smallest supported page size for IOVA granularity */ |
---|
283 | 326 | order = __ffs(domain->pgsize_bitmap); |
---|
284 | 327 | base_pfn = max_t(unsigned long, 1, base >> order); |
---|
285 | | - end_pfn = (base + size - 1) >> order; |
---|
286 | 328 | |
---|
287 | 329 | /* Check the domain allows at least some access to the device... */ |
---|
288 | 330 | if (domain->geometry.force_aperture) { |
---|
.. | .. |
---|
297 | 339 | } |
---|
298 | 340 | |
---|
299 | 341 | /* start_pfn is always nonzero for an already-initialised domain */ |
---|
| 342 | + cookie_ext = container_of(cookie, struct iommu_dma_cookie_ext, cookie); |
---|
| 343 | + mutex_lock(&cookie_ext->mutex); |
---|
300 | 344 | if (iovad->start_pfn) { |
---|
301 | 345 | if (1UL << order != iovad->granule || |
---|
302 | 346 | base_pfn != iovad->start_pfn) { |
---|
303 | 347 | pr_warn("Incompatible range for DMA domain\n"); |
---|
304 | | - return -EFAULT; |
---|
| 348 | + ret = -EFAULT; |
---|
| 349 | + goto done_unlock; |
---|
305 | 350 | } |
---|
306 | 351 | |
---|
307 | | - return 0; |
---|
| 352 | + ret = 0; |
---|
| 353 | + goto done_unlock; |
---|
308 | 354 | } |
---|
309 | 355 | |
---|
310 | | - iovad->end_pfn = end_pfn; |
---|
311 | 356 | init_iova_domain(iovad, 1UL << order, base_pfn); |
---|
312 | | - if (!dev) |
---|
| 357 | + |
---|
| 358 | + if (!cookie->fq_domain && !iommu_domain_get_attr(domain, |
---|
| 359 | + DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) { |
---|
| 360 | + if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, |
---|
| 361 | + NULL)) |
---|
| 362 | + pr_warn("iova flush queue initialization failed\n"); |
---|
| 363 | + else |
---|
| 364 | + cookie->fq_domain = domain; |
---|
| 365 | + } |
---|
| 366 | + |
---|
| 367 | + if (!dev) { |
---|
| 368 | + ret = 0; |
---|
| 369 | + goto done_unlock; |
---|
| 370 | + } |
---|
| 371 | + |
---|
| 372 | + ret = iova_reserve_iommu_regions(dev, domain); |
---|
| 373 | + |
---|
| 374 | +done_unlock: |
---|
| 375 | + mutex_unlock(&cookie_ext->mutex); |
---|
| 376 | + return ret; |
---|
| 377 | +} |
---|
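Two behavioural changes land in `iommu_dma_init_domain()` above: initialisation is serialised with the mutex from the cookie wrapper, so concurrent probes against the same default domain cannot both run `init_iova_domain()`, and the domain is switched to lazy invalidation when the driver advertises `DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE`, in which case freed IOVAs are batched on a flush queue and the IOTLB is flushed from the queue rather than synchronously on every unmap. A condensed restatement of the lazy-mode setup, not a drop-in (it relies on the cookie definitions earlier in this file and on the 5.10-era `init_iova_flush_queue()` interface):

```c
/* Sketch: enable lazy (flush-queue) invalidation for a DMA domain. */
static int enable_lazy_invalidation(struct iommu_domain *domain,
				    struct iommu_dma_cookie *cookie)
{
	int attr;

	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
				  &attr) || !attr)
		return -ENODEV;		/* driver wants strict invalidation */

	if (init_iova_flush_queue(&cookie->iovad, iommu_dma_flush_iotlb_all,
				  NULL))
		return -ENOMEM;		/* fall back to strict mode */

	cookie->fq_domain = domain;	/* consulted by the free/unmap paths */
	return 0;
}
```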
| 378 | + |
---|
| 379 | +static int iommu_dma_deferred_attach(struct device *dev, |
---|
| 380 | + struct iommu_domain *domain) |
---|
| 381 | +{ |
---|
| 382 | + const struct iommu_ops *ops = domain->ops; |
---|
| 383 | + |
---|
| 384 | + if (!is_kdump_kernel()) |
---|
313 | 385 | return 0; |
---|
314 | 386 | |
---|
315 | | - return iova_reserve_iommu_regions(dev, domain); |
---|
| 387 | + if (unlikely(ops->is_attach_deferred && |
---|
| 388 | + ops->is_attach_deferred(domain, dev))) |
---|
| 389 | + return iommu_attach_device(domain, dev); |
---|
| 390 | + |
---|
| 391 | + return 0; |
---|
316 | 392 | } |
---|
317 | | -EXPORT_SYMBOL(iommu_dma_init_domain); |
---|
318 | 393 | |
---|
319 | 394 | /* |
---|
320 | 395 | * Should be called prior to using dma-apis |
---|
.. | .. |
---|
323 | 398 | u64 size) |
---|
324 | 399 | { |
---|
325 | 400 | struct iommu_domain *domain; |
---|
| 401 | + struct iommu_dma_cookie *cookie; |
---|
326 | 402 | struct iova_domain *iovad; |
---|
327 | 403 | unsigned long pfn_lo, pfn_hi; |
---|
328 | 404 | |
---|
.. | .. |
---|
330 | 406 | if (!domain || !domain->iova_cookie) |
---|
331 | 407 | return -EINVAL; |
---|
332 | 408 | |
---|
333 | | - iovad = &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad; |
---|
| 409 | + cookie = domain->iova_cookie; |
---|
| 410 | + iovad = &cookie->iovad; |
---|
334 | 411 | |
---|
335 | 412 | /* iova will be freed automatically by put_iova_domain() */ |
---|
336 | 413 | pfn_lo = iova_pfn(iovad, base); |
---|
.. | .. |
---|
340 | 417 | |
---|
341 | 418 | return 0; |
---|
342 | 419 | } |
---|
343 | | -EXPORT_SYMBOL_GPL(iommu_dma_reserve_iova); |
---|
| 420 | +EXPORT_SYMBOL(iommu_dma_reserve_iova); |
---|
344 | 421 | |
---|
345 | 422 | /* |
---|
346 | 423 | * Should be called prior to using dma-apis. |
---|
.. | .. |
---|
358 | 435 | iovad->best_fit = true; |
---|
359 | 436 | return 0; |
---|
360 | 437 | } |
---|
361 | | -EXPORT_SYMBOL_GPL(iommu_dma_enable_best_fit_algo); |
---|
| 438 | +EXPORT_SYMBOL(iommu_dma_enable_best_fit_algo); |
---|
362 | 439 | |
---|
363 | 440 | /** |
---|
364 | 441 | * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API |
---|
.. | .. |
---|
369 | 446 | * |
---|
370 | 447 | * Return: corresponding IOMMU API page protection flags |
---|
371 | 448 | */ |
---|
372 | | -int dma_info_to_prot(enum dma_data_direction dir, bool coherent, |
---|
| 449 | +static int dma_info_to_prot(enum dma_data_direction dir, bool coherent, |
---|
373 | 450 | unsigned long attrs) |
---|
374 | 451 | { |
---|
375 | 452 | int prot = coherent ? IOMMU_CACHE : 0; |
---|
376 | 453 | |
---|
377 | 454 | if (attrs & DMA_ATTR_PRIVILEGED) |
---|
378 | 455 | prot |= IOMMU_PRIV; |
---|
379 | | - |
---|
380 | | - if (!(attrs & DMA_ATTR_EXEC_MAPPING)) |
---|
381 | | - prot |= IOMMU_NOEXEC; |
---|
382 | | - |
---|
383 | | - if (attrs & DMA_ATTR_IOMMU_USE_UPSTREAM_HINT) |
---|
384 | | - prot |= IOMMU_USE_UPSTREAM_HINT; |
---|
385 | | - |
---|
386 | | - if (attrs & DMA_ATTR_IOMMU_USE_LLC_NWA) |
---|
387 | | - prot |= IOMMU_USE_LLC_NWA; |
---|
| 456 | + if (attrs & DMA_ATTR_SYS_CACHE_ONLY) |
---|
| 457 | + prot |= IOMMU_SYS_CACHE; |
---|
| 458 | + if (attrs & DMA_ATTR_SYS_CACHE_ONLY_NWA) |
---|
| 459 | + prot |= IOMMU_SYS_CACHE_NWA; |
---|
388 | 460 | |
---|
389 | 461 | switch (dir) { |
---|
390 | 462 | case DMA_BIDIRECTIONAL: |
---|
.. | .. |
---|
399 | 471 | } |
---|
400 | 472 | |
---|
401 | 473 | static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain, |
---|
402 | | - size_t size, dma_addr_t dma_limit, struct device *dev) |
---|
| 474 | + size_t size, u64 dma_limit, struct device *dev) |
---|
403 | 475 | { |
---|
404 | 476 | struct iommu_dma_cookie *cookie = domain->iova_cookie; |
---|
405 | 477 | struct iova_domain *iovad = &cookie->iovad; |
---|
406 | 478 | unsigned long shift, iova_len, iova = 0; |
---|
407 | | - dma_addr_t limit; |
---|
408 | 479 | |
---|
409 | 480 | if (cookie->type == IOMMU_DMA_MSI_COOKIE) { |
---|
410 | 481 | cookie->msi_iova += size; |
---|
.. | .. |
---|
422 | 493 | if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1))) |
---|
423 | 494 | iova_len = roundup_pow_of_two(iova_len); |
---|
424 | 495 | |
---|
425 | | - if (dev->bus_dma_mask) |
---|
426 | | - dma_limit &= dev->bus_dma_mask; |
---|
| 496 | + dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit); |
---|
427 | 497 | |
---|
428 | 498 | if (domain->geometry.force_aperture) |
---|
429 | | - dma_limit = min(dma_limit, domain->geometry.aperture_end); |
---|
430 | | - |
---|
431 | | - /* |
---|
432 | | - * Ensure iova is within range specified in iommu_dma_init_domain(). |
---|
433 | | - * This also prevents unnecessary work iterating through the entire |
---|
434 | | - * rb_tree. |
---|
435 | | - */ |
---|
436 | | - limit = min_t(dma_addr_t, DMA_BIT_MASK(32) >> shift, |
---|
437 | | - iovad->end_pfn); |
---|
| 499 | + dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end); |
---|
438 | 500 | |
---|
439 | 501 | /* Try to get PCI devices a SAC address */ |
---|
440 | 502 | if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev)) |
---|
441 | | - iova = alloc_iova_fast(iovad, iova_len, limit, false); |
---|
| 503 | + iova = alloc_iova_fast(iovad, iova_len, |
---|
| 504 | + DMA_BIT_MASK(32) >> shift, false); |
---|
442 | 505 | |
---|
443 | | - if (!iova) { |
---|
444 | | - limit = min_t(dma_addr_t, dma_limit >> shift, |
---|
445 | | - iovad->end_pfn); |
---|
| 506 | + if (!iova) |
---|
| 507 | + iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, |
---|
| 508 | + true); |
---|
446 | 509 | |
---|
447 | | - iova = alloc_iova_fast(iovad, iova_len, limit, true); |
---|
448 | | - } |
---|
| 510 | + trace_android_vh_iommu_alloc_iova(dev, (dma_addr_t)iova << shift, size); |
---|
| 511 | + trace_android_vh_iommu_iovad_alloc_iova(dev, iovad, (dma_addr_t)iova << shift, size); |
---|
449 | 512 | |
---|
450 | 513 | return (dma_addr_t)iova << shift; |
---|
451 | | - |
---|
452 | 514 | } |
---|
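The IOVA allocator now folds the device's `bus_dma_limit` and the domain aperture into the caller's mask with `min_not_zero()`, still tries to give PCI devices a SAC (below 4 GiB) address first, and only on the retry passes `flush_rcache = true` so the per-CPU IOVA caches are purged before giving up; the old `end_pfn` clamp disappears along with `iovad->end_pfn`. A small worked example of the resulting pfn limits, assuming a 4 KiB IOVA granule and example masks (standalone, userspace; the local `DMA_BIT_MASK` mirrors the kernel macro):

```c
#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	unsigned int shift = 12;			/* 4 KiB IOVA granule */
	uint64_t coherent_mask = DMA_BIT_MASK(36);	/* example device mask */
	uint64_t bus_limit = DMA_BIT_MASK(34);		/* example bus_dma_limit */
	uint64_t limit = bus_limit && bus_limit < coherent_mask ?
			 bus_limit : coherent_mask;	/* min_not_zero() */

	printf("SAC attempt limited to pfn %#llx\n",
	       (unsigned long long)(DMA_BIT_MASK(32) >> shift));
	printf("retry limited to pfn %#llx\n",
	       (unsigned long long)(limit >> shift));
	return 0;
}
```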
453 | 515 | |
---|
454 | 516 | static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie, |
---|
.. | .. |
---|
459 | 521 | /* The MSI case is only ever cleaning up its most recent allocation */ |
---|
460 | 522 | if (cookie->type == IOMMU_DMA_MSI_COOKIE) |
---|
461 | 523 | cookie->msi_iova -= size; |
---|
| 524 | + else if (cookie->fq_domain) /* non-strict mode */ |
---|
| 525 | + queue_iova(iovad, iova_pfn(iovad, iova), |
---|
| 526 | + size >> iova_shift(iovad), 0); |
---|
462 | 527 | else |
---|
463 | 528 | free_iova_fast(iovad, iova_pfn(iovad, iova), |
---|
464 | 529 | size >> iova_shift(iovad)); |
---|
| 530 | + |
---|
| 531 | + trace_android_vh_iommu_free_iova(iova, size); |
---|
| 532 | + trace_android_vh_iommu_iovad_free_iova(iovad, iova, size); |
---|
465 | 533 | } |
---|
466 | 534 | |
---|
467 | | -static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr, |
---|
| 535 | +static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr, |
---|
468 | 536 | size_t size) |
---|
469 | 537 | { |
---|
| 538 | + struct iommu_domain *domain = iommu_get_dma_domain(dev); |
---|
470 | 539 | struct iommu_dma_cookie *cookie = domain->iova_cookie; |
---|
471 | 540 | struct iova_domain *iovad = &cookie->iovad; |
---|
472 | 541 | size_t iova_off = iova_offset(iovad, dma_addr); |
---|
| 542 | + struct iommu_iotlb_gather iotlb_gather; |
---|
| 543 | + size_t unmapped; |
---|
473 | 544 | |
---|
474 | 545 | dma_addr -= iova_off; |
---|
475 | 546 | size = iova_align(iovad, size + iova_off); |
---|
| 547 | + iommu_iotlb_gather_init(&iotlb_gather); |
---|
476 | 548 | |
---|
477 | | - WARN_ON(iommu_unmap(domain, dma_addr, size) != size); |
---|
| 549 | + unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather); |
---|
| 550 | + WARN_ON(unmapped != size); |
---|
| 551 | + |
---|
| 552 | + if (!cookie->fq_domain) |
---|
| 553 | + iommu_iotlb_sync(domain, &iotlb_gather); |
---|
478 | 554 | iommu_dma_free_iova(cookie, dma_addr, size); |
---|
| 555 | +} |
---|
| 556 | + |
---|
| 557 | +static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, |
---|
| 558 | + size_t size, int prot, u64 dma_mask) |
---|
| 559 | +{ |
---|
| 560 | + struct iommu_domain *domain = iommu_get_dma_domain(dev); |
---|
| 561 | + struct iommu_dma_cookie *cookie = domain->iova_cookie; |
---|
| 562 | + struct iova_domain *iovad = &cookie->iovad; |
---|
| 563 | + size_t iova_off = iova_offset(iovad, phys); |
---|
| 564 | + dma_addr_t iova; |
---|
| 565 | + |
---|
| 566 | + if (unlikely(iommu_dma_deferred_attach(dev, domain))) |
---|
| 567 | + return DMA_MAPPING_ERROR; |
---|
| 568 | + |
---|
| 569 | + size = iova_align(iovad, size + iova_off); |
---|
| 570 | + |
---|
| 571 | + iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev); |
---|
| 572 | + if (!iova) |
---|
| 573 | + return DMA_MAPPING_ERROR; |
---|
| 574 | + |
---|
| 575 | + if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) { |
---|
| 576 | + iommu_dma_free_iova(cookie, iova, size); |
---|
| 577 | + return DMA_MAPPING_ERROR; |
---|
| 578 | + } |
---|
| 579 | + return iova + iova_off; |
---|
479 | 580 | } |
---|
480 | 581 | |
---|
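`__iommu_dma_unmap()` now resolves the domain from the device, unmaps with `iommu_unmap_fast()` plus an `iommu_iotlb_gather`, and only syncs the IOTLB immediately when no flush queue is in use; `__iommu_dma_map()` moves up here, gains an explicit DMA mask argument, performs the kdump deferred-attach check and maps with `iommu_map_atomic()`. Drivers never call these helpers directly; they reach them through the generic streaming API once `iommu_dma_ops` is installed. An illustrative caller (hypothetical driver code, real DMA API calls):

```c
/* Illustrative driver fragment: ends up in __iommu_dma_map()/__iommu_dma_unmap(). */
#include <linux/dma-mapping.h>

static int send_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... hand "handle" to the hardware and wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
```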
481 | 582 | static void __iommu_dma_free_pages(struct page **pages, int count) |
---|
.. | .. |
---|
485 | 586 | kvfree(pages); |
---|
486 | 587 | } |
---|
487 | 588 | |
---|
488 | | -static struct page **__iommu_dma_alloc_pages(unsigned int count, |
---|
489 | | - unsigned long order_mask, gfp_t gfp) |
---|
| 589 | +static struct page **__iommu_dma_alloc_pages(struct device *dev, |
---|
| 590 | + unsigned int count, unsigned long order_mask, gfp_t gfp) |
---|
490 | 591 | { |
---|
491 | 592 | struct page **pages; |
---|
492 | | - unsigned int i = 0, array_size = count * sizeof(*pages); |
---|
| 593 | + unsigned int i = 0, nid = dev_to_node(dev); |
---|
493 | 594 | |
---|
494 | 595 | order_mask &= (2U << MAX_ORDER) - 1; |
---|
495 | 596 | if (!order_mask) |
---|
496 | 597 | return NULL; |
---|
497 | 598 | |
---|
498 | | - if (array_size <= PAGE_SIZE) |
---|
499 | | - pages = kzalloc(array_size, GFP_KERNEL); |
---|
500 | | - else |
---|
501 | | - pages = vzalloc(array_size); |
---|
| 599 | + pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL); |
---|
502 | 600 | if (!pages) |
---|
503 | 601 | return NULL; |
---|
504 | 602 | |
---|
505 | 603 | /* IOMMU can map any pages, so himem can also be used here */ |
---|
506 | 604 | gfp |= __GFP_NOWARN | __GFP_HIGHMEM; |
---|
| 605 | + |
---|
| 606 | + /* It makes no sense to muck about with huge pages */ |
---|
| 607 | + gfp &= ~__GFP_COMP; |
---|
507 | 608 | |
---|
508 | 609 | while (count) { |
---|
509 | 610 | struct page *page = NULL; |
---|
.. | .. |
---|
517 | 618 | for (order_mask &= (2U << __fls(count)) - 1; |
---|
518 | 619 | order_mask; order_mask &= ~order_size) { |
---|
519 | 620 | unsigned int order = __fls(order_mask); |
---|
| 621 | + gfp_t alloc_flags = gfp; |
---|
520 | 622 | |
---|
521 | 623 | order_size = 1U << order; |
---|
522 | | - page = alloc_pages((order_mask - order_size) ? |
---|
523 | | - gfp | __GFP_NORETRY : gfp, order); |
---|
| 624 | + if (order_mask > order_size) |
---|
| 625 | + alloc_flags |= __GFP_NORETRY; |
---|
| 626 | + page = alloc_pages_node(nid, alloc_flags, order); |
---|
524 | 627 | if (!page) |
---|
525 | 628 | continue; |
---|
526 | | - if (!order) |
---|
527 | | - break; |
---|
528 | | - if (!PageCompound(page)) { |
---|
| 629 | + if (order) |
---|
529 | 630 | split_page(page, order); |
---|
530 | | - break; |
---|
531 | | - } else if (!split_huge_page(page)) { |
---|
532 | | - break; |
---|
533 | | - } |
---|
534 | | - __free_pages(page, order); |
---|
| 631 | + break; |
---|
535 | 632 | } |
---|
536 | 633 | if (!page) { |
---|
537 | 634 | __iommu_dma_free_pages(pages, i); |
---|
.. | .. |
---|
545 | 642 | } |
---|
546 | 643 | |
---|
547 | 644 | /** |
---|
548 | | - * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc() |
---|
549 | | - * @dev: Device which owns this buffer |
---|
550 | | - * @pages: Array of buffer pages as returned by iommu_dma_alloc() |
---|
551 | | - * @size: Size of buffer in bytes |
---|
552 | | - * @handle: DMA address of buffer |
---|
553 | | - * |
---|
554 | | - * Frees both the pages associated with the buffer, and the array |
---|
555 | | - * describing them |
---|
556 | | - */ |
---|
557 | | -void iommu_dma_free(struct device *dev, struct page **pages, size_t size, |
---|
558 | | - dma_addr_t *handle) |
---|
559 | | -{ |
---|
560 | | - __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size); |
---|
561 | | - __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT); |
---|
562 | | - *handle = IOMMU_MAPPING_ERROR; |
---|
563 | | -} |
---|
564 | | - |
---|
565 | | -/** |
---|
566 | | - * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space |
---|
| 645 | + * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space |
---|
567 | 646 | * @dev: Device to allocate memory for. Must be a real device |
---|
568 | 647 | * attached to an iommu_dma_domain |
---|
569 | 648 | * @size: Size of buffer in bytes |
---|
| 649 | + * @dma_handle: Out argument for allocated DMA handle |
---|
570 | 650 | * @gfp: Allocation flags |
---|
| 651 | + * @prot: pgprot_t to use for the remapped mapping |
---|
571 | 652 | * @attrs: DMA attributes for this allocation |
---|
572 | | - * @prot: IOMMU mapping flags |
---|
573 | | - * @handle: Out argument for allocated DMA handle |
---|
574 | | - * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the |
---|
575 | | - * given VA/PA are visible to the given non-coherent device. |
---|
576 | 653 | * |
---|
577 | 654 | * If @size is less than PAGE_SIZE, then a full CPU page will be allocated, |
---|
578 | 655 | * but an IOMMU which supports smaller pages might not map the whole thing. |
---|
579 | 656 | * |
---|
580 | | - * Return: Array of struct page pointers describing the buffer, |
---|
581 | | - * or NULL on failure. |
---|
| 657 | + * Return: Mapped virtual address, or NULL on failure. |
---|
582 | 658 | */ |
---|
583 | | -struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, |
---|
584 | | - unsigned long attrs, int prot, dma_addr_t *handle, |
---|
585 | | - void (*flush_page)(struct device *, const void *, phys_addr_t)) |
---|
| 659 | +static void *iommu_dma_alloc_remap(struct device *dev, size_t size, |
---|
| 660 | + dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot, |
---|
| 661 | + unsigned long attrs) |
---|
586 | 662 | { |
---|
587 | | - struct iommu_domain *domain = iommu_get_domain_for_dev(dev); |
---|
| 663 | + struct iommu_domain *domain = iommu_get_dma_domain(dev); |
---|
588 | 664 | struct iommu_dma_cookie *cookie = domain->iova_cookie; |
---|
589 | 665 | struct iova_domain *iovad = &cookie->iovad; |
---|
| 666 | + bool coherent = dev_is_dma_coherent(dev); |
---|
| 667 | + int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); |
---|
| 668 | + unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap; |
---|
590 | 669 | struct page **pages; |
---|
591 | 670 | struct sg_table sgt; |
---|
592 | 671 | dma_addr_t iova; |
---|
593 | | - unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap; |
---|
| 672 | + void *vaddr; |
---|
594 | 673 | |
---|
595 | | - *handle = IOMMU_MAPPING_ERROR; |
---|
| 674 | + *dma_handle = DMA_MAPPING_ERROR; |
---|
| 675 | + |
---|
| 676 | + if (unlikely(iommu_dma_deferred_attach(dev, domain))) |
---|
| 677 | + return NULL; |
---|
596 | 678 | |
---|
597 | 679 | min_size = alloc_sizes & -alloc_sizes; |
---|
598 | 680 | if (min_size < PAGE_SIZE) { |
---|
.. | .. |
---|
605 | 687 | alloc_sizes = min_size; |
---|
606 | 688 | |
---|
607 | 689 | count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
---|
608 | | - pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp); |
---|
| 690 | + pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT, |
---|
| 691 | + gfp); |
---|
609 | 692 | if (!pages) |
---|
610 | 693 | return NULL; |
---|
611 | 694 | |
---|
.. | .. |
---|
617 | 700 | if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL)) |
---|
618 | 701 | goto out_free_iova; |
---|
619 | 702 | |
---|
620 | | - if (!(prot & IOMMU_CACHE)) { |
---|
621 | | - struct sg_mapping_iter miter; |
---|
622 | | - /* |
---|
623 | | - * The CPU-centric flushing implied by SG_MITER_TO_SG isn't |
---|
624 | | - * sufficient here, so skip it by using the "wrong" direction. |
---|
625 | | - */ |
---|
626 | | - sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG); |
---|
627 | | - while (sg_miter_next(&miter)) |
---|
628 | | - flush_page(dev, miter.addr, page_to_phys(miter.page)); |
---|
629 | | - sg_miter_stop(&miter); |
---|
| 703 | + if (!(ioprot & IOMMU_CACHE)) { |
---|
| 704 | + struct scatterlist *sg; |
---|
| 705 | + int i; |
---|
| 706 | + |
---|
| 707 | + for_each_sg(sgt.sgl, sg, sgt.orig_nents, i) |
---|
| 708 | + arch_dma_prep_coherent(sg_page(sg), sg->length); |
---|
630 | 709 | } |
---|
631 | 710 | |
---|
632 | | - if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot) |
---|
| 711 | + if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot) |
---|
633 | 712 | < size) |
---|
634 | 713 | goto out_free_sg; |
---|
635 | 714 | |
---|
636 | | - *handle = iova; |
---|
637 | | - sg_free_table(&sgt); |
---|
638 | | - return pages; |
---|
| 715 | + vaddr = dma_common_pages_remap(pages, size, prot, |
---|
| 716 | + __builtin_return_address(0)); |
---|
| 717 | + if (!vaddr) |
---|
| 718 | + goto out_unmap; |
---|
639 | 719 | |
---|
| 720 | + *dma_handle = iova; |
---|
| 721 | + sg_free_table(&sgt); |
---|
| 722 | + return vaddr; |
---|
| 723 | + |
---|
| 724 | +out_unmap: |
---|
| 725 | + __iommu_dma_unmap(dev, iova, size); |
---|
640 | 726 | out_free_sg: |
---|
641 | 727 | sg_free_table(&sgt); |
---|
642 | 728 | out_free_iova: |
---|
.. | .. |
---|
647 | 733 | } |
---|
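`iommu_dma_alloc_remap()` (previously `iommu_dma_alloc()`) now returns a kernel virtual address itself: pages come from the NUMA-aware `__iommu_dma_alloc_pages()`, which tries the largest permitted order first and falls back to smaller ones, the pages are mapped as one contiguous IOVA run with `iommu_map_sg_atomic()`, and the result is remapped with `dma_common_pages_remap()`; the arch `flush_page` callback is replaced by `arch_dma_prep_coherent()`. The order-fallback loop, pulled out as a simplified sketch (kernel-style; `alloc_best_order()` is a name made up for this note):

```c
#include <linux/gfp.h>
#include <linux/mm.h>

/* Sketch of the "largest order that still fits" fallback, simplified. */
static struct page *alloc_best_order(int nid, gfp_t gfp,
				     unsigned int *order_out,
				     unsigned long order_mask,
				     unsigned int count)
{
	/* Only consider orders the IOMMU can map and that fit in "count". */
	order_mask &= (2U << __fls(count)) - 1;

	while (order_mask) {
		unsigned int order = __fls(order_mask);	/* biggest first */
		gfp_t flags = gfp;
		struct page *page;

		if (order_mask > (1U << order))
			flags |= __GFP_NORETRY;	/* cheap try, smaller orders remain */

		page = alloc_pages_node(nid, flags, order);
		if (page) {
			if (order)
				split_page(page, order); /* hand back order-0 pages */
			*order_out = order;
			return page;
		}
		order_mask &= ~(1U << order);		/* drop this order, go smaller */
	}
	return NULL;
}
```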
648 | 734 | |
---|
649 | 735 | /** |
---|
650 | | - * iommu_dma_mmap - Map a buffer into provided user VMA |
---|
651 | | - * @pages: Array representing buffer from iommu_dma_alloc() |
---|
| 736 | + * __iommu_dma_mmap - Map a buffer into provided user VMA |
---|
| 737 | + * @pages: Array representing buffer from __iommu_dma_alloc() |
---|
652 | 738 | * @size: Size of buffer in bytes |
---|
653 | 739 | * @vma: VMA describing requested userspace mapping |
---|
654 | 740 | * |
---|
655 | 741 | * Maps the pages of the buffer in @pages into @vma. The caller is responsible |
---|
656 | 742 | * for verifying the correct size and protection of @vma beforehand. |
---|
657 | 743 | */ |
---|
658 | | - |
---|
659 | | -int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma) |
---|
| 744 | +static int __iommu_dma_mmap(struct page **pages, size_t size, |
---|
| 745 | + struct vm_area_struct *vma) |
---|
660 | 746 | { |
---|
661 | | - unsigned long uaddr = vma->vm_start; |
---|
662 | | - unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
---|
663 | | - int ret = -ENXIO; |
---|
664 | | - |
---|
665 | | - for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) { |
---|
666 | | - ret = vm_insert_page(vma, uaddr, pages[i]); |
---|
667 | | - if (ret) |
---|
668 | | - break; |
---|
669 | | - uaddr += PAGE_SIZE; |
---|
670 | | - } |
---|
671 | | - return ret; |
---|
| 747 | + return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT); |
---|
672 | 748 | } |
---|
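`__iommu_dma_mmap()` shrinks to a single `vm_map_pages()` call, which performs the same per-page `vm_insert_page()` loop internally and additionally validates `vm_pgoff` and the VMA size. For reference, the same helper as it might appear in a driver's own mmap handler (the `demo_buf`/`demo_mmap` names are hypothetical, `vm_map_pages()` is the real API):

```c
/* Illustrative: exporting an array of pages to userspace via vm_map_pages(). */
#include <linux/fs.h>
#include <linux/mm.h>

struct demo_buf {			/* hypothetical driver state */
	struct page **pages;
	unsigned long nr_pages;
};

static int demo_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct demo_buf *buf = filp->private_data;

	/* Checks the VMA size/offset against nr_pages and inserts each page. */
	return vm_map_pages(vma, buf->pages, buf->nr_pages);
}
```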
673 | 749 | |
---|
674 | | -static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, |
---|
675 | | - size_t size, int prot) |
---|
| 750 | +static void iommu_dma_sync_single_for_cpu(struct device *dev, |
---|
| 751 | + dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) |
---|
676 | 752 | { |
---|
677 | | - struct iommu_domain *domain = iommu_get_domain_for_dev(dev); |
---|
678 | | - struct iommu_dma_cookie *cookie = domain->iova_cookie; |
---|
679 | | - size_t iova_off = 0; |
---|
680 | | - dma_addr_t iova; |
---|
| 753 | + phys_addr_t phys; |
---|
681 | 754 | |
---|
682 | | - if (cookie->type == IOMMU_DMA_IOVA_COOKIE) { |
---|
683 | | - iova_off = iova_offset(&cookie->iovad, phys); |
---|
684 | | - size = iova_align(&cookie->iovad, size + iova_off); |
---|
685 | | - } |
---|
| 755 | + if (dev_is_dma_coherent(dev)) |
---|
| 756 | + return; |
---|
686 | 757 | |
---|
687 | | - iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); |
---|
688 | | - if (!iova) |
---|
689 | | - return IOMMU_MAPPING_ERROR; |
---|
690 | | - |
---|
691 | | - if (iommu_map(domain, iova, phys - iova_off, size, prot)) { |
---|
692 | | - iommu_dma_free_iova(cookie, iova, size); |
---|
693 | | - return IOMMU_MAPPING_ERROR; |
---|
694 | | - } |
---|
695 | | - return iova + iova_off; |
---|
| 758 | + phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle); |
---|
| 759 | + arch_sync_dma_for_cpu(phys, size, dir); |
---|
696 | 760 | } |
---|
697 | 761 | |
---|
698 | | -dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, |
---|
699 | | - unsigned long offset, size_t size, int prot) |
---|
| 762 | +static void iommu_dma_sync_single_for_device(struct device *dev, |
---|
| 763 | + dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) |
---|
700 | 764 | { |
---|
701 | | - return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot); |
---|
| 765 | + phys_addr_t phys; |
---|
| 766 | + |
---|
| 767 | + if (dev_is_dma_coherent(dev)) |
---|
| 768 | + return; |
---|
| 769 | + |
---|
| 770 | + phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle); |
---|
| 771 | + arch_sync_dma_for_device(phys, size, dir); |
---|
702 | 772 | } |
---|
703 | 773 | |
---|
704 | | -void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, |
---|
705 | | - enum dma_data_direction dir, unsigned long attrs) |
---|
| 774 | +static void iommu_dma_sync_sg_for_cpu(struct device *dev, |
---|
| 775 | + struct scatterlist *sgl, int nelems, |
---|
| 776 | + enum dma_data_direction dir) |
---|
706 | 777 | { |
---|
707 | | - __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size); |
---|
| 778 | + struct scatterlist *sg; |
---|
| 779 | + int i; |
---|
| 780 | + |
---|
| 781 | + if (dev_is_dma_coherent(dev)) |
---|
| 782 | + return; |
---|
| 783 | + |
---|
| 784 | + for_each_sg(sgl, sg, nelems, i) |
---|
| 785 | + arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir); |
---|
| 786 | +} |
---|
| 787 | + |
---|
| 788 | +static void iommu_dma_sync_sg_for_device(struct device *dev, |
---|
| 789 | + struct scatterlist *sgl, int nelems, |
---|
| 790 | + enum dma_data_direction dir) |
---|
| 791 | +{ |
---|
| 792 | + struct scatterlist *sg; |
---|
| 793 | + int i; |
---|
| 794 | + |
---|
| 795 | + if (dev_is_dma_coherent(dev)) |
---|
| 796 | + return; |
---|
| 797 | + |
---|
| 798 | + for_each_sg(sgl, sg, nelems, i) |
---|
| 799 | + arch_sync_dma_for_device(sg_phys(sg), sg->length, dir); |
---|
| 800 | +} |
---|
| 801 | + |
---|
| 802 | +static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, |
---|
| 803 | + unsigned long offset, size_t size, enum dma_data_direction dir, |
---|
| 804 | + unsigned long attrs) |
---|
| 805 | +{ |
---|
| 806 | + phys_addr_t phys = page_to_phys(page) + offset; |
---|
| 807 | + bool coherent = dev_is_dma_coherent(dev); |
---|
| 808 | + int prot = dma_info_to_prot(dir, coherent, attrs); |
---|
| 809 | + dma_addr_t dma_handle; |
---|
| 810 | + |
---|
| 811 | + dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev)); |
---|
| 812 | + if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && |
---|
| 813 | + dma_handle != DMA_MAPPING_ERROR) |
---|
| 814 | + arch_sync_dma_for_device(phys, size, dir); |
---|
| 815 | + return dma_handle; |
---|
| 816 | +} |
---|
| 817 | + |
---|
| 818 | +static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, |
---|
| 819 | + size_t size, enum dma_data_direction dir, unsigned long attrs) |
---|
| 820 | +{ |
---|
| 821 | + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) |
---|
| 822 | + iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir); |
---|
| 823 | + __iommu_dma_unmap(dev, dma_handle, size); |
---|
708 | 824 | } |
---|
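With these additions the file provides the full set of streaming callbacks: the sync helpers call `arch_sync_dma_for_cpu()`/`arch_sync_dma_for_device()` only for non-coherent devices, and `iommu_dma_map_page()`/`iommu_dma_unmap_page()` do that maintenance themselves unless the caller passes `DMA_ATTR_SKIP_CPU_SYNC` and takes over the syncing explicitly. An illustrative driver-side use of that attribute (hypothetical RX-buffer helpers, real DMA API calls):

```c
/* Illustrative: defer cache maintenance until just before/after the DMA. */
#include <linux/dma-mapping.h>

static dma_addr_t map_rx_page(struct device *dev, struct page *page)
{
	dma_addr_t addr;

	addr = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE,
				  DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, addr))
		return DMA_MAPPING_ERROR;

	/* Make the buffer device-visible only when the descriptor is posted. */
	dma_sync_single_for_device(dev, addr, PAGE_SIZE, DMA_FROM_DEVICE);
	return addr;
}

static void unmap_rx_page(struct device *dev, dma_addr_t addr)
{
	/* Pull the data back to the CPU caches before reading it. */
	dma_sync_single_for_cpu(dev, addr, PAGE_SIZE, DMA_FROM_DEVICE);
	dma_unmap_page_attrs(dev, addr, PAGE_SIZE, DMA_FROM_DEVICE,
			     DMA_ATTR_SKIP_CPU_SYNC);
}
```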
709 | 825 | |
---|
710 | 826 | /* |
---|
.. | .. |
---|
714 | 830 | * avoid individually crossing any boundaries, so we merely need to check a |
---|
715 | 831 | * segment's start address to avoid concatenating across one. |
---|
716 | 832 | */ |
---|
717 | | -int iommu_dma_finalise_sg(struct device *dev, struct scatterlist *sg, int nents, |
---|
| 833 | +static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents, |
---|
718 | 834 | dma_addr_t dma_addr) |
---|
719 | 835 | { |
---|
720 | 836 | struct scatterlist *s, *cur = sg; |
---|
.. | .. |
---|
730 | 846 | |
---|
731 | 847 | s->offset += s_iova_off; |
---|
732 | 848 | s->length = s_length; |
---|
733 | | - sg_dma_address(s) = IOMMU_MAPPING_ERROR; |
---|
| 849 | + sg_dma_address(s) = DMA_MAPPING_ERROR; |
---|
734 | 850 | sg_dma_len(s) = 0; |
---|
735 | 851 | |
---|
736 | 852 | /* |
---|
.. | .. |
---|
767 | 883 | * If mapping failed, then just restore the original list, |
---|
768 | 884 | * but making sure the DMA fields are invalidated. |
---|
769 | 885 | */ |
---|
770 | | -void iommu_dma_invalidate_sg(struct scatterlist *sg, int nents) |
---|
| 886 | +static void __invalidate_sg(struct scatterlist *sg, int nents) |
---|
771 | 887 | { |
---|
772 | 888 | struct scatterlist *s; |
---|
773 | 889 | int i; |
---|
774 | 890 | |
---|
775 | 891 | for_each_sg(sg, s, nents, i) { |
---|
776 | | - if (sg_dma_address(s) != IOMMU_MAPPING_ERROR) |
---|
| 892 | + if (sg_dma_address(s) != DMA_MAPPING_ERROR) |
---|
777 | 893 | s->offset += sg_dma_address(s); |
---|
778 | 894 | if (sg_dma_len(s)) |
---|
779 | 895 | s->length = sg_dma_len(s); |
---|
780 | | - sg_dma_address(s) = IOMMU_MAPPING_ERROR; |
---|
| 896 | + sg_dma_address(s) = DMA_MAPPING_ERROR; |
---|
781 | 897 | sg_dma_len(s) = 0; |
---|
782 | 898 | } |
---|
783 | 899 | } |
---|
.. | .. |
---|
789 | 905 | * impedance-matching, to be able to hand off a suitably-aligned list, |
---|
790 | 906 | * but still preserve the original offsets and sizes for the caller. |
---|
791 | 907 | */ |
---|
792 | | -size_t iommu_dma_prepare_map_sg(struct device *dev, struct iova_domain *iovad, |
---|
793 | | - struct scatterlist *sg, int nents) |
---|
| 908 | +static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, |
---|
| 909 | + int nents, enum dma_data_direction dir, unsigned long attrs) |
---|
794 | 910 | { |
---|
| 911 | + struct iommu_domain *domain = iommu_get_dma_domain(dev); |
---|
| 912 | + struct iommu_dma_cookie *cookie = domain->iova_cookie; |
---|
| 913 | + struct iova_domain *iovad = &cookie->iovad; |
---|
795 | 914 | struct scatterlist *s, *prev = NULL; |
---|
| 915 | + int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs); |
---|
| 916 | + dma_addr_t iova; |
---|
796 | 917 | size_t iova_len = 0; |
---|
797 | 918 | unsigned long mask = dma_get_seg_boundary(dev); |
---|
798 | 919 | int i; |
---|
| 920 | + |
---|
| 921 | + if (unlikely(iommu_dma_deferred_attach(dev, domain))) |
---|
| 922 | + return 0; |
---|
| 923 | + |
---|
| 924 | + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) |
---|
| 925 | + iommu_dma_sync_sg_for_device(dev, sg, nents, dir); |
---|
799 | 926 | |
---|
800 | 927 | /* |
---|
801 | 928 | * Work out how much IOVA space we need, and align the segments to |
---|
.. | .. |
---|
836 | 963 | prev = s; |
---|
837 | 964 | } |
---|
838 | 965 | |
---|
839 | | - return iova_len; |
---|
840 | | -} |
---|
841 | | - |
---|
842 | | -int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, |
---|
843 | | - int nents, int prot) |
---|
844 | | -{ |
---|
845 | | - struct iommu_domain *domain; |
---|
846 | | - struct iommu_dma_cookie *cookie; |
---|
847 | | - struct iova_domain *iovad; |
---|
848 | | - dma_addr_t iova; |
---|
849 | | - size_t iova_len; |
---|
850 | | - |
---|
851 | | - domain = iommu_get_domain_for_dev(dev); |
---|
852 | | - if (!domain) |
---|
853 | | - return 0; |
---|
854 | | - cookie = domain->iova_cookie; |
---|
855 | | - iovad = &cookie->iovad; |
---|
856 | | - |
---|
857 | | - iova_len = iommu_dma_prepare_map_sg(dev, iovad, sg, nents); |
---|
858 | | - |
---|
859 | 966 | iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev); |
---|
860 | 967 | if (!iova) |
---|
861 | 968 | goto out_restore_sg; |
---|
.. | .. |
---|
864 | 971 | * We'll leave any physical concatenation to the IOMMU driver's |
---|
865 | 972 | * implementation - it knows better than we do. |
---|
866 | 973 | */ |
---|
867 | | - if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len) |
---|
| 974 | + if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len) |
---|
868 | 975 | goto out_free_iova; |
---|
869 | 976 | |
---|
870 | | - return iommu_dma_finalise_sg(dev, sg, nents, iova); |
---|
| 977 | + return __finalise_sg(dev, sg, nents, iova); |
---|
871 | 978 | |
---|
872 | 979 | out_free_iova: |
---|
873 | 980 | iommu_dma_free_iova(cookie, iova, iova_len); |
---|
874 | 981 | out_restore_sg: |
---|
875 | | - iommu_dma_invalidate_sg(sg, nents); |
---|
| 982 | + __invalidate_sg(sg, nents); |
---|
876 | 983 | return 0; |
---|
877 | 984 | } |
---|
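`iommu_dma_map_sg()` is now a complete `dma_map_ops` callback: it performs the optional cache maintenance, massages segment offsets and lengths so the whole list can be allocated as one contiguous IOVA range, maps it with a single `iommu_map_sg_atomic()`, and lets `__finalise_sg()` hand the per-segment DMA addresses back (or `__invalidate_sg()` restore the list on failure). From a driver this is still plain `dma_map_sg()`; an illustrative caller (hypothetical helper, real scatterlist/DMA API):

```c
/* Illustrative scatter-gather mapping; lands in iommu_dma_map_sg(). */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int map_pages_for_dma(struct device *dev, struct page **pages,
			     unsigned int npages, struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i, nents;

	if (sg_alloc_table_from_pages(sgt, pages, npages, 0,
				      npages * PAGE_SIZE, GFP_KERNEL))
		return -ENOMEM;

	nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL);
	if (!nents) {
		sg_free_table(sgt);
		return -ENOMEM;
	}

	/* With dma-iommu the mapped segments form one contiguous IOVA range. */
	for_each_sg(sgt->sgl, sg, nents, i)
		pr_debug("seg %d: %pad + %u\n", i, &sg_dma_address(sg),
			 sg_dma_len(sg));
	return 0;
}
```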
878 | 985 | |
---|
879 | | -void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, |
---|
880 | | - enum dma_data_direction dir, unsigned long attrs) |
---|
| 986 | +static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, |
---|
| 987 | + int nents, enum dma_data_direction dir, unsigned long attrs) |
---|
881 | 988 | { |
---|
882 | 989 | dma_addr_t start, end; |
---|
883 | 990 | struct scatterlist *tmp; |
---|
884 | 991 | int i; |
---|
| 992 | + |
---|
| 993 | + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) |
---|
| 994 | + iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir); |
---|
| 995 | + |
---|
885 | 996 | /* |
---|
886 | 997 | * The scatterlist segments are mapped into a single |
---|
887 | 998 | * contiguous IOVA allocation, so this is incredibly easy. |
---|
.. | .. |
---|
893 | 1004 | sg = tmp; |
---|
894 | 1005 | } |
---|
895 | 1006 | end = sg_dma_address(sg) + sg_dma_len(sg); |
---|
896 | | - __iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start); |
---|
| 1007 | + __iommu_dma_unmap(dev, start, end - start); |
---|
897 | 1008 | } |
---|
898 | 1009 | |
---|
899 | | -dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, |
---|
| 1010 | +static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, |
---|
900 | 1011 | size_t size, enum dma_data_direction dir, unsigned long attrs) |
---|
901 | 1012 | { |
---|
902 | 1013 | return __iommu_dma_map(dev, phys, size, |
---|
903 | | - dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO); |
---|
| 1014 | + dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO, |
---|
| 1015 | + dma_get_mask(dev)); |
---|
904 | 1016 | } |
---|
905 | 1017 | |
---|
906 | | -void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, |
---|
| 1018 | +static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, |
---|
907 | 1019 | size_t size, enum dma_data_direction dir, unsigned long attrs) |
---|
908 | 1020 | { |
---|
909 | | - __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size); |
---|
| 1021 | + __iommu_dma_unmap(dev, handle, size); |
---|
910 | 1022 | } |
---|
911 | 1023 | |
---|
912 | | -int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
---|
| 1024 | +static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr) |
---|
913 | 1025 | { |
---|
914 | | - return dma_addr == IOMMU_MAPPING_ERROR; |
---|
| 1026 | + size_t alloc_size = PAGE_ALIGN(size); |
---|
| 1027 | + int count = alloc_size >> PAGE_SHIFT; |
---|
| 1028 | + struct page *page = NULL, **pages = NULL; |
---|
| 1029 | + |
---|
| 1030 | + /* Non-coherent atomic allocation? Easy */ |
---|
| 1031 | + if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && |
---|
| 1032 | + dma_free_from_pool(dev, cpu_addr, alloc_size)) |
---|
| 1033 | + return; |
---|
| 1034 | + |
---|
| 1035 | + if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) { |
---|
| 1036 | + /* |
---|
| 1037 | + * If it the address is remapped, then it's either non-coherent |
---|
| 1038 | + * or highmem CMA, or an iommu_dma_alloc_remap() construction. |
---|
| 1039 | + */ |
---|
| 1040 | + pages = dma_common_find_pages(cpu_addr); |
---|
| 1041 | + if (!pages) |
---|
| 1042 | + page = vmalloc_to_page(cpu_addr); |
---|
| 1043 | + dma_common_free_remap(cpu_addr, alloc_size); |
---|
| 1044 | + } else { |
---|
| 1045 | + /* Lowmem means a coherent atomic or CMA allocation */ |
---|
| 1046 | + page = virt_to_page(cpu_addr); |
---|
| 1047 | + } |
---|
| 1048 | + |
---|
| 1049 | + if (pages) |
---|
| 1050 | + __iommu_dma_free_pages(pages, count); |
---|
| 1051 | + if (page) |
---|
| 1052 | + dma_free_contiguous(dev, page, alloc_size); |
---|
| 1053 | +} |
---|
| 1054 | + |
---|
| 1055 | +static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr, |
---|
| 1056 | + dma_addr_t handle, unsigned long attrs) |
---|
| 1057 | +{ |
---|
| 1058 | + __iommu_dma_unmap(dev, handle, size); |
---|
| 1059 | + __iommu_dma_free(dev, size, cpu_addr); |
---|
| 1060 | +} |
---|
| 1061 | + |
---|
| 1062 | +static void *iommu_dma_alloc_pages(struct device *dev, size_t size, |
---|
| 1063 | + struct page **pagep, gfp_t gfp, unsigned long attrs) |
---|
| 1064 | +{ |
---|
| 1065 | + bool coherent = dev_is_dma_coherent(dev); |
---|
| 1066 | + size_t alloc_size = PAGE_ALIGN(size); |
---|
| 1067 | + int node = dev_to_node(dev); |
---|
| 1068 | + struct page *page = NULL; |
---|
| 1069 | + void *cpu_addr; |
---|
| 1070 | + |
---|
| 1071 | + page = dma_alloc_contiguous(dev, alloc_size, gfp); |
---|
| 1072 | + if (!page) |
---|
| 1073 | + page = alloc_pages_node(node, gfp, get_order(alloc_size)); |
---|
| 1074 | + if (!page) |
---|
| 1075 | + return NULL; |
---|
| 1076 | + |
---|
| 1077 | + if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) { |
---|
| 1078 | + pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs); |
---|
| 1079 | + |
---|
| 1080 | + cpu_addr = dma_common_contiguous_remap(page, alloc_size, |
---|
| 1081 | + prot, __builtin_return_address(0)); |
---|
| 1082 | + if (!cpu_addr) |
---|
| 1083 | + goto out_free_pages; |
---|
| 1084 | + |
---|
| 1085 | + if (!coherent) |
---|
| 1086 | + arch_dma_prep_coherent(page, size); |
---|
| 1087 | + } else { |
---|
| 1088 | + cpu_addr = page_address(page); |
---|
| 1089 | + } |
---|
| 1090 | + |
---|
| 1091 | + *pagep = page; |
---|
| 1092 | + memset(cpu_addr, 0, alloc_size); |
---|
| 1093 | + return cpu_addr; |
---|
| 1094 | +out_free_pages: |
---|
| 1095 | + dma_free_contiguous(dev, page, alloc_size); |
---|
| 1096 | + return NULL; |
---|
| 1097 | +} |
---|
| 1098 | + |
---|
| 1099 | +static void *iommu_dma_alloc(struct device *dev, size_t size, |
---|
| 1100 | + dma_addr_t *handle, gfp_t gfp, unsigned long attrs) |
---|
| 1101 | +{ |
---|
| 1102 | + bool coherent = dev_is_dma_coherent(dev); |
---|
| 1103 | + int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); |
---|
| 1104 | + struct page *page = NULL; |
---|
| 1105 | + void *cpu_addr; |
---|
| 1106 | + |
---|
| 1107 | + gfp |= __GFP_ZERO; |
---|
| 1108 | + |
---|
| 1109 | + if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) && |
---|
| 1110 | + !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) { |
---|
| 1111 | + return iommu_dma_alloc_remap(dev, size, handle, gfp, |
---|
| 1112 | + dma_pgprot(dev, PAGE_KERNEL, attrs), attrs); |
---|
| 1113 | + } |
---|
| 1114 | + |
---|
| 1115 | + if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && |
---|
| 1116 | + !gfpflags_allow_blocking(gfp) && !coherent) |
---|
| 1117 | + page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr, |
---|
| 1118 | + gfp, NULL); |
---|
| 1119 | + else |
---|
| 1120 | + cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs); |
---|
| 1121 | + if (!cpu_addr) |
---|
| 1122 | + return NULL; |
---|
| 1123 | + |
---|
| 1124 | + *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot, |
---|
| 1125 | + dev->coherent_dma_mask); |
---|
| 1126 | + if (*handle == DMA_MAPPING_ERROR) { |
---|
| 1127 | + __iommu_dma_free(dev, size, cpu_addr); |
---|
| 1128 | + return NULL; |
---|
| 1129 | + } |
---|
| 1130 | + |
---|
| 1131 | + return cpu_addr; |
---|
| 1132 | +} |
---|
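`iommu_dma_alloc()` picks one of three backends: the remapped, physically scattered path when the caller may block and did not ask for contiguous memory, the atomic pool for non-blocking non-coherent requests (`CONFIG_DMA_DIRECT_REMAP`), and CMA/`alloc_pages_node()` otherwise, then maps whatever it got with `__iommu_dma_map()` against the coherent mask. Driver-side this remains the ordinary coherent API; an illustrative pair of allocations (hypothetical helpers, real DMA API, `DMA_ATTR_FORCE_CONTIGUOUS` steers to the contiguous path):

```c
/* Illustrative coherent allocations; both end up in iommu_dma_alloc(). */
#include <linux/dma-mapping.h>

static void *alloc_descriptor_ring(struct device *dev, size_t size,
				   dma_addr_t *dma)
{
	/* May be satisfied by scattered pages remapped into vmalloc space. */
	return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
}

static void *alloc_contig_buffer(struct device *dev, size_t size,
				 dma_addr_t *dma)
{
	/* Forces the CMA/alloc_pages path: physically contiguous as well. */
	return dma_alloc_attrs(dev, size, dma, GFP_KERNEL,
			       DMA_ATTR_FORCE_CONTIGUOUS);
}
```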
| 1133 | + |
---|
| 1134 | +#ifdef CONFIG_DMA_REMAP |
---|
| 1135 | +static void *iommu_dma_alloc_noncoherent(struct device *dev, size_t size, |
---|
| 1136 | + dma_addr_t *handle, enum dma_data_direction dir, gfp_t gfp) |
---|
| 1137 | +{ |
---|
| 1138 | + if (!gfpflags_allow_blocking(gfp)) { |
---|
| 1139 | + struct page *page; |
---|
| 1140 | + |
---|
| 1141 | + page = dma_common_alloc_pages(dev, size, handle, dir, gfp); |
---|
| 1142 | + if (!page) |
---|
| 1143 | + return NULL; |
---|
| 1144 | + return page_address(page); |
---|
| 1145 | + } |
---|
| 1146 | + |
---|
| 1147 | + return iommu_dma_alloc_remap(dev, size, handle, gfp | __GFP_ZERO, |
---|
| 1148 | + PAGE_KERNEL, 0); |
---|
| 1149 | +} |
---|
| 1150 | + |
---|
| 1151 | +static void iommu_dma_free_noncoherent(struct device *dev, size_t size, |
---|
| 1152 | + void *cpu_addr, dma_addr_t handle, enum dma_data_direction dir) |
---|
| 1153 | +{ |
---|
| 1154 | + __iommu_dma_unmap(dev, handle, size); |
---|
| 1155 | + __iommu_dma_free(dev, size, cpu_addr); |
---|
| 1156 | +} |
---|
| 1157 | +#else |
---|
| 1158 | +#define iommu_dma_alloc_noncoherent NULL |
---|
| 1159 | +#define iommu_dma_free_noncoherent NULL |
---|
| 1160 | +#endif /* CONFIG_DMA_REMAP */ |
---|
| 1161 | + |
---|
| 1162 | +static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma, |
---|
| 1163 | + void *cpu_addr, dma_addr_t dma_addr, size_t size, |
---|
| 1164 | + unsigned long attrs) |
---|
| 1165 | +{ |
---|
| 1166 | + unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; |
---|
| 1167 | + unsigned long pfn, off = vma->vm_pgoff; |
---|
| 1168 | + int ret; |
---|
| 1169 | + |
---|
| 1170 | + vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); |
---|
| 1171 | + |
---|
| 1172 | + if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) |
---|
| 1173 | + return ret; |
---|
| 1174 | + |
---|
| 1175 | + if (off >= nr_pages || vma_pages(vma) > nr_pages - off) |
---|
| 1176 | + return -ENXIO; |
---|
| 1177 | + |
---|
| 1178 | + if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) { |
---|
| 1179 | + struct page **pages = dma_common_find_pages(cpu_addr); |
---|
| 1180 | + |
---|
| 1181 | + if (pages) |
---|
| 1182 | + return __iommu_dma_mmap(pages, size, vma); |
---|
| 1183 | + pfn = vmalloc_to_pfn(cpu_addr); |
---|
| 1184 | + } else { |
---|
| 1185 | + pfn = page_to_pfn(virt_to_page(cpu_addr)); |
---|
| 1186 | + } |
---|
| 1187 | + |
---|
| 1188 | + return remap_pfn_range(vma, vma->vm_start, pfn + off, |
---|
| 1189 | + vma->vm_end - vma->vm_start, |
---|
| 1190 | + vma->vm_page_prot); |
---|
| 1191 | +} |
---|
| 1192 | + |
---|
| 1193 | +static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt, |
---|
| 1194 | + void *cpu_addr, dma_addr_t dma_addr, size_t size, |
---|
| 1195 | + unsigned long attrs) |
---|
| 1196 | +{ |
---|
| 1197 | + struct page *page; |
---|
| 1198 | + int ret; |
---|
| 1199 | + |
---|
| 1200 | + if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) { |
---|
| 1201 | + struct page **pages = dma_common_find_pages(cpu_addr); |
---|
| 1202 | + |
---|
| 1203 | + if (pages) { |
---|
| 1204 | + return sg_alloc_table_from_pages(sgt, pages, |
---|
| 1205 | + PAGE_ALIGN(size) >> PAGE_SHIFT, |
---|
| 1206 | + 0, size, GFP_KERNEL); |
---|
| 1207 | + } |
---|
| 1208 | + |
---|
| 1209 | + page = vmalloc_to_page(cpu_addr); |
---|
| 1210 | + } else { |
---|
| 1211 | + page = virt_to_page(cpu_addr); |
---|
| 1212 | + } |
---|
| 1213 | + |
---|
| 1214 | + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); |
---|
| 1215 | + if (!ret) |
---|
| 1216 | + sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); |
---|
| 1217 | + return ret; |
---|
| 1218 | +} |
---|
| 1219 | + |
---|
| 1220 | +static unsigned long iommu_dma_get_merge_boundary(struct device *dev) |
---|
| 1221 | +{ |
---|
| 1222 | + struct iommu_domain *domain = iommu_get_dma_domain(dev); |
---|
| 1223 | + |
---|
| 1224 | + return (1UL << __ffs(domain->pgsize_bitmap)) - 1; |
---|
| 1225 | +} |
---|
| 1226 | + |
---|
| 1227 | +static const struct dma_map_ops iommu_dma_ops = { |
---|
| 1228 | + .alloc = iommu_dma_alloc, |
---|
| 1229 | + .free = iommu_dma_free, |
---|
| 1230 | + .alloc_pages = dma_common_alloc_pages, |
---|
| 1231 | + .free_pages = dma_common_free_pages, |
---|
| 1232 | + .alloc_noncoherent = iommu_dma_alloc_noncoherent, |
---|
| 1233 | + .free_noncoherent = iommu_dma_free_noncoherent, |
---|
| 1234 | + .mmap = iommu_dma_mmap, |
---|
| 1235 | + .get_sgtable = iommu_dma_get_sgtable, |
---|
| 1236 | + .map_page = iommu_dma_map_page, |
---|
| 1237 | + .unmap_page = iommu_dma_unmap_page, |
---|
| 1238 | + .map_sg = iommu_dma_map_sg, |
---|
| 1239 | + .unmap_sg = iommu_dma_unmap_sg, |
---|
| 1240 | + .sync_single_for_cpu = iommu_dma_sync_single_for_cpu, |
---|
| 1241 | + .sync_single_for_device = iommu_dma_sync_single_for_device, |
---|
| 1242 | + .sync_sg_for_cpu = iommu_dma_sync_sg_for_cpu, |
---|
| 1243 | + .sync_sg_for_device = iommu_dma_sync_sg_for_device, |
---|
| 1244 | + .map_resource = iommu_dma_map_resource, |
---|
| 1245 | + .unmap_resource = iommu_dma_unmap_resource, |
---|
| 1246 | + .get_merge_boundary = iommu_dma_get_merge_boundary, |
---|
| 1247 | +}; |
---|
| 1248 | + |
---|
| 1249 | +/* |
---|
| 1250 | + * The IOMMU core code allocates the default DMA domain, which the underlying |
---|
| 1251 | + * IOMMU driver needs to support via the dma-iommu layer. |
---|
| 1252 | + */ |
---|
| 1253 | +void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size) |
---|
| 1254 | +{ |
---|
| 1255 | + struct iommu_domain *domain = iommu_get_domain_for_dev(dev); |
---|
| 1256 | + |
---|
| 1257 | + if (!domain) |
---|
| 1258 | + goto out_err; |
---|
| 1259 | + |
---|
| 1260 | + /* |
---|
| 1261 | + * The IOMMU core code allocates the default DMA domain, which the |
---|
| 1262 | + * underlying IOMMU driver needs to support via the dma-iommu layer. |
---|
| 1263 | + */ |
---|
| 1264 | + if (domain->type == IOMMU_DOMAIN_DMA) { |
---|
| 1265 | + if (iommu_dma_init_domain(domain, dma_base, size, dev)) |
---|
| 1266 | + goto out_err; |
---|
| 1267 | + dev->dma_ops = &iommu_dma_ops; |
---|
| 1268 | + } |
---|
| 1269 | + |
---|
| 1270 | + return; |
---|
| 1271 | +out_err: |
---|
| 1272 | + pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n", |
---|
| 1273 | + dev_name(dev)); |
---|
915 | 1274 | } |
---|
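The `iommu_dma_ops` table and `iommu_setup_dma_ops()` above replace the old exported helper set: everything in this file becomes static, and a device that ends up in an `IOMMU_DOMAIN_DMA` default domain simply has `iommu_dma_ops` installed as its `dma_ops`. The caller is the architecture glue; a condensed sketch modelled on the arm64 hook (treat the function body as illustrative, only `iommu_setup_dma_ops()` itself is defined here):

```c
/* Condensed illustration of the arch glue that hands a device to dma-iommu. */
#include <linux/dma-iommu.h>
#include <linux/iommu.h>

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;	/* recorded for dev_is_dma_coherent() */

	if (iommu)
		iommu_setup_dma_ops(dev, dma_base, size);
	/* without IOMMU translation the device keeps the direct-mapping ops */
}
```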
916 | 1275 | |
---|
917 | 1276 | static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, |
---|
.. | .. |
---|
928 | 1287 | if (msi_page->phys == msi_addr) |
---|
929 | 1288 | return msi_page; |
---|
930 | 1289 | |
---|
931 | | - msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC); |
---|
| 1290 | + msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL); |
---|
932 | 1291 | if (!msi_page) |
---|
933 | 1292 | return NULL; |
---|
934 | 1293 | |
---|
935 | | - iova = __iommu_dma_map(dev, msi_addr, size, prot); |
---|
936 | | - if (iommu_dma_mapping_error(dev, iova)) |
---|
| 1294 | + iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); |
---|
| 1295 | + if (!iova) |
---|
937 | 1296 | goto out_free_page; |
---|
| 1297 | + |
---|
| 1298 | + if (iommu_map(domain, iova, msi_addr, size, prot)) |
---|
| 1299 | + goto out_free_iova; |
---|
938 | 1300 | |
---|
939 | 1301 | INIT_LIST_HEAD(&msi_page->list); |
---|
940 | 1302 | msi_page->phys = msi_addr; |
---|
.. | .. |
---|
942 | 1304 | list_add(&msi_page->list, &cookie->msi_page_list); |
---|
943 | 1305 | return msi_page; |
---|
944 | 1306 | |
---|
| 1307 | +out_free_iova: |
---|
| 1308 | + iommu_dma_free_iova(cookie, iova, size); |
---|
945 | 1309 | out_free_page: |
---|
946 | 1310 | kfree(msi_page); |
---|
947 | 1311 | return NULL; |
---|
948 | 1312 | } |
---|
949 | 1313 | |
---|
950 | | -void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg) |
---|
| 1314 | +int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr) |
---|
951 | 1315 | { |
---|
952 | | - struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq)); |
---|
| 1316 | + struct device *dev = msi_desc_to_dev(desc); |
---|
953 | 1317 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); |
---|
954 | | - struct iommu_dma_cookie *cookie; |
---|
955 | 1318 | struct iommu_dma_msi_page *msi_page; |
---|
956 | | - phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo; |
---|
957 | | - unsigned long flags; |
---|
| 1319 | + static DEFINE_MUTEX(msi_prepare_lock); /* see below */ |
---|
958 | 1320 | |
---|
959 | | - if (!domain || !domain->iova_cookie) |
---|
960 | | - return; |
---|
961 | | - |
---|
962 | | - cookie = domain->iova_cookie; |
---|
| 1321 | + if (!domain || !domain->iova_cookie) { |
---|
| 1322 | + desc->iommu_cookie = NULL; |
---|
| 1323 | + return 0; |
---|
| 1324 | + } |
---|
963 | 1325 | |
---|
964 | 1326 | /* |
---|
965 | | - * We disable IRQs to rule out a possible inversion against |
---|
966 | | - * irq_desc_lock if, say, someone tries to retarget the affinity |
---|
967 | | - * of an MSI from within an IPI handler. |
---|
| 1327 | + * In fact the whole prepare operation should already be serialised by |
---|
| 1328 | + * irq_domain_mutex further up the callchain, but that's pretty subtle |
---|
| 1329 | + * on its own, so consider this locking as failsafe documentation... |
---|
968 | 1330 | */ |
---|
969 | | - spin_lock_irqsave(&cookie->msi_lock, flags); |
---|
| 1331 | + mutex_lock(&msi_prepare_lock); |
---|
970 | 1332 | msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain); |
---|
971 | | - spin_unlock_irqrestore(&cookie->msi_lock, flags); |
---|
| 1333 | + mutex_unlock(&msi_prepare_lock); |
---|
972 | 1334 | |
---|
973 | | - if (WARN_ON(!msi_page)) { |
---|
974 | | - /* |
---|
975 | | - * We're called from a void callback, so the best we can do is |
---|
976 | | - * 'fail' by filling the message with obviously bogus values. |
---|
977 | | - * Since we got this far due to an IOMMU being present, it's |
---|
978 | | - * not like the existing address would have worked anyway... |
---|
979 | | - */ |
---|
980 | | - msg->address_hi = ~0U; |
---|
981 | | - msg->address_lo = ~0U; |
---|
982 | | - msg->data = ~0U; |
---|
983 | | - } else { |
---|
984 | | - msg->address_hi = upper_32_bits(msi_page->iova); |
---|
985 | | - msg->address_lo &= cookie_msi_granule(cookie) - 1; |
---|
986 | | - msg->address_lo += lower_32_bits(msi_page->iova); |
---|
987 | | - } |
---|
| 1335 | + msi_desc_set_iommu_cookie(desc, msi_page); |
---|
| 1336 | + |
---|
| 1337 | + if (!msi_page) |
---|
| 1338 | + return -ENOMEM; |
---|
| 1339 | + return 0; |
---|
988 | 1340 | } |
---|
| 1341 | + |
---|
| 1342 | +void iommu_dma_compose_msi_msg(struct msi_desc *desc, |
---|
| 1343 | + struct msi_msg *msg) |
---|
| 1344 | +{ |
---|
| 1345 | + struct device *dev = msi_desc_to_dev(desc); |
---|
| 1346 | + const struct iommu_domain *domain = iommu_get_domain_for_dev(dev); |
---|
| 1347 | + const struct iommu_dma_msi_page *msi_page; |
---|
| 1348 | + |
---|
| 1349 | + msi_page = msi_desc_get_iommu_cookie(desc); |
---|
| 1350 | + |
---|
| 1351 | + if (!domain || !domain->iova_cookie || WARN_ON(!msi_page)) |
---|
| 1352 | + return; |
---|
| 1353 | + |
---|
| 1354 | + msg->address_hi = upper_32_bits(msi_page->iova); |
---|
| 1355 | + msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1; |
---|
| 1356 | + msg->address_lo += lower_32_bits(msi_page->iova); |
---|
| 1357 | +} |
---|
| 1358 | + |
---|
| 1359 | +static int iommu_dma_init(void) |
---|
| 1360 | +{ |
---|
| 1361 | + return iova_cache_get(); |
---|
| 1362 | +} |
---|
| 1363 | +arch_initcall(iommu_dma_init); |
---|
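The MSI path is split in two so nothing heavyweight happens from irqchip context anymore: `iommu_dma_prepare_msi()` runs at interrupt-allocation time, may sleep (GFP_KERNEL allocation, a static mutex instead of the old `msi_lock` spinlock) and caches the mapped doorbell page in the `msi_desc`, while `iommu_dma_compose_msi_msg()` only rewrites the address words when the message is composed. A hedged sketch of how an MSI irqchip consumes the pair (callback names and the doorbell address are illustrative, loosely shaped after a GICv3-ITS-style driver):

```c
/* Sketch: two-phase MSI doorbell mapping from an irqchip's point of view. */
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/dma-iommu.h>

/* Called while allocating the interrupt: may sleep, maps the doorbell page. */
static int demo_msi_prepare(struct msi_desc *desc, phys_addr_t doorbell)
{
	return iommu_dma_prepare_msi(desc, doorbell);
}

/* Called whenever the message is (re)composed: must not sleep. */
static void demo_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	phys_addr_t doorbell = 0x08020040;	/* hypothetical doorbell address */

	msg->address_hi = upper_32_bits(doorbell);
	msg->address_lo = lower_32_bits(doorbell);
	msg->data = d->hwirq;

	/* Swap the physical doorbell for the IOVA mapped in the prepare phase. */
	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
}
```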