@@ -1,24 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
 */

+#include <linux/dma-map-ops.h>
 #include <linux/spinlock.h>
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
 #include <linux/pfn_t.h>
+
+#include <drm/drm_prime.h>

 #include "msm_drv.h"
 #include "msm_fence.h"
@@ -61,26 +53,14 @@
 {
         struct device *dev = msm_obj->base.dev->dev;

-        if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
-                dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
-                                msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
-        } else {
-                dma_map_sg(dev, msm_obj->sgt->sgl,
-                                msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
-        }
+        dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
 }

 static void sync_for_cpu(struct msm_gem_object *msm_obj)
 {
         struct device *dev = msm_obj->base.dev->dev;

-        if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
-                dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
-                                msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
-        } else {
-                dma_unmap_sg(dev, msm_obj->sgt->sgl,
-                                msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
-        }
+        dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
 }

 /* allocate pages from VRAM carveout, used when no IOMMU: */
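
Note: dma_map_sgtable()/dma_unmap_sgtable() operate on the whole sg_table: the map helper records the mapped entry count in sgt->nents while unmap walks sgt->orig_nents, so the nents bookkeeping (and the classic nents vs. orig_nents mixup) disappears from call sites. A minimal sketch of the idiom — my_map_buffer() is an illustrative name, and unlike the call above it also checks the return value, which the helper does provide:

```c
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Sketch: map a scatter/gather table for DMA, use it, unmap it. */
static int my_map_buffer(struct device *dev, struct sg_table *sgt)
{
        int ret;

        ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
        if (ret)
                return ret;     /* nothing is mapped on failure */

        /* ... let the device access the buffer ... */

        dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
        return 0;
}
```
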
@@ -128,18 +108,18 @@
         p = get_pages_vram(obj, npages);

         if (IS_ERR(p)) {
-                dev_err(dev->dev, "could not get pages: %ld\n",
+                DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
                                 PTR_ERR(p));
                 return p;
         }

         msm_obj->pages = p;

-        msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
+        msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
         if (IS_ERR(msm_obj->sgt)) {
                 void *ptr = ERR_CAST(msm_obj->sgt);

-                dev_err(dev->dev, "failed to allocate sgt\n");
+                DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
                 msm_obj->sgt = NULL;
                 return ptr;
         }
@@ -317,7 +297,7 @@
         ret = drm_gem_create_mmap_offset(obj);

         if (ret) {
-                dev_err(dev->dev, "could not allocate mmap offset\n");
+                DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
                 return 0;
         }

@@ -389,63 +369,127 @@
         WARN_ON(!mutex_is_locked(&msm_obj->lock));

         list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
-                msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
+                if (vma->aspace) {
+                        msm_gem_purge_vma(vma->aspace, vma);
+                        msm_gem_close_vma(vma->aspace, vma);
+                }
                 del_vma(vma);
         }
 }

-/* get iova, taking a reference. Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj,
-                struct msm_gem_address_space *aspace, uint64_t *iova)
+static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+                struct msm_gem_address_space *aspace, uint64_t *iova,
+                u64 range_start, u64 range_end)
 {
         struct msm_gem_object *msm_obj = to_msm_bo(obj);
         struct msm_gem_vma *vma;
         int ret = 0;

-        mutex_lock(&msm_obj->lock);
-
-        if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
-                mutex_unlock(&msm_obj->lock);
-                return -EBUSY;
-        }
+        WARN_ON(!mutex_is_locked(&msm_obj->lock));

         vma = lookup_vma(obj, aspace);

         if (!vma) {
-                struct page **pages;
-
                 vma = add_vma(obj, aspace);
-                if (IS_ERR(vma)) {
-                        ret = PTR_ERR(vma);
-                        goto unlock;
-                }
+                if (IS_ERR(vma))
+                        return PTR_ERR(vma);

-                pages = get_pages(obj);
-                if (IS_ERR(pages)) {
-                        ret = PTR_ERR(pages);
-                        goto fail;
+                ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
+                        range_start, range_end);
+                if (ret) {
+                        del_vma(vma);
+                        return ret;
                 }
-
-                ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
-                                obj->size >> PAGE_SHIFT);
-                if (ret)
-                        goto fail;
         }

         *iova = vma->iova;
-
-        mutex_unlock(&msm_obj->lock);
         return 0;
+}

-fail:
-        del_vma(vma);
-unlock:
+static int msm_gem_pin_iova(struct drm_gem_object *obj,
+                struct msm_gem_address_space *aspace)
+{
+        struct msm_gem_object *msm_obj = to_msm_bo(obj);
+        struct msm_gem_vma *vma;
+        struct page **pages;
+        int prot = IOMMU_READ;
+
+        if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
+                prot |= IOMMU_WRITE;
+
+        if (msm_obj->flags & MSM_BO_MAP_PRIV)
+                prot |= IOMMU_PRIV;
+
+        WARN_ON(!mutex_is_locked(&msm_obj->lock));
+
+        if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
+                return -EBUSY;
+
+        vma = lookup_vma(obj, aspace);
+        if (WARN_ON(!vma))
+                return -EINVAL;
+
+        pages = get_pages(obj);
+        if (IS_ERR(pages))
+                return PTR_ERR(pages);
+
+        return msm_gem_map_vma(aspace, vma, prot,
+                        msm_obj->sgt, obj->size >> PAGE_SHIFT);
+}
+
+/*
+ * get iova and pin it. Should have a matching put
+ * limits iova to specified range (in pages)
+ */
+int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
+                struct msm_gem_address_space *aspace, uint64_t *iova,
+                u64 range_start, u64 range_end)
+{
+        struct msm_gem_object *msm_obj = to_msm_bo(obj);
+        u64 local;
+        int ret;
+
+        mutex_lock(&msm_obj->lock);
+
+        ret = msm_gem_get_iova_locked(obj, aspace, &local,
+                range_start, range_end);
+
+        if (!ret)
+                ret = msm_gem_pin_iova(obj, aspace);
+
+        if (!ret)
+                *iova = local;
+
         mutex_unlock(&msm_obj->lock);
         return ret;
 }

+/* get iova and pin it. Should have a matching put */
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
+                struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+        return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
+}
+
+/*
+ * Get an iova but don't pin it. Doesn't need a put because iovas are currently
+ * valid for the life of the object
+ */
+int msm_gem_get_iova(struct drm_gem_object *obj,
+                struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+        struct msm_gem_object *msm_obj = to_msm_bo(obj);
+        int ret;
+
+        mutex_lock(&msm_obj->lock);
+        ret = msm_gem_get_iova_locked(obj, aspace, iova, 0, U64_MAX);
+        mutex_unlock(&msm_obj->lock);
+
+        return ret;
+}
+
 /* get iova without taking a reference, used in places where you have
- * already done a 'msm_gem_get_iova()'.
+ * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
 uint64_t msm_gem_iova(struct drm_gem_object *obj,
                 struct msm_gem_address_space *aspace)
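
Note: the old msm_gem_get_iova() allocated the vma and mapped it in one shot. After the split, vma lookup/allocation (msm_gem_get_iova_locked()) is separate from pinning the backing pages into the IOMMU (msm_gem_pin_iova()), and the public entry points compose the two. A caller-side sketch using only functions from this patch (msm_gem_unpin_iova() is introduced just below; my_bind_bo is an illustrative name):

```c
/* Sketch: pin a BO for the GPU, use the address, then drop the pin. */
static int my_bind_bo(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace)
{
        uint64_t iova;
        int ret;

        /* allocates the vma on first use, then pins the pages */
        ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
        if (ret)
                return ret;

        /* ... program 'iova' into the hardware ... */

        /* unpins; the vma itself stays valid for the object's lifetime */
        msm_gem_unpin_iova(obj, aspace);
        return 0;
}
```
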
@@ -461,15 +505,24 @@
         return vma ? vma->iova : 0;
 }

-void msm_gem_put_iova(struct drm_gem_object *obj,
+/*
+ * Unpin an iova by updating the reference counts. The memory isn't actually
+ * purged until something else (shrinker, mm_notifier, destroy, etc) decides
+ * to get rid of it
+ */
+void msm_gem_unpin_iova(struct drm_gem_object *obj,
                 struct msm_gem_address_space *aspace)
 {
-        // XXX TODO ..
-        // NOTE: probably don't need a _locked() version.. we wouldn't
-        // normally unmap here, but instead just mark that it could be
-        // unmapped (if the iova refcnt drops to zero), but then later
-        // if another _get_iova_locked() fails we can start unmapping
-        // things that are no longer needed..
+        struct msm_gem_object *msm_obj = to_msm_bo(obj);
+        struct msm_gem_vma *vma;
+
+        mutex_lock(&msm_obj->lock);
+        vma = lookup_vma(obj, aspace);
+
+        if (!WARN_ON(!vma))
+                msm_gem_unmap_vma(aspace, vma);
+
+        mutex_unlock(&msm_obj->lock);
 }

 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
@@ -478,7 +531,7 @@
         args->pitch = align_pitch(args->width, args->bpp);
         args->size = PAGE_ALIGN(args->pitch * args->height);
         return msm_gem_new_handle(dev, file, args->size,
-                        MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
+                        MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
 }

 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -496,7 +549,7 @@

         *offset = msm_gem_mmap_offset(obj);

-        drm_gem_object_put_unlocked(obj);
+        drm_gem_object_put(obj);

 fail:
         return ret;
@@ -507,10 +560,13 @@
         struct msm_gem_object *msm_obj = to_msm_bo(obj);
         int ret = 0;

+        if (obj->import_attach)
+                return ERR_PTR(-ENODEV);
+
         mutex_lock(&msm_obj->lock);

         if (WARN_ON(msm_obj->madv > madv)) {
-                dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
+                DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
                         msm_obj->madv, madv);
                 mutex_unlock(&msm_obj->lock);
                 return ERR_PTR(-EBUSY);
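
Note: the new early return makes the vmap path refuse imported dma-bufs instead of vmapping pages it doesn't own; a kernel mapping for those has to come from the exporter. A hedged sketch against the dma-buf API of this kernel generation (my_vmap_imported is an illustrative name):

```c
#include <linux/dma-buf.h>

/* Sketch: imported buffers get a kernel mapping via the exporter,
 * not via the shmem path used for natively allocated objects.
 */
static void *my_vmap_imported(struct drm_gem_object *obj)
{
        if (!obj->import_attach)
                return ERR_PTR(-EINVAL);

        /* may return NULL if the exporter cannot vmap */
        return dma_buf_vmap(obj->import_attach->dmabuf);
}
```
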
@@ -655,14 +711,13 @@
 int msm_gem_sync_object(struct drm_gem_object *obj,
                 struct msm_fence_context *fctx, bool exclusive)
 {
-        struct msm_gem_object *msm_obj = to_msm_bo(obj);
-        struct reservation_object_list *fobj;
+        struct dma_resv_list *fobj;
         struct dma_fence *fence;
         int i, ret;

-        fobj = reservation_object_get_list(msm_obj->resv);
+        fobj = dma_resv_get_list(obj->resv);
         if (!fobj || (fobj->shared_count == 0)) {
-                fence = reservation_object_get_excl(msm_obj->resv);
+                fence = dma_resv_get_excl(obj->resv);
                 /* don't need to wait on our own fences, since ring is fifo */
                 if (fence && (fence->context != fctx->context)) {
                         ret = dma_fence_wait(fence, true);
@@ -676,7 +731,7 @@

         for (i = 0; i < fobj->shared_count; i++) {
                 fence = rcu_dereference_protected(fobj->shared[i],
-                                reservation_object_held(msm_obj->resv));
+                                dma_resv_held(obj->resv));
                 if (fence->context != fctx->context) {
                         ret = dma_fence_wait(fence, true);
                         if (ret)
@@ -687,42 +742,41 @@
         return 0;
 }

-void msm_gem_move_to_active(struct drm_gem_object *obj,
-                struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
+void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
 {
         struct msm_gem_object *msm_obj = to_msm_bo(obj);
+        WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
         WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
-        msm_obj->gpu = gpu;
-        if (exclusive)
-                reservation_object_add_excl_fence(msm_obj->resv, fence);
-        else
-                reservation_object_add_shared_fence(msm_obj->resv, fence);
-        list_del_init(&msm_obj->mm_list);
-        list_add_tail(&msm_obj->mm_list, &gpu->active_list);
+
+        if (!atomic_fetch_inc(&msm_obj->active_count)) {
+                msm_obj->gpu = gpu;
+                list_del_init(&msm_obj->mm_list);
+                list_add_tail(&msm_obj->mm_list, &gpu->active_list);
+        }
 }

-void msm_gem_move_to_inactive(struct drm_gem_object *obj)
+void msm_gem_active_put(struct drm_gem_object *obj)
 {
-        struct drm_device *dev = obj->dev;
-        struct msm_drm_private *priv = dev->dev_private;
         struct msm_gem_object *msm_obj = to_msm_bo(obj);
+        struct msm_drm_private *priv = obj->dev->dev_private;

-        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+        WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

-        msm_obj->gpu = NULL;
-        list_del_init(&msm_obj->mm_list);
-        list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+        if (!atomic_dec_return(&msm_obj->active_count)) {
+                msm_obj->gpu = NULL;
+                list_del_init(&msm_obj->mm_list);
+                list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+        }
 }

 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 {
-        struct msm_gem_object *msm_obj = to_msm_bo(obj);
         bool write = !!(op & MSM_PREP_WRITE);
         unsigned long remain =
                 op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
         long ret;

-        ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
+        ret = dma_resv_wait_timeout_rcu(obj->resv, write,
                         true, remain);
         if (ret == 0)
                 return remain == 0 ? -EBUSY : -ETIMEDOUT;
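
Note: msm_gem_active_get()/put() turn the old unconditional list moves into a counted pair: atomic_fetch_inc() returns the value *before* the increment and atomic_dec_return() the value *after* the decrement, so only the 0→1 transition moves the object onto the GPU's active list and only the 1→0 transition moves it back, making nested gets cheap. The same idiom in isolation (all names illustrative):

```c
#include <linux/atomic.h>
#include <linux/printk.h>

static atomic_t active_count = ATOMIC_INIT(0);

static void my_active_get(void)
{
        /* old value was 0 -> this is the first get */
        if (!atomic_fetch_inc(&active_count))
                pr_debug("0 -> 1: move to the active list\n");
}

static void my_active_put(void)
{
        /* new value is 0 -> this was the last put */
        if (!atomic_dec_return(&active_count))
                pr_debug("1 -> 0: move back to the inactive list\n");
}
```
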
@@ -745,7 +799,7 @@
                 struct seq_file *m)
 {
         if (!dma_fence_is_signaled(fence))
-                seq_printf(m, "\t%9s: %s %s seq %u\n", type,
+                seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
                                 fence->ops->get_driver_name(fence),
                                 fence->ops->get_timeline_name(fence),
                                 fence->seqno);
@@ -754,8 +808,8 @@
 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {
         struct msm_gem_object *msm_obj = to_msm_bo(obj);
-        struct reservation_object *robj = msm_obj->resv;
-        struct reservation_object_list *fobj;
+        struct dma_resv *robj = obj->resv;
+        struct dma_resv_list *fobj;
         struct dma_fence *fence;
         struct msm_gem_vma *vma;
         uint64_t off = drm_vma_node_start(&obj->vma_node);
@@ -776,16 +830,43 @@
                 break;
         }

-        seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
+        seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
                         msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
                         obj->name, kref_read(&obj->refcount),
                         off, msm_obj->vaddr);

-        /* FIXME: we need to print the address space here too */
-        list_for_each_entry(vma, &msm_obj->vmas, list)
-                seq_printf(m, " %08llx", vma->iova);
+        seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

-        seq_printf(m, " %zu%s\n", obj->size, madv);
+        if (!list_empty(&msm_obj->vmas)) {
+
+                seq_puts(m, " vmas:");
+
+                list_for_each_entry(vma, &msm_obj->vmas, list) {
+                        const char *name, *comm;
+                        if (vma->aspace) {
+                                struct msm_gem_address_space *aspace = vma->aspace;
+                                struct task_struct *task =
+                                        get_pid_task(aspace->pid, PIDTYPE_PID);
+                                if (task) {
+                                        comm = kstrdup(task->comm, GFP_KERNEL);
+                                        put_task_struct(task);
+                                } else {
+                                        comm = NULL;
+                                }
+                                name = aspace->name;
+                        } else {
+                                name = comm = NULL;
+                        }
+                        seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
+                                name, comm ? ":" : "", comm ? comm : "",
+                                vma->aspace, vma->iova,
+                                vma->mapped ? "mapped" : "unmapped",
+                                vma->inuse);
+                        kfree(comm);
+                }
+
+                seq_puts(m, "\n");
+        }

         rcu_read_lock();
         fobj = rcu_dereference(robj->fence);
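
Note: the debugfs describe path resolves an address space's struct pid to a command name without holding the task any longer than needed: get_pid_task() takes a task reference, kstrdup() copies the name, and the reference is dropped immediately. The pattern in isolation (my_comm_of is a hypothetical helper; the caller must kfree() the result):

```c
#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/slab.h>

static char *my_comm_of(struct pid *pid)
{
        struct task_struct *task;
        char *comm = NULL;

        task = get_pid_task(pid, PIDTYPE_PID);  /* takes a task ref */
        if (task) {
                comm = kstrdup(task->comm, GFP_KERNEL);
                put_task_struct(task);          /* drop it right away */
        }
        return comm;    /* NULL if the task is already gone */
}
```
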
@@ -812,9 +893,10 @@
         int count = 0;
         size_t size = 0;

+        seq_puts(m, " flags id ref offset kaddr size madv name\n");
         list_for_each_entry(msm_obj, list, mm_list) {
                 struct drm_gem_object *obj = &msm_obj->base;
-                seq_printf(m, " ");
+                seq_puts(m, " ");
                 msm_gem_describe(obj, m);
                 count++;
                 size += obj->size;
@@ -824,11 +906,21 @@
 }
 #endif

-/* don't call directly! Use drm_gem_object_put() and friends */
+/* don't call directly! Use drm_gem_object_put_locked() and friends */
 void msm_gem_free_object(struct drm_gem_object *obj)
 {
-        struct drm_device *dev = obj->dev;
         struct msm_gem_object *msm_obj = to_msm_bo(obj);
+        struct drm_device *dev = obj->dev;
+        struct msm_drm_private *priv = dev->dev_private;
+
+        if (llist_add(&msm_obj->freed, &priv->free_list))
+                queue_work(priv->wq, &priv->free_work);
+}
+
+static void free_object(struct msm_gem_object *msm_obj)
+{
+        struct drm_gem_object *obj = &msm_obj->base;
+        struct drm_device *dev = obj->dev;

         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
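
Note: final frees now happen via a lock-free producer/consumer pair: llist_add() returns true only when it adds to an *empty* list, so the work item is queued exactly once per batch no matter how many objects pile up, and the worker drains the whole batch atomically with llist_del_all(). The skeleton of the pattern with illustrative my_* names (the patch's worker, msm_gem_free_work() further down, additionally takes struct_mutex and yields via need_resched()):

```c
#include <linux/llist.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_obj {
        struct llist_node freed;
        /* ... payload ... */
};

static LLIST_HEAD(free_list);

static void my_free_work(struct work_struct *work)
{
        struct llist_node *batch = llist_del_all(&free_list);
        struct my_obj *obj, *next;

        llist_for_each_entry_safe(obj, next, batch, freed)
                kfree(obj);
}
static DECLARE_WORK(free_work, my_free_work);

/* Producer: safe from any context; queues the worker once per batch. */
static void my_defer_free(struct my_obj *obj)
{
        if (llist_add(&obj->freed, &free_list))
                schedule_work(&free_work);
}
```
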
@@ -842,8 +934,7 @@
         put_iova(obj);

         if (obj->import_attach) {
-                if (msm_obj->vaddr)
-                        dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
+                WARN_ON(msm_obj->vaddr);

                 /* Don't drop the pages for imported dmabuf, as they are not
                  * ours, just free the array we allocated:
@@ -857,18 +948,39 @@
                 put_pages(obj);
         }

-        if (msm_obj->resv == &msm_obj->_resv)
-                reservation_object_fini(msm_obj->resv);
-
         drm_gem_object_release(obj);

         mutex_unlock(&msm_obj->lock);
         kfree(msm_obj);
 }

+void msm_gem_free_work(struct work_struct *work)
+{
+        struct msm_drm_private *priv =
+                container_of(work, struct msm_drm_private, free_work);
+        struct drm_device *dev = priv->dev;
+        struct llist_node *freed;
+        struct msm_gem_object *msm_obj, *next;
+
+        while ((freed = llist_del_all(&priv->free_list))) {
+
+                mutex_lock(&dev->struct_mutex);
+
+                llist_for_each_entry_safe(msm_obj, next,
+                                freed, freed)
+                        free_object(msm_obj);
+
+                mutex_unlock(&dev->struct_mutex);
+
+                if (need_resched())
+                        break;
+        }
+}
+
 /* convenience method to construct a GEM buffer object, and userspace handle */
 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
-                uint32_t size, uint32_t flags, uint32_t *handle)
+                uint32_t size, uint32_t flags, uint32_t *handle,
+                char *name)
 {
         struct drm_gem_object *obj;
         int ret;
@@ -878,21 +990,21 @@
         if (IS_ERR(obj))
                 return PTR_ERR(obj);

+        if (name)
+                msm_gem_object_set_name(obj, "%s", name);
+
         ret = drm_gem_handle_create(file, obj, handle);

         /* drop reference from allocate - handle holds it now */
-        drm_gem_object_put_unlocked(obj);
+        drm_gem_object_put(obj);

         return ret;
 }

 static int msm_gem_new_impl(struct drm_device *dev,
                 uint32_t size, uint32_t flags,
-                struct reservation_object *resv,
-                struct drm_gem_object **obj,
-                bool struct_mutex_locked)
+                struct drm_gem_object **obj)
 {
-        struct msm_drm_private *priv = dev->dev_private;
         struct msm_gem_object *msm_obj;

         switch (flags & MSM_BO_CACHE_MASK) {
@@ -901,7 +1013,7 @@
         case MSM_BO_WC:
                 break;
         default:
-                dev_err(dev->dev, "invalid cache flag: %x\n",
+                DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
                         (flags & MSM_BO_CACHE_MASK));
                 return -EINVAL;
         }
@@ -915,24 +1027,8 @@
         msm_obj->flags = flags;
         msm_obj->madv = MSM_MADV_WILLNEED;

-        if (resv) {
-                msm_obj->resv = resv;
-        } else {
-                msm_obj->resv = &msm_obj->_resv;
-                reservation_object_init(msm_obj->resv);
-        }
-
         INIT_LIST_HEAD(&msm_obj->submit_entry);
         INIT_LIST_HEAD(&msm_obj->vmas);
-
-        if (struct_mutex_locked) {
-                WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-                list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-        } else {
-                mutex_lock(&dev->struct_mutex);
-                list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-                mutex_unlock(&dev->struct_mutex);
-        }

         *obj = &msm_obj->base;

@@ -943,15 +1039,16 @@
                 uint32_t size, uint32_t flags, bool struct_mutex_locked)
 {
         struct msm_drm_private *priv = dev->dev_private;
+        struct msm_gem_object *msm_obj;
         struct drm_gem_object *obj = NULL;
         bool use_vram = false;
         int ret;

         size = PAGE_ALIGN(size);

-        if (!iommu_present(&platform_bus_type))
+        if (!msm_use_mmu(dev))
                 use_vram = true;
-        else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
+        else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
                 use_vram = true;

         if (WARN_ON(use_vram && !priv->vram.size))
@@ -963,14 +1060,15 @@
         if (size == 0)
                 return ERR_PTR(-EINVAL);

-        ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
+        ret = msm_gem_new_impl(dev, size, flags, &obj);
         if (ret)
                 return ERR_PTR(ret);
+
+        msm_obj = to_msm_bo(obj);

         if (use_vram) {
                 struct msm_gem_vma *vma;
                 struct page **pages;
-                struct msm_gem_object *msm_obj = to_msm_bo(obj);

                 mutex_lock(&msm_obj->lock);

@@ -996,11 +1094,27 @@
                 ret = drm_gem_object_init(dev, obj, size);
                 if (ret)
                         goto fail;
+                /*
+                 * Our buffers are kept pinned, so allocating them from the
+                 * MOVABLE zone is a really bad idea, and conflicts with CMA.
+                 * See comments above new_inode() why this is required _and_
+                 * expected if you're going to pin these pages.
+                 */
+                mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
+        }
+
+        if (struct_mutex_locked) {
+                WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+                list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+        } else {
+                mutex_lock(&dev->struct_mutex);
+                list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+                mutex_unlock(&dev->struct_mutex);
         }

         return obj;

 fail:
-        drm_gem_object_put_unlocked(obj);
+        drm_gem_object_put(obj);
         return ERR_PTR(ret);
 }
1007 | 1121 | |
---|
.. | .. |
---|
1020 | 1134 | struct drm_gem_object *msm_gem_import(struct drm_device *dev, |
---|
1021 | 1135 | struct dma_buf *dmabuf, struct sg_table *sgt) |
---|
1022 | 1136 | { |
---|
| 1137 | + struct msm_drm_private *priv = dev->dev_private; |
---|
1023 | 1138 | struct msm_gem_object *msm_obj; |
---|
1024 | 1139 | struct drm_gem_object *obj; |
---|
1025 | 1140 | uint32_t size; |
---|
1026 | 1141 | int ret, npages; |
---|
1027 | 1142 | |
---|
1028 | 1143 | /* if we don't have IOMMU, don't bother pretending we can import: */ |
---|
1029 | | - if (!iommu_present(&platform_bus_type)) { |
---|
1030 | | - dev_err(dev->dev, "cannot import without IOMMU\n"); |
---|
| 1144 | + if (!msm_use_mmu(dev)) { |
---|
| 1145 | + DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n"); |
---|
1031 | 1146 | return ERR_PTR(-EINVAL); |
---|
1032 | 1147 | } |
---|
1033 | 1148 | |
---|
1034 | 1149 | size = PAGE_ALIGN(dmabuf->size); |
---|
1035 | 1150 | |
---|
1036 | | - ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false); |
---|
| 1151 | + ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj); |
---|
1037 | 1152 | if (ret) |
---|
1038 | 1153 | return ERR_PTR(ret); |
---|
1039 | 1154 | |
---|
@@ -1058,9 +1173,14 @@
         }

         mutex_unlock(&msm_obj->lock);
+
+        mutex_lock(&dev->struct_mutex);
+        list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+        mutex_unlock(&dev->struct_mutex);
+
         return obj;

 fail:
-        drm_gem_object_put_unlocked(obj);
+        drm_gem_object_put(obj);
         return ERR_PTR(ret);
 }
1067 | 1187 | |
---|
.. | .. |
---|
1077 | 1197 | return ERR_CAST(obj); |
---|
1078 | 1198 | |
---|
1079 | 1199 | if (iova) { |
---|
1080 | | - ret = msm_gem_get_iova(obj, aspace, iova); |
---|
1081 | | - if (ret) { |
---|
1082 | | - drm_gem_object_put(obj); |
---|
1083 | | - return ERR_PTR(ret); |
---|
1084 | | - } |
---|
| 1200 | + ret = msm_gem_get_and_pin_iova(obj, aspace, iova); |
---|
| 1201 | + if (ret) |
---|
| 1202 | + goto err; |
---|
1085 | 1203 | } |
---|
1086 | 1204 | |
---|
1087 | 1205 | vaddr = msm_gem_get_vaddr(obj); |
---|
1088 | 1206 | if (IS_ERR(vaddr)) { |
---|
1089 | | - msm_gem_put_iova(obj, aspace); |
---|
1090 | | - drm_gem_object_put(obj); |
---|
1091 | | - return ERR_CAST(vaddr); |
---|
| 1207 | + msm_gem_unpin_iova(obj, aspace); |
---|
| 1208 | + ret = PTR_ERR(vaddr); |
---|
| 1209 | + goto err; |
---|
1092 | 1210 | } |
---|
1093 | 1211 | |
---|
1094 | 1212 | if (bo) |
---|
1095 | 1213 | *bo = obj; |
---|
1096 | 1214 | |
---|
1097 | 1215 | return vaddr; |
---|
| 1216 | +err: |
---|
| 1217 | + if (locked) |
---|
| 1218 | + drm_gem_object_put_locked(obj); |
---|
| 1219 | + else |
---|
| 1220 | + drm_gem_object_put(obj); |
---|
| 1221 | + |
---|
| 1222 | + return ERR_PTR(ret); |
---|
| 1223 | + |
---|
1098 | 1224 | } |
---|
1099 | 1225 | |
---|
1100 | 1226 | void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, |
---|
@@ -1110,3 +1236,31 @@
 {
         return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
 }
+
+void msm_gem_kernel_put(struct drm_gem_object *bo,
+                struct msm_gem_address_space *aspace, bool locked)
+{
+        if (IS_ERR_OR_NULL(bo))
+                return;
+
+        msm_gem_put_vaddr(bo);
+        msm_gem_unpin_iova(bo, aspace);
+
+        if (locked)
+                drm_gem_object_put_locked(bo);
+        else
+                drm_gem_object_put(bo);
+}
+
+void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
+{
+        struct msm_gem_object *msm_obj = to_msm_bo(bo);
+        va_list ap;
+
+        if (!fmt)
+                return;
+
+        va_start(ap, fmt);
+        vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
+        va_end(ap);
+}
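
Note: msm_gem_object_set_name() is a printf-style setter that formats into a fixed-size name buffer on the object, with vsnprintf() truncating safely. Callers that forward externally supplied strings pass them as an argument to a "%s" format rather than as the format itself — exactly what msm_gem_new_handle() does above — so user-controlled names can't act as format strings. Illustrative usage (the ring variable and its id field are assumptions, not part of this hunk):

```c
/* a fixed, trusted format string */
msm_gem_object_set_name(obj, "ring%d", ring->id);

/* an untrusted name goes through "%s", never as the format itself */
msm_gem_object_set_name(obj, "%s", name);
```
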