--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -3,10 +3,11 @@
  * Copyright (C) 2015-2018 Etnaviv Project
  */
 
-#include <linux/spinlock.h>
+#include <drm/drm_prime.h>
+#include <linux/dma-mapping.h>
 #include <linux/shmem_fs.h>
-#include <linux/sched/mm.h>
-#include <linux/sched/task.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
 
 #include "etnaviv_drv.h"
 #include "etnaviv_gem.h"
@@ -26,7 +27,7 @@
 	 * because display controller, GPU, etc. are not coherent.
 	 */
 	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
-		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
+		dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
 }
 
 static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
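Note: dma_map_sgtable() behaves differently from the dma_map_sg() call it
replaces: it records the number of mapped entries inside the sg_table itself
and returns 0 or a negative errno rather than a mapped-entry count. That is
what lets the sgtable-based sync/unmap helpers further down pick up the right
count without the driver tracking nents. A minimal sketch of the convention,
with generic names rather than etnaviv's:

    #include <linux/dma-mapping.h>

    static int example_dma(struct device *dev, struct sg_table *sgt)
    {
            int ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);

            if (ret)
                    return ret;     /* nothing was mapped */

            /* ... device DMA happens here ... */

            dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
            return 0;
    }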
@@ -50,7 +51,7 @@
 	 * discard those writes.
 	 */
 	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
-		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
+		dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
 }
 
 /* called with etnaviv_obj->lock held */
@@ -102,7 +103,8 @@
 	int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
 	struct sg_table *sgt;
 
-	sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
+	sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
+				    etnaviv_obj->pages, npages);
 	if (IS_ERR(sgt)) {
 		dev_err(dev->dev, "failed to allocate sgt: %ld\n",
 			PTR_ERR(sgt));
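Note: drm_prime_pages_to_sg() grew a drm_device argument so the DRM core can
derive a maximum scatterlist segment size from the device that will do the
mapping instead of assuming one. A hedged caller sketch, names illustrative:

    struct sg_table *sgt;

    sgt = drm_prime_pages_to_sg(obj->dev, pages, npages);
    if (IS_ERR(sgt))
            return PTR_ERR(sgt);    /* segment limits came from obj->dev */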
@@ -222,28 +224,16 @@
 
 static struct etnaviv_vram_mapping *
 etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
-			     struct etnaviv_iommu *mmu)
+			     struct etnaviv_iommu_context *context)
 {
 	struct etnaviv_vram_mapping *mapping;
 
 	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
-		if (mapping->mmu == mmu)
+		if (mapping->context == context)
 			return mapping;
 	}
 
 	return NULL;
-}
-
-void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
-{
-	struct etnaviv_gem_object *etnaviv_obj = mapping->object;
-
-	drm_gem_object_get(&etnaviv_obj->base);
-
-	mutex_lock(&etnaviv_obj->lock);
-	WARN_ON(mapping->use == 0);
-	mapping->use += 1;
-	mutex_unlock(&etnaviv_obj->lock);
 }
 
 void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
@@ -255,11 +245,12 @@
 	mapping->use -= 1;
 	mutex_unlock(&etnaviv_obj->lock);
 
-	drm_gem_object_put_unlocked(&etnaviv_obj->base);
+	drm_gem_object_put(&etnaviv_obj->base);
 }
 
 struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
-	struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
+	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
+	u64 va)
 {
 	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 	struct etnaviv_vram_mapping *mapping;
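Note: lookups are now keyed by per-client MMU context rather than by the GPU's
single MMU, and the new va parameter carries a caller-requested GPU virtual
address: 0 keeps the old behaviour of letting the kernel choose, a non-zero
value is the softpin case. A sketch of a submit-path caller, with illustrative
names (client_context and bo_va are assumptions, not etnaviv identifiers):

    mapping = etnaviv_gem_mapping_get(&bo->base, client_context, bo_va);
    if (IS_ERR(mapping))
            return PTR_ERR(mapping);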
@@ -267,7 +258,7 @@
 	int ret = 0;
 
 	mutex_lock(&etnaviv_obj->lock);
-	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
+	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
 	if (mapping) {
 		/*
 		 * Holding the object lock prevents the use count changing
@@ -276,12 +267,12 @@
 		 * the MMU owns this mapping to close this race.
 		 */
 		if (mapping->use == 0) {
-			mutex_lock(&gpu->mmu->lock);
-			if (mapping->mmu == gpu->mmu)
+			mutex_lock(&mmu_context->lock);
+			if (mapping->context == mmu_context)
 				mapping->use += 1;
 			else
 				mapping = NULL;
-			mutex_unlock(&gpu->mmu->lock);
+			mutex_unlock(&mmu_context->lock);
 			if (mapping)
 				goto out;
 		} else {
@@ -314,15 +305,18 @@
 		list_del(&mapping->obj_node);
 	}
 
-	mapping->mmu = gpu->mmu;
+	mapping->context = etnaviv_iommu_context_get(mmu_context);
 	mapping->use = 1;
 
-	ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
-				    mapping);
-	if (ret < 0)
+	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
+				    mmu_context->global->memory_base,
+				    mapping, va);
+	if (ret < 0) {
+		etnaviv_iommu_context_put(mmu_context);
 		kfree(mapping);
-	else
+	} else {
 		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
+	}
 
 out:
 	mutex_unlock(&etnaviv_obj->lock);
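Note: the mapping now holds its own reference on the MMU context.
etnaviv_iommu_context_get() is taken when the context pointer is stored, and
the matching put happens either on the map-failure path above or when the
mapping is destroyed in the free path further down. The pairing rule as a
sketch against the etnaviv context API (helper names are hypothetical):

    static void mapping_attach(struct etnaviv_vram_mapping *mapping,
                               struct etnaviv_iommu_context *ctx)
    {
            mapping->context = etnaviv_iommu_context_get(ctx);      /* +1 */
    }

    static void mapping_detach(struct etnaviv_vram_mapping *mapping)
    {
            etnaviv_iommu_context_put(mapping->context);            /* -1 */
            mapping->context = NULL;
    }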
@@ -379,7 +373,7 @@
 }
 
 int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
-		struct timespec *timeout)
+		struct drm_etnaviv_timespec *timeout)
 {
 	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 	struct drm_device *dev = obj->dev;
@@ -397,22 +391,21 @@
 	}
 
 	if (op & ETNA_PREP_NOSYNC) {
-		if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
+		if (!dma_resv_test_signaled_rcu(obj->resv,
							  write))
 			return -EBUSY;
 	} else {
 		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
 
-		ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
+		ret = dma_resv_wait_timeout_rcu(obj->resv,
							  write, true, remain);
 		if (ret <= 0)
 			return ret == 0 ? -ETIMEDOUT : ret;
 	}
 
 	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
-		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
-				    etnaviv_obj->sgt->nents,
-				    etnaviv_op_to_dma_dir(op));
+		dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
+					 etnaviv_op_to_dma_dir(op));
 		etnaviv_obj->last_cpu_prep_op = op;
 	}
 
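Note: two renames meet in this hunk: reservation_object became dma_resv (along
with its test/wait helpers), and the driver-private etnaviv_obj->resv gives
way to the reservation object embedded in struct drm_gem_object, reached as
obj->resv. A minimal sketch of the wait pattern, assuming the dma_resv API of
this era:

    long ret;

    /* wait_all=true also waits for shared (read) fences */
    ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all, true, remain);
    if (ret == 0)
            return -ETIMEDOUT;      /* fences still pending at timeout */
    if (ret < 0)
            return ret;             /* interrupted */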
@@ -427,8 +420,7 @@
 	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
 		/* fini without a prep is almost certainly a userspace error */
 		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
-		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
-				       etnaviv_obj->sgt->nents,
+		dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
 			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
 		etnaviv_obj->last_cpu_prep_op = 0;
 	}
@@ -437,7 +429,7 @@
 }
 
 int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
-	struct timespec *timeout)
+	struct drm_etnaviv_timespec *timeout)
 {
 	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 
@@ -449,7 +441,7 @@
 	const char *type, struct seq_file *m)
 {
 	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
-		seq_printf(m, "\t%9s: %s %s seq %u\n",
+		seq_printf(m, "\t%9s: %s %s seq %llu\n",
 			   type,
 			   fence->ops->get_driver_name(fence),
 			   fence->ops->get_timeline_name(fence),
@@ -459,8 +451,8 @@
 static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {
 	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
-	struct reservation_object *robj = etnaviv_obj->resv;
-	struct reservation_object_list *fobj;
+	struct dma_resv *robj = obj->resv;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
 	unsigned long off = drm_vma_node_start(&obj->vma_node);
 
@@ -536,12 +528,14 @@
 
 	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
 				 obj_node) {
-		struct etnaviv_iommu *mmu = mapping->mmu;
+		struct etnaviv_iommu_context *context = mapping->context;
 
 		WARN_ON(mapping->use);
 
-		if (mmu)
-			etnaviv_iommu_unmap_gem(mmu, mapping);
+		if (context) {
+			etnaviv_iommu_unmap_gem(context, mapping);
+			etnaviv_iommu_context_put(context);
+		}
 
 		list_del(&mapping->obj_node);
 		kfree(mapping);
@@ -549,8 +543,6 @@
 
 	drm_gem_free_mmap_offset(obj);
 	etnaviv_obj->ops->release(etnaviv_obj);
-	if (etnaviv_obj->resv == &etnaviv_obj->_resv)
-		reservation_object_fini(&etnaviv_obj->_resv);
 	drm_gem_object_release(obj);
 
 	kfree(etnaviv_obj);
@@ -567,8 +559,7 @@
 }
 
 static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
-	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
-	struct drm_gem_object **obj)
+	const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
 {
 	struct etnaviv_gem_object *etnaviv_obj;
 	unsigned sz = sizeof(*etnaviv_obj);
@@ -596,12 +587,6 @@
 
 	etnaviv_obj->flags = flags;
 	etnaviv_obj->ops = ops;
-	if (robj) {
-		etnaviv_obj->resv = robj;
-	} else {
-		etnaviv_obj->resv = &etnaviv_obj->_resv;
-		reservation_object_init(&etnaviv_obj->_resv);
-	}
 
 	mutex_init(&etnaviv_obj->lock);
 	INIT_LIST_HEAD(&etnaviv_obj->vram_list);
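Note: the removed robj plumbing is redundant because the DRM core now
initializes an embedded reservation object for every GEM object. Roughly what
drm_gem_private_object_init() does (paraphrased from the core, not etnaviv
code):

    if (!obj->resv)
            obj->resv = &obj->_resv;        /* unless an importer set one */
    dma_resv_init(&obj->_resv);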
@@ -615,12 +600,13 @@
 int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 	u32 size, u32 flags, u32 *handle)
 {
+	struct etnaviv_drm_private *priv = dev->dev_private;
 	struct drm_gem_object *obj = NULL;
 	int ret;
 
 	size = PAGE_ALIGN(size);
 
-	ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
+	ret = etnaviv_gem_new_impl(dev, size, flags,
 				   &etnaviv_gem_shmem_ops, &obj);
 	if (ret)
 		goto fail;
@@ -628,23 +614,16 @@
 	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);
 
 	ret = drm_gem_object_init(dev, obj, size);
-	if (ret == 0) {
-		struct address_space *mapping;
-
-		/*
-		 * Our buffers are kept pinned, so allocating them
-		 * from the MOVABLE zone is a really bad idea, and
-		 * conflicts with CMA. See comments above new_inode()
-		 * why this is required _and_ expected if you're
-		 * going to pin these pages.
-		 */
-		mapping = obj->filp->f_mapping;
-		mapping_set_gfp_mask(mapping, GFP_HIGHUSER |
-				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
-	}
-
 	if (ret)
 		goto fail;
+
+	/*
+	 * Our buffers are kept pinned, so allocating them from the MOVABLE
+	 * zone is a really bad idea, and conflicts with CMA. See comments
+	 * above new_inode() why this is required _and_ expected if you're
+	 * going to pin these pages.
+	 */
+	mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);
 
 	etnaviv_gem_obj_add(dev, obj);
 
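Note: the GFP mask for shmem-backed buffers moves out of this function into
etnaviv_drm_private, so it can be chosen once at bind time and tightened when
the device has DMA addressing limits. Presumably the default remains the
previously hard-coded value; a sketch of the bind-time setup (an assumption,
not verbatim driver code):

    priv->shm_gfp_mask = GFP_HIGHUSER | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;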
@@ -652,19 +631,18 @@
 
 	/* drop reference from allocate - handle holds it now */
 fail:
-	drm_gem_object_put_unlocked(obj);
+	drm_gem_object_put(obj);
 
 	return ret;
 }
 
 int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
-	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
-	struct etnaviv_gem_object **res)
+	const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
 {
 	struct drm_gem_object *obj;
 	int ret;
 
-	ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
+	ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
 	if (ret)
 		return ret;
 
@@ -681,7 +659,7 @@
 	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
 	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
 
-	might_lock_read(&current->mm->mmap_sem);
+	might_lock_read(&current->mm->mmap_lock);
 
 	if (userptr->mm != current->mm)
 		return -EPERM;
@@ -695,10 +673,10 @@
 		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
 		struct page **pages = pvec + pinned;
 
-		ret = get_user_pages_fast(ptr, num_pages,
-					  !userptr->ro ? FOLL_WRITE : 0, pages);
+		ret = pin_user_pages_fast(ptr, num_pages,
+					  FOLL_WRITE | FOLL_FORCE, pages);
 		if (ret < 0) {
-			release_pages(pvec, pinned);
+			unpin_user_pages(pvec, pinned);
 			kvfree(pvec);
 			return ret;
 		}
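Note: pin_user_pages_fast() puts these pages under FOLL_PIN accounting, the
interface intended for long-term DMA pins, and such pages must be released
with unpin_user_pages() rather than release_pages() or put_page(), as the
following hunk does. The unconditional FOLL_WRITE | FOLL_FORCE presumably
forces an early copy-on-write break so the GPU never ends up writing to pages
still shared with another mapping. The pairing rule in sketch form (uaddr,
npages, pages are illustrative):

    int n = pin_user_pages_fast(uaddr, npages,
                                FOLL_WRITE | FOLL_FORCE, pages);

    if (n < 0)
            return n;               /* nothing pinned */

    /* ... DMA to/from the pinned pages ... */

    unpin_user_pages(pages, n);     /* never put_page() on pinned pages */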
@@ -722,7 +700,7 @@
 	if (etnaviv_obj->pages) {
 		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
 
-		release_pages(etnaviv_obj->pages, npages);
+		unpin_user_pages(etnaviv_obj->pages, npages);
 		kvfree(etnaviv_obj->pages);
 	}
 }
@@ -746,7 +724,7 @@
 	struct etnaviv_gem_object *etnaviv_obj;
 	int ret;
 
-	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
+	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
 				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
 	if (ret)
 		return ret;
@@ -762,6 +740,6 @@
 	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
 
 	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_put_unlocked(&etnaviv_obj->base);
+	drm_gem_object_put(&etnaviv_obj->base);
 	return ret;
 }
---|