 .. |  .. |
 23 |  23 |   * Authors: Dave Airlie
 24 |  24 |   *          Alex Deucher
 25 |  25 |   */
 26 |     | -#include <drm/drmP.h>
     |  26 | +
 27 |  27 |  #include <drm/amdgpu_drm.h>
 28 |  28 |  #include "amdgpu.h"
 29 |  29 |  #include "amdgpu_i2c.h"
 .. |  .. |
 32 |  32 |  #include "amdgpu_display.h"
 33 |  33 |  #include <asm/div64.h>
 34 |  34 |
     |  35 | +#include <linux/pci.h>
 35 |  36 |  #include <linux/pm_runtime.h>
 36 |  37 |  #include <drm/drm_crtc_helper.h>
 37 |  38 |  #include <drm/drm_edid.h>
 38 |  39 |  #include <drm/drm_gem_framebuffer_helper.h>
 39 |  40 |  #include <drm/drm_fb_helper.h>
     |  41 | +#include <drm/drm_vblank.h>
 40 |  42 |
 41 |  43 |  static void amdgpu_display_flip_callback(struct dma_fence *f,
 42 |  44 |                                           struct dma_fence_cb *cb)
 .. |  .. |
 91 |  93 |           * targeted by the flip
 92 |  94 |           */
 93 |  95 |          if (amdgpu_crtc->enabled &&
 94 |     | -            (amdgpu_display_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
     |  96 | +            (amdgpu_display_get_crtc_scanoutpos(adev_to_drm(adev), work->crtc_id, 0,
 95 |  97 |                                                  &vpos, &hpos, NULL, NULL,
 96 |  98 |                                                  &crtc->hwmode)
 97 |  99 |               & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
 98 | 100 |              (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
 99 | 101 |              (int)(work->target_vblank -
100 |     | -             amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) {
     | 102 | +             amdgpu_get_vblank_counter_kms(crtc)) > 0) {
101 | 103 |                  schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
102 | 104 |                  return;
103 | 105 |          }
 .. |  .. |
150 | 152 |                                  struct drm_modeset_acquire_ctx *ctx)
151 | 153 |  {
152 | 154 |          struct drm_device *dev = crtc->dev;
153 |     | -        struct amdgpu_device *adev = dev->dev_private;
     | 155 | +        struct amdgpu_device *adev = drm_to_adev(dev);
154 | 156 |          struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
155 | 157 |          struct drm_gem_object *obj;
156 | 158 |          struct amdgpu_flip_work *work;
 .. |  .. |
188 | 190 |                  goto cleanup;
189 | 191 |          }
190 | 192 |
191 |     | -        r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
192 |     | -        if (unlikely(r != 0)) {
193 |     | -                DRM_ERROR("failed to pin new abo buffer before flip\n");
194 |     | -                goto unreserve;
     | 193 | +        if (!adev->enable_virtual_display) {
     | 194 | +                r = amdgpu_bo_pin(new_abo,
     | 195 | +                                  amdgpu_display_supported_domains(adev, new_abo->flags));
     | 196 | +                if (unlikely(r != 0)) {
     | 197 | +                        DRM_ERROR("failed to pin new abo buffer before flip\n");
     | 198 | +                        goto unreserve;
     | 199 | +                }
195 | 200 |          }
196 | 201 |
197 | 202 |          r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
 .. |  .. |
200 | 205 |                  goto unpin;
201 | 206 |          }
202 | 207 |
203 |     | -        r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
     | 208 | +        r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
204 | 209 |                                                &work->shared_count,
205 | 210 |                                                &work->shared);
206 | 211 |          if (unlikely(r != 0)) {
 .. |  .. |
211 | 216 |          amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
212 | 217 |          amdgpu_bo_unreserve(new_abo);
213 | 218 |
214 |     | -        work->base = amdgpu_bo_gpu_offset(new_abo);
     | 219 | +        if (!adev->enable_virtual_display)
     | 220 | +                work->base = amdgpu_bo_gpu_offset(new_abo);
215 | 221 |          work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
216 |     | -                amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
     | 222 | +                amdgpu_get_vblank_counter_kms(crtc);
217 | 223 |
218 | 224 |          /* we borrow the event spin lock for protecting flip_wrok */
219 | 225 |          spin_lock_irqsave(&crtc->dev->event_lock, flags);
 .. |  .. |
242 | 248 |                  goto cleanup;
243 | 249 |          }
244 | 250 |  unpin:
245 |     | -        if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) {
246 |     | -                DRM_ERROR("failed to unpin new abo in error path\n");
247 |     | -        }
     | 251 | +        if (!adev->enable_virtual_display)
     | 252 | +                if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
     | 253 | +                        DRM_ERROR("failed to unpin new abo in error path\n");
     | 254 | +
248 | 255 |  unreserve:
249 | 256 |          amdgpu_bo_unreserve(new_abo);
250 | 257 |
 .. |  .. |
285 | 292 |
286 | 293 |          pm_runtime_mark_last_busy(dev->dev);
287 | 294 |
288 |     | -        adev = dev->dev_private;
     | 295 | +        adev = drm_to_adev(dev);
289 | 296 |          /* if we have active crtcs and we don't have a power ref,
290 | 297 |             take the current one */
291 | 298 |          if (active && !adev->have_disp_power_ref) {
 .. |  .. |
364 | 371 |          struct amdgpu_connector *amdgpu_connector;
365 | 372 |          struct drm_encoder *encoder;
366 | 373 |          struct amdgpu_encoder *amdgpu_encoder;
     | 374 | +        struct drm_connector_list_iter iter;
367 | 375 |          uint32_t devices;
368 | 376 |          int i = 0;
369 | 377 |
     | 378 | +        drm_connector_list_iter_begin(dev, &iter);
370 | 379 |          DRM_INFO("AMDGPU Display Connectors\n");
371 |     | -        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
     | 380 | +        drm_for_each_connector_iter(connector, &iter) {
372 | 381 |                  amdgpu_connector = to_amdgpu_connector(connector);
373 | 382 |                  DRM_INFO("Connector %d:\n", i);
374 | 383 |                  DRM_INFO(" %s\n", connector->name);
 .. |  .. |
432 | 441 |                  }
433 | 442 |                  i++;
434 | 443 |          }
     | 444 | +        drm_connector_list_iter_end(&iter);
435 | 445 |  }
436 | 446 |
437 | 447 |  /**
 .. |  .. |
490 | 500 |          .create_handle = drm_gem_fb_create_handle,
491 | 501 |  };
492 | 502 |
493 |     | -uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev)
     | 503 | +uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
     | 504 | +                                          uint64_t bo_flags)
494 | 505 |  {
495 | 506 |          uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
496 | 507 |
497 | 508 |  #if defined(CONFIG_DRM_AMD_DC)
498 |     | -        if (adev->asic_type >= CHIP_CARRIZO && adev->asic_type < CHIP_RAVEN &&
499 |     | -            adev->flags & AMD_IS_APU &&
500 |     | -            amdgpu_device_asic_has_dc_support(adev->asic_type))
501 |     | -                domain |= AMDGPU_GEM_DOMAIN_GTT;
     | 509 | +        /*
     | 510 | +         * if amdgpu_bo_support_uswc returns false it means that USWC mappings
     | 511 | +         * are not supported for this board. But this mapping is required
     | 512 | +         * to avoid a hang caused by placement of scanout BO in GTT on certain
     | 513 | +         * APUs. So force the BO placement to VRAM in case this architecture
     | 514 | +         * will not allow USWC mappings.
     | 515 | +         * Also, don't allow GTT domain if the BO doesn't have the USWC flag set.
     | 516 | +         */
     | 517 | +        if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
     | 518 | +            amdgpu_bo_support_uswc(bo_flags) &&
     | 519 | +            amdgpu_device_asic_has_dc_support(adev->asic_type)) {
     | 520 | +                switch (adev->asic_type) {
     | 521 | +                case CHIP_CARRIZO:
     | 522 | +                case CHIP_STONEY:
     | 523 | +                        domain |= AMDGPU_GEM_DOMAIN_GTT;
     | 524 | +                        break;
     | 525 | +                case CHIP_RAVEN:
     | 526 | +                        /* enable S/G on PCO and RV2 */
     | 527 | +                        if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
     | 528 | +                            (adev->apu_flags & AMD_APU_IS_PICASSO))
     | 529 | +                                domain |= AMDGPU_GEM_DOMAIN_GTT;
     | 530 | +                        break;
     | 531 | +                default:
     | 532 | +                        break;
     | 533 | +                }
     | 534 | +        }
502 | 535 |  #endif
503 | 536 |
504 | 537 |          return domain;
 .. |  .. |
544 | 577 |
545 | 578 |          amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
546 | 579 |          if (amdgpu_fb == NULL) {
547 |     | -                drm_gem_object_put_unlocked(obj);
     | 580 | +                drm_gem_object_put(obj);
548 | 581 |                  return ERR_PTR(-ENOMEM);
549 | 582 |          }
550 | 583 |
551 | 584 |          ret = amdgpu_display_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
552 | 585 |          if (ret) {
553 | 586 |                  kfree(amdgpu_fb);
554 |     | -                drm_gem_object_put_unlocked(obj);
     | 587 | +                drm_gem_object_put(obj);
555 | 588 |                  return ERR_PTR(ret);
556 | 589 |          }
557 | 590 |
 .. |  .. |
586 | 619 |          int sz;
587 | 620 |
588 | 621 |          adev->mode_info.coherent_mode_property =
589 |     | -                drm_property_create_range(adev->ddev, 0 , "coherent", 0, 1);
     | 622 | +                drm_property_create_range(adev_to_drm(adev), 0, "coherent", 0, 1);
590 | 623 |          if (!adev->mode_info.coherent_mode_property)
591 | 624 |                  return -ENOMEM;
592 | 625 |
593 | 626 |          adev->mode_info.load_detect_property =
594 |     | -                drm_property_create_range(adev->ddev, 0, "load detection", 0, 1);
     | 627 | +                drm_property_create_range(adev_to_drm(adev), 0, "load detection", 0, 1);
595 | 628 |          if (!adev->mode_info.load_detect_property)
596 | 629 |                  return -ENOMEM;
597 | 630 |
598 |     | -        drm_mode_create_scaling_mode_property(adev->ddev);
     | 631 | +        drm_mode_create_scaling_mode_property(adev_to_drm(adev));
599 | 632 |
600 | 633 |          sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
601 | 634 |          adev->mode_info.underscan_property =
602 |     | -                drm_property_create_enum(adev->ddev, 0,
603 |     | -                                         "underscan",
604 |     | -                                         amdgpu_underscan_enum_list, sz);
     | 635 | +                drm_property_create_enum(adev_to_drm(adev), 0,
     | 636 | +                                         "underscan",
     | 637 | +                                         amdgpu_underscan_enum_list, sz);
605 | 638 |
606 | 639 |          adev->mode_info.underscan_hborder_property =
607 |     | -                drm_property_create_range(adev->ddev, 0,
608 |     | -                                          "underscan hborder", 0, 128);
     | 640 | +                drm_property_create_range(adev_to_drm(adev), 0,
     | 641 | +                                          "underscan hborder", 0, 128);
609 | 642 |          if (!adev->mode_info.underscan_hborder_property)
610 | 643 |                  return -ENOMEM;
611 | 644 |
612 | 645 |          adev->mode_info.underscan_vborder_property =
613 |     | -                drm_property_create_range(adev->ddev, 0,
614 |     | -                                          "underscan vborder", 0, 128);
     | 646 | +                drm_property_create_range(adev_to_drm(adev), 0,
     | 647 | +                                          "underscan vborder", 0, 128);
615 | 648 |          if (!adev->mode_info.underscan_vborder_property)
616 | 649 |                  return -ENOMEM;
617 | 650 |
618 | 651 |          sz = ARRAY_SIZE(amdgpu_audio_enum_list);
619 | 652 |          adev->mode_info.audio_property =
620 |     | -                drm_property_create_enum(adev->ddev, 0,
     | 653 | +                drm_property_create_enum(adev_to_drm(adev), 0,
621 | 654 |                                           "audio",
622 | 655 |                                           amdgpu_audio_enum_list, sz);
623 | 656 |
624 | 657 |          sz = ARRAY_SIZE(amdgpu_dither_enum_list);
625 | 658 |          adev->mode_info.dither_property =
626 |     | -                drm_property_create_enum(adev->ddev, 0,
     | 659 | +                drm_property_create_enum(adev_to_drm(adev), 0,
627 | 660 |                                           "dither",
628 | 661 |                                           amdgpu_dither_enum_list, sz);
629 | 662 |
630 | 663 |          if (amdgpu_device_has_dc_support(adev)) {
631 |     | -                adev->mode_info.max_bpc_property =
632 |     | -                        drm_property_create_range(adev->ddev, 0, "max bpc", 8, 16);
633 |     | -                if (!adev->mode_info.max_bpc_property)
     | 664 | +                adev->mode_info.abm_level_property =
     | 665 | +                        drm_property_create_range(adev_to_drm(adev), 0,
     | 666 | +                                                  "abm level", 0, 4);
     | 667 | +                if (!adev->mode_info.abm_level_property)
634 | 668 |                          return -ENOMEM;
635 | 669 |          }
636 | 670 |
 .. |  .. |
668 | 702 |          struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
669 | 703 |          struct amdgpu_encoder *amdgpu_encoder;
670 | 704 |          struct drm_connector *connector;
671 |     | -        struct amdgpu_connector *amdgpu_connector;
672 | 705 |          u32 src_v = 1, dst_v = 1;
673 | 706 |          u32 src_h = 1, dst_h = 1;
674 | 707 |
 .. |  .. |
680 | 713 |                          continue;
681 | 714 |                  amdgpu_encoder = to_amdgpu_encoder(encoder);
682 | 715 |                  connector = amdgpu_get_connector_for_encoder(encoder);
683 |     | -                amdgpu_connector = to_amdgpu_connector(connector);
684 | 716 |
685 | 717 |                  /* set scaling */
686 | 718 |                  if (amdgpu_encoder->rmx_type == RMX_OFF)
 .. |  .. |
781 | 813 |          int vbl_start, vbl_end, vtotal, ret = 0;
782 | 814 |          bool in_vbl = true;
783 | 815 |
784 |     | -        struct amdgpu_device *adev = dev->dev_private;
     | 816 | +        struct amdgpu_device *adev = drm_to_adev(dev);
785 | 817 |
786 | 818 |          /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
787 | 819 |
 .. |  .. |
858 | 890 |          /* Inside "upper part" of vblank area? Apply corrective offset if so: */
859 | 891 |          if (in_vbl && (*vpos >= vbl_start)) {
860 | 892 |                  vtotal = mode->crtc_vtotal;
861 |     | -                *vpos = *vpos - vtotal;
     | 893 | +
     | 894 | +                /* With variable refresh rate displays the vpos can exceed
     | 895 | +                 * the vtotal value. Clamp to 0 to return -vbl_end instead
     | 896 | +                 * of guessing the remaining number of lines until scanout.
     | 897 | +                 */
     | 898 | +                *vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0;
862 | 899 |          }
863 | 900 |
864 | 901 |          /* Correct for shifted end of vbl at vbl_end. */
 .. |  .. |
889 | 926 |                  return AMDGPU_CRTC_IRQ_NONE;
890 | 927 |          }
891 | 928 |  }
     | 929 | +
     | 930 | +bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
     | 931 | +                        bool in_vblank_irq, int *vpos,
     | 932 | +                        int *hpos, ktime_t *stime, ktime_t *etime,
     | 933 | +                        const struct drm_display_mode *mode)
     | 934 | +{
     | 935 | +        struct drm_device *dev = crtc->dev;
     | 936 | +        unsigned int pipe = crtc->index;
     | 937 | +
     | 938 | +        return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
     | 939 | +                                                  stime, etime, mode);
     | 940 | +}
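For reference, the new amdgpu_crtc_get_scanout_position() wrapper has the prototype of the get_scanout_position hook in struct drm_crtc_helper_funcs, so a CRTC implementation can register it there and let drm_crtc_vblank_helper_get_vblank_timestamp() derive vblank timestamps from it, while the reworked amdgpu_get_vblank_counter_kms(crtc) matches drm_crtc_funcs.get_vblank_counter. A minimal sketch of that wiring is shown below; it is not part of this diff, and the dce_v10_0_* table names are only an assumed example of where such hooks would live:

/* Illustrative sketch only -- assumed wiring, not taken from this diff. */
static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
        /* ... other callbacks elided ... */
        .get_vblank_counter = amdgpu_get_vblank_counter_kms,
        .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};

static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = {
        /* ... other helpers elided ... */
        .get_scanout_position = amdgpu_crtc_get_scanout_position,
};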