2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
....@@ -23,21 +23,33 @@
2323 *
2424 */
2525
26
+/* The caprices of the preprocessor require that this be declared right here */
27
+#define CREATE_TRACE_POINTS
28
+
2629 #include "dm_services_types.h"
2730 #include "dc.h"
2831 #include "dc/inc/core_types.h"
32
+#include "dal_asic_id.h"
33
+#include "dmub/dmub_srv.h"
34
+#include "dc/inc/hw/dmcu.h"
35
+#include "dc/inc/hw/abm.h"
36
+#include "dc/dc_dmub_srv.h"
2937
3038 #include "vid.h"
3139 #include "amdgpu.h"
3240 #include "amdgpu_display.h"
41
+#include "amdgpu_ucode.h"
3342 #include "atom.h"
3443 #include "amdgpu_dm.h"
44
+#ifdef CONFIG_DRM_AMD_DC_HDCP
45
+#include "amdgpu_dm_hdcp.h"
46
+#include <drm/drm_hdcp.h>
47
+#endif
3548 #include "amdgpu_pm.h"
3649
3750 #include "amd_shared.h"
3851 #include "amdgpu_dm_irq.h"
3952 #include "dm_helpers.h"
40
-#include "dm_services_types.h"
4153 #include "amdgpu_dm_mst_types.h"
4254 #if defined(CONFIG_DEBUG_FS)
4355 #include "amdgpu_dm_debugfs.h"
....@@ -50,16 +62,23 @@
5062 #include <linux/version.h>
5163 #include <linux/types.h>
5264 #include <linux/pm_runtime.h>
65
+#include <linux/pci.h>
66
+#include <linux/firmware.h>
67
+#include <linux/component.h>
5368
54
-#include <drm/drmP.h>
5569 #include <drm/drm_atomic.h>
70
+#include <drm/drm_atomic_uapi.h>
5671 #include <drm/drm_atomic_helper.h>
5772 #include <drm/drm_dp_mst_helper.h>
5873 #include <drm/drm_fb_helper.h>
74
+#include <drm/drm_fourcc.h>
5975 #include <drm/drm_edid.h>
76
+#include <drm/drm_vblank.h>
77
+#include <drm/drm_audio_component.h>
78
+#include <drm/drm_hdcp.h>
6079
61
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
62
-#include "ivsrcid/irqsrcs_dcn_1_0.h"
80
+#if defined(CONFIG_DRM_AMD_DC_DCN)
81
+#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
6382
6483 #include "dcn/dcn_1_0_offset.h"
6584 #include "dcn/dcn_1_0_sh_mask.h"
....@@ -70,14 +89,84 @@
7089 #endif
7190
7291 #include "modules/inc/mod_freesync.h"
92
+#include "modules/power/power_helpers.h"
93
+#include "modules/inc/mod_info_packet.h"
7394
74
-#include "i2caux_interface.h"
95
+#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96
+MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
98
+#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99
+MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100
+#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101
+MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
102
+#endif
103
+#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
104
+MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
105
+
106
+#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
107
+MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
108
+
109
+#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
110
+MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
111
+
112
+/* Number of bytes in PSP header for firmware. */
113
+#define PSP_HEADER_BYTES 0x100
114
+
115
+/* Number of bytes in PSP footer for firmware. */
116
+#define PSP_FOOTER_BYTES 0x100
117
+
118
+/**
119
+ * DOC: overview
120
+ *
121
+ * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
122
+ * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
123
+ * requests into DC requests, and DC responses into DRM responses.
124
+ *
125
+ * The root control structure is &struct amdgpu_display_manager.
126
+ */
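
A minimal sketch of the liaison pattern described above (illustrative only, not part of this patch; the wrapper name is hypothetical, while dc_commit_state() is the real DC entry point used elsewhere in this file):

	/* Hypothetical sketch: DRM atomic state has already been translated
	 * into a dc_state; DC then consumes the converted request. Locking
	 * and error handling omitted.
	 */
	static void dm_liaison_sketch(struct amdgpu_display_manager *dm,
				      struct dc_state *dc_state)
	{
		dc_commit_state(dm->dc, dc_state);
	}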
75127
76128 /* basic init/fini API */
77129 static int amdgpu_dm_init(struct amdgpu_device *adev);
78130 static void amdgpu_dm_fini(struct amdgpu_device *adev);
79131
80
-/* initializes drm_device display related structures, based on the information
132
+static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
133
+{
134
+ switch (link->dpcd_caps.dongle_type) {
135
+ case DISPLAY_DONGLE_NONE:
136
+ return DRM_MODE_SUBCONNECTOR_Native;
137
+ case DISPLAY_DONGLE_DP_VGA_CONVERTER:
138
+ return DRM_MODE_SUBCONNECTOR_VGA;
139
+ case DISPLAY_DONGLE_DP_DVI_CONVERTER:
140
+ case DISPLAY_DONGLE_DP_DVI_DONGLE:
141
+ return DRM_MODE_SUBCONNECTOR_DVID;
142
+ case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
143
+ case DISPLAY_DONGLE_DP_HDMI_DONGLE:
144
+ return DRM_MODE_SUBCONNECTOR_HDMIA;
145
+ case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
146
+ default:
147
+ return DRM_MODE_SUBCONNECTOR_Unknown;
148
+ }
149
+}
150
+
151
+static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
152
+{
153
+ struct dc_link *link = aconnector->dc_link;
154
+ struct drm_connector *connector = &aconnector->base;
155
+ enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
156
+
157
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
158
+ return;
159
+
160
+ if (aconnector->dc_sink)
161
+ subconnector = get_subconnector_type(link);
162
+
163
+ drm_object_property_set_value(&connector->base,
164
+ connector->dev->mode_config.dp_subconnector_property,
165
+ subconnector);
166
+}
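
Hedged usage sketch (not part of this patch): update_subconnector_property() is the kind of helper one would call from the detect/hotplug path once aconnector->dc_sink has been (re)assigned, so the exposed DP subconnector property tracks the attached dongle:

	/* Hypothetical caller: refresh the property after sink detection. */
	static void dm_after_detect_sketch(struct amdgpu_dm_connector *aconnector)
	{
		/* ...dc_sink was just set or cleared by detection code... */
		update_subconnector_property(aconnector);
	}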
167
+
168
+/*
169
+ * initializes drm_device display related structures, based on the information
81170 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
82171 * drm_encoder, drm_mode_config
83172 *
....@@ -87,12 +176,10 @@
87176 /* removes and deallocates the drm structures, created by the above function */
88177 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
89178
90
-static void
91
-amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
92
-
93179 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
94
- struct amdgpu_plane *aplane,
95
- unsigned long possible_crtcs);
180
+ struct drm_plane *plane,
181
+ unsigned long possible_crtcs,
182
+ const struct dc_plane_cap *plane_cap);
96183 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
97184 struct drm_plane *plane,
98185 uint32_t link_index);
....@@ -115,30 +202,14 @@
115202 static int amdgpu_dm_atomic_check(struct drm_device *dev,
116203 struct drm_atomic_state *state);
117204
205
+static void handle_cursor_update(struct drm_plane *plane,
206
+ struct drm_plane_state *old_plane_state);
118207
119
-
120
-
121
-static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = {
122
- DRM_PLANE_TYPE_PRIMARY,
123
- DRM_PLANE_TYPE_PRIMARY,
124
- DRM_PLANE_TYPE_PRIMARY,
125
- DRM_PLANE_TYPE_PRIMARY,
126
- DRM_PLANE_TYPE_PRIMARY,
127
- DRM_PLANE_TYPE_PRIMARY,
128
-};
129
-
130
-static const enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = {
131
- DRM_PLANE_TYPE_PRIMARY,
132
- DRM_PLANE_TYPE_PRIMARY,
133
- DRM_PLANE_TYPE_PRIMARY,
134
- DRM_PLANE_TYPE_OVERLAY,/* YUV Capable Underlay */
135
-};
136
-
137
-static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = {
138
- DRM_PLANE_TYPE_PRIMARY,
139
- DRM_PLANE_TYPE_PRIMARY,
140
- DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
141
-};
208
+static void amdgpu_dm_set_psr_caps(struct dc_link *link);
209
+static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
210
+static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
211
+static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
212
+static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
142213
143214 /*
144215 * dm_vblank_get_counter
....@@ -159,17 +230,14 @@
159230 return 0;
160231 else {
161232 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
162
- struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
163
- acrtc->base.state);
164233
165
-
166
- if (acrtc_state->stream == NULL) {
234
+ if (acrtc->dm_irq_params.stream == NULL) {
167235 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
168236 crtc);
169237 return 0;
170238 }
171239
172
- return dc_stream_get_vblank_counter(acrtc_state->stream);
240
+ return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
173241 }
174242 }
175243
....@@ -182,10 +250,8 @@
182250 return -EINVAL;
183251 else {
184252 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
185
- struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
186
- acrtc->base.state);
187253
188
- if (acrtc_state->stream == NULL) {
254
+ if (acrtc->dm_irq_params.stream == NULL) {
189255 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
190256 crtc);
191257 return 0;
....@@ -195,7 +261,7 @@
195261 * TODO rework base driver to use values directly.
196262 * for now parse it back into reg-format
197263 */
198
- dc_stream_get_scanoutpos(acrtc_state->stream,
264
+ dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
199265 &v_blank_start,
200266 &v_blank_end,
201267 &h_position,
....@@ -235,14 +301,10 @@
235301 get_crtc_by_otg_inst(struct amdgpu_device *adev,
236302 int otg_inst)
237303 {
238
- struct drm_device *dev = adev->ddev;
304
+ struct drm_device *dev = adev_to_drm(adev);
239305 struct drm_crtc *crtc;
240306 struct amdgpu_crtc *amdgpu_crtc;
241307
242
- /*
243
- * following if is check inherited from both functions where this one is
244
- * used now. Need to be checked why it could happen.
245
- */
246308 if (otg_inst == -1) {
247309 WARN_ON(1);
248310 return adev->mode_info.crtcs[0];
....@@ -258,23 +320,47 @@
258320 return NULL;
259321 }
260322
323
+static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
324
+{
325
+ return acrtc->dm_irq_params.freesync_config.state ==
326
+ VRR_STATE_ACTIVE_VARIABLE ||
327
+ acrtc->dm_irq_params.freesync_config.state ==
328
+ VRR_STATE_ACTIVE_FIXED;
329
+}
330
+
331
+static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
332
+{
333
+ return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
334
+ dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
335
+}
336
+
337
+/**
338
+ * dm_pflip_high_irq() - Handle pageflip interrupt
339
+ * @interrupt_params: ignored
340
+ *
341
+ * Handles the pageflip interrupt by notifying all interested parties
342
+ * that the pageflip has been completed.
343
+ */
261344 static void dm_pflip_high_irq(void *interrupt_params)
262345 {
263346 struct amdgpu_crtc *amdgpu_crtc;
264347 struct common_irq_params *irq_params = interrupt_params;
265348 struct amdgpu_device *adev = irq_params->adev;
266349 unsigned long flags;
350
+ struct drm_pending_vblank_event *e;
351
+ uint32_t vpos, hpos, v_blank_start, v_blank_end;
352
+ bool vrr_active;
267353
268354 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
269355
270356 /* IRQ could occur when in initial stage */
271
- /*TODO work and BO cleanup */
357
+ /* TODO work and BO cleanup */
272358 if (amdgpu_crtc == NULL) {
273359 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
274360 return;
275361 }
276362
277
- spin_lock_irqsave(&adev->ddev->event_lock, flags);
363
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
278364
279365 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
280366 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
....@@ -282,47 +368,201 @@
282368 AMDGPU_FLIP_SUBMITTED,
283369 amdgpu_crtc->crtc_id,
284370 amdgpu_crtc);
285
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
371
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
286372 return;
287373 }
288374
375
+ /* page flip completed. */
376
+ e = amdgpu_crtc->event;
377
+ amdgpu_crtc->event = NULL;
289378
290
- /* wakeup usersapce */
291
- if (amdgpu_crtc->event) {
292
- /* Update to correct count/ts if racing with vblank irq */
293
- drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
294
-
295
- drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
296
-
297
- /* page flip completed. clean up */
298
- amdgpu_crtc->event = NULL;
299
-
300
- } else
379
+ if (!e)
301380 WARN_ON(1);
302381
382
+ vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
383
+
384
+ /* Fixed refresh rate, or VRR scanout position outside front-porch? */
385
+ if (!vrr_active ||
386
+ !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
387
+ &v_blank_end, &hpos, &vpos) ||
388
+ (vpos < v_blank_start)) {
389
+ /* Update to correct count and vblank timestamp if racing with
390
+ * vblank irq. This also updates to the correct vblank timestamp
391
+ * even in VRR mode, as scanout is past the front-porch atm.
392
+ */
393
+ drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
394
+
395
+ /* Wake up userspace by sending the pageflip event with proper
396
+ * count and timestamp of vblank of flip completion.
397
+ */
398
+ if (e) {
399
+ drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
400
+
401
+ /* Event sent, so done with vblank for this flip */
402
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
403
+ }
404
+ } else if (e) {
405
+ /* VRR active and inside front-porch: vblank count and
406
+ * timestamp for pageflip event will only be up to date after
407
+ * drm_crtc_handle_vblank() has been executed from late vblank
408
+ * irq handler after start of back-porch (vline 0). We queue the
409
+ * pageflip event for send-out by drm_crtc_handle_vblank() with
410
+ * updated timestamp and count, once it runs after us.
411
+ *
412
+ * We need to open-code this instead of using the helper
413
+ * drm_crtc_arm_vblank_event(), as that helper would
414
+ * call drm_crtc_accurate_vblank_count(), which we must
415
+ * not call in VRR mode while we are in front-porch!
416
+ */
417
+
418
+ /* sequence will be replaced by real count during send-out. */
419
+ e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
420
+ e->pipe = amdgpu_crtc->crtc_id;
421
+
422
+ list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
423
+ e = NULL;
424
+ }
425
+
426
+ /* Keep track of vblank of this flip for flip throttling. We use the
427
+ * cooked hw counter, as that one incremented at start of this vblank
428
+ * of pageflip completion, so last_flip_vblank is the forbidden count
429
+ * for queueing new pageflips if vsync + VRR is enabled.
430
+ */
431
+ amdgpu_crtc->dm_irq_params.last_flip_vblank =
432
+ amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
433
+
303434 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
304
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
435
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
305436
306
- DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
307
- __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);
308
-
309
- drm_crtc_vblank_put(&amdgpu_crtc->base);
437
+ DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
438
+ amdgpu_crtc->crtc_id, amdgpu_crtc,
439
+ vrr_active, (int) !e);
310440 }
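
For contrast, a sketch (not part of this patch) of what the open-coded front-porch path above replaces: in the fixed-refresh case the stock DRM helper performs the same queue-for-vblank bookkeeping, but it samples the vblank counter via drm_crtc_accurate_vblank_count(), which the comment above notes is unsafe while scanout sits inside the VRR front-porch:

	/* Non-VRR-safe equivalent of the open-coded event queueing;
	 * like the handler above, the caller must hold dev->event_lock.
	 */
	static void dm_arm_flip_event_sketch(struct amdgpu_crtc *acrtc,
					     struct drm_pending_vblank_event *e)
	{
		drm_crtc_arm_vblank_event(&acrtc->base, e);
	}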
311441
442
+static void dm_vupdate_high_irq(void *interrupt_params)
443
+{
444
+ struct common_irq_params *irq_params = interrupt_params;
445
+ struct amdgpu_device *adev = irq_params->adev;
446
+ struct amdgpu_crtc *acrtc;
447
+ unsigned long flags;
448
+ int vrr_active;
449
+
450
+ acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
451
+
452
+ if (acrtc) {
453
+ vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
454
+
455
+ DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
456
+ acrtc->crtc_id,
457
+ vrr_active);
458
+
459
+ /* Core vblank handling is done here after end of front-porch in
460
+ * vrr mode, as vblank timestamping will give valid results
461
+ * while now done after front-porch. This will also deliver
462
+ * page-flip completion events that have been queued to us
463
+ * if a pageflip happened inside front-porch.
464
+ */
465
+ if (vrr_active) {
466
+ drm_crtc_handle_vblank(&acrtc->base);
467
+
468
+ /* BTR processing for pre-DCE12 ASICs */
469
+ if (acrtc->dm_irq_params.stream &&
470
+ adev->family < AMDGPU_FAMILY_AI) {
471
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
472
+ mod_freesync_handle_v_update(
473
+ adev->dm.freesync_module,
474
+ acrtc->dm_irq_params.stream,
475
+ &acrtc->dm_irq_params.vrr_params);
476
+
477
+ dc_stream_adjust_vmin_vmax(
478
+ adev->dm.dc,
479
+ acrtc->dm_irq_params.stream,
480
+ &acrtc->dm_irq_params.vrr_params.adjust);
481
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
482
+ }
483
+ }
484
+ }
485
+}
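
An illustrative timing sketch (inferred from the comments above, not normative) of how vblank work is split between the two handlers:

	/*
	 *  active scanout | front porch (variable in VRR) | back porch ...
	 *  ---------------+-------------------------------+------------
	 *  Fixed refresh: dm_crtc_high_irq handles the vblank as soon as
	 *  it fires. VRR: vblank handling (and any queued pageflip events)
	 *  is deferred to dm_vupdate_high_irq, which runs after the front
	 *  porch ends, when vblank timestamps are valid again.
	 */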
486
+
487
+/**
488
+ * dm_crtc_high_irq() - Handles CRTC interrupt
489
+ * @interrupt_params: used for determining the CRTC instance
490
+ *
491
+ * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
492
+ * event handler.
493
+ */
312494 static void dm_crtc_high_irq(void *interrupt_params)
313495 {
314496 struct common_irq_params *irq_params = interrupt_params;
315497 struct amdgpu_device *adev = irq_params->adev;
316
- uint8_t crtc_index = 0;
317498 struct amdgpu_crtc *acrtc;
499
+ unsigned long flags;
500
+ int vrr_active;
318501
319502 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
503
+ if (!acrtc)
504
+ return;
320505
321
- if (acrtc)
322
- crtc_index = acrtc->crtc_id;
506
+ vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
323507
324
- drm_handle_vblank(adev->ddev, crtc_index);
508
+ DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
509
+ vrr_active, acrtc->dm_irq_params.active_planes);
510
+
511
+ /*
512
+ * Core vblank handling at start of front-porch is only possible
513
+ * in non-vrr mode, as only there vblank timestamping will give
514
+ * valid results while done in front-porch. Otherwise defer it
515
+ * to dm_vupdate_high_irq after end of front-porch.
516
+ */
517
+ if (!vrr_active)
518
+ drm_crtc_handle_vblank(&acrtc->base);
519
+
520
+ /*
521
+ * Following stuff must happen at start of vblank, for crc
522
+ * computation and below-the-range btr support in vrr mode.
523
+ */
325524 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
525
+
526
+ /* BTR updates need to happen before VUPDATE on Vega and above. */
527
+ if (adev->family < AMDGPU_FAMILY_AI)
528
+ return;
529
+
530
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
531
+
532
+ if (acrtc->dm_irq_params.stream &&
533
+ acrtc->dm_irq_params.vrr_params.supported &&
534
+ acrtc->dm_irq_params.freesync_config.state ==
535
+ VRR_STATE_ACTIVE_VARIABLE) {
536
+ mod_freesync_handle_v_update(adev->dm.freesync_module,
537
+ acrtc->dm_irq_params.stream,
538
+ &acrtc->dm_irq_params.vrr_params);
539
+
540
+ dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
541
+ &acrtc->dm_irq_params.vrr_params.adjust);
542
+ }
543
+
544
+ /*
545
+ * If there aren't any active_planes then DCH HUBP may be clock-gated.
546
+ * In that case, pageflip completion interrupts won't fire and pageflip
547
+ * completion events won't get delivered. Prevent this by sending
548
+ * pending pageflip events from here if a flip is still pending.
549
+ *
550
+ * If any planes are enabled, use dm_pflip_high_irq() instead, to
551
+ * avoid race conditions between flip programming and completion,
552
+ * which could cause too early flip completion events.
553
+ */
554
+ if (adev->family >= AMDGPU_FAMILY_RV &&
555
+ acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
556
+ acrtc->dm_irq_params.active_planes == 0) {
557
+ if (acrtc->event) {
558
+ drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
559
+ acrtc->event = NULL;
560
+ drm_crtc_vblank_put(&acrtc->base);
561
+ }
562
+ acrtc->pflip_status = AMDGPU_FLIP_NONE;
563
+ }
564
+
565
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
326566 }
327567
328568 static int dm_set_clockgating_state(void *handle,
....@@ -340,20 +580,12 @@
340580 /* Prototypes of private functions */
341581 static int dm_early_init(void* handle);
342582
343
-static void hotplug_notify_work_func(struct work_struct *work)
344
-{
345
- struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
346
- struct drm_device *dev = dm->ddev;
347
-
348
- drm_kms_helper_hotplug_event(dev);
349
-}
350
-
351583 /* Allocate memory for FBC compressed data */
352584 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
353585 {
354586 struct drm_device *dev = connector->dev;
355
- struct amdgpu_device *adev = dev->dev_private;
356
- struct dm_comressor_info *compressor = &adev->dm.compressor;
587
+ struct amdgpu_device *adev = drm_to_adev(dev);
588
+ struct dm_compressor_info *compressor = &adev->dm.compressor;
357589 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
358590 struct drm_display_mode *mode;
359591 unsigned long max_size = 0;
....@@ -389,19 +621,357 @@
389621
390622 }
391623
624
+static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
625
+ int pipe, bool *enabled,
626
+ unsigned char *buf, int max_bytes)
627
+{
628
+ struct drm_device *dev = dev_get_drvdata(kdev);
629
+ struct amdgpu_device *adev = drm_to_adev(dev);
630
+ struct drm_connector *connector;
631
+ struct drm_connector_list_iter conn_iter;
632
+ struct amdgpu_dm_connector *aconnector;
633
+ int ret = 0;
392634
393
-/* Init display KMS
394
- *
395
- * Returns 0 on success
396
- */
635
+ *enabled = false;
636
+
637
+ mutex_lock(&adev->dm.audio_lock);
638
+
639
+ drm_connector_list_iter_begin(dev, &conn_iter);
640
+ drm_for_each_connector_iter(connector, &conn_iter) {
641
+ aconnector = to_amdgpu_dm_connector(connector);
642
+ if (aconnector->audio_inst != port)
643
+ continue;
644
+
645
+ *enabled = true;
646
+ ret = drm_eld_size(connector->eld);
647
+ memcpy(buf, connector->eld, min(max_bytes, ret));
648
+
649
+ break;
650
+ }
651
+ drm_connector_list_iter_end(&conn_iter);
652
+
653
+ mutex_unlock(&adev->dm.audio_lock);
654
+
655
+ DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
656
+
657
+ return ret;
658
+}
659
+
660
+static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
661
+ .get_eld = amdgpu_dm_audio_component_get_eld,
662
+};
663
+
664
+static int amdgpu_dm_audio_component_bind(struct device *kdev,
665
+ struct device *hda_kdev, void *data)
666
+{
667
+ struct drm_device *dev = dev_get_drvdata(kdev);
668
+ struct amdgpu_device *adev = drm_to_adev(dev);
669
+ struct drm_audio_component *acomp = data;
670
+
671
+ acomp->ops = &amdgpu_dm_audio_component_ops;
672
+ acomp->dev = kdev;
673
+ adev->dm.audio_component = acomp;
674
+
675
+ return 0;
676
+}
677
+
678
+static void amdgpu_dm_audio_component_unbind(struct device *kdev,
679
+ struct device *hda_kdev, void *data)
680
+{
681
+ struct drm_device *dev = dev_get_drvdata(kdev);
682
+ struct amdgpu_device *adev = drm_to_adev(dev);
683
+ struct drm_audio_component *acomp = data;
684
+
685
+ acomp->ops = NULL;
686
+ acomp->dev = NULL;
687
+ adev->dm.audio_component = NULL;
688
+}
689
+
690
+static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
691
+ .bind = amdgpu_dm_audio_component_bind,
692
+ .unbind = amdgpu_dm_audio_component_unbind,
693
+};
694
+
695
+static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
696
+{
697
+ int i, ret;
698
+
699
+ if (!amdgpu_audio)
700
+ return 0;
701
+
702
+ adev->mode_info.audio.enabled = true;
703
+
704
+ adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
705
+
706
+ for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
707
+ adev->mode_info.audio.pin[i].channels = -1;
708
+ adev->mode_info.audio.pin[i].rate = -1;
709
+ adev->mode_info.audio.pin[i].bits_per_sample = -1;
710
+ adev->mode_info.audio.pin[i].status_bits = 0;
711
+ adev->mode_info.audio.pin[i].category_code = 0;
712
+ adev->mode_info.audio.pin[i].connected = false;
713
+ adev->mode_info.audio.pin[i].id =
714
+ adev->dm.dc->res_pool->audios[i]->inst;
715
+ adev->mode_info.audio.pin[i].offset = 0;
716
+ }
717
+
718
+ ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
719
+ if (ret < 0)
720
+ return ret;
721
+
722
+ adev->dm.audio_registered = true;
723
+
724
+ return 0;
725
+}
726
+
727
+static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
728
+{
729
+ if (!amdgpu_audio)
730
+ return;
731
+
732
+ if (!adev->mode_info.audio.enabled)
733
+ return;
734
+
735
+ if (adev->dm.audio_registered) {
736
+ component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
737
+ adev->dm.audio_registered = false;
738
+ }
739
+
740
+ /* TODO: Disable audio? */
741
+
742
+ adev->mode_info.audio.enabled = false;
743
+}
744
+
745
+static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
746
+{
747
+ struct drm_audio_component *acomp = adev->dm.audio_component;
748
+
749
+ if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
750
+ DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
751
+
752
+ acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
753
+ pin, -1);
754
+ }
755
+}
756
+
757
+static int dm_dmub_hw_init(struct amdgpu_device *adev)
758
+{
759
+ const struct dmcub_firmware_header_v1_0 *hdr;
760
+ struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
761
+ struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
762
+ const struct firmware *dmub_fw = adev->dm.dmub_fw;
763
+ struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
764
+ struct abm *abm = adev->dm.dc->res_pool->abm;
765
+ struct dmub_srv_hw_params hw_params;
766
+ enum dmub_status status;
767
+ const unsigned char *fw_inst_const, *fw_bss_data;
768
+ uint32_t i, fw_inst_const_size, fw_bss_data_size;
769
+ bool has_hw_support;
770
+
771
+ if (!dmub_srv)
772
+ /* DMUB isn't supported on the ASIC. */
773
+ return 0;
774
+
775
+ if (!fb_info) {
776
+ DRM_ERROR("No framebuffer info for DMUB service.\n");
777
+ return -EINVAL;
778
+ }
779
+
780
+ if (!dmub_fw) {
781
+ /* Firmware required for DMUB support. */
782
+ DRM_ERROR("No firmware provided for DMUB.\n");
783
+ return -EINVAL;
784
+ }
785
+
786
+ status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
787
+ if (status != DMUB_STATUS_OK) {
788
+ DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
789
+ return -EINVAL;
790
+ }
791
+
792
+ if (!has_hw_support) {
793
+ DRM_INFO("DMUB unsupported on ASIC\n");
794
+ return 0;
795
+ }
796
+
797
+ hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
798
+
799
+ fw_inst_const = dmub_fw->data +
800
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
801
+ PSP_HEADER_BYTES;
802
+
803
+ fw_bss_data = dmub_fw->data +
804
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
805
+ le32_to_cpu(hdr->inst_const_bytes);
806
+
807
+ /* Copy firmware and bios info into FB memory. */
808
+ fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
809
+ PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
810
+
811
+ fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
812
+
813
+ /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
814
+ * amdgpu_ucode_init_single_fw will load dmub firmware
815
+ * fw_inst_const part to cw0; otherwise, the firmware back door load
816
+ * will be done by dm_dmub_hw_init
817
+ */
818
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
819
+ memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
820
+ fw_inst_const_size);
821
+ }
822
+
823
+ if (fw_bss_data_size)
824
+ memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
825
+ fw_bss_data, fw_bss_data_size);
826
+
827
+ /* Copy firmware bios info into FB memory. */
828
+ memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
829
+ adev->bios_size);
830
+
831
+ /* Reset regions that need to be reset. */
832
+ memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
833
+ fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
834
+
835
+ memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
836
+ fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
837
+
838
+ memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
839
+ fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
840
+
841
+ /* Initialize hardware. */
842
+ memset(&hw_params, 0, sizeof(hw_params));
843
+ hw_params.fb_base = adev->gmc.fb_start;
844
+ hw_params.fb_offset = adev->gmc.aper_base;
845
+
846
+ /* backdoor load firmware and trigger dmub running */
847
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
848
+ hw_params.load_inst_const = true;
849
+
850
+ if (dmcu)
851
+ hw_params.psp_version = dmcu->psp_version;
852
+
853
+ for (i = 0; i < fb_info->num_fb; ++i)
854
+ hw_params.fb[i] = &fb_info->fb[i];
855
+
856
+ status = dmub_srv_hw_init(dmub_srv, &hw_params);
857
+ if (status != DMUB_STATUS_OK) {
858
+ DRM_ERROR("Error initializing DMUB HW: %d\n", status);
859
+ return -EINVAL;
860
+ }
861
+
862
+ /* Wait for firmware load to finish. */
863
+ status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
864
+ if (status != DMUB_STATUS_OK)
865
+ DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
866
+
867
+ /* Init DMCU and ABM if available. */
868
+ if (dmcu && abm) {
869
+ dmcu->funcs->dmcu_init(dmcu);
870
+ abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
871
+ }
872
+
873
+ if (!adev->dm.dc->ctx->dmub_srv)
874
+ adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
875
+ if (!adev->dm.dc->ctx->dmub_srv) {
876
+ DRM_ERROR("Couldn't allocate DC DMUB server!\n");
877
+ return -ENOMEM;
878
+ }
879
+
880
+ DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
881
+ adev->dm.dmcub_fw_version);
882
+
883
+ return 0;
884
+}
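
The offset arithmetic in dm_dmub_hw_init() implies a DMUB firmware image layout along these lines (a sketch inferred from the code above, not normative documentation):

	/*
	 * ucode_array_offset_bytes
	 * |
	 * v
	 * [PSP header 0x100][inst_const payload][PSP footer 0x100][bss/data]
	 * '----------------- inst_const_bytes --------------------'
	 *
	 * fw_inst_const therefore skips PSP_HEADER_BYTES, its usable size is
	 * inst_const_bytes minus both PSP header and footer, and fw_bss_data
	 * begins inst_const_bytes past the start of the ucode array.
	 */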
885
+
886
+static void amdgpu_check_debugfs_connector_property_change(struct amdgpu_device *adev,
887
+ struct drm_atomic_state *state)
888
+{
889
+ struct drm_connector *connector;
890
+ struct drm_crtc *crtc;
891
+ struct amdgpu_dm_connector *amdgpu_dm_connector;
892
+ struct drm_connector_state *conn_state;
893
+ struct dm_crtc_state *acrtc_state;
894
+ struct drm_crtc_state *crtc_state;
895
+ struct dc_stream_state *stream;
896
+ struct drm_device *dev = adev_to_drm(adev);
897
+
898
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
899
+
900
+ amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
901
+ conn_state = connector->state;
902
+
903
+ if (!(conn_state && conn_state->crtc))
904
+ continue;
905
+
906
+ crtc = conn_state->crtc;
907
+ acrtc_state = to_dm_crtc_state(crtc->state);
908
+
909
+ if (!(acrtc_state && acrtc_state->stream))
910
+ continue;
911
+
912
+ stream = acrtc_state->stream;
913
+
914
+ if (amdgpu_dm_connector->dsc_settings.dsc_force_enable ||
915
+ amdgpu_dm_connector->dsc_settings.dsc_num_slices_v ||
916
+ amdgpu_dm_connector->dsc_settings.dsc_num_slices_h ||
917
+ amdgpu_dm_connector->dsc_settings.dsc_bits_per_pixel) {
918
+ conn_state = drm_atomic_get_connector_state(state, connector);
919
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
920
+ crtc_state->mode_changed = true;
921
+ }
922
+ }
923
+}
924
+
925
+struct amdgpu_stutter_quirk {
926
+ u16 chip_vendor;
927
+ u16 chip_device;
928
+ u16 subsys_vendor;
929
+ u16 subsys_device;
930
+ u8 revision;
931
+};
932
+
933
+static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
934
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
935
+ { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
936
+ { 0, 0, 0, 0, 0 },
937
+};
938
+
939
+static bool dm_should_disable_stutter(struct pci_dev *pdev)
940
+{
941
+ const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
942
+
943
+ while (p && p->chip_device != 0) {
944
+ if (pdev->vendor == p->chip_vendor &&
945
+ pdev->device == p->chip_device &&
946
+ pdev->subsystem_vendor == p->subsys_vendor &&
947
+ pdev->subsystem_device == p->subsys_device &&
948
+ pdev->revision == p->revision) {
949
+ return true;
950
+ }
951
+ ++p;
952
+ }
953
+ return false;
954
+}
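
Usage sketch (hypothetical IDs, not part of this patch): because the table is sentinel-terminated, quirking another affected board is a single entry added before the terminating { 0, 0, 0, 0, 0 } row:

	/* Hypothetical example entry; the IDs below are made up. */
	{ 0x1002, 0x15d8, 0x1043, 0x876b, 0xc1 },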
955
+
397956 static int amdgpu_dm_init(struct amdgpu_device *adev)
398957 {
399958 struct dc_init_data init_data;
400
- adev->dm.ddev = adev->ddev;
959
+#ifdef CONFIG_DRM_AMD_DC_HDCP
960
+ struct dc_callback_init init_params;
961
+#endif
962
+ int r;
963
+
964
+ adev->dm.ddev = adev_to_drm(adev);
401965 adev->dm.adev = adev;
402966
403967 /* Zero all the fields */
404968 memset(&init_data, 0, sizeof(init_data));
969
+#ifdef CONFIG_DRM_AMD_DC_HDCP
970
+ memset(&init_params, 0, sizeof(init_params));
971
+#endif
972
+
973
+ mutex_init(&adev->dm.dc_lock);
974
+ mutex_init(&adev->dm.audio_lock);
405975
406976 if(amdgpu_dm_irq_init(adev)) {
407977 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
....@@ -410,7 +980,7 @@
410980
411981 init_data.asic_id.chip_family = adev->family;
412982
413
- init_data.asic_id.pci_revision_id = adev->rev_id;
983
+ init_data.asic_id.pci_revision_id = adev->pdev->revision;
414984 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
415985 init_data.asic_id.chip_id = adev->pdev->device;
416986
....@@ -430,17 +1000,33 @@
4301000
4311001 init_data.cgs_device = adev->dm.cgs_device;
4321002
433
- adev->dm.dal = NULL;
434
-
4351003 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
4361004
437
- /*
438
- * TODO debug why this doesn't work on Raven
439
- */
440
- if (adev->flags & AMD_IS_APU &&
441
- adev->asic_type >= CHIP_CARRIZO &&
442
- adev->asic_type < CHIP_RAVEN)
1005
+ switch (adev->asic_type) {
1006
+ case CHIP_CARRIZO:
1007
+ case CHIP_STONEY:
1008
+ case CHIP_RAVEN:
1009
+ case CHIP_RENOIR:
4431010 init_data.flags.gpu_vm_support = true;
1011
+ if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1012
+ init_data.flags.disable_dmcu = true;
1013
+ break;
1014
+ default:
1015
+ break;
1016
+ }
1017
+
1018
+ if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1019
+ init_data.flags.fbc_support = true;
1020
+
1021
+ if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1022
+ init_data.flags.multi_mon_pp_mclk_switch = true;
1023
+
1024
+ if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1025
+ init_data.flags.disable_fractional_pwm = true;
1026
+
1027
+ init_data.flags.power_down_display_on_boot = true;
1028
+
1029
+ init_data.soc_bounding_box = adev->dm.soc_bounding_box;
4441030
4451031 /* Display Core create. */
4461032 adev->dm.dc = dc_create(&init_data);
....@@ -452,7 +1038,32 @@
4521038 goto error;
4531039 }
4541040
455
- INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);
1041
+ if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1042
+ adev->dm.dc->debug.force_single_disp_pipe_split = false;
1043
+ adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1044
+ }
1045
+
1046
+ if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1047
+ adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1048
+ if (dm_should_disable_stutter(adev->pdev))
1049
+ adev->dm.dc->debug.disable_stutter = true;
1050
+
1051
+ if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1052
+ adev->dm.dc->debug.disable_stutter = true;
1053
+
1054
+ if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1055
+ adev->dm.dc->debug.disable_dsc = true;
1056
+
1057
+ if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1058
+ adev->dm.dc->debug.disable_clock_gate = true;
1059
+
1060
+ r = dm_dmub_hw_init(adev);
1061
+ if (r) {
1062
+ DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1063
+ goto error;
1064
+ }
1065
+
1066
+ dc_hardware_init(adev->dm.dc);
4561067
4571068 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
4581069 if (!adev->dm.freesync_module) {
....@@ -464,22 +1075,37 @@
4641075
4651076 amdgpu_dm_init_color_mod();
4661077
1078
+#ifdef CONFIG_DRM_AMD_DC_HDCP
1079
+ if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1080
+ adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1081
+
1082
+ if (!adev->dm.hdcp_workqueue)
1083
+ DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1084
+ else
1085
+ DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1086
+
1087
+ dc_init_callbacks(adev->dm.dc, &init_params);
1088
+ }
1089
+#endif
4671090 if (amdgpu_dm_initialize_drm_device(adev)) {
4681091 DRM_ERROR(
4691092 "amdgpu: failed to initialize sw for display support.\n");
4701093 goto error;
4711094 }
4721095
473
- /* Update the actual used number of crtc */
474
- adev->mode_info.num_crtc = adev->dm.display_indexes_num;
1096
+ /* create fake encoders for MST */
1097
+ dm_dp_create_fake_mst_encoders(adev);
4751098
4761099 /* TODO: Add_display_info? */
4771100
4781101 /* TODO use dynamic cursor width */
479
- adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
480
- adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1102
+ adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1103
+ adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
4811104
482
- if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
1105
+ /* Disable vblank IRQs aggressively for power-saving */
1106
+ adev_to_drm(adev)->vblank_disable_immediate = true;
1107
+
1108
+ if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
4831109 DRM_ERROR(
4841110 "amdgpu: failed to initialize sw for display support.\n");
4851111 goto error;
....@@ -491,12 +1117,43 @@
4911117 error:
4921118 amdgpu_dm_fini(adev);
4931119
494
- return -1;
1120
+ return -EINVAL;
4951121 }
4961122
4971123 static void amdgpu_dm_fini(struct amdgpu_device *adev)
4981124 {
1125
+ int i;
1126
+
1127
+ for (i = 0; i < adev->dm.display_indexes_num; i++) {
1128
+ drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1129
+ }
1130
+
1131
+ amdgpu_dm_audio_fini(adev);
1132
+
4991133 amdgpu_dm_destroy_drm_device(&adev->dm);
1134
+
1135
+#ifdef CONFIG_DRM_AMD_DC_HDCP
1136
+ if (adev->dm.hdcp_workqueue) {
1137
+ hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1138
+ adev->dm.hdcp_workqueue = NULL;
1139
+ }
1140
+
1141
+ if (adev->dm.dc)
1142
+ dc_deinit_callbacks(adev->dm.dc);
1143
+#endif
1144
+ if (adev->dm.dc->ctx->dmub_srv) {
1145
+ dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1146
+ adev->dm.dc->ctx->dmub_srv = NULL;
1147
+ }
1148
+
1149
+ if (adev->dm.dmub_bo)
1150
+ amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1151
+ &adev->dm.dmub_bo_gpu_addr,
1152
+ &adev->dm.dmub_bo_cpu_addr);
1153
+
1154
+ /* DC Destroy TODO: Replace destroy DAL */
1155
+ if (adev->dm.dc)
1156
+ dc_destroy(&adev->dm.dc);
5001157 /*
5011158 * TODO: pageflip, vlank interrupt
5021159 *
....@@ -511,19 +1168,302 @@
5111168 mod_freesync_destroy(adev->dm.freesync_module);
5121169 adev->dm.freesync_module = NULL;
5131170 }
514
- /* DC Destroy TODO: Replace destroy DAL */
515
- if (adev->dm.dc)
516
- dc_destroy(&adev->dm.dc);
1171
+
1172
+ mutex_destroy(&adev->dm.audio_lock);
1173
+ mutex_destroy(&adev->dm.dc_lock);
1174
+
5171175 return;
1176
+}
1177
+
1178
+static int load_dmcu_fw(struct amdgpu_device *adev)
1179
+{
1180
+ const char *fw_name_dmcu = NULL;
1181
+ int r;
1182
+ const struct dmcu_firmware_header_v1_0 *hdr;
1183
+
1184
+ switch(adev->asic_type) {
1185
+#if defined(CONFIG_DRM_AMD_DC_SI)
1186
+ case CHIP_TAHITI:
1187
+ case CHIP_PITCAIRN:
1188
+ case CHIP_VERDE:
1189
+ case CHIP_OLAND:
1190
+#endif
1191
+ case CHIP_BONAIRE:
1192
+ case CHIP_HAWAII:
1193
+ case CHIP_KAVERI:
1194
+ case CHIP_KABINI:
1195
+ case CHIP_MULLINS:
1196
+ case CHIP_TONGA:
1197
+ case CHIP_FIJI:
1198
+ case CHIP_CARRIZO:
1199
+ case CHIP_STONEY:
1200
+ case CHIP_POLARIS11:
1201
+ case CHIP_POLARIS10:
1202
+ case CHIP_POLARIS12:
1203
+ case CHIP_VEGAM:
1204
+ case CHIP_VEGA10:
1205
+ case CHIP_VEGA12:
1206
+ case CHIP_VEGA20:
1207
+ case CHIP_NAVI10:
1208
+ case CHIP_NAVI14:
1209
+ case CHIP_RENOIR:
1210
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1211
+ case CHIP_SIENNA_CICHLID:
1212
+ case CHIP_NAVY_FLOUNDER:
1213
+#endif
1214
+ return 0;
1215
+ case CHIP_NAVI12:
1216
+ fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1217
+ break;
1218
+ case CHIP_RAVEN:
1219
+ if (ASICREV_IS_PICASSO(adev->external_rev_id))
1220
+ fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1221
+ else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1222
+ fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1223
+ else
1224
+ return 0;
1225
+ break;
1226
+ default:
1227
+ DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1228
+ return -EINVAL;
1229
+ }
1230
+
1231
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1232
+ DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1233
+ return 0;
1234
+ }
1235
+
1236
+ r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1237
+ if (r == -ENOENT) {
1238
+ /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1239
+ DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1240
+ adev->dm.fw_dmcu = NULL;
1241
+ return 0;
1242
+ }
1243
+ if (r) {
1244
+ dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1245
+ fw_name_dmcu);
1246
+ return r;
1247
+ }
1248
+
1249
+ r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1250
+ if (r) {
1251
+ dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1252
+ fw_name_dmcu);
1253
+ release_firmware(adev->dm.fw_dmcu);
1254
+ adev->dm.fw_dmcu = NULL;
1255
+ return r;
1256
+ }
1257
+
1258
+ hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1259
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1260
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1261
+ adev->firmware.fw_size +=
1262
+ ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1263
+
1264
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1265
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1266
+ adev->firmware.fw_size +=
1267
+ ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1268
+
1269
+ adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1270
+
1271
+ DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1272
+
1273
+ return 0;
1274
+}
1275
+
1276
+static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1277
+{
1278
+ struct amdgpu_device *adev = ctx;
1279
+
1280
+ return dm_read_reg(adev->dm.dc->ctx, address);
1281
+}
1282
+
1283
+static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1284
+ uint32_t value)
1285
+{
1286
+ struct amdgpu_device *adev = ctx;
1287
+
1288
+ return dm_write_reg(adev->dm.dc->ctx, address, value);
1289
+}
1290
+
1291
+static int dm_dmub_sw_init(struct amdgpu_device *adev)
1292
+{
1293
+ struct dmub_srv_create_params create_params;
1294
+ struct dmub_srv_region_params region_params;
1295
+ struct dmub_srv_region_info region_info;
1296
+ struct dmub_srv_fb_params fb_params;
1297
+ struct dmub_srv_fb_info *fb_info;
1298
+ struct dmub_srv *dmub_srv;
1299
+ const struct dmcub_firmware_header_v1_0 *hdr;
1300
+ const char *fw_name_dmub;
1301
+ enum dmub_asic dmub_asic;
1302
+ enum dmub_status status;
1303
+ int r;
1304
+
1305
+ switch (adev->asic_type) {
1306
+ case CHIP_RENOIR:
1307
+ dmub_asic = DMUB_ASIC_DCN21;
1308
+ fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1309
+ if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1310
+ fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1311
+ break;
1312
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1313
+ case CHIP_SIENNA_CICHLID:
1314
+ dmub_asic = DMUB_ASIC_DCN30;
1315
+ fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1316
+ break;
1317
+ case CHIP_NAVY_FLOUNDER:
1318
+ dmub_asic = DMUB_ASIC_DCN30;
1319
+ fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1320
+ break;
1321
+#endif
1322
+
1323
+ default:
1324
+ /* ASIC doesn't support DMUB. */
1325
+ return 0;
1326
+ }
1327
+
1328
+ r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1329
+ if (r) {
1330
+ DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1331
+ return 0;
1332
+ }
1333
+
1334
+ r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1335
+ if (r) {
1336
+ DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1337
+ return 0;
1338
+ }
1339
+
1340
+ hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1341
+ adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1342
+
1343
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1344
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1345
+ AMDGPU_UCODE_ID_DMCUB;
1346
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1347
+ adev->dm.dmub_fw;
1348
+ adev->firmware.fw_size +=
1349
+ ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1350
+
1351
+ DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1352
+ adev->dm.dmcub_fw_version);
1353
+ }
1354
+
1355
+
1356
+ adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1357
+ dmub_srv = adev->dm.dmub_srv;
1358
+
1359
+ if (!dmub_srv) {
1360
+ DRM_ERROR("Failed to allocate DMUB service!\n");
1361
+ return -ENOMEM;
1362
+ }
1363
+
1364
+ memset(&create_params, 0, sizeof(create_params));
1365
+ create_params.user_ctx = adev;
1366
+ create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1367
+ create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1368
+ create_params.asic = dmub_asic;
1369
+
1370
+ /* Create the DMUB service. */
1371
+ status = dmub_srv_create(dmub_srv, &create_params);
1372
+ if (status != DMUB_STATUS_OK) {
1373
+ DRM_ERROR("Error creating DMUB service: %d\n", status);
1374
+ return -EINVAL;
1375
+ }
1376
+
1377
+ /* Calculate the size of all the regions for the DMUB service. */
1378
+ memset(&region_params, 0, sizeof(region_params));
1379
+
1380
+ region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1381
+ PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1382
+ region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1383
+ region_params.vbios_size = adev->bios_size;
1384
+ region_params.fw_bss_data = region_params.bss_data_size ?
1385
+ adev->dm.dmub_fw->data +
1386
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1387
+ le32_to_cpu(hdr->inst_const_bytes) : NULL;
1388
+ region_params.fw_inst_const =
1389
+ adev->dm.dmub_fw->data +
1390
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1391
+ PSP_HEADER_BYTES;
1392
+
1393
+ status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1394
+ &region_info);
1395
+
1396
+ if (status != DMUB_STATUS_OK) {
1397
+ DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1398
+ return -EINVAL;
1399
+ }
1400
+
1401
+ /*
1402
+ * Allocate a framebuffer based on the total size of all the regions.
1403
+ * TODO: Move this into GART.
1404
+ */
1405
+ r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1406
+ AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1407
+ &adev->dm.dmub_bo_gpu_addr,
1408
+ &adev->dm.dmub_bo_cpu_addr);
1409
+ if (r)
1410
+ return r;
1411
+
1412
+ /* Rebase the regions on the framebuffer address. */
1413
+ memset(&fb_params, 0, sizeof(fb_params));
1414
+ fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1415
+ fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1416
+ fb_params.region_info = &region_info;
1417
+
1418
+ adev->dm.dmub_fb_info =
1419
+ kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1420
+ fb_info = adev->dm.dmub_fb_info;
1421
+
1422
+ if (!fb_info) {
1423
+ DRM_ERROR(
1424
+ "Failed to allocate framebuffer info for DMUB service!\n");
1425
+ return -ENOMEM;
1426
+ }
1427
+
1428
+ status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1429
+ if (status != DMUB_STATUS_OK) {
1430
+ DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1431
+ return -EINVAL;
1432
+ }
1433
+
1434
+ return 0;
5181435 }
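
Flow summary (as implemented above) of how dm_dmub_sw_init() sets up the DMUB service memory, with hardware programming deferred to dm_dmub_hw_init():

	/*
	 * dmub_srv_create()           -> service object bound to reg ops
	 * dmub_srv_calc_region_info() -> total size the DMUB windows need
	 * amdgpu_bo_create_kernel()   -> one VRAM BO backing all regions
	 * dmub_srv_calc_fb_info()     -> per-window cpu/gpu addresses
	 * dm_dmub_hw_init()           -> later copies firmware into windows
	 */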
5191436
5201437 static int dm_sw_init(void *handle)
5211438 {
522
- return 0;
1439
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1440
+ int r;
1441
+
1442
+ r = dm_dmub_sw_init(adev);
1443
+ if (r)
1444
+ return r;
1445
+
1446
+ return load_dmcu_fw(adev);
5231447 }
5241448
5251449 static int dm_sw_fini(void *handle)
5261450 {
1451
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1452
+
1453
+ kfree(adev->dm.dmub_fb_info);
1454
+ adev->dm.dmub_fb_info = NULL;
1455
+
1456
+ if (adev->dm.dmub_srv) {
1457
+ dmub_srv_destroy(adev->dm.dmub_srv);
1458
+ adev->dm.dmub_srv = NULL;
1459
+ }
1460
+
1461
+ release_firmware(adev->dm.dmub_fw);
1462
+ adev->dm.dmub_fw = NULL;
1463
+
1464
+ release_firmware(adev->dm.fw_dmcu);
1465
+ adev->dm.fw_dmcu = NULL;
1466
+
5271467 return 0;
5281468 }
5291469
....@@ -531,27 +1471,29 @@
5311471 {
5321472 struct amdgpu_dm_connector *aconnector;
5331473 struct drm_connector *connector;
1474
+ struct drm_connector_list_iter iter;
5341475 int ret = 0;
5351476
536
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
537
-
538
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1477
+ drm_connector_list_iter_begin(dev, &iter);
1478
+ drm_for_each_connector_iter(connector, &iter) {
5391479 aconnector = to_amdgpu_dm_connector(connector);
5401480 if (aconnector->dc_link->type == dc_connection_mst_branch &&
5411481 aconnector->mst_mgr.aux) {
5421482 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
543
- aconnector, aconnector->base.base.id);
1483
+ aconnector,
1484
+ aconnector->base.base.id);
5441485
5451486 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
5461487 if (ret < 0) {
5471488 DRM_ERROR("DM_MST: Failed to start MST\n");
548
- ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
549
- return ret;
550
- }
1489
+ aconnector->dc_link->type =
1490
+ dc_connection_single;
1491
+ break;
5511492 }
1493
+ }
5521494 }
1495
+ drm_connector_list_iter_end(&iter);
5531496
554
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
5551497 return ret;
5561498 }
5571499
....@@ -559,21 +1501,54 @@
5591501 {
5601502 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5611503
562
- return detect_mst_link_for_all_connectors(adev->ddev);
1504
+ struct dmcu_iram_parameters params;
1505
+ unsigned int linear_lut[16];
1506
+ int i;
1507
+ struct dmcu *dmcu = NULL;
1508
+ bool ret = true;
1509
+
1510
+ dmcu = adev->dm.dc->res_pool->dmcu;
1511
+
1512
+ for (i = 0; i < 16; i++)
1513
+ linear_lut[i] = 0xFFFF * i / 15;
1514
+
1515
+ params.set = 0;
1516
+ params.backlight_ramping_start = 0xCCCC;
1517
+ params.backlight_ramping_reduction = 0xCCCCCCCC;
1518
+ params.backlight_lut_array_size = 16;
1519
+ params.backlight_lut_array = linear_lut;
1520
+
1521
+ /* Min backlight level after ABM reduction; don't allow below 1%:
1522
+ * 0xFFFF x 0.01 = 0x28F
1523
+ */
1524
+ params.min_abm_backlight = 0x28F;
1525
+
1526
+ /* In the case where abm is implemented on dmcub,
1527
+ * dmcu object will be null.
1528
+ * ABM 2.4 and up are implemented on dmcub.
1529
+ */
1530
+ if (dmcu)
1531
+ ret = dmcu_load_iram(dmcu, params);
1532
+ else if (adev->dm.dc->ctx->dmub_srv)
1533
+ ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1534
+
1535
+ if (!ret)
1536
+ return -EINVAL;
1537
+
1538
+ return detect_mst_link_for_all_connectors(adev_to_drm(adev));
5631539 }
5641540
5651541 static void s3_handle_mst(struct drm_device *dev, bool suspend)
5661542 {
5671543 struct amdgpu_dm_connector *aconnector;
5681544 struct drm_connector *connector;
1545
+ struct drm_connector_list_iter iter;
5691546 struct drm_dp_mst_topology_mgr *mgr;
5701547 int ret;
5711548 bool need_hotplug = false;
5721549
573
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
574
-
575
- list_for_each_entry(connector, &dev->mode_config.connector_list,
576
- head) {
1550
+ drm_connector_list_iter_begin(dev, &iter);
1551
+ drm_for_each_connector_iter(connector, &iter) {
5771552 aconnector = to_amdgpu_dm_connector(connector);
5781553 if (aconnector->dc_link->type != dc_connection_mst_branch ||
5791554 aconnector->mst_port)
....@@ -584,20 +1559,95 @@
5841559 if (suspend) {
5851560 drm_dp_mst_topology_mgr_suspend(mgr);
5861561 } else {
587
- ret = drm_dp_mst_topology_mgr_resume(mgr);
1562
+ ret = drm_dp_mst_topology_mgr_resume(mgr, true);
5881563 if (ret < 0) {
5891564 drm_dp_mst_topology_mgr_set_mst(mgr, false);
5901565 need_hotplug = true;
5911566 }
5921567 }
5931568 }
594
-
595
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
1569
+ drm_connector_list_iter_end(&iter);
5961570
5971571 if (need_hotplug)
5981572 drm_kms_helper_hotplug_event(dev);
5991573 }
6001574
1575
+static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1576
+{
1577
+ struct smu_context *smu = &adev->smu;
1578
+ int ret = 0;
1579
+
1580
+ if (!is_support_sw_smu(adev))
1581
+ return 0;
1582
+
1583
+ /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1584
+ * on the Windows driver dc implementation.
1585
+ * For Navi1x, clock settings of dcn watermarks are fixed. The settings
1586
+ * should be passed to smu during boot up and resume from s3.
1587
+ * boot up: dc calculates dcn watermark clock settings within dc_create,
1588
+ * dcn20_resource_construct
1589
+ * then call pplib functions below to pass the settings to smu:
1590
+ * smu_set_watermarks_for_clock_ranges
1591
+ * smu_set_watermarks_table
1592
+ * navi10_set_watermarks_table
1593
+ * smu_write_watermarks_table
1594
+ *
1595
+ * For Renoir, clock settings of dcn watermark are also fixed values.
1596
+ * dc has implemented a different flow for the Windows driver:
1597
+ * dc_hardware_init / dc_set_power_state
1598
+ * dcn10_init_hw
1599
+ * notify_wm_ranges
1600
+ * set_wm_ranges
1601
+ * -- Linux
1602
+ * smu_set_watermarks_for_clock_ranges
1603
+ * renoir_set_watermarks_table
1604
+ * smu_write_watermarks_table
1605
+ *
1606
+ * For Linux,
1607
+ * dc_hardware_init -> amdgpu_dm_init
1608
+ * dc_set_power_state --> dm_resume
1609
+ *
1610
+ * therefore, this function applies to navi10/12/14 but not Renoir
1611
+ *
1612
+ */
1613
+ switch(adev->asic_type) {
1614
+ case CHIP_NAVI10:
1615
+ case CHIP_NAVI14:
1616
+ case CHIP_NAVI12:
1617
+ break;
1618
+ default:
1619
+ return 0;
1620
+ }
1621
+
1622
+ ret = smu_write_watermarks_table(smu);
1623
+ if (ret) {
1624
+ DRM_ERROR("Failed to update WMTABLE!\n");
1625
+ return ret;
1626
+ }
1627
+
1628
+ return 0;
1629
+}
1630
+
1631
+/**
1632
+ * dm_hw_init() - Initialize DC device
1633
+ * @handle: The base driver device containing the amdgpu_dm device.
1634
+ *
1635
+ * Initialize the &struct amdgpu_display_manager device. This involves calling
1636
+ * the initializers of each DM component, then populating the struct with them.
1637
+ *
1638
+ * Although the function implies hardware initialization, both hardware and
1639
+ * software are initialized here. Splitting them out to their relevant init
1640
+ * hooks is a future TODO item.
1641
+ *
1642
+ * Some notable things that are initialized here:
1643
+ *
1644
+ * - Display Core, both software and hardware
1645
+ * - DC modules that we need (freesync and color management)
1646
+ * - DRM software states
1647
+ * - Interrupt sources and handlers
1648
+ * - Vblank support
1649
+ * - Debug FS entries, if enabled
1650
+ */
6011651 static int dm_hw_init(void *handle)
6021652 {
6031653 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
....@@ -608,6 +1658,14 @@
6081658 return 0;
6091659 }
6101660
1661
+/**
1662
+ * dm_hw_fini() - Teardown DC device
1663
+ * @handle: The base driver device containing the amdgpu_dm device.
1664
+ *
1665
+ * Teardown components within &struct amdgpu_display_manager that require
1666
+ * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1667
+ * were loaded. Also flush IRQ workqueues and disable them.
1668
+ */
6111669 static int dm_hw_fini(void *handle)
6121670 {
6131671 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
....@@ -619,23 +1677,125 @@
6191677 return 0;
6201678 }
6211679
1680
+
1681
+static int dm_enable_vblank(struct drm_crtc *crtc);
1682
+static void dm_disable_vblank(struct drm_crtc *crtc);
1683
+
1684
+static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1685
+ struct dc_state *state, bool enable)
1686
+{
1687
+ enum dc_irq_source irq_source;
1688
+ struct amdgpu_crtc *acrtc;
1689
+ int rc = -EBUSY;
1690
+ int i = 0;
1691
+
1692
+ for (i = 0; i < state->stream_count; i++) {
1693
+ acrtc = get_crtc_by_otg_inst(
1694
+ adev, state->stream_status[i].primary_otg_inst);
1695
+
1696
+ if (acrtc && state->stream_status[i].plane_count != 0) {
1697
+ irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1698
+ rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1699
+ DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1700
+ acrtc->crtc_id, enable ? "en" : "dis", rc);
1701
+ if (rc)
1702
+ DRM_WARN("Failed to %s pflip interrupts\n",
1703
+ enable ? "enable" : "disable");
1704
+
1705
+ if (enable) {
1706
+ rc = dm_enable_vblank(&acrtc->base);
1707
+ if (rc)
1708
+ DRM_WARN("Failed to enable vblank interrupts\n");
1709
+ } else {
1710
+ dm_disable_vblank(&acrtc->base);
1711
+ }
1712
+
1713
+ }
1714
+ }
1715
+
1716
+}
1717
+
1718
+static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1719
+{
1720
+ struct dc_state *context = NULL;
1721
+ enum dc_status res = DC_ERROR_UNEXPECTED;
1722
+ int i;
1723
+ struct dc_stream_state *del_streams[MAX_PIPES];
1724
+ int del_streams_count = 0;
1725
+
1726
+ memset(del_streams, 0, sizeof(del_streams));
1727
+
1728
+ context = dc_create_state(dc);
1729
+ if (context == NULL)
1730
+ goto context_alloc_fail;
1731
+
1732
+ dc_resource_state_copy_construct_current(dc, context);
1733
+
1734
+ /* First remove from context all streams */
1735
+ for (i = 0; i < context->stream_count; i++) {
1736
+ struct dc_stream_state *stream = context->streams[i];
1737
+
1738
+ del_streams[del_streams_count++] = stream;
1739
+ }
1740
+
1741
+ /* Remove all planes for removed streams and then remove the streams */
1742
+ for (i = 0; i < del_streams_count; i++) {
1743
+ if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1744
+ res = DC_FAIL_DETACH_SURFACES;
1745
+ goto fail;
1746
+ }
1747
+
1748
+ res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1749
+ if (res != DC_OK)
1750
+ goto fail;
1751
+ }
1752
+
1753
+
1754
+ res = dc_validate_global_state(dc, context, false);
1755
+
1756
+ if (res != DC_OK) {
1757
+ DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1758
+ goto fail;
1759
+ }
1760
+
1761
+ res = dc_commit_state(dc, context);
1762
+
1763
+fail:
1764
+ dc_release_state(context);
1765
+
1766
+context_alloc_fail:
1767
+ return res;
1768
+}
1769
+
6221770 static int dm_suspend(void *handle)
6231771 {
6241772 struct amdgpu_device *adev = handle;
6251773 struct amdgpu_display_manager *dm = &adev->dm;
6261774 int ret = 0;
6271775
628
- WARN_ON(adev->dm.cached_state);
629
- adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1776
+ if (amdgpu_in_reset(adev)) {
1777
+ mutex_lock(&dm->dc_lock);
1778
+ dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
6301779
631
- s3_handle_mst(adev->ddev, true);
1780
+ dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1781
+
1782
+ amdgpu_dm_commit_zero_streams(dm->dc);
1783
+
1784
+ amdgpu_dm_irq_suspend(adev);
1785
+
1786
+ return ret;
1787
+ }
1788
+
1789
+ WARN_ON(adev->dm.cached_state);
1790
+ adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1791
+
1792
+ s3_handle_mst(adev_to_drm(adev), true);
6321793
6331794 amdgpu_dm_irq_suspend(adev);
6341795
635
-
6361796 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
6371797
638
- return ret;
1798
+ return 0;
6391799 }
6401800
6411801 static struct amdgpu_dm_connector *
....@@ -726,6 +1886,7 @@
7261886 return;
7271887 }
7281888
1889
+ /* dc_sink_create returns a new reference */
7291890 link->local_sink = sink;
7301891
7311892 edid_status = dm_helpers_read_local_edid(
....@@ -738,21 +1899,137 @@
7381899
7391900 }
7401901
1902
+static void dm_gpureset_commit_state(struct dc_state *dc_state,
1903
+ struct amdgpu_display_manager *dm)
1904
+{
1905
+ struct {
1906
+ struct dc_surface_update surface_updates[MAX_SURFACES];
1907
+ struct dc_plane_info plane_infos[MAX_SURFACES];
1908
+ struct dc_scaling_info scaling_infos[MAX_SURFACES];
1909
+ struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1910
+ struct dc_stream_update stream_update;
1911
+ } *bundle;
1912
+ int k, m;
1913
+
1914
+ bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1915
+
1916
+ if (!bundle) {
1917
+ dm_error("Failed to allocate update bundle\n");
1918
+ goto cleanup;
1919
+ }
1920
+
1921
+ for (k = 0; k < dc_state->stream_count; k++) {
1922
+ bundle->stream_update.stream = dc_state->streams[k];
1923
+
1924
+ for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
1925
+ bundle->surface_updates[m].surface =
1926
+ dc_state->stream_status[k].plane_states[m];
1927
+ bundle->surface_updates[m].surface->force_full_update =
1928
+ true;
1929
+ }
1930
+ dc_commit_updates_for_stream(
1931
+ dm->dc, bundle->surface_updates,
1932
+ dc_state->stream_status[k].plane_count,
1933
+ dc_state->streams[k], &bundle->stream_update, dc_state);
1934
+ }
1935
+
1936
+cleanup:
1937
+ kfree(bundle);
1938
+
1939
+ return;
1940
+}
1941
+
1942
+static void dm_set_dpms_off(struct dc_link *link)
1943
+{
1944
+ struct dc_stream_state *stream_state;
1945
+ struct amdgpu_dm_connector *aconnector = link->priv;
1946
+ struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
1947
+ struct dc_stream_update stream_update;
1948
+ bool dpms_off = true;
1949
+
1950
+ memset(&stream_update, 0, sizeof(stream_update));
1951
+ stream_update.dpms_off = &dpms_off;
1952
+
1953
+ mutex_lock(&adev->dm.dc_lock);
1954
+ stream_state = dc_stream_find_from_link(link);
1955
+
1956
+ if (stream_state == NULL) {
1957
+ DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
1958
+ mutex_unlock(&adev->dm.dc_lock);
1959
+ return;
1960
+ }
1961
+
1962
+ stream_update.stream = stream_state;
1963
+ dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
1964
+ stream_state, &stream_update,
1965
+ stream_state->ctx->dc->current_state);
1966
+ mutex_unlock(&adev->dm.dc_lock);
1967
+}
1968
+
7411969 static int dm_resume(void *handle)
7421970 {
7431971 struct amdgpu_device *adev = handle;
744
- struct drm_device *ddev = adev->ddev;
1972
+ struct drm_device *ddev = adev_to_drm(adev);
7451973 struct amdgpu_display_manager *dm = &adev->dm;
7461974 struct amdgpu_dm_connector *aconnector;
7471975 struct drm_connector *connector;
1976
+ struct drm_connector_list_iter iter;
7481977 struct drm_crtc *crtc;
7491978 struct drm_crtc_state *new_crtc_state;
7501979 struct dm_crtc_state *dm_new_crtc_state;
7511980 struct drm_plane *plane;
7521981 struct drm_plane_state *new_plane_state;
7531982 struct dm_plane_state *dm_new_plane_state;
1983
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
7541984 enum dc_connection_type new_connection_type = dc_connection_none;
755
- int i;
1985
+ struct dc_state *dc_state;
1986
+ int i, r, j;
1987
+
1988
+ if (amdgpu_in_reset(adev)) {
1989
+ dc_state = dm->cached_dc_state;
1990
+
1991
+ r = dm_dmub_hw_init(adev);
1992
+ if (r)
1993
+ DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1994
+
1995
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1996
+ dc_resume(dm->dc);
1997
+
1998
+ amdgpu_dm_irq_resume_early(adev);
1999
+
2000
+ for (i = 0; i < dc_state->stream_count; i++) {
2001
+ dc_state->streams[i]->mode_changed = true;
2002
+ for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2003
+ dc_state->stream_status[i].plane_states[j]->update_flags.raw
2004
+ = 0xffffffff;
2005
+ }
2006
+ }
2007
+
2008
+ WARN_ON(!dc_commit_state(dm->dc, dc_state));
2009
+
2010
+ dm_gpureset_commit_state(dm->cached_dc_state, dm);
2011
+
2012
+ dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2013
+
2014
+ dc_release_state(dm->cached_dc_state);
2015
+ dm->cached_dc_state = NULL;
2016
+
2017
+ amdgpu_dm_irq_resume_late(adev);
2018
+
2019
+ mutex_unlock(&dm->dc_lock);
2020
+
2021
+ return 0;
2022
+ }
2023
+ /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2024
+ dc_release_state(dm_state->context);
2025
+ dm_state->context = dc_create_state(dm->dc);
2026
+ /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2027
+ dc_resource_state_construct(dm->dc, dm_state->context);
2028
+
2029
+ /* Before powering on DC we need to re-initialize DMUB. */
2030
+ r = dm_dmub_hw_init(adev);
2031
+ if (r)
2032
+ DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
7562033
7572034 /* power on hardware */
7582035 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
....@@ -760,24 +2037,28 @@
7602037 /* program HPD filter */
7612038 dc_resume(dm->dc);
7622039
763
- /* On resume we need to rewrite the MSTM control bits to enamble MST*/
764
- s3_handle_mst(ddev, false);
765
-
7662040 /*
7672041 * early enable HPD Rx IRQ, should be done before set mode as short
7682042 * pulse interrupts are used for MST
7692043 */
7702044 amdgpu_dm_irq_resume_early(adev);
7712045
2046
+ /* On resume we need to rewrite the MSTM control bits to enable MST */
2047
+ s3_handle_mst(ddev, false);
2048
+
7722049 /* Do detection*/
773
- list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
2050
+ drm_connector_list_iter_begin(ddev, &iter);
2051
+ drm_for_each_connector_iter(connector, &iter) {
7742052 aconnector = to_amdgpu_dm_connector(connector);
2053
+
2054
+ if (!aconnector->dc_link)
2055
+ continue;
7752056
7762057 /*
7772058 * this is the case when traversing through already created
7782059 * MST connectors, should be skipped
7792060 */
780
- if (aconnector->mst_port)
2061
+ if (aconnector->dc_link->type == dc_connection_mst_branch)
7812062 continue;
7822063
7832064 mutex_lock(&aconnector->hpd_lock);
....@@ -792,12 +2073,15 @@
7922073 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
7932074 aconnector->fake_enable = false;
7942075
2076
+ if (aconnector->dc_sink)
2077
+ dc_sink_release(aconnector->dc_sink);
7952078 aconnector->dc_sink = NULL;
7962079 amdgpu_dm_update_connector_after_detect(aconnector);
7972080 mutex_unlock(&aconnector->hpd_lock);
7982081 }
2082
+ drm_connector_list_iter_end(&iter);
7992083
800
- /* Force mode set in atomic comit */
2084
+ /* Force mode set in atomic commit */
8012085 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
8022086 new_crtc_state->active_changed = true;
8032087
....@@ -830,8 +2114,20 @@
8302114
8312115 amdgpu_dm_irq_resume_late(adev);
8322116
2117
+ amdgpu_dm_smu_write_watermarks_table(adev);
2118
+
8332119 return 0;
8342120 }
2121
+
2122
+/**
2123
+ * DOC: DM Lifecycle
2124
+ *
2125
+ * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2126
+ * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2127
+ * the base driver's device list to be initialized and torn down accordingly.
2128
+ *
2129
+ * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2130
+ */
8352131
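For orientation, the hook table below is what the base driver invokes at each lifecycle stage. A minimal sketch of how such a table is exposed as an IP block follows; the version numbers here are illustrative assumptions, not values taken from this patch:

const struct amdgpu_ip_block_version dm_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_DCE,	/* DM registers as the display IP */
	.major = 1,			/* illustrative */
	.minor = 0,			/* illustrative */
	.rev = 0,			/* illustrative */
	.funcs = &amdgpu_dm_funcs,	/* the hook table defined below */
};

The base driver iterates its registered IP blocks and calls hw_init/hw_fini, suspend/resume, and the remaining hooks at the matching points of the device lifecycle.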
8362132 static const struct amd_ip_funcs amdgpu_dm_funcs = {
8372133 .name = "dm",
....@@ -861,61 +2157,90 @@
8612157 };
8622158
8632159
864
-static struct drm_atomic_state *
865
-dm_atomic_state_alloc(struct drm_device *dev)
866
-{
867
- struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
868
-
869
- if (!state)
870
- return NULL;
871
-
872
- if (drm_atomic_state_init(dev, &state->base) < 0)
873
- goto fail;
874
-
875
- return &state->base;
876
-
877
-fail:
878
- kfree(state);
879
- return NULL;
880
-}
881
-
882
-static void
883
-dm_atomic_state_clear(struct drm_atomic_state *state)
884
-{
885
- struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
886
-
887
- if (dm_state->context) {
888
- dc_release_state(dm_state->context);
889
- dm_state->context = NULL;
890
- }
891
-
892
- drm_atomic_state_default_clear(state);
893
-}
894
-
895
-static void
896
-dm_atomic_state_alloc_free(struct drm_atomic_state *state)
897
-{
898
- struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
899
- drm_atomic_state_default_release(state);
900
- kfree(dm_state);
901
-}
2160
+/**
2161
+ * DOC: atomic
2162
+ *
2163
+ * *WIP*
2164
+ */
9022165
9032166 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
9042167 .fb_create = amdgpu_display_user_framebuffer_create,
9052168 .output_poll_changed = drm_fb_helper_output_poll_changed,
9062169 .atomic_check = amdgpu_dm_atomic_check,
9072170 .atomic_commit = amdgpu_dm_atomic_commit,
908
- .atomic_state_alloc = dm_atomic_state_alloc,
909
- .atomic_state_clear = dm_atomic_state_clear,
910
- .atomic_state_free = dm_atomic_state_alloc_free
9112171 };
9122172
9132173 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
9142174 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
9152175 };
9162176
917
-static void
918
-amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
2177
+static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2178
+{
2179
+ u32 max_avg, min_cll, max, min, q, r;
2180
+ struct amdgpu_dm_backlight_caps *caps;
2181
+ struct amdgpu_display_manager *dm;
2182
+ struct drm_connector *conn_base;
2183
+ struct amdgpu_device *adev;
2184
+ struct dc_link *link = NULL;
2185
+ static const u8 pre_computed_values[] = {
2186
+ 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2187
+ 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2188
+
2189
+ if (!aconnector || !aconnector->dc_link)
2190
+ return;
2191
+
2192
+ link = aconnector->dc_link;
2193
+ if (link->connector_signal != SIGNAL_TYPE_EDP)
2194
+ return;
2195
+
2196
+ conn_base = &aconnector->base;
2197
+ adev = drm_to_adev(conn_base->dev);
2198
+ dm = &adev->dm;
2199
+ caps = &dm->backlight_caps;
2200
+ caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2201
+ caps->aux_support = false;
2202
+ max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2203
+ min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2204
+
2205
+ if (caps->ext_caps->bits.oled == 1 /*||
2206
+ caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2207
+ caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2208
+ caps->aux_support = true;
2209
+
2210
+ if (amdgpu_backlight == 0)
2211
+ caps->aux_support = false;
2212
+ else if (amdgpu_backlight == 1)
2213
+ caps->aux_support = true;
2214
+
2215
+ /* From the specification (CTA-861-G), for calculating the maximum
2216
+ * luminance we need to use:
2217
+ * Luminance = 50*2**(CV/32)
2218
+ * Where CV is a one-byte value.
2219
+ * For calculating this expression we may need floating-point precision;
2220
+ * to avoid this complexity level, we take advantage that CV is divided
2221
+ * by a constant. From Euclid's division algorithm, we know that CV
2222
+ * can be written as: CV = 32*q + r. Next, we replace CV in the
2223
+ * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2224
+ * need to pre-compute the value of r/32. For pre-computing the values
2225
+ * we just used the following Ruby line:
2226
+ * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2227
+ * The results of the above expressions can be verified at
2228
+ * pre_computed_values.
2229
+ */
2230
+ q = max_avg >> 5;
2231
+ r = max_avg % 32;
2232
+ max = (1 << q) * pre_computed_values[r];
2233
+
2234
+ // min luminance: maxLum * (CV/255)^2 / 100
2235
+ q = DIV_ROUND_CLOSEST(min_cll, 255);
2236
+ min = max * DIV_ROUND_CLOSEST((q * q), 100);
2237
+
2238
+ caps->aux_max_input_signal = max;
2239
+ caps->aux_min_input_signal = min;
2240
+}
2241
+
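To sanity-check the fixed-point scheme above, here is a minimal userspace sketch (a standalone program, not kernel code) comparing (1 << (CV / 32)) * pre_computed_values[CV % 32] against the exact 50 * 2**(CV/32); the table is copied verbatim from the function above:

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* Same table as pre_computed_values in update_connector_ext_caps(). */
	static const unsigned int t[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
	unsigned int cv;

	for (cv = 0; cv < 256; cv++) {
		/* Fixed-point: q = cv / 32, r = cv % 32, max = 2^q * t[r]. */
		unsigned int fixed = (1u << (cv >> 5)) * t[cv % 32];
		double exact = 50.0 * pow(2.0, cv / 32.0);

		printf("CV=%3u fixed=%5u exact=%8.1f\n", cv, fixed, exact);
	}
	return 0;
}

For CV = 100, for instance, q = 3 and r = 4, so the fixed-point result is 8 * 55 = 440 nits against an exact value of roughly 436.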
2242
+void amdgpu_dm_update_connector_after_detect(
2243
+ struct amdgpu_dm_connector *aconnector)
9192244 {
9202245 struct drm_connector *connector = &aconnector->base;
9212246 struct drm_device *dev = connector->dev;
....@@ -925,45 +2250,51 @@
9252250 if (aconnector->mst_mgr.mst_state == true)
9262251 return;
9272252
928
-
9292253 sink = aconnector->dc_link->local_sink;
2254
+ if (sink)
2255
+ dc_sink_retain(sink);
9302256
931
- /* Edid mgmt connector gets first update only in mode_valid hook and then
2257
+ /*
2258
+ * Edid mgmt connector gets first update only in mode_valid hook and then
9322259 * the connector sink is set to either fake or physical sink depends on link status.
933
- * don't do it here if u are during boot
2260
+ * Skip if already done during boot.
9342261 */
9352262 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
9362263 && aconnector->dc_em_sink) {
9372264
938
- /* For S3 resume with headless use eml_sink to fake stream
939
- * because on resume connecotr->sink is set ti NULL
2265
+ /*
2266
+ * For S3 resume with headless use eml_sink to fake stream
2267
+ * because on resume connector->sink is set to NULL
9402268 */
9412269 mutex_lock(&dev->mode_config.mutex);
9422270
9432271 if (sink) {
9442272 if (aconnector->dc_sink) {
945
- amdgpu_dm_remove_sink_from_freesync_module(
946
- connector);
947
- /* retain and release bellow are used for
948
- * bump up refcount for sink because the link don't point
949
- * to it anymore after disconnect so on next crtc to connector
2273
+ amdgpu_dm_update_freesync_caps(connector, NULL);
2274
+ /*
2275
+ * retain and release below are used to
2276
+ * bump up refcount for sink because the link doesn't point
2277
+ * to it anymore after disconnect, so on next crtc to connector
9502278 * reshuffle by UMD we will get into unwanted dc_sink release
9512279 */
952
- if (aconnector->dc_sink != aconnector->dc_em_sink)
953
- dc_sink_release(aconnector->dc_sink);
2280
+ dc_sink_release(aconnector->dc_sink);
9542281 }
9552282 aconnector->dc_sink = sink;
956
- amdgpu_dm_add_sink_to_freesync_module(
957
- connector, aconnector->edid);
2283
+ dc_sink_retain(aconnector->dc_sink);
2284
+ amdgpu_dm_update_freesync_caps(connector,
2285
+ aconnector->edid);
9582286 } else {
959
- amdgpu_dm_remove_sink_from_freesync_module(connector);
960
- if (!aconnector->dc_sink)
2287
+ amdgpu_dm_update_freesync_caps(connector, NULL);
2288
+ if (!aconnector->dc_sink) {
9612289 aconnector->dc_sink = aconnector->dc_em_sink;
962
- else if (aconnector->dc_sink != aconnector->dc_em_sink)
9632290 dc_sink_retain(aconnector->dc_sink);
2291
+ }
9642292 }
9652293
9662294 mutex_unlock(&dev->mode_config.mutex);
2295
+
2296
+ if (sink)
2297
+ dc_sink_release(sink);
9672298 return;
9682299 }
9692300
....@@ -971,14 +2302,20 @@
9712302 * TODO: temporary guard to look for proper fix
9722303 * if this sink is MST sink, we should not do anything
9732304 */
974
- if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
2305
+ if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2306
+ dc_sink_release(sink);
9752307 return;
2308
+ }
9762309
9772310 if (aconnector->dc_sink == sink) {
978
- /* We got a DP short pulse (Link Loss, DP CTS, etc...).
979
- * Do nothing!! */
2311
+ /*
2312
+ * We got a DP short pulse (Link Loss, DP CTS, etc...).
2313
+ * Do nothing!!
2314
+ */
9802315 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
9812316 aconnector->connector_id);
2317
+ if (sink)
2318
+ dc_sink_release(sink);
9822319 return;
9832320 }
9842321
....@@ -987,37 +2324,61 @@
9872324
9882325 mutex_lock(&dev->mode_config.mutex);
9892326
990
- /* 1. Update status of the drm connector
991
- * 2. Send an event and let userspace tell us what to do */
2327
+ /*
2328
+ * 1. Update status of the drm connector
2329
+ * 2. Send an event and let userspace tell us what to do
2330
+ */
9922331 if (sink) {
993
- /* TODO: check if we still need the S3 mode update workaround.
994
- * If yes, put it here. */
995
- if (aconnector->dc_sink)
996
- amdgpu_dm_remove_sink_from_freesync_module(
997
- connector);
2332
+ /*
2333
+ * TODO: check if we still need the S3 mode update workaround.
2334
+ * If yes, put it here.
2335
+ */
2336
+ if (aconnector->dc_sink) {
2337
+ amdgpu_dm_update_freesync_caps(connector, NULL);
2338
+ dc_sink_release(aconnector->dc_sink);
2339
+ }
9982340
9992341 aconnector->dc_sink = sink;
2342
+ dc_sink_retain(aconnector->dc_sink);
10002343 if (sink->dc_edid.length == 0) {
10012344 aconnector->edid = NULL;
2345
+ if (aconnector->dc_link->aux_mode) {
2346
+ drm_dp_cec_unset_edid(
2347
+ &aconnector->dm_dp_aux.aux);
2348
+ }
10022349 } else {
10032350 aconnector->edid =
1004
- (struct edid *) sink->dc_edid.raw_edid;
2351
+ (struct edid *)sink->dc_edid.raw_edid;
10052352
1006
-
1007
- drm_connector_update_edid_property(connector,
1008
- aconnector->edid);
2353
+ if (aconnector->dc_link->aux_mode)
2354
+ drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2355
+ aconnector->edid);
10092356 }
1010
- amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);
10112357
2358
+ drm_connector_update_edid_property(connector, aconnector->edid);
2359
+ amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2360
+ update_connector_ext_caps(aconnector);
10122361 } else {
1013
- amdgpu_dm_remove_sink_from_freesync_module(connector);
2362
+ drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2363
+ amdgpu_dm_update_freesync_caps(connector, NULL);
10142364 drm_connector_update_edid_property(connector, NULL);
10152365 aconnector->num_modes = 0;
2366
+ dc_sink_release(aconnector->dc_sink);
10162367 aconnector->dc_sink = NULL;
10172368 aconnector->edid = NULL;
2369
+#ifdef CONFIG_DRM_AMD_DC_HDCP
2370
+ /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2371
+ if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2372
+ connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2373
+#endif
10182374 }
10192375
10202376 mutex_unlock(&dev->mode_config.mutex);
2377
+
2378
+ update_subconnector_property(aconnector);
2379
+
2380
+ if (sink)
2381
+ dc_sink_release(sink);
10212382 }
10222383
10232384 static void handle_hpd_irq(void *param)
....@@ -1026,12 +2387,20 @@
10262387 struct drm_connector *connector = &aconnector->base;
10272388 struct drm_device *dev = connector->dev;
10282389 enum dc_connection_type new_connection_type = dc_connection_none;
2390
+#ifdef CONFIG_DRM_AMD_DC_HDCP
2391
+ struct amdgpu_device *adev = drm_to_adev(dev);
2392
+#endif
10292393
1030
- /* In case of failure or MST no need to update connector status or notify the OS
1031
- * since (for MST case) MST does this in it's own context.
2394
+ /*
2395
+ * In case of failure or MST no need to update connector status or notify the OS
2396
+ * since (for MST case) MST does this in its own context.
10322397 */
10332398 mutex_lock(&aconnector->hpd_lock);
10342399
2400
+#ifdef CONFIG_DRM_AMD_DC_HDCP
2401
+ if (adev->dm.hdcp_workqueue)
2402
+ hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2403
+#endif
10352404 if (aconnector->fake_enable)
10362405 aconnector->fake_enable = false;
10372406
....@@ -1050,8 +2419,11 @@
10502419 drm_kms_helper_hotplug_event(dev);
10512420
10522421 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
1053
- amdgpu_dm_update_connector_after_detect(aconnector);
2422
+ if (new_connection_type == dc_connection_none &&
2423
+ aconnector->dc_link->type == dc_connection_none)
2424
+ dm_set_dpms_off(aconnector->dc_link);
10542425
2426
+ amdgpu_dm_update_connector_after_detect(aconnector);
10552427
10562428 drm_modeset_lock_all(dev);
10572429 dm_restore_drm_connector_state(dev, connector);
....@@ -1125,7 +2497,7 @@
11252497 break;
11262498 }
11272499
1128
- /* check if there is new irq to be handle */
2500
+ /* check if there is new irq to be handled */
11292501 dret = drm_dp_dpcd_read(
11302502 &aconnector->dm_dp_aux.aux,
11312503 dpcd_addr,
....@@ -1150,15 +2522,27 @@
11502522 struct dc_link *dc_link = aconnector->dc_link;
11512523 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
11522524 enum dc_connection_type new_connection_type = dc_connection_none;
2525
+#ifdef CONFIG_DRM_AMD_DC_HDCP
2526
+ union hpd_irq_data hpd_irq_data;
2527
+ struct amdgpu_device *adev = drm_to_adev(dev);
11532528
1154
- /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2529
+ memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2530
+#endif
2531
+
2532
+ /*
2533
+ * TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
11552534 * conflict; once the i2c helper is implemented, this mutex should be
11562535 * retired.
11572536 */
11582537 if (dc_link->type != dc_connection_mst_branch)
11592538 mutex_lock(&aconnector->hpd_lock);
11602539
2540
+
2541
+#ifdef CONFIG_DRM_AMD_DC_HDCP
2542
+ if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2543
+#else
11612544 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2545
+#endif
11622546 !is_mst_root_connector) {
11632547 /* Downstream Port status changed. */
11642548 if (!dc_link_detect_sink(dc_link, &new_connection_type))
....@@ -1193,17 +2577,25 @@
11932577 drm_kms_helper_hotplug_event(dev);
11942578 }
11952579 }
2580
+#ifdef CONFIG_DRM_AMD_DC_HDCP
2581
+ if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2582
+ if (adev->dm.hdcp_workqueue)
2583
+ hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2584
+ }
2585
+#endif
11962586 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
11972587 (dc_link->type == dc_connection_mst_branch))
11982588 dm_handle_hpd_rx_irq(aconnector);
11992589
1200
- if (dc_link->type != dc_connection_mst_branch)
2590
+ if (dc_link->type != dc_connection_mst_branch) {
2591
+ drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
12012592 mutex_unlock(&aconnector->hpd_lock);
2593
+ }
12022594 }
12032595
12042596 static void register_hpd_handlers(struct amdgpu_device *adev)
12052597 {
1206
- struct drm_device *dev = adev->ddev;
2598
+ struct drm_device *dev = adev_to_drm(adev);
12072599 struct drm_connector *connector;
12082600 struct amdgpu_dm_connector *aconnector;
12092601 const struct dc_link *dc_link;
....@@ -1240,26 +2632,22 @@
12402632 }
12412633 }
12422634
2635
+#if defined(CONFIG_DRM_AMD_DC_SI)
12432636 /* Register IRQ sources and initialize IRQ callbacks */
1244
-static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2637
+static int dce60_register_irq_handlers(struct amdgpu_device *adev)
12452638 {
12462639 struct dc *dc = adev->dm.dc;
12472640 struct common_irq_params *c_irq_params;
12482641 struct dc_interrupt_params int_params = {0};
12492642 int r;
12502643 int i;
1251
- unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;
1252
-
1253
- if (adev->asic_type == CHIP_VEGA10 ||
1254
- adev->asic_type == CHIP_VEGA12 ||
1255
- adev->asic_type == CHIP_VEGA20 ||
1256
- adev->asic_type == CHIP_RAVEN)
1257
- client_id = SOC15_IH_CLIENTID_DCE;
2644
+ unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
12582645
12592646 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
12602647 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
12612648
1262
- /* Actions of amdgpu_irq_add_id():
2649
+ /*
2650
+ * Actions of amdgpu_irq_add_id():
12632651 * 1. Register a set() function with base driver.
12642652 * Base driver will call set() function to enable/disable an
12652653 * interrupt in DC hardware.
....@@ -1270,8 +2658,8 @@
12702658 * for acknowledging and handling. */
12712659
12722660 /* Use VBLANK interrupt */
1273
- for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
1274
- r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2661
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
2662
+ r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
12752663 if (r) {
12762664 DRM_ERROR("Failed to add crtc irq id!\n");
12772665 return r;
....@@ -1279,7 +2667,7 @@
12792667
12802668 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
12812669 int_params.irq_source =
1282
- dc_interrupt_to_irq_source(dc, i, 0);
2670
+ dc_interrupt_to_irq_source(dc, i+1 , 0);
12832671
12842672 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
12852673
....@@ -1325,8 +2713,114 @@
13252713
13262714 return 0;
13272715 }
2716
+#endif
13282717
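Both the DCE and DCN variants that follow repeat the two-step pattern spelled out in the "Actions of amdgpu_irq_add_id()" comment above. A condensed sketch of a single registration; the helper name is hypothetical, but every call in it appears in this file:

static int example_register_vblank_irq(struct amdgpu_device *adev,
				       unsigned int client_id, unsigned int src_id)
{
	struct dc_interrupt_params int_params = {0};
	struct common_irq_params *c_irq_params;
	int r;

	/* Step 1: the base driver owns enable/disable for this source. */
	r = amdgpu_irq_add_id(adev, client_id, src_id, &adev->crtc_irq);
	if (r)
		return r;

	/* Step 2: route the interrupt through amdgpu_dm_irq_handler() to DC. */
	int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
	int_params.irq_source = dc_interrupt_to_irq_source(adev->dm.dc, src_id, 0);

	c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
	c_irq_params->adev = adev;
	c_irq_params->irq_src = int_params.irq_source;

	amdgpu_dm_irq_register_interrupt(adev, &int_params,
					 dm_crtc_high_irq, c_irq_params);
	return 0;
}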
1329
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2718
+/* Register IRQ sources and initialize IRQ callbacks */
2719
+static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2720
+{
2721
+ struct dc *dc = adev->dm.dc;
2722
+ struct common_irq_params *c_irq_params;
2723
+ struct dc_interrupt_params int_params = {0};
2724
+ int r;
2725
+ int i;
2726
+ unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2727
+
2728
+ if (adev->asic_type >= CHIP_VEGA10)
2729
+ client_id = SOC15_IH_CLIENTID_DCE;
2730
+
2731
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2732
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2733
+
2734
+ /*
2735
+ * Actions of amdgpu_irq_add_id():
2736
+ * 1. Register a set() function with base driver.
2737
+ * Base driver will call set() function to enable/disable an
2738
+ * interrupt in DC hardware.
2739
+ * 2. Register amdgpu_dm_irq_handler().
2740
+ * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2741
+ * coming from DC hardware.
2742
+ * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2743
+ * for acknowledging and handling. */
2744
+
2745
+ /* Use VBLANK interrupt */
2746
+ for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2747
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2748
+ if (r) {
2749
+ DRM_ERROR("Failed to add crtc irq id!\n");
2750
+ return r;
2751
+ }
2752
+
2753
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2754
+ int_params.irq_source =
2755
+ dc_interrupt_to_irq_source(dc, i, 0);
2756
+
2757
+ c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2758
+
2759
+ c_irq_params->adev = adev;
2760
+ c_irq_params->irq_src = int_params.irq_source;
2761
+
2762
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
2763
+ dm_crtc_high_irq, c_irq_params);
2764
+ }
2765
+
2766
+ /* Use VUPDATE interrupt */
2767
+ for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2768
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2769
+ if (r) {
2770
+ DRM_ERROR("Failed to add vupdate irq id!\n");
2771
+ return r;
2772
+ }
2773
+
2774
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2775
+ int_params.irq_source =
2776
+ dc_interrupt_to_irq_source(dc, i, 0);
2777
+
2778
+ c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2779
+
2780
+ c_irq_params->adev = adev;
2781
+ c_irq_params->irq_src = int_params.irq_source;
2782
+
2783
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
2784
+ dm_vupdate_high_irq, c_irq_params);
2785
+ }
2786
+
2787
+ /* Use GRPH_PFLIP interrupt */
2788
+ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2789
+ i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2790
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2791
+ if (r) {
2792
+ DRM_ERROR("Failed to add page flip irq id!\n");
2793
+ return r;
2794
+ }
2795
+
2796
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2797
+ int_params.irq_source =
2798
+ dc_interrupt_to_irq_source(dc, i, 0);
2799
+
2800
+ c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2801
+
2802
+ c_irq_params->adev = adev;
2803
+ c_irq_params->irq_src = int_params.irq_source;
2804
+
2805
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
2806
+ dm_pflip_high_irq, c_irq_params);
2807
+
2808
+ }
2809
+
2810
+ /* HPD */
2811
+ r = amdgpu_irq_add_id(adev, client_id,
2812
+ VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2813
+ if (r) {
2814
+ DRM_ERROR("Failed to add hpd irq id!\n");
2815
+ return r;
2816
+ }
2817
+
2818
+ register_hpd_handlers(adev);
2819
+
2820
+ return 0;
2821
+}
2822
+
2823
+#if defined(CONFIG_DRM_AMD_DC_DCN)
13302824 /* Register IRQ sources and initialize IRQ callbacks */
13312825 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
13322826 {
....@@ -1339,7 +2833,8 @@
13392833 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
13402834 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
13412835
1342
- /* Actions of amdgpu_irq_add_id():
2836
+ /*
2837
+ * Actions of amdgpu_irq_add_id():
13432838 * 1. Register a set() function with base driver.
13442839 * Base driver will call set() function to enable/disable an
13452840 * interrupt in DC hardware.
....@@ -1348,7 +2843,7 @@
13482843 * coming from DC hardware.
13492844 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
13502845 * for acknowledging and handling.
1351
- * */
2846
+ */
13522847
13532848 /* Use VSTARTUP interrupt */
13542849 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
....@@ -1370,8 +2865,36 @@
13702865 c_irq_params->adev = adev;
13712866 c_irq_params->irq_src = int_params.irq_source;
13722867
2868
+ amdgpu_dm_irq_register_interrupt(
2869
+ adev, &int_params, dm_crtc_high_irq, c_irq_params);
2870
+ }
2871
+
2872
+ /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2873
+ * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2874
+ * to trigger at end of each vblank, regardless of state of the lock,
2875
+ * matching DCE behaviour.
2876
+ */
2877
+ for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2878
+ i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2879
+ i++) {
2880
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2881
+
2882
+ if (r) {
2883
+ DRM_ERROR("Failed to add vupdate irq id!\n");
2884
+ return r;
2885
+ }
2886
+
2887
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2888
+ int_params.irq_source =
2889
+ dc_interrupt_to_irq_source(dc, i, 0);
2890
+
2891
+ c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2892
+
2893
+ c_irq_params->adev = adev;
2894
+ c_irq_params->irq_src = int_params.irq_source;
2895
+
13732896 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1374
- dm_crtc_high_irq, c_irq_params);
2897
+ dm_vupdate_high_irq, c_irq_params);
13752898 }
13762899
13772900 /* Use GRPH_PFLIP interrupt */
....@@ -1412,54 +2935,277 @@
14122935 }
14132936 #endif
14142937
1415
-static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2938
+/*
2939
+ * Acquires the lock for the atomic state object and returns
2940
+ * the new atomic state.
2941
+ *
2942
+ * This should only be called during atomic check.
2943
+ */
2944
+static int dm_atomic_get_state(struct drm_atomic_state *state,
2945
+ struct dm_atomic_state **dm_state)
14162946 {
1417
- int r;
2947
+ struct drm_device *dev = state->dev;
2948
+ struct amdgpu_device *adev = drm_to_adev(dev);
2949
+ struct amdgpu_display_manager *dm = &adev->dm;
2950
+ struct drm_private_state *priv_state;
14182951
1419
- adev->mode_info.mode_config_initialized = true;
2952
+ if (*dm_state)
2953
+ return 0;
14202954
1421
- adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
1422
- adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2955
+ priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2956
+ if (IS_ERR(priv_state))
2957
+ return PTR_ERR(priv_state);
14232958
1424
- adev->ddev->mode_config.max_width = 16384;
1425
- adev->ddev->mode_config.max_height = 16384;
1426
-
1427
- adev->ddev->mode_config.preferred_depth = 24;
1428
- adev->ddev->mode_config.prefer_shadow = 1;
1429
- /* indicate support of immediate flip */
1430
- adev->ddev->mode_config.async_page_flip = true;
1431
-
1432
- adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
1433
-
1434
- r = amdgpu_display_modeset_create_props(adev);
1435
- if (r)
1436
- return r;
2959
+ *dm_state = to_dm_atomic_state(priv_state);
14372960
14382961 return 0;
14392962 }
14402963
2964
+static struct dm_atomic_state *
2965
+dm_atomic_get_new_state(struct drm_atomic_state *state)
2966
+{
2967
+ struct drm_device *dev = state->dev;
2968
+ struct amdgpu_device *adev = drm_to_adev(dev);
2969
+ struct amdgpu_display_manager *dm = &adev->dm;
2970
+ struct drm_private_obj *obj;
2971
+ struct drm_private_state *new_obj_state;
2972
+ int i;
2973
+
2974
+ for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2975
+ if (obj->funcs == dm->atomic_obj.funcs)
2976
+ return to_dm_atomic_state(new_obj_state);
2977
+ }
2978
+
2979
+ return NULL;
2980
+}
2981
+
2982
+static struct drm_private_state *
2983
+dm_atomic_duplicate_state(struct drm_private_obj *obj)
2984
+{
2985
+ struct dm_atomic_state *old_state, *new_state;
2986
+
2987
+ new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2988
+ if (!new_state)
2989
+ return NULL;
2990
+
2991
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2992
+
2993
+ old_state = to_dm_atomic_state(obj->state);
2994
+
2995
+ if (old_state && old_state->context)
2996
+ new_state->context = dc_copy_state(old_state->context);
2997
+
2998
+ if (!new_state->context) {
2999
+ kfree(new_state);
3000
+ return NULL;
3001
+ }
3002
+
3003
+ return &new_state->base;
3004
+}
3005
+
3006
+static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3007
+ struct drm_private_state *state)
3008
+{
3009
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3010
+
3011
+ if (dm_state && dm_state->context)
3012
+ dc_release_state(dm_state->context);
3013
+
3014
+ kfree(dm_state);
3015
+}
3016
+
3017
+static struct drm_private_state_funcs dm_atomic_state_funcs = {
3018
+ .atomic_duplicate_state = dm_atomic_duplicate_state,
3019
+ .atomic_destroy_state = dm_atomic_destroy_state,
3020
+};
3021
+
3022
+static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3023
+{
3024
+ struct dm_atomic_state *state;
3025
+ int r;
3026
+
3027
+ adev->mode_info.mode_config_initialized = true;
3028
+
3029
+ adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3030
+ adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3031
+
3032
+ adev_to_drm(adev)->mode_config.max_width = 16384;
3033
+ adev_to_drm(adev)->mode_config.max_height = 16384;
3034
+
3035
+ adev_to_drm(adev)->mode_config.preferred_depth = 24;
3036
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3037
+ /* indicates support for immediate flip */
3038
+ adev_to_drm(adev)->mode_config.async_page_flip = true;
3039
+
3040
+ adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3041
+
3042
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
3043
+ if (!state)
3044
+ return -ENOMEM;
3045
+
3046
+ state->context = dc_create_state(adev->dm.dc);
3047
+ if (!state->context) {
3048
+ kfree(state);
3049
+ return -ENOMEM;
3050
+ }
3051
+
3052
+ dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3053
+
3054
+ drm_atomic_private_obj_init(adev_to_drm(adev),
3055
+ &adev->dm.atomic_obj,
3056
+ &state->base,
3057
+ &dm_atomic_state_funcs);
3058
+
3059
+ r = amdgpu_display_modeset_create_props(adev);
3060
+ if (r) {
3061
+ dc_release_state(state->context);
3062
+ kfree(state);
3063
+ return r;
3064
+ }
3065
+
3066
+ r = amdgpu_dm_audio_init(adev);
3067
+ if (r) {
3068
+ dc_release_state(state->context);
3069
+ kfree(state);
3070
+ return r;
3071
+ }
3072
+
3073
+ return 0;
3074
+}
3075
+
3076
+#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3077
+#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3078
+#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3079
+
14413080 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
14423081 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3082
+
3083
+static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3084
+{
3085
+#if defined(CONFIG_ACPI)
3086
+ struct amdgpu_dm_backlight_caps caps;
3087
+
3088
+ memset(&caps, 0, sizeof(caps));
3089
+
3090
+ if (dm->backlight_caps.caps_valid)
3091
+ return;
3092
+
3093
+ amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3094
+ if (caps.caps_valid) {
3095
+ dm->backlight_caps.caps_valid = true;
3096
+ if (caps.aux_support)
3097
+ return;
3098
+ dm->backlight_caps.min_input_signal = caps.min_input_signal;
3099
+ dm->backlight_caps.max_input_signal = caps.max_input_signal;
3100
+ } else {
3101
+ dm->backlight_caps.min_input_signal =
3102
+ AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3103
+ dm->backlight_caps.max_input_signal =
3104
+ AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3105
+ }
3106
+#else
3107
+ if (dm->backlight_caps.aux_support)
3108
+ return;
3109
+
3110
+ dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3111
+ dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3112
+#endif
3113
+}
3114
+
3115
+static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3116
+ unsigned *min, unsigned *max)
3117
+{
3118
+ if (!caps)
3119
+ return 0;
3120
+
3121
+ if (caps->aux_support) {
3122
+ // Firmware limits are in nits, DC API wants millinits.
3123
+ *max = 1000 * caps->aux_max_input_signal;
3124
+ *min = 1000 * caps->aux_min_input_signal;
3125
+ } else {
3126
+ // Firmware limits are 8-bit, PWM control is 16-bit.
3127
+ *max = 0x101 * caps->max_input_signal;
3128
+ *min = 0x101 * caps->min_input_signal;
3129
+ }
3130
+ return 1;
3131
+}
3132
+
3133
+static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3134
+ uint32_t brightness)
3135
+{
3136
+ unsigned min, max;
3137
+
3138
+ if (!get_brightness_range(caps, &min, &max))
3139
+ return brightness;
3140
+
3141
+ // Rescale 0..255 to min..max
3142
+ return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3143
+ AMDGPU_MAX_BL_LEVEL);
3144
+}
3145
+
3146
+static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3147
+ uint32_t brightness)
3148
+{
3149
+ unsigned min, max;
3150
+
3151
+ if (!get_brightness_range(caps, &min, &max))
3152
+ return brightness;
3153
+
3154
+ if (brightness < min)
3155
+ return 0;
3156
+ // Rescale min..max to 0..255
3157
+ return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3158
+ max - min);
3159
+}
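A worked example of the PWM path, assuming the firmware defaults above (min_input_signal = 12, max_input_signal = 255): get_brightness_range() scales both by 0x101, giving min = 3084 and max = 65535. A user brightness of 128 then maps to 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432, and convert_brightness_to_user() maps 34432 back to 128.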
14433160
14443161 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
14453162 {
14463163 struct amdgpu_display_manager *dm = bl_get_data(bd);
3164
+ struct amdgpu_dm_backlight_caps caps;
3165
+ struct dc_link *link = NULL;
3166
+ u32 brightness;
3167
+ bool rc;
14473168
1448
- if (dc_link_set_backlight_level(dm->backlight_link,
1449
- bd->props.brightness, 0, 0))
1450
- return 0;
3169
+ amdgpu_dm_update_backlight_caps(dm);
3170
+ caps = dm->backlight_caps;
3171
+
3172
+ link = (struct dc_link *)dm->backlight_link;
3173
+
3174
+ brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3175
+ // Change brightness based on AUX property
3176
+ if (caps.aux_support)
3177
+ rc = dc_link_set_backlight_level_nits(link, true, brightness,
3178
+ AUX_BL_DEFAULT_TRANSITION_TIME_MS);
14513179 else
1452
- return 1;
3180
+ rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3181
+
3182
+ return rc ? 0 : 1;
14533183 }
14543184
14553185 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
14563186 {
14573187 struct amdgpu_display_manager *dm = bl_get_data(bd);
1458
- int ret = dc_link_get_backlight_level(dm->backlight_link);
3188
+ struct amdgpu_dm_backlight_caps caps;
14593189
1460
- if (ret == DC_ERROR_UNEXPECTED)
1461
- return bd->props.brightness;
1462
- return ret;
3190
+ amdgpu_dm_update_backlight_caps(dm);
3191
+ caps = dm->backlight_caps;
3192
+
3193
+ if (caps.aux_support) {
3194
+ struct dc_link *link = (struct dc_link *)dm->backlight_link;
3195
+ u32 avg, peak;
3196
+ bool rc;
3197
+
3198
+ rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3199
+ if (!rc)
3200
+ return bd->props.brightness;
3201
+ return convert_brightness_to_user(&caps, avg);
3202
+ } else {
3203
+ int ret = dc_link_get_backlight_level(dm->backlight_link);
3204
+
3205
+ if (ret == DC_ERROR_UNEXPECTED)
3206
+ return bd->props.brightness;
3207
+ return convert_brightness_to_user(&caps, ret);
3208
+ }
14633209 }
14643210
14653211 static const struct backlight_ops amdgpu_dm_backlight_ops = {
....@@ -1474,18 +3220,20 @@
14743220 char bl_name[16];
14753221 struct backlight_properties props = { 0 };
14763222
3223
+ amdgpu_dm_update_backlight_caps(dm);
3224
+
14773225 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
14783226 props.brightness = AMDGPU_MAX_BL_LEVEL;
14793227 props.type = BACKLIGHT_RAW;
14803228
14813229 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
1482
- dm->adev->ddev->primary->index);
3230
+ adev_to_drm(dm->adev)->primary->index);
14833231
14843232 dm->backlight_dev = backlight_device_register(bl_name,
1485
- dm->adev->ddev->dev,
1486
- dm,
1487
- &amdgpu_dm_backlight_ops,
1488
- &props);
3233
+ adev_to_drm(dm->adev)->dev,
3234
+ dm,
3235
+ &amdgpu_dm_backlight_ops,
3236
+ &props);
14893237
14903238 if (IS_ERR(dm->backlight_dev))
14913239 DRM_ERROR("DM: Backlight registration failed!\n");
....@@ -1496,38 +3244,41 @@
14963244 #endif
14973245
14983246 static int initialize_plane(struct amdgpu_display_manager *dm,
1499
- struct amdgpu_mode_info *mode_info,
1500
- int plane_id)
3247
+ struct amdgpu_mode_info *mode_info, int plane_id,
3248
+ enum drm_plane_type plane_type,
3249
+ const struct dc_plane_cap *plane_cap)
15013250 {
1502
- struct amdgpu_plane *plane;
3251
+ struct drm_plane *plane;
15033252 unsigned long possible_crtcs;
15043253 int ret = 0;
15053254
1506
- plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
1507
- mode_info->planes[plane_id] = plane;
1508
-
3255
+ plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
15093256 if (!plane) {
15103257 DRM_ERROR("KMS: Failed to allocate plane\n");
15113258 return -ENOMEM;
15123259 }
1513
- plane->base.type = mode_info->plane_type[plane_id];
3260
+ plane->type = plane_type;
15143261
15153262 /*
1516
- * HACK: IGT tests expect that each plane can only have one
1517
- * one possible CRTC. For now, set one CRTC for each
1518
- * plane that is not an underlay, but still allow multiple
1519
- * CRTCs for underlay planes.
3263
+ * HACK: IGT tests expect that the primary plane for a CRTC
3264
+ * can only have one possible CRTC. Only expose support for
3265
+ * any CRTC if they're not going to be used as a primary plane
3266
+ * for a CRTC - like overlay or underlay planes.
15203267 */
15213268 possible_crtcs = 1 << plane_id;
15223269 if (plane_id >= dm->dc->caps.max_streams)
15233270 possible_crtcs = 0xff;
15243271
1525
- ret = amdgpu_dm_plane_init(dm, mode_info->planes[plane_id], possible_crtcs);
3272
+ ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
15263273
15273274 if (ret) {
15283275 DRM_ERROR("KMS: Failed to initialize plane\n");
3276
+ kfree(plane);
15293277 return ret;
15303278 }
3279
+
3280
+ if (mode_info)
3281
+ mode_info->planes[plane_id] = plane;
15313282
15323283 return ret;
15333284 }
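As a concrete example of the possible_crtcs policy above: with dc->caps.max_streams = 4, plane_id 2 (a primary plane) gets possible_crtcs = 1 << 2 = 0x4 and is bound to CRTC 2 only, while plane_id 5 (an overlay, since 5 >= max_streams) gets 0xff and may be placed on any CRTC.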
....@@ -1541,7 +3292,8 @@
15413292
15423293 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
15433294 link->type != dc_connection_none) {
1544
- /* Event if registration failed, we should continue with
3295
+ /*
3296
+ * Even if registration fails, we should continue with
15453297 * DM initialization because not having a backlight control
15463298 * is better than a black screen.
15473299 */
....@@ -1554,7 +3306,8 @@
15543306 }
15553307
15563308
1557
-/* In this architecture, the association
3309
+/*
3310
+ * In this architecture, the association
15583311 * connector -> encoder -> crtc
15593312 * is not really required. The crtc and connector will hold the
15603313 * display_index as an abstraction to use with DAL component
....@@ -1569,42 +3322,74 @@
15693322 struct amdgpu_encoder *aencoder = NULL;
15703323 struct amdgpu_mode_info *mode_info = &adev->mode_info;
15713324 uint32_t link_cnt;
1572
- int32_t total_overlay_planes, total_primary_planes;
3325
+ int32_t primary_planes;
15733326 enum dc_connection_type new_connection_type = dc_connection_none;
3327
+ const struct dc_plane_cap *plane;
3328
+
3329
+ dm->display_indexes_num = dm->dc->caps.max_streams;
3330
+ /* Update the actually used number of crtc */
3331
+ adev->mode_info.num_crtc = adev->dm.display_indexes_num;
15743332
15753333 link_cnt = dm->dc->caps.max_links;
15763334 if (amdgpu_dm_mode_config_init(dm->adev)) {
15773335 DRM_ERROR("DM: Failed to initialize mode config\n");
1578
- return -1;
3336
+ return -EINVAL;
15793337 }
15803338
1581
- /* Identify the number of planes to be initialized */
1582
- total_overlay_planes = dm->dc->caps.max_slave_planes;
1583
- total_primary_planes = dm->dc->caps.max_planes - dm->dc->caps.max_slave_planes;
3339
+ /* There is one primary plane per CRTC */
3340
+ primary_planes = dm->dc->caps.max_streams;
3341
+ ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
15843342
1585
- /* First initialize overlay planes, index starting after primary planes */
1586
- for (i = (total_overlay_planes - 1); i >= 0; i--) {
1587
- if (initialize_plane(dm, mode_info, (total_primary_planes + i))) {
1588
- DRM_ERROR("KMS: Failed to initialize overlay plane\n");
1589
- goto fail;
1590
- }
1591
- }
3343
+ /*
3344
+ * Initialize primary planes, implicit planes for legacy IOCTLS.
3345
+ * Order is reversed to match iteration order in atomic check.
3346
+ */
3347
+ for (i = (primary_planes - 1); i >= 0; i--) {
3348
+ plane = &dm->dc->caps.planes[i];
15923349
1593
- /* Initialize primary planes */
1594
- for (i = (total_primary_planes - 1); i >= 0; i--) {
1595
- if (initialize_plane(dm, mode_info, i)) {
3350
+ if (initialize_plane(dm, mode_info, i,
3351
+ DRM_PLANE_TYPE_PRIMARY, plane)) {
15963352 DRM_ERROR("KMS: Failed to initialize primary plane\n");
15973353 goto fail;
15983354 }
15993355 }
16003356
1601
- for (i = 0; i < dm->dc->caps.max_streams; i++)
1602
- if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
1603
- DRM_ERROR("KMS: Failed to initialize crtc\n");
3357
+ /*
3358
+ * Initialize overlay planes, index starting after primary planes.
3359
+ * These planes have a higher DRM index than the primary planes since
3360
+ * they should be considered as having a higher z-order.
3361
+ * Order is reversed to match iteration order in atomic check.
3362
+ *
3363
+ * Only support DCN for now, and only expose one so we don't encourage
3364
+ * userspace to use up all the pipes.
3365
+ */
3366
+ for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3367
+ struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3368
+
3369
+ if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3370
+ continue;
3371
+
3372
+ if (!plane->blends_with_above || !plane->blends_with_below)
3373
+ continue;
3374
+
3375
+ if (!plane->pixel_format_support.argb8888)
3376
+ continue;
3377
+
3378
+ if (initialize_plane(dm, NULL, primary_planes + i,
3379
+ DRM_PLANE_TYPE_OVERLAY, plane)) {
3380
+ DRM_ERROR("KMS: Failed to initialize overlay plane\n");
16043381 goto fail;
16053382 }
16063383
1607
- dm->display_indexes_num = dm->dc->caps.max_streams;
3384
+ /* Only create one overlay plane. */
3385
+ break;
3386
+ }
3387
+
3388
+ for (i = 0; i < dm->dc->caps.max_streams; i++)
3389
+ if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3390
+ DRM_ERROR("KMS: Failed to initialize crtc\n");
3391
+ goto fail;
3392
+ }
16083393
16093394 /* loops over all connectors on the board */
16103395 for (i = 0; i < link_cnt; i++) {
....@@ -1647,6 +3432,8 @@
16473432 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
16483433 amdgpu_dm_update_connector_after_detect(aconnector);
16493434 register_backlight_device(dm, link);
3435
+ if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3436
+ amdgpu_dm_set_psr_caps(link);
16503437 }
16513438
16523439
....@@ -1654,6 +3441,17 @@
16543441
16553442 /* Software is initialized. Now we can register interrupt handlers. */
16563443 switch (adev->asic_type) {
3444
+#if defined(CONFIG_DRM_AMD_DC_SI)
3445
+ case CHIP_TAHITI:
3446
+ case CHIP_PITCAIRN:
3447
+ case CHIP_VERDE:
3448
+ case CHIP_OLAND:
3449
+ if (dce60_register_irq_handlers(dm->adev)) {
3450
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
3451
+ goto fail;
3452
+ }
3453
+ break;
3454
+#endif
16573455 case CHIP_BONAIRE:
16583456 case CHIP_HAWAII:
16593457 case CHIP_KAVERI:
....@@ -1675,8 +3473,16 @@
16753473 goto fail;
16763474 }
16773475 break;
1678
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
3476
+#if defined(CONFIG_DRM_AMD_DC_DCN)
16793477 case CHIP_RAVEN:
3478
+ case CHIP_NAVI12:
3479
+ case CHIP_NAVI10:
3480
+ case CHIP_NAVI14:
3481
+ case CHIP_RENOIR:
3482
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3483
+ case CHIP_SIENNA_CICHLID:
3484
+ case CHIP_NAVY_FLOUNDER:
3485
+#endif
16803486 if (dcn10_register_irq_handlers(dm->adev)) {
16813487 DRM_ERROR("DM: Failed to initialize IRQ\n");
16823488 goto fail;
....@@ -1688,21 +3494,18 @@
16883494 goto fail;
16893495 }
16903496
1691
- if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1692
- dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1693
-
16943497 return 0;
16953498 fail:
16963499 kfree(aencoder);
16973500 kfree(aconnector);
1698
- for (i = 0; i < dm->dc->caps.max_planes; i++)
1699
- kfree(mode_info->planes[i]);
1700
- return -1;
3501
+
3502
+ return -EINVAL;
17013503 }
17023504
17033505 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
17043506 {
17053507 drm_mode_config_cleanup(dm->ddev);
3508
+ drm_atomic_private_obj_fini(&dm->atomic_obj);
17063509 return;
17073510 }
17083511
....@@ -1710,7 +3513,7 @@
17103513 * amdgpu_display_funcs functions
17113514 *****************************************************************************/
17123515
1713
-/**
3516
+/*
17143517 * dm_bandwidth_update - program display watermarks
17153518 *
17163519 * @adev: amdgpu_device pointer
....@@ -1720,31 +3523,6 @@
17203523 static void dm_bandwidth_update(struct amdgpu_device *adev)
17213524 {
17223525 /* TODO: implement later */
1723
-}
1724
-
1725
-static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
1726
- struct drm_file *filp)
1727
-{
1728
- struct mod_freesync_params freesync_params;
1729
- uint8_t num_streams;
1730
- uint8_t i;
1731
-
1732
- struct amdgpu_device *adev = dev->dev_private;
1733
- int r = 0;
1734
-
1735
- /* Get freesync enable flag from DRM */
1736
-
1737
- num_streams = dc_get_current_stream_count(adev->dm.dc);
1738
-
1739
- for (i = 0; i < num_streams; i++) {
1740
- struct dc_stream_state *stream;
1741
- stream = dc_get_stream_at_index(adev->dm.dc, i);
1742
-
1743
- mod_freesync_update_state(adev->dm.freesync_module,
1744
- &stream, 1, &freesync_params);
1745
- }
1746
-
1747
- return r;
17483526 }
17493527
17503528 static const struct amdgpu_display_funcs dm_display_funcs = {
....@@ -1759,8 +3537,6 @@
17593537 dm_crtc_get_scanoutpos,/* called unconditionally */
17603538 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
17613539 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
1762
- .notify_freesync = amdgpu_notify_freesync,
1763
-
17643540 };
17653541
17663542 #if defined(CONFIG_DEBUG_KERNEL_DC)
....@@ -1772,16 +3548,15 @@
17723548 {
17733549 int ret;
17743550 int s3_state;
1775
- struct pci_dev *pdev = to_pci_dev(device);
1776
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
1777
- struct amdgpu_device *adev = drm_dev->dev_private;
3551
+ struct drm_device *drm_dev = dev_get_drvdata(device);
3552
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
17783553
17793554 ret = kstrtoint(buf, 0, &s3_state);
17803555
17813556 if (ret == 0) {
17823557 if (s3_state) {
17833558 dm_resume(adev);
1784
- drm_kms_helper_hotplug_event(adev->ddev);
3559
+ drm_kms_helper_hotplug_event(adev_to_drm(adev));
17853560 } else
17863561 dm_suspend(adev);
17873562 }
....@@ -1798,58 +3573,64 @@
17983573 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
17993574
18003575 switch (adev->asic_type) {
3576
+#if defined(CONFIG_DRM_AMD_DC_SI)
3577
+ case CHIP_TAHITI:
3578
+ case CHIP_PITCAIRN:
3579
+ case CHIP_VERDE:
3580
+ adev->mode_info.num_crtc = 6;
3581
+ adev->mode_info.num_hpd = 6;
3582
+ adev->mode_info.num_dig = 6;
3583
+ break;
3584
+ case CHIP_OLAND:
3585
+ adev->mode_info.num_crtc = 2;
3586
+ adev->mode_info.num_hpd = 2;
3587
+ adev->mode_info.num_dig = 2;
3588
+ break;
3589
+#endif
18013590 case CHIP_BONAIRE:
18023591 case CHIP_HAWAII:
18033592 adev->mode_info.num_crtc = 6;
18043593 adev->mode_info.num_hpd = 6;
18053594 adev->mode_info.num_dig = 6;
1806
- adev->mode_info.plane_type = dm_plane_type_default;
18073595 break;
18083596 case CHIP_KAVERI:
18093597 adev->mode_info.num_crtc = 4;
18103598 adev->mode_info.num_hpd = 6;
18113599 adev->mode_info.num_dig = 7;
1812
- adev->mode_info.plane_type = dm_plane_type_default;
18133600 break;
18143601 case CHIP_KABINI:
18153602 case CHIP_MULLINS:
18163603 adev->mode_info.num_crtc = 2;
18173604 adev->mode_info.num_hpd = 6;
18183605 adev->mode_info.num_dig = 6;
1819
- adev->mode_info.plane_type = dm_plane_type_default;
18203606 break;
18213607 case CHIP_FIJI:
18223608 case CHIP_TONGA:
18233609 adev->mode_info.num_crtc = 6;
18243610 adev->mode_info.num_hpd = 6;
18253611 adev->mode_info.num_dig = 7;
1826
- adev->mode_info.plane_type = dm_plane_type_default;
18273612 break;
18283613 case CHIP_CARRIZO:
18293614 adev->mode_info.num_crtc = 3;
18303615 adev->mode_info.num_hpd = 6;
18313616 adev->mode_info.num_dig = 9;
1832
- adev->mode_info.plane_type = dm_plane_type_carizzo;
18333617 break;
18343618 case CHIP_STONEY:
18353619 adev->mode_info.num_crtc = 2;
18363620 adev->mode_info.num_hpd = 6;
18373621 adev->mode_info.num_dig = 9;
1838
- adev->mode_info.plane_type = dm_plane_type_stoney;
18393622 break;
18403623 case CHIP_POLARIS11:
18413624 case CHIP_POLARIS12:
18423625 adev->mode_info.num_crtc = 5;
18433626 adev->mode_info.num_hpd = 5;
18443627 adev->mode_info.num_dig = 5;
1845
- adev->mode_info.plane_type = dm_plane_type_default;
18463628 break;
18473629 case CHIP_POLARIS10:
18483630 case CHIP_VEGAM:
18493631 adev->mode_info.num_crtc = 6;
18503632 adev->mode_info.num_hpd = 6;
18513633 adev->mode_info.num_dig = 6;
1852
- adev->mode_info.plane_type = dm_plane_type_default;
18533634 break;
18543635 case CHIP_VEGA10:
18553636 case CHIP_VEGA12:
....@@ -1857,16 +3638,34 @@
18573638 adev->mode_info.num_crtc = 6;
18583639 adev->mode_info.num_hpd = 6;
18593640 adev->mode_info.num_dig = 6;
1860
- adev->mode_info.plane_type = dm_plane_type_default;
18613641 break;
1862
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
3642
+#if defined(CONFIG_DRM_AMD_DC_DCN)
18633643 case CHIP_RAVEN:
18643644 adev->mode_info.num_crtc = 4;
18653645 adev->mode_info.num_hpd = 4;
18663646 adev->mode_info.num_dig = 4;
1867
- adev->mode_info.plane_type = dm_plane_type_default;
18683647 break;
18693648 #endif
3649
+ case CHIP_NAVI10:
3650
+ case CHIP_NAVI12:
3651
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3652
+ case CHIP_SIENNA_CICHLID:
3653
+ case CHIP_NAVY_FLOUNDER:
3654
+#endif
3655
+ adev->mode_info.num_crtc = 6;
3656
+ adev->mode_info.num_hpd = 6;
3657
+ adev->mode_info.num_dig = 6;
3658
+ break;
3659
+ case CHIP_NAVI14:
3660
+ adev->mode_info.num_crtc = 5;
3661
+ adev->mode_info.num_hpd = 5;
3662
+ adev->mode_info.num_dig = 5;
3663
+ break;
3664
+ case CHIP_RENOIR:
3665
+ adev->mode_info.num_crtc = 4;
3666
+ adev->mode_info.num_hpd = 4;
3667
+ adev->mode_info.num_dig = 4;
3668
+ break;
18703669 default:
18713670 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
18723671 return -EINVAL;
....@@ -1877,12 +3676,14 @@
18773676 if (adev->mode_info.funcs == NULL)
18783677 adev->mode_info.funcs = &dm_display_funcs;
18793678
1880
- /* Note: Do NOT change adev->audio_endpt_rreg and
3679
+ /*
3680
+ * Note: Do NOT change adev->audio_endpt_rreg and
18813681 * adev->audio_endpt_wreg because they are initialised in
1882
- * amdgpu_device_init() */
3682
+ * amdgpu_device_init()
3683
+ */
18833684 #if defined(CONFIG_DEBUG_KERNEL_DC)
18843685 device_create_file(
1885
- adev->ddev->dev,
3686
+ adev_to_drm(adev)->dev,
18863687 &dev_attr_s3_debug);
18873688 #endif
18883689
....@@ -1893,21 +3694,12 @@
18933694 struct dc_stream_state *new_stream,
18943695 struct dc_stream_state *old_stream)
18953696 {
1896
- if (!drm_atomic_crtc_needs_modeset(crtc_state))
1897
- return false;
1898
-
1899
- if (!crtc_state->enable)
1900
- return false;
1901
-
1902
- return crtc_state->active;
3697
+ return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
19033698 }
19043699
19053700 static bool modereset_required(struct drm_crtc_state *crtc_state)
19063701 {
1907
- if (!drm_atomic_crtc_needs_modeset(crtc_state))
1908
- return false;
1909
-
1910
- return !crtc_state->enable || !crtc_state->active;
3702
+ return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
19113703 }
19123704
19133705 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
....@@ -1920,64 +3712,114 @@
19203712 .destroy = amdgpu_dm_encoder_destroy,
19213713 };
19223714
1923
-static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
1924
- struct dc_plane_state *plane_state)
3715
+
3716
+static int fill_dc_scaling_info(const struct drm_plane_state *state,
3717
+ struct dc_scaling_info *scaling_info)
19253718 {
1926
- plane_state->src_rect.x = state->src_x >> 16;
1927
- plane_state->src_rect.y = state->src_y >> 16;
1928
- /*we ignore for now mantissa and do not to deal with floating pixels :(*/
1929
- plane_state->src_rect.width = state->src_w >> 16;
3719
+ int scale_w, scale_h;
19303720
1931
- if (plane_state->src_rect.width == 0)
1932
- return false;
3721
+ memset(scaling_info, 0, sizeof(*scaling_info));
19333722
1934
- plane_state->src_rect.height = state->src_h >> 16;
1935
- if (plane_state->src_rect.height == 0)
1936
- return false;
3723
+ /* Source is fixed 16.16 but we ignore mantissa for now... */
3724
+ scaling_info->src_rect.x = state->src_x >> 16;
3725
+ scaling_info->src_rect.y = state->src_y >> 16;
19373726
1938
- plane_state->dst_rect.x = state->crtc_x;
1939
- plane_state->dst_rect.y = state->crtc_y;
3727
+ /*
3728
+ * For reasons we don't (yet) fully understand a non-zero
3729
+ * src_y coordinate into an NV12 buffer can cause a
3730
+ * system hang. To avoid hangs (and maybe be overly cautious)
3731
+ * let's reject both non-zero src_x and src_y.
3732
+ *
3733
+ * We currently know of only one use-case to reproduce a
3734
+ * scenario with non-zero src_x and src_y for NV12, which
3735
+ * is to gesture the YouTube Android app into full screen
3736
+ * on ChromeOS.
3737
+ */
3738
+ if (state->fb &&
3739
+ state->fb->format->format == DRM_FORMAT_NV12 &&
3740
+ (scaling_info->src_rect.x != 0 ||
3741
+ scaling_info->src_rect.y != 0))
3742
+ return -EINVAL;
3760
+
3761
+ scaling_info->src_rect.width = state->src_w >> 16;
3762
+ if (scaling_info->src_rect.width == 0)
3763
+ return -EINVAL;
3764
+
3765
+ scaling_info->src_rect.height = state->src_h >> 16;
3766
+ if (scaling_info->src_rect.height == 0)
3767
+ return -EINVAL;
3768
+
3769
+ scaling_info->dst_rect.x = state->crtc_x;
3770
+ scaling_info->dst_rect.y = state->crtc_y;
19403771
19413772 if (state->crtc_w == 0)
1942
- return false;
3773
+ return -EINVAL;
19433774
1944
- plane_state->dst_rect.width = state->crtc_w;
3775
+ scaling_info->dst_rect.width = state->crtc_w;
19453776
19463777 if (state->crtc_h == 0)
1947
- return false;
3778
+ return -EINVAL;
19483779
1949
- plane_state->dst_rect.height = state->crtc_h;
3780
+ scaling_info->dst_rect.height = state->crtc_h;
19503781
1951
- plane_state->clip_rect = plane_state->dst_rect;
3782
+ /* DRM doesn't specify clipping on destination output. */
3783
+ scaling_info->clip_rect = scaling_info->dst_rect;
19523784
1953
- switch (state->rotation & DRM_MODE_ROTATE_MASK) {
1954
- case DRM_MODE_ROTATE_0:
1955
- plane_state->rotation = ROTATION_ANGLE_0;
1956
- break;
1957
- case DRM_MODE_ROTATE_90:
1958
- plane_state->rotation = ROTATION_ANGLE_90;
1959
- break;
1960
- case DRM_MODE_ROTATE_180:
1961
- plane_state->rotation = ROTATION_ANGLE_180;
1962
- break;
1963
- case DRM_MODE_ROTATE_270:
1964
- plane_state->rotation = ROTATION_ANGLE_270;
1965
- break;
1966
- default:
1967
- plane_state->rotation = ROTATION_ANGLE_0;
1968
- break;
3785
+ /* TODO: Validate scaling per-format with DC plane caps */
3786
+ scale_w = scaling_info->dst_rect.width * 1000 /
3787
+ scaling_info->src_rect.width;
3788
+
3789
+ if (scale_w < 250 || scale_w > 16000)
3790
+ return -EINVAL;
3791
+
3792
+ scale_h = scaling_info->dst_rect.height * 1000 /
3793
+ scaling_info->src_rect.height;
3794
+
3795
+ if (scale_h < 250 || scale_h > 16000)
3796
+ return -EINVAL;
3797
+
3798
+ /*
3799
+ * The "scaling_quality" can be ignored for now, quality = 0 has DC
3800
+ * assume reasonable defaults based on the format.
3801
+ */
3802
+
3803
+ return 0;
3804
+}
3805
+
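/*
 * Illustrative note, not part of the original patch: the 250..16000
 * bounds checked in fill_dc_scaling_info() are destination/source scale
 * factors in units of 1/1000. For example, a 1920-wide source scanned
 * out at 480 wide gives 480 * 1000 / 1920 = 250, the 0.25x minimum
 * downscale, while 120 -> 1920 gives 16000, the 16x maximum upscale;
 * anything outside that window is rejected with -EINVAL.
 */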
3806
+static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3807
+ uint64_t *tiling_flags, bool *tmz_surface)
3808
+{
3809
+ struct amdgpu_bo *rbo;
3810
+ int r;
3811
+
3812
+ if (!amdgpu_fb) {
3813
+ *tiling_flags = 0;
3814
+ *tmz_surface = false;
3815
+ return 0;
19693816 }
19703817
1971
- return true;
1972
-}
1973
-static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
1974
- uint64_t *tiling_flags)
1975
-{
1976
- struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
1977
- int r = amdgpu_bo_reserve(rbo, false);
3818
+ rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3819
+ r = amdgpu_bo_reserve(rbo, false);
19783820
19793821 if (unlikely(r)) {
1980
- // Don't show error msg. when return -ERESTARTSYS
3822
+ /* Don't show error message when returning -ERESTARTSYS */
19813823 if (r != -ERESTARTSYS)
19823824 DRM_ERROR("Unable to reserve buffer: %d\n", r);
19833825 return r;
....@@ -1986,91 +3828,149 @@
19863828 if (tiling_flags)
19873829 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
19883830
3831
+ if (tmz_surface)
3832
+ *tmz_surface = amdgpu_bo_encrypted(rbo);
3833
+
19893834 amdgpu_bo_unreserve(rbo);
19903835
19913836 return r;
19923837 }
19933838
1994
-static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
1995
- struct dc_plane_state *plane_state,
1996
- const struct amdgpu_framebuffer *amdgpu_fb)
3839
+static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
19973840 {
1998
- uint64_t tiling_flags;
1999
- unsigned int awidth;
2000
- const struct drm_framebuffer *fb = &amdgpu_fb->base;
2001
- int ret = 0;
2002
- struct drm_format_name_buf format_name;
3841
+ uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
20033842
2004
- ret = get_fb_info(
2005
- amdgpu_fb,
2006
- &tiling_flags);
3843
+ return offset ? (address + offset * 256) : 0;
3844
+}
20073845
2008
- if (ret)
2009
- return ret;
3846
+static int
3847
+fill_plane_dcc_attributes(struct amdgpu_device *adev,
3848
+ const struct amdgpu_framebuffer *afb,
3849
+ const enum surface_pixel_format format,
3850
+ const enum dc_rotation_angle rotation,
3851
+ const struct plane_size *plane_size,
3852
+ const union dc_tiling_info *tiling_info,
3853
+ const uint64_t info,
3854
+ struct dc_plane_dcc_param *dcc,
3855
+ struct dc_plane_address *address,
3856
+ bool force_disable_dcc)
3857
+{
3858
+ struct dc *dc = adev->dm.dc;
3859
+ struct dc_dcc_surface_param input;
3860
+ struct dc_surface_dcc_cap output;
3861
+ uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3862
+ uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3863
+ uint64_t dcc_address;
20103864
2011
- switch (fb->format->format) {
2012
- case DRM_FORMAT_C8:
2013
- plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
2014
- break;
2015
- case DRM_FORMAT_RGB565:
2016
- plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
2017
- break;
2018
- case DRM_FORMAT_XRGB8888:
2019
- case DRM_FORMAT_ARGB8888:
2020
- plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
2021
- break;
2022
- case DRM_FORMAT_XRGB2101010:
2023
- case DRM_FORMAT_ARGB2101010:
2024
- plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
2025
- break;
2026
- case DRM_FORMAT_XBGR2101010:
2027
- case DRM_FORMAT_ABGR2101010:
2028
- plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
2029
- break;
2030
- case DRM_FORMAT_NV21:
2031
- plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
2032
- break;
2033
- case DRM_FORMAT_NV12:
2034
- plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
2035
- break;
2036
- default:
2037
- DRM_ERROR("Unsupported screen format %s\n",
2038
- drm_get_format_name(fb->format->format, &format_name));
3865
+ memset(&input, 0, sizeof(input));
3866
+ memset(&output, 0, sizeof(output));
3867
+
3868
+ if (force_disable_dcc)
3869
+ return 0;
3870
+
3871
+ if (!offset)
3872
+ return 0;
3873
+
3874
+ if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3875
+ return 0;
3876
+
3877
+ if (!dc->cap_funcs.get_dcc_compression_cap)
20393878 return -EINVAL;
3879
+
3880
+ input.format = format;
3881
+ input.surface_size.width = plane_size->surface_size.width;
3882
+ input.surface_size.height = plane_size->surface_size.height;
3883
+ input.swizzle_mode = tiling_info->gfx9.swizzle;
3884
+
3885
+ if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3886
+ input.scan = SCAN_DIRECTION_HORIZONTAL;
3887
+ else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3888
+ input.scan = SCAN_DIRECTION_VERTICAL;
3889
+
3890
+ if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3891
+ return -EINVAL;
3892
+
3893
+ if (!output.capable)
3894
+ return -EINVAL;
3895
+
3896
+ if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3897
+ return -EINVAL;
3898
+
3899
+ dcc->enable = 1;
3900
+ dcc->meta_pitch =
3901
+ AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3902
+ dcc->independent_64b_blks = i64b;
3903
+
3904
+ dcc_address = get_dcc_address(afb->address, info);
3905
+ address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3906
+ address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3907
+
3908
+ return 0;
3909
+}
3910
+
3911
+static int
3912
+fill_plane_buffer_attributes(struct amdgpu_device *adev,
3913
+ const struct amdgpu_framebuffer *afb,
3914
+ const enum surface_pixel_format format,
3915
+ const enum dc_rotation_angle rotation,
3916
+ const uint64_t tiling_flags,
3917
+ union dc_tiling_info *tiling_info,
3918
+ struct plane_size *plane_size,
3919
+ struct dc_plane_dcc_param *dcc,
3920
+ struct dc_plane_address *address,
3921
+ bool tmz_surface,
3922
+ bool force_disable_dcc)
3923
+{
3924
+ const struct drm_framebuffer *fb = &afb->base;
3925
+ int ret;
3926
+
3927
+ memset(tiling_info, 0, sizeof(*tiling_info));
3928
+ memset(plane_size, 0, sizeof(*plane_size));
3929
+ memset(dcc, 0, sizeof(*dcc));
3930
+ memset(address, 0, sizeof(*address));
3931
+
3932
+ address->tmz_surface = tmz_surface;
3933
+
3934
+ if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3935
+ plane_size->surface_size.x = 0;
3936
+ plane_size->surface_size.y = 0;
3937
+ plane_size->surface_size.width = fb->width;
3938
+ plane_size->surface_size.height = fb->height;
3939
+ plane_size->surface_pitch =
3940
+ fb->pitches[0] / fb->format->cpp[0];
3941
+
3942
+ address->type = PLN_ADDR_TYPE_GRAPHICS;
3943
+ address->grph.addr.low_part = lower_32_bits(afb->address);
3944
+ address->grph.addr.high_part = upper_32_bits(afb->address);
3945
+ } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3946
+ uint64_t chroma_addr = afb->address + fb->offsets[1];
3947
+
3948
+ plane_size->surface_size.x = 0;
3949
+ plane_size->surface_size.y = 0;
3950
+ plane_size->surface_size.width = fb->width;
3951
+ plane_size->surface_size.height = fb->height;
3952
+ plane_size->surface_pitch =
3953
+ fb->pitches[0] / fb->format->cpp[0];
3954
+
3955
+ plane_size->chroma_size.x = 0;
3956
+ plane_size->chroma_size.y = 0;
3957
+ /* TODO: set these based on surface format */
3958
+ plane_size->chroma_size.width = fb->width / 2;
3959
+ plane_size->chroma_size.height = fb->height / 2;
3960
+
3961
+ plane_size->chroma_pitch =
3962
+ fb->pitches[1] / fb->format->cpp[1];
3963
+
3964
+ address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3965
+ address->video_progressive.luma_addr.low_part =
3966
+ lower_32_bits(afb->address);
3967
+ address->video_progressive.luma_addr.high_part =
3968
+ upper_32_bits(afb->address);
3969
+ address->video_progressive.chroma_addr.low_part =
3970
+ lower_32_bits(chroma_addr);
3971
+ address->video_progressive.chroma_addr.high_part =
3972
+ upper_32_bits(chroma_addr);
20403973 }
2041
-
2042
- if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
2043
- plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
2044
- plane_state->plane_size.grph.surface_size.x = 0;
2045
- plane_state->plane_size.grph.surface_size.y = 0;
2046
- plane_state->plane_size.grph.surface_size.width = fb->width;
2047
- plane_state->plane_size.grph.surface_size.height = fb->height;
2048
- plane_state->plane_size.grph.surface_pitch =
2049
- fb->pitches[0] / fb->format->cpp[0];
2050
- /* TODO: unhardcode */
2051
- plane_state->color_space = COLOR_SPACE_SRGB;
2052
-
2053
- } else {
2054
- awidth = ALIGN(fb->width, 64);
2055
- plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
2056
- plane_state->plane_size.video.luma_size.x = 0;
2057
- plane_state->plane_size.video.luma_size.y = 0;
2058
- plane_state->plane_size.video.luma_size.width = awidth;
2059
- plane_state->plane_size.video.luma_size.height = fb->height;
2060
- /* TODO: unhardcode */
2061
- plane_state->plane_size.video.luma_pitch = awidth;
2062
-
2063
- plane_state->plane_size.video.chroma_size.x = 0;
2064
- plane_state->plane_size.video.chroma_size.y = 0;
2065
- plane_state->plane_size.video.chroma_size.width = awidth;
2066
- plane_state->plane_size.video.chroma_size.height = fb->height;
2067
- plane_state->plane_size.video.chroma_pitch = awidth / 2;
2068
-
2069
- /* TODO: unhardcode */
2070
- plane_state->color_space = COLOR_SPACE_YCBCR709;
2071
- }
2072
-
2073
- memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info));
20743974
20753975 /* Fill GFX8 params */
20763976 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
....@@ -2083,93 +3983,312 @@
20833983 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
20843984
20853985 /* XXX fix me for VI */
2086
- plane_state->tiling_info.gfx8.num_banks = num_banks;
2087
- plane_state->tiling_info.gfx8.array_mode =
3986
+ tiling_info->gfx8.num_banks = num_banks;
3987
+ tiling_info->gfx8.array_mode =
20883988 DC_ARRAY_2D_TILED_THIN1;
2089
- plane_state->tiling_info.gfx8.tile_split = tile_split;
2090
- plane_state->tiling_info.gfx8.bank_width = bankw;
2091
- plane_state->tiling_info.gfx8.bank_height = bankh;
2092
- plane_state->tiling_info.gfx8.tile_aspect = mtaspect;
2093
- plane_state->tiling_info.gfx8.tile_mode =
3989
+ tiling_info->gfx8.tile_split = tile_split;
3990
+ tiling_info->gfx8.bank_width = bankw;
3991
+ tiling_info->gfx8.bank_height = bankh;
3992
+ tiling_info->gfx8.tile_aspect = mtaspect;
3993
+ tiling_info->gfx8.tile_mode =
20943994 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
20953995 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
20963996 == DC_ARRAY_1D_TILED_THIN1) {
2097
- plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3997
+ tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
20983998 }
20993999
2100
- plane_state->tiling_info.gfx8.pipe_config =
4000
+ tiling_info->gfx8.pipe_config =
21014001 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
21024002
21034003 if (adev->asic_type == CHIP_VEGA10 ||
21044004 adev->asic_type == CHIP_VEGA12 ||
21054005 adev->asic_type == CHIP_VEGA20 ||
4006
+ adev->asic_type == CHIP_NAVI10 ||
4007
+ adev->asic_type == CHIP_NAVI14 ||
4008
+ adev->asic_type == CHIP_NAVI12 ||
4009
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
4010
+ adev->asic_type == CHIP_SIENNA_CICHLID ||
4011
+ adev->asic_type == CHIP_NAVY_FLOUNDER ||
4012
+#endif
4013
+ adev->asic_type == CHIP_RENOIR ||
21064014 adev->asic_type == CHIP_RAVEN) {
21074015 /* Fill GFX9 params */
2108
- plane_state->tiling_info.gfx9.num_pipes =
4016
+ tiling_info->gfx9.num_pipes =
21094017 adev->gfx.config.gb_addr_config_fields.num_pipes;
2110
- plane_state->tiling_info.gfx9.num_banks =
4018
+ tiling_info->gfx9.num_banks =
21114019 adev->gfx.config.gb_addr_config_fields.num_banks;
2112
- plane_state->tiling_info.gfx9.pipe_interleave =
4020
+ tiling_info->gfx9.pipe_interleave =
21134021 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
2114
- plane_state->tiling_info.gfx9.num_shader_engines =
4022
+ tiling_info->gfx9.num_shader_engines =
21154023 adev->gfx.config.gb_addr_config_fields.num_se;
2116
- plane_state->tiling_info.gfx9.max_compressed_frags =
4024
+ tiling_info->gfx9.max_compressed_frags =
21174025 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
2118
- plane_state->tiling_info.gfx9.num_rb_per_se =
4026
+ tiling_info->gfx9.num_rb_per_se =
21194027 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
2120
- plane_state->tiling_info.gfx9.swizzle =
4028
+ tiling_info->gfx9.swizzle =
21214029 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
2122
- plane_state->tiling_info.gfx9.shaderEnable = 1;
4030
+ tiling_info->gfx9.shaderEnable = 1;
4031
+
4032
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
4033
+ if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4034
+ adev->asic_type == CHIP_NAVY_FLOUNDER)
4035
+ tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4036
+#endif
4037
+ ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
4038
+ plane_size, tiling_info,
4039
+ tiling_flags, dcc, address,
4040
+ force_disable_dcc);
4041
+ if (ret)
4042
+ return ret;
21234043 }
21244044
2125
- plane_state->visible = true;
2126
- plane_state->scaling_quality.h_taps_c = 0;
2127
- plane_state->scaling_quality.v_taps_c = 0;
2128
-
2129
- /* is this needed? is plane_state zeroed at allocation? */
2130
- plane_state->scaling_quality.h_taps = 0;
2131
- plane_state->scaling_quality.v_taps = 0;
2132
- plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;
2133
-
2134
- return ret;
2135
-
4045
+ return 0;
21364046 }
21374047
2138
-static int fill_plane_attributes(struct amdgpu_device *adev,
2139
- struct dc_plane_state *dc_plane_state,
2140
- struct drm_plane_state *plane_state,
2141
- struct drm_crtc_state *crtc_state)
4048
+static void
4049
+fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4050
+ bool *per_pixel_alpha, bool *global_alpha,
4051
+ int *global_alpha_value)
21424052 {
2143
- const struct amdgpu_framebuffer *amdgpu_fb =
2144
- to_amdgpu_framebuffer(plane_state->fb);
2145
- const struct drm_crtc *crtc = plane_state->crtc;
2146
- int ret = 0;
4053
+ *per_pixel_alpha = false;
4054
+ *global_alpha = false;
4055
+ *global_alpha_value = 0xff;
21474056
2148
- if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
4057
+ if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4058
+ return;
4059
+
4060
+ if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4061
+ static const uint32_t alpha_formats[] = {
4062
+ DRM_FORMAT_ARGB8888,
4063
+ DRM_FORMAT_RGBA8888,
4064
+ DRM_FORMAT_ABGR8888,
4065
+ };
4066
+ uint32_t format = plane_state->fb->format->format;
4067
+ unsigned int i;
4068
+
4069
+ for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4070
+ if (format == alpha_formats[i]) {
4071
+ *per_pixel_alpha = true;
4072
+ break;
4073
+ }
4074
+ }
4075
+ }
4076
+
4077
+ if (plane_state->alpha < 0xffff) {
4078
+ *global_alpha = true;
4079
+ *global_alpha_value = plane_state->alpha >> 8;
4080
+ }
4081
+}
4082
+
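/*
 * Illustrative note, not part of the original patch: DRM plane alpha is
 * 16-bit (0x0000..0xffff) while DC takes an 8-bit global alpha, hence
 * the >> 8 in fill_blending_from_plane_state(). A 50% alpha of 0x8000
 * becomes 0x80, and only values below the fully opaque 0xffff enable
 * global alpha at all.
 */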
4083
+static int
4084
+fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4085
+ const enum surface_pixel_format format,
4086
+ enum dc_color_space *color_space)
4087
+{
4088
+ bool full_range;
4089
+
4090
+ *color_space = COLOR_SPACE_SRGB;
4091
+
4092
+ /* DRM color properties only affect non-RGB formats. */
4093
+ if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4094
+ return 0;
4095
+
4096
+ full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4097
+
4098
+ switch (plane_state->color_encoding) {
4099
+ case DRM_COLOR_YCBCR_BT601:
4100
+ if (full_range)
4101
+ *color_space = COLOR_SPACE_YCBCR601;
4102
+ else
4103
+ *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4104
+ break;
4105
+
4106
+ case DRM_COLOR_YCBCR_BT709:
4107
+ if (full_range)
4108
+ *color_space = COLOR_SPACE_YCBCR709;
4109
+ else
4110
+ *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4111
+ break;
4112
+
4113
+ case DRM_COLOR_YCBCR_BT2020:
4114
+ if (full_range)
4115
+ *color_space = COLOR_SPACE_2020_YCBCR;
4116
+ else
4117
+ return -EINVAL;
4118
+ break;
4119
+
4120
+ default:
21494121 return -EINVAL;
4122
+ }
21504123
2151
- ret = fill_plane_attributes_from_fb(
2152
- crtc->dev->dev_private,
2153
- dc_plane_state,
2154
- amdgpu_fb);
4124
+ return 0;
4125
+}
21554126
4127
+static int
4128
+fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4129
+ const struct drm_plane_state *plane_state,
4130
+ const uint64_t tiling_flags,
4131
+ struct dc_plane_info *plane_info,
4132
+ struct dc_plane_address *address,
4133
+ bool tmz_surface,
4134
+ bool force_disable_dcc)
4135
+{
4136
+ const struct drm_framebuffer *fb = plane_state->fb;
4137
+ const struct amdgpu_framebuffer *afb =
4138
+ to_amdgpu_framebuffer(plane_state->fb);
4139
+ struct drm_format_name_buf format_name;
4140
+ int ret;
4141
+
4142
+ memset(plane_info, 0, sizeof(*plane_info));
4143
+
4144
+ switch (fb->format->format) {
4145
+ case DRM_FORMAT_C8:
4146
+ plane_info->format =
4147
+ SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4148
+ break;
4149
+ case DRM_FORMAT_RGB565:
4150
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4151
+ break;
4152
+ case DRM_FORMAT_XRGB8888:
4153
+ case DRM_FORMAT_ARGB8888:
4154
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4155
+ break;
4156
+ case DRM_FORMAT_XRGB2101010:
4157
+ case DRM_FORMAT_ARGB2101010:
4158
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4159
+ break;
4160
+ case DRM_FORMAT_XBGR2101010:
4161
+ case DRM_FORMAT_ABGR2101010:
4162
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4163
+ break;
4164
+ case DRM_FORMAT_XBGR8888:
4165
+ case DRM_FORMAT_ABGR8888:
4166
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4167
+ break;
4168
+ case DRM_FORMAT_NV21:
4169
+ plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4170
+ break;
4171
+ case DRM_FORMAT_NV12:
4172
+ plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4173
+ break;
4174
+ case DRM_FORMAT_P010:
4175
+ plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4176
+ break;
4177
+ case DRM_FORMAT_XRGB16161616F:
4178
+ case DRM_FORMAT_ARGB16161616F:
4179
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4180
+ break;
4181
+ case DRM_FORMAT_XBGR16161616F:
4182
+ case DRM_FORMAT_ABGR16161616F:
4183
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4184
+ break;
4185
+ default:
4186
+ DRM_ERROR(
4187
+ "Unsupported screen format %s\n",
4188
+ drm_get_format_name(fb->format->format, &format_name));
4189
+ return -EINVAL;
4190
+ }
4191
+
4192
+ switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4193
+ case DRM_MODE_ROTATE_0:
4194
+ plane_info->rotation = ROTATION_ANGLE_0;
4195
+ break;
4196
+ case DRM_MODE_ROTATE_90:
4197
+ plane_info->rotation = ROTATION_ANGLE_90;
4198
+ break;
4199
+ case DRM_MODE_ROTATE_180:
4200
+ plane_info->rotation = ROTATION_ANGLE_180;
4201
+ break;
4202
+ case DRM_MODE_ROTATE_270:
4203
+ plane_info->rotation = ROTATION_ANGLE_270;
4204
+ break;
4205
+ default:
4206
+ plane_info->rotation = ROTATION_ANGLE_0;
4207
+ break;
4208
+ }
4209
+
4210
+ plane_info->visible = true;
4211
+ plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4212
+
4213
+ plane_info->layer_index = 0;
4214
+
4215
+ ret = fill_plane_color_attributes(plane_state, plane_info->format,
4216
+ &plane_info->color_space);
21564217 if (ret)
21574218 return ret;
4219
+
4220
+ ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4221
+ plane_info->rotation, tiling_flags,
4222
+ &plane_info->tiling_info,
4223
+ &plane_info->plane_size,
4224
+ &plane_info->dcc, address, tmz_surface,
4225
+ force_disable_dcc);
4226
+ if (ret)
4227
+ return ret;
4228
+
4229
+ fill_blending_from_plane_state(
4230
+ plane_state, &plane_info->per_pixel_alpha,
4231
+ &plane_info->global_alpha, &plane_info->global_alpha_value);
4232
+
4233
+ return 0;
4234
+}
4235
+
4236
+static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4237
+ struct dc_plane_state *dc_plane_state,
4238
+ struct drm_plane_state *plane_state,
4239
+ struct drm_crtc_state *crtc_state)
4240
+{
4241
+ struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4242
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
4243
+ struct dc_scaling_info scaling_info;
4244
+ struct dc_plane_info plane_info;
4245
+ int ret;
4246
+ bool force_disable_dcc = false;
4247
+
4248
+ ret = fill_dc_scaling_info(plane_state, &scaling_info);
4249
+ if (ret)
4250
+ return ret;
4251
+
4252
+ dc_plane_state->src_rect = scaling_info.src_rect;
4253
+ dc_plane_state->dst_rect = scaling_info.dst_rect;
4254
+ dc_plane_state->clip_rect = scaling_info.clip_rect;
4255
+ dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4256
+
4257
+ force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4258
+ ret = fill_dc_plane_info_and_addr(adev, plane_state,
4259
+ dm_plane_state->tiling_flags,
4260
+ &plane_info,
4261
+ &dc_plane_state->address,
4262
+ dm_plane_state->tmz_surface,
4263
+ force_disable_dcc);
4264
+ if (ret)
4265
+ return ret;
4266
+
4267
+ dc_plane_state->format = plane_info.format;
4268
+ dc_plane_state->color_space = plane_info.color_space;
4270
+ dc_plane_state->plane_size = plane_info.plane_size;
4271
+ dc_plane_state->rotation = plane_info.rotation;
4272
+ dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4273
+ dc_plane_state->stereo_format = plane_info.stereo_format;
4274
+ dc_plane_state->tiling_info = plane_info.tiling_info;
4275
+ dc_plane_state->visible = plane_info.visible;
4276
+ dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4277
+ dc_plane_state->global_alpha = plane_info.global_alpha;
4278
+ dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4279
+ dc_plane_state->dcc = plane_info.dcc;
4280
+ dc_plane_state->layer_index = plane_info.layer_index; /* currently always 0 */
21584281
21594282 /*
21604283 * Always set input transfer function, since plane state is refreshed
21614284 * every time.
21624285 */
2163
- ret = amdgpu_dm_set_degamma_lut(crtc_state, dc_plane_state);
2164
- if (ret) {
2165
- dc_transfer_func_release(dc_plane_state->in_transfer_func);
2166
- dc_plane_state->in_transfer_func = NULL;
2167
- }
4286
+ ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4287
+ if (ret)
4288
+ return ret;
21684289
2169
- return ret;
4290
+ return 0;
21704291 }
2171
-
2172
-/*****************************************************************************/
21734292
21744293 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
21754294 const struct dm_connector_state *dm_state,
....@@ -2227,20 +4346,46 @@
22274346 }
22284347
22294348 static enum dc_color_depth
2230
-convert_color_depth_from_display_info(const struct drm_connector *connector)
4349
+convert_color_depth_from_display_info(const struct drm_connector *connector,
4350
+ bool is_y420, int requested_bpc)
22314351 {
2232
- struct dm_connector_state *dm_conn_state =
2233
- to_dm_connector_state(connector->state);
2234
- uint32_t bpc = connector->display_info.bpc;
4352
+ uint8_t bpc;
22354353
2236
- /* TODO: Remove this when there's support for max_bpc in drm */
2237
- if (dm_conn_state && bpc > dm_conn_state->max_bpc)
2238
- /* Round down to nearest even number. */
2239
- bpc = dm_conn_state->max_bpc - (dm_conn_state->max_bpc & 1);
4354
+ if (is_y420) {
4355
+ bpc = 8;
4356
+
4357
+ /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4358
+ if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4359
+ bpc = 16;
4360
+ else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4361
+ bpc = 12;
4362
+ else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4363
+ bpc = 10;
4364
+ } else {
4365
+ bpc = (uint8_t)connector->display_info.bpc;
4366
+ /* Assume 8 bpc by default if no bpc is specified. */
4367
+ bpc = bpc ? bpc : 8;
4368
+ }
4369
+
4370
+ if (requested_bpc > 0) {
4371
+ /*
4372
+ * Cap display bpc based on the user requested value.
4373
+ *
4374
+ * The value for state->max_bpc may not be correctly updated
4375
+ * depending on when the connector gets added to the state
4376
+ * or if this was called outside of atomic check, so it
4377
+ * can't be used directly.
4378
+ */
4379
+ bpc = min_t(u8, bpc, requested_bpc);
4380
+
4381
+ /* Round down to the nearest even number. */
4382
+ bpc = bpc - (bpc & 1);
4383
+ }
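/*
 * Illustrative note, not part of the original patch: with an EDID
 * reporting 12 bpc and requested_bpc = 11, min_t() yields 11 and
 * clearing the low bit (11 & 1) rounds down to the even value 10.
 */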
22404384
22414385 switch (bpc) {
22424386 case 0:
2243
- /* Temporary Work around, DRM don't parse color depth for
4387
+ /*
4388
+ * Temporary Work around, DRM doesn't parse color depth for
22444389 * EDID revision before 1.4
22454390 * TODO: Fix edid parsing
22464391 */
....@@ -2284,7 +4429,7 @@
22844429 * according to HDMI spec, we use YCbCr709 and YCbCr601
22854430 * respectively
22864431 */
2287
- if (dc_crtc_timing->pix_clk_khz > 27030) {
4432
+ if (dc_crtc_timing->pix_clk_100hz > 270300) {
22884433 if (dc_crtc_timing->flags.Y_ONLY)
22894434 color_space =
22904435 COLOR_SPACE_YCBCR709_LIMITED;
....@@ -2312,27 +4457,21 @@
23124457 return color_space;
23134458 }
23144459
2315
-static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
4460
+static bool adjust_colour_depth_from_display_info(
4461
+ struct dc_crtc_timing *timing_out,
4462
+ const struct drm_display_info *info)
23164463 {
2317
- if (timing_out->display_color_depth <= COLOR_DEPTH_888)
2318
- return;
2319
-
2320
- timing_out->display_color_depth--;
2321
-}
2322
-
2323
-static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
2324
- const struct drm_display_info *info)
2325
-{
4464
+ enum dc_color_depth depth = timing_out->display_color_depth;
23264465 int normalized_clk;
2327
- if (timing_out->display_color_depth <= COLOR_DEPTH_888)
2328
- return;
23294466 do {
2330
- normalized_clk = timing_out->pix_clk_khz;
4467
+ normalized_clk = timing_out->pix_clk_100hz / 10;
23314468 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
23324469 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
23334470 normalized_clk /= 2;
23344471 /* Adjusting pix clock following on HDMI spec based on colour depth */
2335
- switch (timing_out->display_color_depth) {
4472
+ switch (depth) {
4473
+ case COLOR_DEPTH_888:
4474
+ break;
23364475 case COLOR_DEPTH_101010:
23374476 normalized_clk = (normalized_clk * 30) / 24;
23384477 break;
....@@ -2343,26 +4482,33 @@
23434482 normalized_clk = (normalized_clk * 48) / 24;
23444483 break;
23454484 default:
2346
- return;
4485
+ /* The above depths are the only ones valid for HDMI. */
4486
+ return false;
23474487 }
2348
- if (normalized_clk <= info->max_tmds_clock)
2349
- return;
2350
- reduce_mode_colour_depth(timing_out);
2351
-
2352
- } while (timing_out->display_color_depth > COLOR_DEPTH_888);
2353
-
4488
+ if (normalized_clk <= info->max_tmds_clock) {
4489
+ timing_out->display_color_depth = depth;
4490
+ return true;
4491
+ }
4492
+ } while (--depth > COLOR_DEPTH_666);
4493
+ return false;
23544494 }
2355
-/*****************************************************************************/
23564495
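/*
 * Illustrative note, not part of the original patch: for a 4K@60 mode,
 * pix_clk_100hz = 5940000 so normalized_clk starts at 594000 kHz.
 * Against a 600000 kHz (HDMI 2.0) max_tmds_clock, 12 bpc needs
 * 594000 * 36 / 24 = 891000 and fails, 10 bpc needs 742500 and fails,
 * but COLOR_DEPTH_888 passes at 594000, so the loop settles on 8 bpc
 * and returns true.
 */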
2357
-static void
2358
-fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
2359
- const struct drm_display_mode *mode_in,
2360
- const struct drm_connector *connector)
4496
+static void fill_stream_properties_from_drm_display_mode(
4497
+ struct dc_stream_state *stream,
4498
+ const struct drm_display_mode *mode_in,
4499
+ const struct drm_connector *connector,
4500
+ const struct drm_connector_state *connector_state,
4501
+ const struct dc_stream_state *old_stream,
4502
+ int requested_bpc)
23614503 {
23624504 struct dc_crtc_timing *timing_out = &stream->timing;
23634505 const struct drm_display_info *info = &connector->display_info;
4506
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4507
+ struct hdmi_vendor_infoframe hv_frame;
4508
+ struct hdmi_avi_infoframe avi_frame;
23644509
2365
- memset(timing_out, 0, sizeof(struct dc_crtc_timing));
4510
+ memset(&hv_frame, 0, sizeof(hv_frame));
4511
+ memset(&avi_frame, 0, sizeof(avi_frame));
23664512
23674513 timing_out->h_border_left = 0;
23684514 timing_out->h_border_right = 0;
....@@ -2370,20 +4516,43 @@
23704516 timing_out->v_border_bottom = 0;
23714517 /* TODO: un-hardcode */
23724518 if (drm_mode_is_420_only(info, mode_in)
2373
- && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
4519
+ && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4520
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4521
+ else if (drm_mode_is_420_also(info, mode_in)
4522
+ && aconnector->force_yuv420_output)
23744523 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
23754524 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
2376
- && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
4525
+ && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
23774526 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
23784527 else
23794528 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
23804529
23814530 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
23824531 timing_out->display_color_depth = convert_color_depth_from_display_info(
2383
- connector);
4532
+ connector,
4533
+ (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4534
+ requested_bpc);
23844535 timing_out->scan_type = SCANNING_TYPE_NODATA;
23854536 timing_out->hdmi_vic = 0;
2386
- timing_out->vic = drm_match_cea_mode(mode_in);
4537
+
4538
+ if (old_stream) {
4539
+ timing_out->vic = old_stream->timing.vic;
4540
+ timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4541
+ timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4542
+ } else {
4543
+ timing_out->vic = drm_match_cea_mode(mode_in);
4544
+ if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4545
+ timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4546
+ if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4547
+ timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4548
+ }
4549
+
4550
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4551
+ drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4552
+ timing_out->vic = avi_frame.video_code;
4553
+ drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4554
+ timing_out->hdmi_vic = hv_frame.vic;
4555
+ }
23874556
23884557 timing_out->h_addressable = mode_in->crtc_hdisplay;
23894558 timing_out->h_total = mode_in->crtc_htotal;
....@@ -2397,19 +4566,21 @@
23974566 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
23984567 timing_out->v_sync_width =
23994568 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
2400
- timing_out->pix_clk_khz = mode_in->crtc_clock;
4569
+ timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
24014570 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
2402
- if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
2403
- timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
2404
- if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
2405
- timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
2406
-
2407
- stream->output_color_space = get_output_color_space(timing_out);
24084571
24094572 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
24104573 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
2411
- if (stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
2412
- adjust_colour_depth_from_display_info(timing_out, info);
4574
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4575
+ if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4576
+ drm_mode_is_420_also(info, mode_in) &&
4577
+ timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4578
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4579
+ adjust_colour_depth_from_display_info(timing_out, info);
4580
+ }
4581
+ }
4582
+
4583
+ stream->output_color_space = get_output_color_space(timing_out);
24134584 }
24144585
24154586 static void fill_audio_info(struct audio_info *audio_info,
....@@ -2425,9 +4596,9 @@
24254596
24264597 cea_revision = drm_connector->display_info.cea_rev;
24274598
2428
- strncpy(audio_info->display_name,
4599
+ strscpy(audio_info->display_name,
24294600 edid_caps->display_name,
2430
- AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
4601
+ AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
24314602
24324603 if (cea_revision >= 3) {
24334604 audio_info->mode_count = edid_caps->audio_mode_count;
....@@ -2529,7 +4700,7 @@
25294700 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
25304701 int refresh_rate = 0;
25314702
2532
- refresh_rate = (stream_set[j]->timing.pix_clk_khz*1000)/
4703
+ refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
25334704 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
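/*
 * Illustrative sketch, not part of the original patch: the refresh-rate
 * arithmetic above in standalone form. For 1080p60, pix_clk_100hz is
 * 1485000 and h_total * v_total is 2200 * 1125, so the result is
 * 148500000 / 2475000 = 60 Hz.
 */
static unsigned int refresh_hz_sketch(unsigned int pix_clk_100hz,
				      unsigned int h_total,
				      unsigned int v_total)
{
	/* 100 Hz units * 100 = Hz; one frame spans h_total * v_total clocks */
	return (pix_clk_100hz * 100) / (h_total * v_total);
}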
25344705 if (refresh_rate > highest_rfr) {
25354706 highest_rfr = refresh_rate;
....@@ -2552,9 +4723,10 @@
25524723 for (i = 0; i < context->stream_count ; i++) {
25534724 if (!context->streams[i])
25544725 continue;
2555
- /* TODO: add a function to read AMD VSDB bits and will set
4726
+ /*
4727
+ * TODO: add a function to read AMD VSDB bits and set
25564728 * crtc_sync_master.multi_sync_enabled flag
2557
- * For now its set to false
4729
+ * For now it's set to false
25584730 */
25594731 set_multisync_trigger_params(context->streams[i]);
25604732 }
....@@ -2564,13 +4736,25 @@
25644736 static struct dc_stream_state *
25654737 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
25664738 const struct drm_display_mode *drm_mode,
2567
- const struct dm_connector_state *dm_state)
4739
+ const struct dm_connector_state *dm_state,
4740
+ const struct dc_stream_state *old_stream,
4741
+ int requested_bpc)
25684742 {
25694743 struct drm_display_mode *preferred_mode = NULL;
25704744 struct drm_connector *drm_connector;
4745
+ const struct drm_connector_state *con_state =
4746
+ dm_state ? &dm_state->base : NULL;
25714747 struct dc_stream_state *stream = NULL;
25724748 struct drm_display_mode mode = *drm_mode;
25734749 bool native_mode_found = false;
4750
+ bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4751
+ int mode_refresh;
4752
+ int preferred_refresh = 0;
4753
+#if defined(CONFIG_DRM_AMD_DC_DCN)
4754
+ struct dsc_dec_dpcd_caps dsc_caps;
4755
+#endif
4756
+ uint32_t link_bandwidth_kbps;
4757
+
25744758 struct dc_sink *sink = NULL;
25754759 if (aconnector == NULL) {
25764760 DRM_ERROR("aconnector is NULL!\n");
....@@ -2580,20 +4764,12 @@
25804764 drm_connector = &aconnector->base;
25814765
25824766 if (!aconnector->dc_sink) {
2583
- /*
2584
- * Create dc_sink when necessary to MST
2585
- * Don't apply fake_sink to MST
2586
- */
2587
- if (aconnector->mst_port) {
2588
- dm_dp_mst_dc_sink_create(drm_connector);
2589
- return stream;
2590
- }
2591
-
25924767 sink = create_fake_sink(aconnector);
25934768 if (!sink)
25944769 return stream;
25954770 } else {
25964771 sink = aconnector->dc_sink;
4772
+ dc_sink_retain(sink);
25974773 }
25984774
25994775 stream = dc_create_stream_for_sink(sink);
....@@ -2602,6 +4778,11 @@
26024778 DRM_ERROR("Failed to create stream for sink!\n");
26034779 goto finish;
26044780 }
4781
+
4782
+ stream->dm_stream_context = aconnector;
4783
+
4784
+ stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4785
+ drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
26054786
26064787 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
26074788 /* Search for preferred mode */
....@@ -2616,8 +4797,11 @@
26164797 struct drm_display_mode,
26174798 head);
26184799
4800
+ mode_refresh = drm_mode_vrefresh(&mode);
4801
+
26194802 if (preferred_mode == NULL) {
2620
- /* This may not be an error, the use case is when we we have no
4803
+ /*
4804
+ * This may not be an error, the use case is when we have no
26214805 * usermode calls to reset and set mode upon hotplug. In this
26224806 * case, we call set mode ourselves to restore the previous mode
26234807 * and the modelist may not be filled in in time.
....@@ -2627,13 +4811,64 @@
26274811 decide_crtc_timing_for_drm_display_mode(
26284812 &mode, preferred_mode,
26294813 dm_state ? (dm_state->scaling != RMX_OFF) : false);
4814
+ preferred_refresh = drm_mode_vrefresh(preferred_mode);
26304815 }
26314816
26324817 if (!dm_state)
26334818 drm_mode_set_crtcinfo(&mode, 0);
26344819
2635
- fill_stream_properties_from_drm_display_mode(stream,
2636
- &mode, &aconnector->base);
4820
+ /*
4821
+ * If scaling is enabled and the refresh rate didn't change,
4822
+ * we copy the vic and polarities of the old timings.
4823
+ */
4824
+ if (!scale || mode_refresh != preferred_refresh)
4825
+ fill_stream_properties_from_drm_display_mode(stream,
4826
+ &mode, &aconnector->base, con_state, NULL, requested_bpc);
4827
+ else
4828
+ fill_stream_properties_from_drm_display_mode(stream,
4829
+ &mode, &aconnector->base, con_state, old_stream, requested_bpc);
4830
+
4831
+ stream->timing.flags.DSC = 0;
4832
+
4833
+ if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4834
+#if defined(CONFIG_DRM_AMD_DC_DCN)
4835
+ dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4836
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4837
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4838
+ &dsc_caps);
4839
+#endif
4840
+ link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4841
+ dc_link_get_link_cap(aconnector->dc_link));
4842
+
4843
+#if defined(CONFIG_DRM_AMD_DC_DCN)
4844
+ if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
4845
+ /* Set DSC policy according to dsc_clock_en */
4846
+ dc_dsc_policy_set_enable_dsc_when_not_needed(
4847
+ aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
4848
+
4849
+ if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4850
+ &dsc_caps,
4851
+ aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4852
+ link_bandwidth_kbps,
4853
+ &stream->timing,
4854
+ &stream->timing.dsc_cfg))
4855
+ stream->timing.flags.DSC = 1;
4856
+ /* Overwrite the stream flag if DSC is enabled through debugfs */
4857
+ if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
4858
+ stream->timing.flags.DSC = 1;
4859
+
4860
+ if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
4861
+ stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
4862
+
4863
+ if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
4864
+ stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
4865
+
4866
+ if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
4867
+ stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
4868
+ }
4869
+#endif
4870
+ }
4871
+
26374872 update_stream_scaling_settings(&mode, dm_state, stream);
26384873
26394874 fill_audio_info(
....@@ -2641,13 +4876,28 @@
26414876 drm_connector,
26424877 sink);
26434878
2644
- update_stream_signal(stream);
4879
+ update_stream_signal(stream, sink);
26454880
2646
- if (dm_state && dm_state->freesync_capable)
2647
- stream->ignore_msa_timing_param = true;
4881
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4882
+ mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
4883
+
4884
+ if (stream->link->psr_settings.psr_feature_enabled) {
4885
+ /*
4886
+ * Decide whether the stream supports VSC SDP colorimetry
4887
+ * before building the VSC info packet.
4888
+ */
4889
+ stream->use_vsc_sdp_for_colorimetry = false;
4890
+ if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4891
+ stream->use_vsc_sdp_for_colorimetry =
4892
+ aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4893
+ } else {
4894
+ if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4895
+ stream->use_vsc_sdp_for_colorimetry = true;
4896
+ }
4897
+ mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4898
+ }
26484899 finish:
2649
- if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
2650
- dc_sink_release(sink);
4900
+ dc_sink_release(sink);
26514901
26524902 return stream;
26534903 }
....@@ -2685,9 +4935,7 @@
26854935 if (WARN_ON(!state))
26864936 return;
26874937
2688
- crtc->state = &state->base;
2689
- crtc->state->crtc = crtc;
2690
-
4938
+ __drm_atomic_helper_crtc_reset(crtc, &state->base);
26914939 }
26924940
26934941 static struct drm_crtc_state *
....@@ -2711,17 +4959,55 @@
27114959 dc_stream_retain(state->stream);
27124960 }
27134961
4962
+ state->active_planes = cur->active_planes;
4963
+ state->vrr_infopacket = cur->vrr_infopacket;
4964
+ state->abm_level = cur->abm_level;
4965
+ state->vrr_supported = cur->vrr_supported;
4966
+ state->freesync_config = cur->freesync_config;
4967
+ state->crc_src = cur->crc_src;
4968
+ state->cm_has_degamma = cur->cm_has_degamma;
4969
+ state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4970
+
27144971 /* TODO Duplicate dc_stream after objects are stream object is flattened */
27154972
27164973 return &state->base;
27174974 }
27184975
4976
+static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4977
+{
4978
+ enum dc_irq_source irq_source;
4979
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4980
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4981
+ int rc;
4982
+
4983
+ irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4984
+
4985
+ rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4986
+
4987
+ DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4988
+ acrtc->crtc_id, enable ? "en" : "dis", rc);
4989
+ return rc;
4990
+}
27194991
27204992 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
27214993 {
27224994 enum dc_irq_source irq_source;
27234995 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2724
- struct amdgpu_device *adev = crtc->dev->dev_private;
4996
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4997
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4998
+ int rc = 0;
4999
+
5000
+ if (enable) {
5001
+ /* vblank irq on -> Only need vupdate irq in vrr mode */
5002
+ if (amdgpu_dm_vrr_active(acrtc_state))
5003
+ rc = dm_set_vupdate_irq(crtc, true);
5004
+ } else {
5005
+ /* vblank irq off -> vupdate irq off */
5006
+ rc = dm_set_vupdate_irq(crtc, false);
5007
+ }
5008
+
5009
+ if (rc)
5010
+ return rc;
27255011
27265012 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
27275013 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
....@@ -2747,8 +5033,12 @@
27475033 .atomic_duplicate_state = dm_crtc_duplicate_state,
27485034 .atomic_destroy_state = dm_crtc_destroy_state,
27495035 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5036
+ .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5037
+ .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5038
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
27505039 .enable_vblank = dm_enable_vblank,
27515040 .disable_vblank = dm_disable_vblank,
5041
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
27525042 };
27535043
27545044 static enum drm_connector_status
....@@ -2757,16 +5047,20 @@
27575047 bool connected;
27585048 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
27595049
2760
- /* Notes:
5050
+ /*
5051
+ * Notes:
27615052 * 1. This interface is NOT called in context of HPD irq.
27625053 * 2. This interface *is called* in context of user-mode ioctl. Which
2763
- * makes it a bad place for *any* MST-related activit. */
5054
+ * makes it a bad place for *any* MST-related activity.
5055
+ */
27645056
27655057 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
27665058 !aconnector->fake_enable)
27675059 connected = (aconnector->dc_sink != NULL);
27685060 else
27695061 connected = (aconnector->base.force == DRM_FORCE_ON);
5062
+
5063
+ update_subconnector_property(aconnector);
27705064
27715065 return (connected ? connector_status_connected :
27725066 connector_status_disconnected);
....@@ -2778,7 +5072,7 @@
27785072 uint64_t val)
27795073 {
27805074 struct drm_device *dev = connector->dev;
2781
- struct amdgpu_device *adev = dev->dev_private;
5075
+ struct amdgpu_device *adev = drm_to_adev(dev);
27825076 struct dm_connector_state *dm_old_state =
27835077 to_dm_connector_state(connector->state);
27845078 struct dm_connector_state *dm_new_state =
....@@ -2819,8 +5113,8 @@
28195113 } else if (property == adev->mode_info.underscan_property) {
28205114 dm_new_state->underscan_enable = val;
28215115 ret = 0;
2822
- } else if (property == adev->mode_info.max_bpc_property) {
2823
- dm_new_state->max_bpc = val;
5116
+ } else if (property == adev->mode_info.abm_level_property) {
5117
+ dm_new_state->abm_level = val;
28245118 ret = 0;
28255119 }
28265120
....@@ -2833,7 +5127,7 @@
28335127 uint64_t *val)
28345128 {
28355129 struct drm_device *dev = connector->dev;
2836
- struct amdgpu_device *adev = dev->dev_private;
5130
+ struct amdgpu_device *adev = drm_to_adev(dev);
28375131 struct dm_connector_state *dm_state =
28385132 to_dm_connector_state(state);
28395133 int ret = -EINVAL;
....@@ -2864,19 +5158,34 @@
28645158 } else if (property == adev->mode_info.underscan_property) {
28655159 *val = dm_state->underscan_enable;
28665160 ret = 0;
2867
- } else if (property == adev->mode_info.max_bpc_property) {
2868
- *val = dm_state->max_bpc;
5161
+ } else if (property == adev->mode_info.abm_level_property) {
5162
+ *val = dm_state->abm_level;
28695163 ret = 0;
28705164 }
5165
+
28715166 return ret;
5167
+}
5168
+
5169
+static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5170
+{
5171
+ struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5172
+
5173
+ drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
28725174 }
28735175
28745176 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
28755177 {
28765178 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
28775179 const struct dc_link *link = aconnector->dc_link;
2878
- struct amdgpu_device *adev = connector->dev->dev_private;
5180
+ struct amdgpu_device *adev = drm_to_adev(connector->dev);
28795181 struct amdgpu_display_manager *dm = &adev->dm;
5182
+
5183
+ /*
5184
+ * Call only if mst_mgr was initialized before, since it's not done
5185
+ * for all connector types.
5186
+ */
5187
+ if (aconnector->mst_mgr.dev)
5188
+ drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
28805189
28815190 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
28825191 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
....@@ -2888,8 +5197,23 @@
28885197 dm->backlight_dev = NULL;
28895198 }
28905199 #endif
5200
+
5201
+ if (aconnector->dc_em_sink)
5202
+ dc_sink_release(aconnector->dc_em_sink);
5203
+ aconnector->dc_em_sink = NULL;
5204
+ if (aconnector->dc_sink)
5205
+ dc_sink_release(aconnector->dc_sink);
5206
+ aconnector->dc_sink = NULL;
5207
+
5208
+ drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
28915209 drm_connector_unregister(connector);
28925210 drm_connector_cleanup(connector);
5211
+ if (aconnector->i2c) {
5212
+ i2c_del_adapter(&aconnector->i2c->base);
5213
+ kfree(aconnector->i2c);
5214
+ }
5215
+ kfree(aconnector->dm_dp_aux.aux.name);
5216
+
28935217 kfree(connector);
28945218 }
28955219
....@@ -2910,7 +5234,11 @@
29105234 state->underscan_enable = false;
29115235 state->underscan_hborder = 0;
29125236 state->underscan_vborder = 0;
2913
- state->max_bpc = 8;
5237
+ state->base.max_requested_bpc = 8;
5238
+ state->vcpi_slots = 0;
5239
+ state->pbn = 0;
5240
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5241
+ state->abm_level = amdgpu_dm_abm_level;
29145242
29155243 __drm_atomic_helper_connector_reset(connector, &state->base);
29165244 }
....@@ -2925,14 +5253,42 @@
29255253 struct dm_connector_state *new_state =
29265254 kmemdup(state, sizeof(*state), GFP_KERNEL);
29275255
2928
- if (new_state) {
2929
- __drm_atomic_helper_connector_duplicate_state(connector,
2930
- &new_state->base);
2931
- new_state->max_bpc = state->max_bpc;
2932
- return &new_state->base;
5256
+ if (!new_state)
5257
+ return NULL;
5258
+
5259
+ __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5260
+
5261
+ new_state->freesync_capable = state->freesync_capable;
5262
+ new_state->abm_level = state->abm_level;
5263
+ new_state->scaling = state->scaling;
5264
+ new_state->underscan_enable = state->underscan_enable;
5265
+ new_state->underscan_hborder = state->underscan_hborder;
5266
+ new_state->underscan_vborder = state->underscan_vborder;
5267
+ new_state->vcpi_slots = state->vcpi_slots;
5268
+ new_state->pbn = state->pbn;
5269
+ return &new_state->base;
5270
+}
5271
+
5272
+static int
5273
+amdgpu_dm_connector_late_register(struct drm_connector *connector)
5274
+{
5275
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
5276
+ to_amdgpu_dm_connector(connector);
5277
+ int r;
5278
+
5279
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5280
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5281
+ amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5282
+ r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5283
+ if (r)
5284
+ return r;
29335285 }
29345286
2935
- return NULL;
5287
+#if defined(CONFIG_DEBUG_FS)
5288
+ connector_debugfs_init(amdgpu_dm_connector);
5289
+#endif
5290
+
5291
+ return 0;
29365292 }
29375293
29385294 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
....@@ -2943,30 +5299,10 @@
29435299 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
29445300 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
29455301 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
2946
- .atomic_get_property = amdgpu_dm_connector_atomic_get_property
5302
+ .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5303
+ .late_register = amdgpu_dm_connector_late_register,
5304
+ .early_unregister = amdgpu_dm_connector_unregister
29475305 };
2948
-
2949
-static struct drm_encoder *best_encoder(struct drm_connector *connector)
2950
-{
2951
- int enc_id = connector->encoder_ids[0];
2952
- struct drm_mode_object *obj;
2953
- struct drm_encoder *encoder;
2954
-
2955
- DRM_DEBUG_DRIVER("Finding the best encoder\n");
2956
-
2957
- /* pick the encoder ids */
2958
- if (enc_id) {
2959
- obj = drm_mode_object_find(connector->dev, NULL, enc_id, DRM_MODE_OBJECT_ENCODER);
2960
- if (!obj) {
2961
- DRM_ERROR("Couldn't find a matching encoder for our connector\n");
2962
- return NULL;
2963
- }
2964
- encoder = obj_to_encoder(obj);
2965
- return encoder;
2966
- }
2967
- DRM_ERROR("No encoder id\n");
2968
- return NULL;
2969
-}
29705306
29715307 static int get_modes(struct drm_connector *connector)
29725308 {
....@@ -3000,17 +5336,20 @@
30005336 (edid->extensions + 1) * EDID_LENGTH,
30015337 &init_params);
30025338
3003
- if (aconnector->base.force == DRM_FORCE_ON)
5339
+ if (aconnector->base.force == DRM_FORCE_ON) {
30045340 aconnector->dc_sink = aconnector->dc_link->local_sink ?
30055341 aconnector->dc_link->local_sink :
30065342 aconnector->dc_em_sink;
5343
+ dc_sink_retain(aconnector->dc_sink);
5344
+ }
30075345 }
30085346
30095347 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
30105348 {
30115349 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
30125350
3013
- /* In case of headless boot with force on for DP managed connector
5351
+ /*
5352
+ * In case of a headless boot with force on for a DP managed connector,
30145353 * those settings have to be != 0 to get an initial modeset
30155354 */
30165355 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
....@@ -3023,22 +5362,72 @@
30235362 create_eml_sink(aconnector);
30245363 }
30255364
5365
+static struct dc_stream_state *
5366
+create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5367
+ const struct drm_display_mode *drm_mode,
5368
+ const struct dm_connector_state *dm_state,
5369
+ const struct dc_stream_state *old_stream)
5370
+{
5371
+ struct drm_connector *connector = &aconnector->base;
5372
+ struct amdgpu_device *adev = drm_to_adev(connector->dev);
5373
+ struct dc_stream_state *stream;
5374
+ const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5375
+ int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5376
+ enum dc_status dc_result = DC_OK;
5377
+
5378
+ do {
5379
+ stream = create_stream_for_sink(aconnector, drm_mode,
5380
+ dm_state, old_stream,
5381
+ requested_bpc);
5382
+ if (stream == NULL) {
5383
+ DRM_ERROR("Failed to create stream for sink!\n");
5384
+ break;
5385
+ }
5386
+
5387
+ dc_result = dc_validate_stream(adev->dm.dc, stream);
5388
+
5389
+ if (dc_result != DC_OK) {
5390
+ DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5391
+ drm_mode->hdisplay,
5392
+ drm_mode->vdisplay,
5393
+ drm_mode->clock,
5394
+ dc_result,
5395
+ dc_status_to_str(dc_result));
5396
+
5397
+ dc_stream_release(stream);
5398
+ stream = NULL;
5399
+ requested_bpc -= 2; /* lower bpc to retry validation */
5400
+ }
5401
+
5402
+ } while (stream == NULL && requested_bpc >= 6);
5403
+
5404
+ if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
5405
+ DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
5406
+
5407
+ aconnector->force_yuv420_output = true;
5408
+ stream = create_validate_stream_for_sink(aconnector, drm_mode,
5409
+ dm_state, old_stream);
5410
+ aconnector->force_yuv420_output = false;
5411
+ }
5412
+
5413
+ return stream;
5414
+}
5415
+
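The loop above steps the requested bpc down by 2 until DC validation passes or the 6 bpc floor is reached. A minimal userspace sketch of that fallback ladder, with a stub standing in for dc_validate_stream() (the 8 bpc cutoff is an assumption for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for dc_validate_stream(): pretend only <= 8 bpc fits the link. */
static bool validate(int bpc)
{
	return bpc <= 8;
}

int main(void)
{
	int requested_bpc = 12; /* e.g. taken from max_requested_bpc */
	bool ok = false;

	do {
		ok = validate(requested_bpc);
		if (!ok)
			requested_bpc -= 2; /* same 2-step ladder as the driver */
	} while (!ok && requested_bpc >= 6);

	printf("settled on %d bpc (%s)\n", requested_bpc, ok ? "ok" : "failed");
	return 0;
}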
30265416 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
30275417 struct drm_display_mode *mode)
30285418 {
30295419 int result = MODE_ERROR;
30305420 struct dc_sink *dc_sink;
3031
- struct amdgpu_device *adev = connector->dev->dev_private;
30325421 /* TODO: Unhardcode stream count */
30335422 struct dc_stream_state *stream;
30345423 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3035
- enum dc_status dc_result = DC_OK;
30365424
30375425 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
30385426 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
30395427 return result;
30405428
3041
- /* Only run this the first time mode_valid is called to initilialize
5429
+ /*
5430
+ * Only run this the first time mode_valid is called to initialize
30425431 * EDID mgmt
30435432 */
30445433 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
....@@ -3052,54 +5441,213 @@
30525441 goto fail;
30535442 }
30545443
3055
- stream = create_stream_for_sink(aconnector, mode, NULL);
3056
- if (stream == NULL) {
3057
- DRM_ERROR("Failed to create stream for sink!\n");
3058
- goto fail;
3059
- }
3060
-
3061
- dc_result = dc_validate_stream(adev->dm.dc, stream);
3062
-
3063
- if (dc_result == DC_OK)
5444
+ stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5445
+ if (stream) {
5446
+ dc_stream_release(stream);
30645447 result = MODE_OK;
3065
- else
3066
- DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
3067
- mode->vdisplay,
3068
- mode->hdisplay,
3069
- mode->clock,
3070
- dc_result);
3071
-
3072
- dc_stream_release(stream);
5448
+ }
30735449
30745450 fail:
30755451 /* TODO: error handling*/
30765452 return result;
30775453 }
30785454
5455
+static int fill_hdr_info_packet(const struct drm_connector_state *state,
5456
+ struct dc_info_packet *out)
5457
+{
5458
+ struct hdmi_drm_infoframe frame;
5459
+ unsigned char buf[30]; /* 26 + 4 */
5460
+ ssize_t len;
5461
+ int ret, i;
5462
+
5463
+ memset(out, 0, sizeof(*out));
5464
+
5465
+ if (!state->hdr_output_metadata)
5466
+ return 0;
5467
+
5468
+ ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5469
+ if (ret)
5470
+ return ret;
5471
+
5472
+ len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5473
+ if (len < 0)
5474
+ return (int)len;
5475
+
5476
+ /* Static metadata is a fixed 26 bytes + 4-byte header. */
5477
+ if (len != 30)
5478
+ return -EINVAL;
5479
+
5480
+ /* Prepare the infopacket for DC. */
5481
+ switch (state->connector->connector_type) {
5482
+ case DRM_MODE_CONNECTOR_HDMIA:
5483
+ out->hb0 = 0x87; /* type */
5484
+ out->hb1 = 0x01; /* version */
5485
+ out->hb2 = 0x1A; /* length */
5486
+ out->sb[0] = buf[3]; /* checksum */
5487
+ i = 1;
5488
+ break;
5489
+
5490
+ case DRM_MODE_CONNECTOR_DisplayPort:
5491
+ case DRM_MODE_CONNECTOR_eDP:
5492
+ out->hb0 = 0x00; /* sdp id, zero */
5493
+ out->hb1 = 0x87; /* type */
5494
+ out->hb2 = 0x1D; /* payload len - 1 */
5495
+ out->hb3 = (0x13 << 2); /* sdp version */
5496
+ out->sb[0] = 0x01; /* version */
5497
+ out->sb[1] = 0x1A; /* length */
5498
+ i = 2;
5499
+ break;
5500
+
5501
+ default:
5502
+ return -EINVAL;
5503
+ }
5504
+
5505
+ memcpy(&out->sb[i], &buf[4], 26);
5506
+ out->valid = true;
5507
+
5508
+ print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5509
+ sizeof(out->sb), false);
5510
+
5511
+ return 0;
5512
+}
5513
+
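For reference, the 30 bytes packed above split into a 4-byte infoframe header and the fixed 26-byte static-metadata payload; only the header framing differs between the HDMI and DP SDP paths. A small sketch of the byte bookkeeping (the buffer contents and the sb[] size are assumptions for illustration):

#include <stdio.h>
#include <string.h>

#define HDR_HEADER_LEN  4   /* type, version, length, checksum */
#define HDR_PAYLOAD_LEN 26  /* CTA-861-G static metadata */

int main(void)
{
	unsigned char buf[HDR_HEADER_LEN + HDR_PAYLOAD_LEN] = {
		0x87, 0x01, 0x1A /* type, version, length (0x1A == 26) */
	};
	unsigned char sb[32] = {0}; /* stand-in for dc_info_packet::sb */

	/* HDMI path: checksum goes to sb[0], payload follows it. */
	sb[0] = buf[3];
	memcpy(&sb[1], &buf[4], HDR_PAYLOAD_LEN);

	printf("payload occupies sb[1..%d]\n", HDR_PAYLOAD_LEN);
	return 0;
}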
5514
+static bool
5515
+is_hdr_metadata_different(const struct drm_connector_state *old_state,
5516
+ const struct drm_connector_state *new_state)
5517
+{
5518
+ struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5519
+ struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5520
+
5521
+ if (old_blob != new_blob) {
5522
+ if (old_blob && new_blob &&
5523
+ old_blob->length == new_blob->length)
5524
+ return memcmp(old_blob->data, new_blob->data,
5525
+ old_blob->length);
5526
+
5527
+ return true;
5528
+ }
5529
+
5530
+ return false;
5531
+}
5532
+
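The helper above treats two metadata blobs as equal only when both are absent, both are the same blob, or both have identical length and bytes. A standalone restatement of that decision tree (struct blob is a hypothetical stand-in for drm_property_blob):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct blob { size_t length; const void *data; };

static bool blobs_differ(const struct blob *old, const struct blob *new)
{
	if (old == new)          /* same blob, or both NULL */
		return false;
	if (old && new && old->length == new->length)
		return memcmp(old->data, new->data, old->length) != 0;
	return true;             /* one side missing, or lengths differ */
}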
5533
+static int
5534
+amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5535
+ struct drm_atomic_state *state)
5536
+{
5537
+ struct drm_connector_state *new_con_state =
5538
+ drm_atomic_get_new_connector_state(state, conn);
5539
+ struct drm_connector_state *old_con_state =
5540
+ drm_atomic_get_old_connector_state(state, conn);
5541
+ struct drm_crtc *crtc = new_con_state->crtc;
5542
+ struct drm_crtc_state *new_crtc_state;
5543
+ int ret;
5544
+
5545
+ if (!crtc)
5546
+ return 0;
5547
+
5548
+ if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5549
+ struct dc_info_packet hdr_infopacket;
5550
+
5551
+ ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5552
+ if (ret)
5553
+ return ret;
5554
+
5555
+ new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5556
+ if (IS_ERR(new_crtc_state))
5557
+ return PTR_ERR(new_crtc_state);
5558
+
5559
+ /*
5560
+ * DC considers the stream backends changed if the
5561
+ * static metadata changes. Forcing the modeset also
5562
+ * gives a simple way for userspace to switch from
5563
+ * 8bpc to 10bpc when setting the metadata to enter
5564
+ * or exit HDR.
5565
+ *
5566
+ * Changing the static metadata after it's been
5567
+ * set is permissible, however. So only force a
5568
+ * modeset if we're entering or exiting HDR.
5569
+ */
5570
+ new_crtc_state->mode_changed =
5571
+ !old_con_state->hdr_output_metadata ||
5572
+ !new_con_state->hdr_output_metadata;
5573
+ }
5574
+
5575
+ return 0;
5576
+}
5577
+
30795578 static const struct drm_connector_helper_funcs
30805579 amdgpu_dm_connector_helper_funcs = {
30815580 /*
3082
- * If hotplug a second bigger display in FB Con mode, bigger resolution
5581
+ * If hotplugging a second bigger display in FB Con mode, bigger resolution
30835582 * modes will be filtered by drm_mode_validate_size(), and those modes
3084
- * is missing after user start lightdm. So we need to renew modes list.
5583
+ * are missing after user start lightdm. So we need to renew modes list.
30855584 * in the get_modes callback, not just return the modes count
30865585 */
30875586 .get_modes = get_modes,
30885587 .mode_valid = amdgpu_dm_connector_mode_valid,
3089
- .best_encoder = best_encoder
5588
+ .atomic_check = amdgpu_dm_connector_atomic_check,
30905589 };
30915590
30925591 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
30935592 {
30945593 }
30955594
5595
+static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5596
+{
5597
+ struct drm_atomic_state *state = new_crtc_state->state;
5598
+ struct drm_plane *plane;
5599
+ int num_active = 0;
5600
+
5601
+ drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5602
+ struct drm_plane_state *new_plane_state;
5603
+
5604
+ /* Cursor planes are "fake". */
5605
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
5606
+ continue;
5607
+
5608
+ new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5609
+
5610
+ if (!new_plane_state) {
5611
+ /*
5612
+ * The plane is enable on the CRTC and hasn't changed
5613
+ * state. This means that it previously passed
5614
+ * validation and is therefore enabled.
5615
+ */
5616
+ num_active += 1;
5617
+ continue;
5618
+ }
5619
+
5620
+ /* The plane needs a framebuffer to be considered enabled. */
5621
+ num_active += (new_plane_state->fb != NULL);
5622
+ }
5623
+
5624
+ return num_active;
5625
+}
5626
+
5627
+static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5628
+ struct drm_crtc_state *new_crtc_state)
5629
+{
5630
+ struct dm_crtc_state *dm_new_crtc_state =
5631
+ to_dm_crtc_state(new_crtc_state);
5632
+
5633
+ dm_new_crtc_state->active_planes = 0;
5634
+
5635
+ if (!dm_new_crtc_state->stream)
5636
+ return;
5637
+
5638
+ dm_new_crtc_state->active_planes =
5639
+ count_crtc_active_planes(new_crtc_state);
5640
+}
5641
+
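The counting rule above has one subtlety: a plane bound to the CRTC but untouched by this commit carries no new state, yet it already passed validation, so it counts as active; a plane that is part of the commit counts only if its new state has a framebuffer. A distilled sketch (struct plane_view is hypothetical):

#include <stdbool.h>

struct plane_view {
	bool is_cursor;     /* cursor planes are "fake" and never count */
	bool in_new_state;  /* plane is part of this atomic commit */
	bool has_fb;        /* new state carries a framebuffer */
};

static int count_active(const struct plane_view *p, int n)
{
	int num_active = 0;

	for (int i = 0; i < n; i++) {
		if (p[i].is_cursor)
			continue;
		if (!p[i].in_new_state || p[i].has_fb)
			num_active++;
	}
	return num_active;
}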
30965642 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
30975643 struct drm_crtc_state *state)
30985644 {
3099
- struct amdgpu_device *adev = crtc->dev->dev_private;
5645
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
31005646 struct dc *dc = adev->dm.dc;
31015647 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
31025648 int ret = -EINVAL;
5649
+
5650
+ dm_update_crtc_active_planes(crtc, state);
31035651
31045652 if (unlikely(!dm_crtc_state->stream &&
31055653 modeset_required(state, NULL, dm_crtc_state->stream))) {
....@@ -3107,7 +5655,17 @@
31075655 return ret;
31085656 }
31095657
3110
- /* In some use cases, like reset, no stream is attached */
5658
+ /*
5659
+ * We require the primary plane to be enabled whenever the CRTC is, otherwise
5660
+ * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
5661
+ * planes are disabled, which is not supported by the hardware. And there is legacy
5662
+ * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
5663
+ */
5664
+ if (state->enable &&
5665
+ !(state->plane_mask & drm_plane_mask(crtc->primary)))
5666
+ return -EINVAL;
5667
+
5668
+ /* In some use cases, like reset, no stream is attached */
31115669 if (!dm_crtc_state->stream)
31125670 return 0;
31135671
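The primary-plane requirement added above reduces to one bit test: every plane owns a bit in the CRTC state's plane_mask, and drm_plane_mask() returns that bit. A runnable sketch with made-up mask values:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int plane_mask = 1u << 2 | 1u << 5; /* overlay + cursor set */
	unsigned int primary_bit = 1u << 0;          /* primary plane's bit */
	bool crtc_enabled = true;

	if (crtc_enabled && !(plane_mask & primary_bit))
		printf("-EINVAL: CRTC enabled without its primary plane\n");
	return 0;
}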
....@@ -3127,7 +5685,8 @@
31275685 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
31285686 .disable = dm_crtc_helper_disable,
31295687 .atomic_check = dm_crtc_helper_atomic_check,
3130
- .mode_fixup = dm_crtc_helper_mode_fixup
5688
+ .mode_fixup = dm_crtc_helper_mode_fixup,
5689
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
31315690 };
31325691
31335692 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
....@@ -3135,10 +5694,71 @@
31355694
31365695 }
31375696
5697
+static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5698
+{
5699
+ switch (display_color_depth) {
5700
+ case COLOR_DEPTH_666:
5701
+ return 6;
5702
+ case COLOR_DEPTH_888:
5703
+ return 8;
5704
+ case COLOR_DEPTH_101010:
5705
+ return 10;
5706
+ case COLOR_DEPTH_121212:
5707
+ return 12;
5708
+ case COLOR_DEPTH_141414:
5709
+ return 14;
5710
+ case COLOR_DEPTH_161616:
5711
+ return 16;
5712
+ default:
5713
+ break;
5714
+ }
5715
+ return 0;
5716
+}
5717
+
31385718 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
31395719 struct drm_crtc_state *crtc_state,
31405720 struct drm_connector_state *conn_state)
31415721 {
5722
+ struct drm_atomic_state *state = crtc_state->state;
5723
+ struct drm_connector *connector = conn_state->connector;
5724
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5725
+ struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5726
+ const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5727
+ struct drm_dp_mst_topology_mgr *mst_mgr;
5728
+ struct drm_dp_mst_port *mst_port;
5729
+ enum dc_color_depth color_depth;
5730
+ int clock, bpp = 0;
5731
+ bool is_y420 = false;
5732
+
5733
+ if (!aconnector->port || !aconnector->dc_sink)
5734
+ return 0;
5735
+
5736
+ mst_port = aconnector->port;
5737
+ mst_mgr = &aconnector->mst_port->mst_mgr;
5738
+
5739
+ if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5740
+ return 0;
5741
+
5742
+ if (!state->duplicated) {
5743
+ int max_bpc = conn_state->max_requested_bpc;
5744
+ is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5745
+ aconnector->force_yuv420_output;
5746
+ color_depth = convert_color_depth_from_display_info(connector,
5747
+ is_y420,
5748
+ max_bpc);
5749
+ bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5750
+ clock = adjusted_mode->clock;
5751
+ dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5752
+ }
5753
+ dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5754
+ mst_mgr,
5755
+ mst_port,
5756
+ dm_new_connector_state->pbn,
5757
+ dm_mst_get_pbn_divider(aconnector->dc_link));
5758
+ if (dm_new_connector_state->vcpi_slots < 0) {
5759
+ DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5760
+ return dm_new_connector_state->vcpi_slots;
5761
+ }
31425762 return 0;
31435763 }
31445764
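drm_dp_calc_pbn_mode() turns a pixel clock and bpp into MST payload bandwidth numbers; a PBN unit is 54/64 MBps and the spec pads by 0.6%, which is where the 64 and 1006 factors come from. A standalone approximation of the non-DSC formula (the constants are assumed from the upstream helper):

#include <stdint.h>
#include <stdio.h>

/* Rough equivalent of drm_dp_calc_pbn_mode(clock, bpp, false). */
static uint64_t calc_pbn(uint64_t clock_khz, uint64_t bpp)
{
	uint64_t num = clock_khz * bpp * 64 * 1006;
	uint64_t den = 8ULL * 54 * 1000 * 1000;

	return (num + den - 1) / den; /* DIV_ROUND_UP */
}

int main(void)
{
	/* 1920x1080@60 (148.5 MHz) at 24 bpp -> ~532 PBN */
	printf("PBN = %llu\n", (unsigned long long)calc_pbn(148500, 24));
	return 0;
}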
....@@ -3146,6 +5766,71 @@
31465766 .disable = dm_encoder_helper_disable,
31475767 .atomic_check = dm_encoder_helper_atomic_check
31485768 };
5769
+
5770
+#if defined(CONFIG_DRM_AMD_DC_DCN)
5771
+static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5772
+ struct dc_state *dc_state)
5773
+{
5774
+ struct dc_stream_state *stream = NULL;
5775
+ struct drm_connector *connector;
5776
+ struct drm_connector_state *new_con_state, *old_con_state;
5777
+ struct amdgpu_dm_connector *aconnector;
5778
+ struct dm_connector_state *dm_conn_state;
5779
+ int i, j, clock, bpp;
5780
+ int vcpi, pbn_div, pbn = 0;
5781
+
5782
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5783
+
5784
+ aconnector = to_amdgpu_dm_connector(connector);
5785
+
5786
+ if (!aconnector->port)
5787
+ continue;
5788
+
5789
+ if (!new_con_state || !new_con_state->crtc)
5790
+ continue;
5791
+
5792
+ dm_conn_state = to_dm_connector_state(new_con_state);
5793
+
5794
+ for (j = 0; j < dc_state->stream_count; j++) {
5795
+ stream = dc_state->streams[j];
5796
+ if (!stream)
5797
+ continue;
5798
+
5799
+ if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5800
+ break;
5801
+
5802
+ stream = NULL;
5803
+ }
5804
+
5805
+ if (!stream)
5806
+ continue;
5807
+
5808
+ if (stream->timing.flags.DSC != 1) {
5809
+ drm_dp_mst_atomic_enable_dsc(state,
5810
+ aconnector->port,
5811
+ dm_conn_state->pbn,
5812
+ 0,
5813
+ false);
5814
+ continue;
5815
+ }
5816
+
5817
+ pbn_div = dm_mst_get_pbn_divider(stream->link);
5818
+ bpp = stream->timing.dsc_cfg.bits_per_pixel;
5819
+ clock = stream->timing.pix_clk_100hz / 10;
5820
+ pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5821
+ vcpi = drm_dp_mst_atomic_enable_dsc(state,
5822
+ aconnector->port,
5823
+ pbn, pbn_div,
5824
+ true);
5825
+ if (vcpi < 0)
5826
+ return vcpi;
5827
+
5828
+ dm_conn_state->pbn = pbn;
5829
+ dm_conn_state->vcpi_slots = vcpi;
5830
+ }
5831
+ return 0;
5832
+}
5833
+#endif
31495834
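Note the dsc=true variant above feeds dsc_cfg.bits_per_pixel, which DC appears to store in 1/16-bpp fixed point (the helper divides it back out when dsc is set). A tiny sketch of that fixed-point convention, assuming the 1/16ths layout:

#include <stdio.h>

int main(void)
{
	int bpp_x16 = 192; /* DSC target of 12.0 bpp, stored in 1/16ths */

	printf("%d.%04d bpp\n", bpp_x16 / 16, (bpp_x16 % 16) * 625);
	return 0;
}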
31505835 static void dm_drm_plane_reset(struct drm_plane *plane)
31515836 {
....@@ -3156,12 +5841,9 @@
31565841
31575842 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
31585843 WARN_ON(amdgpu_state == NULL);
3159
-
3160
- if (amdgpu_state) {
3161
- plane->state = &amdgpu_state->base;
3162
- plane->state->plane = plane;
3163
- plane->state->rotation = DRM_MODE_ROTATE_0;
3164
- }
5844
+
5845
+ if (amdgpu_state)
5846
+ __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
31655847 }
31665848
31675849 static struct drm_plane_state *
....@@ -3181,10 +5863,14 @@
31815863 dc_plane_state_retain(dm_plane_state->dc_state);
31825864 }
31835865
5866
+ /* Framebuffer hasn't been updated yet, so retain old flags. */
5867
+ dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
5868
+ dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
5869
+
31845870 return &dm_plane_state->base;
31855871 }
31865872
3187
-void dm_drm_plane_destroy_state(struct drm_plane *plane,
5873
+static void dm_drm_plane_destroy_state(struct drm_plane *plane,
31885874 struct drm_plane_state *state)
31895875 {
31905876 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
....@@ -3211,14 +5897,12 @@
32115897 struct drm_gem_object *obj;
32125898 struct amdgpu_device *adev;
32135899 struct amdgpu_bo *rbo;
3214
- uint64_t chroma_addr = 0;
32155900 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
3216
- unsigned int awidth;
5901
+ struct list_head list;
5902
+ struct ttm_validate_buffer tv;
5903
+ struct ww_acquire_ctx ticket;
32175904 uint32_t domain;
32185905 int r;
3219
-
3220
- dm_plane_state_old = to_dm_plane_state(plane->state);
3221
- dm_plane_state_new = to_dm_plane_state(new_state);
32225906
32235907 if (!new_state->fb) {
32245908 DRM_DEBUG_DRIVER("No FB bound\n");
....@@ -3229,12 +5913,20 @@
32295913 obj = new_state->fb->obj[0];
32305914 rbo = gem_to_amdgpu_bo(obj);
32315915 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
3232
- r = amdgpu_bo_reserve(rbo, false);
3233
- if (unlikely(r != 0))
5916
+ INIT_LIST_HEAD(&list);
5917
+
5918
+ tv.bo = &rbo->tbo;
5919
+ tv.num_shared = 1;
5920
+ list_add(&tv.head, &list);
5921
+
5922
+ r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5923
+ if (r) {
5924
+ dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
32345925 return r;
5926
+ }
32355927
32365928 if (plane->type != DRM_PLANE_TYPE_CURSOR)
3237
- domain = amdgpu_display_supported_domains(adev);
5929
+ domain = amdgpu_display_supported_domains(adev, rbo->flags);
32385930 else
32395931 domain = AMDGPU_GEM_DOMAIN_VRAM;
32405932
....@@ -3242,43 +5934,47 @@
32425934 if (unlikely(r != 0)) {
32435935 if (r != -ERESTARTSYS)
32445936 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
3245
- amdgpu_bo_unreserve(rbo);
5937
+ ttm_eu_backoff_reservation(&ticket, &list);
32465938 return r;
32475939 }
32485940
32495941 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
32505942 if (unlikely(r != 0)) {
32515943 amdgpu_bo_unpin(rbo);
3252
- amdgpu_bo_unreserve(rbo);
5944
+ ttm_eu_backoff_reservation(&ticket, &list);
32535945 DRM_ERROR("%p bind failed\n", rbo);
32545946 return r;
32555947 }
3256
- amdgpu_bo_unreserve(rbo);
5948
+
5949
+ ttm_eu_backoff_reservation(&ticket, &list);
32575950
32585951 afb->address = amdgpu_bo_gpu_offset(rbo);
32595952
32605953 amdgpu_bo_ref(rbo);
32615954
3262
- if (dm_plane_state_new->dc_state &&
3263
- dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
3264
- struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5955
+ /**
5956
+ * We don't do surface updates on planes that have been newly created,
5957
+ * but we also don't have the afb->address during atomic check.
5958
+ *
5959
+ * Fill in buffer attributes depending on the address here, but only on
5960
+ * newly created planes since they're not being used by DC yet and this
5961
+ * won't modify global state.
5962
+ */
5963
+ dm_plane_state_old = to_dm_plane_state(plane->state);
5964
+ dm_plane_state_new = to_dm_plane_state(new_state);
32655965
3266
- if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3267
- plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
3268
- plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
3269
- } else {
3270
- awidth = ALIGN(new_state->fb->width, 64);
3271
- plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3272
- plane_state->address.video_progressive.luma_addr.low_part
3273
- = lower_32_bits(afb->address);
3274
- plane_state->address.video_progressive.luma_addr.high_part
3275
- = upper_32_bits(afb->address);
3276
- chroma_addr = afb->address + (u64)awidth * new_state->fb->height;
3277
- plane_state->address.video_progressive.chroma_addr.low_part
3278
- = lower_32_bits(chroma_addr);
3279
- plane_state->address.video_progressive.chroma_addr.high_part
3280
- = upper_32_bits(chroma_addr);
3281
- }
5966
+ if (dm_plane_state_new->dc_state &&
5967
+ dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5968
+ struct dc_plane_state *plane_state =
5969
+ dm_plane_state_new->dc_state;
5970
+ bool force_disable_dcc = !plane_state->dcc.enable;
5971
+
5972
+ fill_plane_buffer_attributes(
5973
+ adev, afb, plane_state->format, plane_state->rotation,
5974
+ dm_plane_state_new->tiling_flags,
5975
+ &plane_state->tiling_info, &plane_state->plane_size,
5976
+ &plane_state->dcc, &plane_state->address,
5977
+ dm_plane_state_new->tmz_surface, force_disable_dcc);
32825978 }
32835979
32845980 return 0;
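The pin path above unwinds strictly in reverse on failure: the TTM reservation is backed off after a failed pin, and both unpin and backoff run after a failed GART bind; on success only the reservation is dropped while the pin and binding stay. A stubbed sketch of that ladder (all functions are hypothetical stand-ins for ttm_eu_reserve_buffers(), amdgpu_bo_pin(), amdgpu_ttm_alloc_gart() and friends):

#include <stdio.h>

static int  reserve(void)   { return 0; }
static void backoff(void)   { }
static int  pin(void)       { return 0; }
static void unpin(void)     { }
static int  bind_gart(void) { return 0; }

static int prepare(void)
{
	if (reserve())
		return -1;
	if (pin()) {
		backoff();
		return -1;
	}
	if (bind_gart()) {
		unpin();
		backoff();
		return -1;
	}
	backoff(); /* reservation dropped; pin and GART binding remain */
	return 0;
}

int main(void)
{
	printf("prepare() = %d\n", prepare());
	return 0;
}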
....@@ -3305,18 +6001,44 @@
33056001 amdgpu_bo_unref(&rbo);
33066002 }
33076003
6004
+static int dm_plane_helper_check_state(struct drm_plane_state *state,
6005
+ struct drm_crtc_state *new_crtc_state)
6006
+{
6007
+ int max_downscale = 0;
6008
+ int max_upscale = INT_MAX;
6009
+
6010
+ /* TODO: These should be checked against DC plane caps */
6011
+ return drm_atomic_helper_check_plane_state(
6012
+ state, new_crtc_state, max_downscale, max_upscale, true, true);
6013
+}
6014
+
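The min/max scale arguments above are DRM 16.16 fixed-point factors (1 << 16 means 1:1); passing 0 and INT_MAX effectively waives the limit until real DC caps are plugged in, per the TODO. A runnable sketch of how such a scale factor is derived:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	int src_w = 1920, dst_w = 960; /* 2x downscale */
	int scale = (int)(((long long)src_w << 16) / dst_w);

	printf("scale = %d (%.2fx), allowed range [0, %d]\n",
	       scale, scale / 65536.0, INT_MAX);
	return 0;
}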
33086015 static int dm_plane_atomic_check(struct drm_plane *plane,
33096016 struct drm_plane_state *state)
33106017 {
3311
- struct amdgpu_device *adev = plane->dev->dev_private;
6018
+ struct amdgpu_device *adev = drm_to_adev(plane->dev);
33126019 struct dc *dc = adev->dm.dc;
3313
- struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6020
+ struct dm_plane_state *dm_plane_state;
6021
+ struct dc_scaling_info scaling_info;
6022
+ struct drm_crtc_state *new_crtc_state;
6023
+ int ret;
6024
+
6025
+ dm_plane_state = to_dm_plane_state(state);
33146026
33156027 if (!dm_plane_state->dc_state)
33166028 return 0;
33176029
3318
- if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
6030
+ new_crtc_state =
6031
+ drm_atomic_get_new_crtc_state(state->state, state->crtc);
6032
+ if (!new_crtc_state)
33196033 return -EINVAL;
6034
+
6035
+ ret = dm_plane_helper_check_state(state, new_crtc_state);
6036
+ if (ret)
6037
+ return ret;
6038
+
6039
+ ret = fill_dc_scaling_info(state, &scaling_info);
6040
+ if (ret)
6041
+ return ret;
33206042
33216043 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
33226044 return 0;
....@@ -3324,20 +6046,51 @@
33246046 return -EINVAL;
33256047 }
33266048
6049
+static int dm_plane_atomic_async_check(struct drm_plane *plane,
6050
+ struct drm_plane_state *new_plane_state)
6051
+{
6052
+ /* Only support async updates on cursor planes. */
6053
+ if (plane->type != DRM_PLANE_TYPE_CURSOR)
6054
+ return -EINVAL;
6055
+
6056
+ return 0;
6057
+}
6058
+
6059
+static void dm_plane_atomic_async_update(struct drm_plane *plane,
6060
+ struct drm_plane_state *new_state)
6061
+{
6062
+ struct drm_plane_state *old_state =
6063
+ drm_atomic_get_old_plane_state(new_state->state, plane);
6064
+
6065
+ swap(plane->state->fb, new_state->fb);
6066
+
6067
+ plane->state->src_x = new_state->src_x;
6068
+ plane->state->src_y = new_state->src_y;
6069
+ plane->state->src_w = new_state->src_w;
6070
+ plane->state->src_h = new_state->src_h;
6071
+ plane->state->crtc_x = new_state->crtc_x;
6072
+ plane->state->crtc_y = new_state->crtc_y;
6073
+ plane->state->crtc_w = new_state->crtc_w;
6074
+ plane->state->crtc_h = new_state->crtc_h;
6075
+
6076
+ handle_cursor_update(plane, old_state);
6077
+}
6078
+
33276079 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
33286080 .prepare_fb = dm_plane_helper_prepare_fb,
33296081 .cleanup_fb = dm_plane_helper_cleanup_fb,
33306082 .atomic_check = dm_plane_atomic_check,
6083
+ .atomic_async_check = dm_plane_atomic_async_check,
6084
+ .atomic_async_update = dm_plane_atomic_async_update
33316085 };
33326086
33336087 /*
33346088 * TODO: these are currently initialized to rgb formats only.
33356089 * For future use cases we should either initialize them dynamically based on
33366090 * plane capabilities, or initialize this array to all formats, so internal drm
3337
- * check will succeed, and let DC to implement proper check
6091
+ * check will succeed, and let DC implement proper check
33386092 */
33396093 static const uint32_t rgb_formats[] = {
3340
- DRM_FORMAT_RGB888,
33416094 DRM_FORMAT_XRGB8888,
33426095 DRM_FORMAT_ARGB8888,
33436096 DRM_FORMAT_RGBA8888,
....@@ -3345,64 +6098,137 @@
33456098 DRM_FORMAT_XBGR2101010,
33466099 DRM_FORMAT_ARGB2101010,
33476100 DRM_FORMAT_ABGR2101010,
6101
+ DRM_FORMAT_XBGR8888,
6102
+ DRM_FORMAT_ABGR8888,
6103
+ DRM_FORMAT_RGB565,
33486104 };
33496105
3350
-static const uint32_t yuv_formats[] = {
3351
- DRM_FORMAT_NV12,
3352
- DRM_FORMAT_NV21,
6106
+static const uint32_t overlay_formats[] = {
6107
+ DRM_FORMAT_XRGB8888,
6108
+ DRM_FORMAT_ARGB8888,
6109
+ DRM_FORMAT_RGBA8888,
6110
+ DRM_FORMAT_XBGR8888,
6111
+ DRM_FORMAT_ABGR8888,
6112
+ DRM_FORMAT_RGB565
33536113 };
33546114
33556115 static const u32 cursor_formats[] = {
33566116 DRM_FORMAT_ARGB8888
33576117 };
33586118
3359
-static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
3360
- struct amdgpu_plane *aplane,
3361
- unsigned long possible_crtcs)
6119
+static int get_plane_formats(const struct drm_plane *plane,
6120
+ const struct dc_plane_cap *plane_cap,
6121
+ uint32_t *formats, int max_formats)
33626122 {
3363
- int res = -EPERM;
6123
+ int i, num_formats = 0;
33646124
3365
- switch (aplane->base.type) {
6125
+ /*
6126
+ * TODO: Query support for each group of formats directly from
6127
+ * DC plane caps. This will require adding more formats to the
6128
+ * caps list.
6129
+ */
6130
+
6131
+ switch (plane->type) {
33666132 case DRM_PLANE_TYPE_PRIMARY:
3367
- res = drm_universal_plane_init(
3368
- dm->adev->ddev,
3369
- &aplane->base,
3370
- possible_crtcs,
3371
- &dm_plane_funcs,
3372
- rgb_formats,
3373
- ARRAY_SIZE(rgb_formats),
3374
- NULL, aplane->base.type, NULL);
6133
+ for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6134
+ if (num_formats >= max_formats)
6135
+ break;
6136
+
6137
+ formats[num_formats++] = rgb_formats[i];
6138
+ }
6139
+
6140
+ if (plane_cap && plane_cap->pixel_format_support.nv12)
6141
+ formats[num_formats++] = DRM_FORMAT_NV12;
6142
+ if (plane_cap && plane_cap->pixel_format_support.p010)
6143
+ formats[num_formats++] = DRM_FORMAT_P010;
6144
+ if (plane_cap && plane_cap->pixel_format_support.fp16) {
6145
+ formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6146
+ formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6147
+ formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6148
+ formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6149
+ }
33756150 break;
6151
+
33766152 case DRM_PLANE_TYPE_OVERLAY:
3377
- res = drm_universal_plane_init(
3378
- dm->adev->ddev,
3379
- &aplane->base,
3380
- possible_crtcs,
3381
- &dm_plane_funcs,
3382
- yuv_formats,
3383
- ARRAY_SIZE(yuv_formats),
3384
- NULL, aplane->base.type, NULL);
6153
+ for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6154
+ if (num_formats >= max_formats)
6155
+ break;
6156
+
6157
+ formats[num_formats++] = overlay_formats[i];
6158
+ }
33856159 break;
6160
+
33866161 case DRM_PLANE_TYPE_CURSOR:
3387
- res = drm_universal_plane_init(
3388
- dm->adev->ddev,
3389
- &aplane->base,
3390
- possible_crtcs,
3391
- &dm_plane_funcs,
3392
- cursor_formats,
3393
- ARRAY_SIZE(cursor_formats),
3394
- NULL, aplane->base.type, NULL);
6162
+ for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6163
+ if (num_formats >= max_formats)
6164
+ break;
6165
+
6166
+ formats[num_formats++] = cursor_formats[i];
6167
+ }
33956168 break;
33966169 }
33976170
3398
- drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs);
6171
+ return num_formats;
6172
+}
6173
+
6174
+static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6175
+ struct drm_plane *plane,
6176
+ unsigned long possible_crtcs,
6177
+ const struct dc_plane_cap *plane_cap)
6178
+{
6179
+ uint32_t formats[32];
6180
+ int num_formats;
6181
+ int res = -EPERM;
6182
+ unsigned int supported_rotations;
6183
+
6184
+ num_formats = get_plane_formats(plane, plane_cap, formats,
6185
+ ARRAY_SIZE(formats));
6186
+
6187
+ res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6188
+ &dm_plane_funcs, formats, num_formats,
6189
+ NULL, plane->type, NULL);
6190
+ if (res)
6191
+ return res;
6192
+
6193
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6194
+ plane_cap && plane_cap->per_pixel_alpha) {
6195
+ unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6196
+ BIT(DRM_MODE_BLEND_PREMULTI);
6197
+
6198
+ drm_plane_create_alpha_property(plane);
6199
+ drm_plane_create_blend_mode_property(plane, blend_caps);
6200
+ }
6201
+
6202
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6203
+ plane_cap &&
6204
+ (plane_cap->pixel_format_support.nv12 ||
6205
+ plane_cap->pixel_format_support.p010)) {
6206
+ /* This only affects YUV formats. */
6207
+ drm_plane_create_color_properties(
6208
+ plane,
6209
+ BIT(DRM_COLOR_YCBCR_BT601) |
6210
+ BIT(DRM_COLOR_YCBCR_BT709) |
6211
+ BIT(DRM_COLOR_YCBCR_BT2020),
6212
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6213
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6214
+ DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6215
+ }
6216
+
6217
+ supported_rotations =
6218
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6219
+ DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6220
+
6221
+ if (dm->adev->asic_type >= CHIP_BONAIRE)
6222
+ drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6223
+ supported_rotations);
6224
+
6225
+ drm_plane_helper_add(plane, &dm_plane_helper_funcs);
33996226
34006227 /* Create (reset) the plane state */
3401
- if (aplane->base.funcs->reset)
3402
- aplane->base.funcs->reset(&aplane->base);
6228
+ if (plane->funcs->reset)
6229
+ plane->funcs->reset(plane);
34036230
3404
-
3405
- return res;
6231
+ return 0;
34066232 }
34076233
34086234 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
....@@ -3410,7 +6236,7 @@
34106236 uint32_t crtc_index)
34116237 {
34126238 struct amdgpu_crtc *acrtc = NULL;
3413
- struct amdgpu_plane *cursor_plane;
6239
+ struct drm_plane *cursor_plane;
34146240
34156241 int res = -ENOMEM;
34166242
....@@ -3418,8 +6244,8 @@
34186244 if (!cursor_plane)
34196245 goto fail;
34206246
3421
- cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR;
3422
- res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
6247
+ cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6248
+ res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
34236249
34246250 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
34256251 if (!acrtc)
....@@ -3429,7 +6255,7 @@
34296255 dm->ddev,
34306256 &acrtc->base,
34316257 plane,
3432
- &cursor_plane->base,
6258
+ cursor_plane,
34336259 &amdgpu_dm_crtc_funcs, NULL);
34346260
34356261 if (res)
....@@ -3446,6 +6272,7 @@
34466272
34476273 acrtc->crtc_id = crtc_index;
34486274 acrtc->base.enabled = false;
6275
+ acrtc->otg_inst = -1;
34496276
34506277 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
34516278 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
....@@ -3468,6 +6295,8 @@
34686295 return DRM_MODE_CONNECTOR_HDMIA;
34696296 case SIGNAL_TYPE_EDP:
34706297 return DRM_MODE_CONNECTOR_eDP;
6298
+ case SIGNAL_TYPE_LVDS:
6299
+ return DRM_MODE_CONNECTOR_LVDS;
34716300 case SIGNAL_TYPE_RGB:
34726301 return DRM_MODE_CONNECTOR_VGA;
34736302 case SIGNAL_TYPE_DISPLAY_PORT:
....@@ -3484,14 +6313,23 @@
34846313 }
34856314 }
34866315
6316
+static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6317
+{
6318
+ struct drm_encoder *encoder;
6319
+
6320
+ /* There is only one encoder per connector */
6321
+ drm_connector_for_each_possible_encoder(connector, encoder)
6322
+ return encoder;
6323
+
6324
+ return NULL;
6325
+}
6326
+
34876327 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
34886328 {
3489
- const struct drm_connector_helper_funcs *helper =
3490
- connector->helper_private;
34916329 struct drm_encoder *encoder;
34926330 struct amdgpu_encoder *amdgpu_encoder;
34936331
3494
- encoder = helper->best_encoder(connector);
6332
+ encoder = amdgpu_dm_connector_to_encoder(connector);
34956333
34966334 if (encoder == NULL)
34976335 return;
....@@ -3533,7 +6371,7 @@
35336371 mode->hdisplay = hdisplay;
35346372 mode->vdisplay = vdisplay;
35356373 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
3536
- strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6374
+ strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
35376375
35386376 return mode;
35396377
....@@ -3593,6 +6431,9 @@
35936431 mode = amdgpu_dm_create_common_mode(encoder,
35946432 common_modes[i].name, common_modes[i].w,
35956433 common_modes[i].h);
6434
+ if (!mode)
6435
+ continue;
6436
+
35966437 drm_mode_probed_add(connector, mode);
35976438 amdgpu_dm_connector->num_modes++;
35986439 }
....@@ -3610,6 +6451,15 @@
36106451 amdgpu_dm_connector->num_modes =
36116452 drm_add_edid_modes(connector, edid);
36126453
6454
+ /* Sort the probed modes before calling
6455
+ * amdgpu_dm_get_native_mode(), since the EDID can have
6456
+ * more than one preferred mode. Modes later in the
6457
+ * probed mode list may have a higher, preferred
6458
+ * resolution: for example, 3840x2160 in the base EDID
6459
+ * preferred timing and 4096x2160 as the preferred
6460
+ * resolution in a later DID extension block.
6461
+ */
6462
+ drm_mode_sort(&connector->probed_modes);
36136463 amdgpu_dm_get_native_mode(connector);
36146464 } else {
36156465 amdgpu_dm_connector->num_modes = 0;
....@@ -3618,17 +6468,16 @@
36186468
36196469 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
36206470 {
3621
- const struct drm_connector_helper_funcs *helper =
3622
- connector->helper_private;
36236471 struct amdgpu_dm_connector *amdgpu_dm_connector =
36246472 to_amdgpu_dm_connector(connector);
36256473 struct drm_encoder *encoder;
36266474 struct edid *edid = amdgpu_dm_connector->edid;
36276475
3628
- encoder = helper->best_encoder(connector);
6476
+ encoder = amdgpu_dm_connector_to_encoder(connector);
36296477
36306478 if (!edid || !drm_edid_is_valid(edid)) {
3631
- drm_add_modes_noedid(connector, 640, 480);
6479
+ amdgpu_dm_connector->num_modes =
6480
+ drm_add_modes_noedid(connector, 640, 480);
36326481 } else {
36336482 amdgpu_dm_connector_ddc_get_modes(connector, edid);
36346483 amdgpu_dm_connector_add_common_modes(encoder, connector);
....@@ -3644,7 +6493,7 @@
36446493 struct dc_link *link,
36456494 int link_index)
36466495 {
3647
- struct amdgpu_device *adev = dm->ddev->dev_private;
6496
+ struct amdgpu_device *adev = drm_to_adev(dm->ddev);
36486497
36496498 /*
36506499 * Some of the properties below require access to state, like bpc.
....@@ -3660,21 +6509,23 @@
36606509 aconnector->base.stereo_allowed = false;
36616510 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
36626511 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6512
+ aconnector->audio_inst = -1;
36636513 mutex_init(&aconnector->hpd_lock);
36646514
3665
- /* configure support HPD hot plug connector_>polled default value is 0
6515
+ /*
6516
+ * Configure HPD hot plug support: connector->polled defaults to 0,
36666517 * which means HPD hot plug is not supported
36676518 */
36686519 switch (connector_type) {
36696520 case DRM_MODE_CONNECTOR_HDMIA:
36706521 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
36716522 aconnector->base.ycbcr_420_allowed =
3672
- link->link_enc->features.ycbcr420_supported ? true : false;
6523
+ link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
36736524 break;
36746525 case DRM_MODE_CONNECTOR_DisplayPort:
36756526 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
36766527 aconnector->base.ycbcr_420_allowed =
3677
- link->link_enc->features.ycbcr420_supported ? true : false;
6528
+ link->link_enc->features.dp_ycbcr420_supported ? true : false;
36786529 break;
36796530 case DRM_MODE_CONNECTOR_DVID:
36806531 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
....@@ -3696,10 +6547,35 @@
36966547 drm_object_attach_property(&aconnector->base.base,
36976548 adev->mode_info.underscan_vborder_property,
36986549 0);
3699
- drm_object_attach_property(&aconnector->base.base,
3700
- adev->mode_info.max_bpc_property,
3701
- 0);
37026550
6551
+ if (!aconnector->mst_port)
6552
+ drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6553
+
6554
+ /* This defaults to the max in the range, but we want 8bpc for non-edp. */
6555
+ aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6556
+ aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6557
+
6558
+ if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6559
+ (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6560
+ drm_object_attach_property(&aconnector->base.base,
6561
+ adev->mode_info.abm_level_property, 0);
6562
+ }
6563
+
6564
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6565
+ connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6566
+ connector_type == DRM_MODE_CONNECTOR_eDP) {
6567
+ drm_object_attach_property(
6568
+ &aconnector->base.base,
6569
+ dm->ddev->mode_config.hdr_output_metadata_property, 0);
6570
+
6571
+ if (!aconnector->mst_port)
6572
+ drm_connector_attach_vrr_capable_property(&aconnector->base);
6573
+
6574
+#ifdef CONFIG_DRM_AMD_DC_HDCP
6575
+ if (adev->dm.hdcp_workqueue)
6576
+ drm_connector_attach_content_protection_property(&aconnector->base, true);
6577
+#endif
6578
+ }
37036579 }
37046580
37056581 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
....@@ -3727,9 +6603,9 @@
37276603 cmd.payloads[i].data = msgs[i].buf;
37286604 }
37296605
3730
- if (dal_i2caux_submit_i2c_command(
3731
- ddc_service->ctx->i2caux,
3732
- ddc_service->ddc_pin,
6606
+ if (dc_submit_i2c(
6607
+ ddc_service->ctx->dc,
6608
+ ddc_service->ddc_pin->hw_info.ddc_channel,
37336609 &cmd))
37346610 result = num;
37356611
....@@ -3765,12 +6641,14 @@
37656641 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
37666642 i2c_set_adapdata(&i2c->base, i2c);
37676643 i2c->ddc_service = ddc_service;
6644
+ i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
37686645
37696646 return i2c;
37706647 }
37716648
37726649
3773
-/* Note: this function assumes that dc_link_detect() was called for the
6650
+/*
6651
+ * Note: this function assumes that dc_link_detect() was called for the
37746652 * dc_link which will be represented by this aconnector.
37756653 */
37766654 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
....@@ -3804,11 +6682,12 @@
38046682
38056683 connector_type = to_drm_connector_type(link->connector_signal);
38066684
3807
- res = drm_connector_init(
6685
+ res = drm_connector_init_with_ddc(
38086686 dm->ddev,
38096687 &aconnector->base,
38106688 &amdgpu_dm_connector_funcs,
3811
- connector_type);
6689
+ connector_type,
6690
+ &i2c->base);
38126691
38136692 if (res) {
38146693 DRM_ERROR("connector_init failed\n");
....@@ -3830,18 +6709,9 @@
38306709 drm_connector_attach_encoder(
38316710 &aconnector->base, &aencoder->base);
38326711
3833
- drm_connector_register(&aconnector->base);
3834
-#if defined(CONFIG_DEBUG_FS)
3835
- res = connector_debugfs_init(aconnector);
3836
- if (res) {
3837
- DRM_ERROR("Failed to create debugfs for connector");
3838
- goto out_free;
3839
- }
3840
-#endif
3841
-
38426712 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
38436713 || connector_type == DRM_MODE_CONNECTOR_eDP)
3844
- amdgpu_dm_initialize_dp_connector(dm, aconnector);
6714
+ amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
38456715
38466716 out_free:
38476717 if (res) {
....@@ -3874,7 +6744,7 @@
38746744 struct amdgpu_encoder *aencoder,
38756745 uint32_t link_index)
38766746 {
3877
- struct amdgpu_device *adev = dev->dev_private;
6747
+ struct amdgpu_device *adev = drm_to_adev(dev);
38786748
38796749 int res = drm_encoder_init(dev,
38806750 &aencoder->base,
....@@ -3899,8 +6769,10 @@
38996769 bool enable)
39006770 {
39016771 /*
3902
- * this is not correct translation but will work as soon as VBLANK
3903
- * constant is the same as PFLIP
6772
+ * We have no guarantee that the frontend index maps to the same
6773
+ * backend index - some even map to more than one.
6774
+ *
6775
+ * TODO: Use a different interrupt or check DC itself for the mapping.
39046776 */
39056777 int irq_type =
39066778 amdgpu_display_crtc_idx_to_irq_type(
....@@ -3923,6 +6795,19 @@
39236795 }
39246796 }
39256797
6798
+static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6799
+ struct amdgpu_crtc *acrtc)
6800
+{
6801
+ int irq_type =
6802
+ amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6803
+
6804
+ /**
6805
+ * This reads the current state for the IRQ and force reapplies
6806
+ * the setting to hardware.
6807
+ */
6808
+ amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6809
+}
6810
+
39266811 static bool
39276812 is_scaling_state_different(const struct dm_connector_state *dm_state,
39286813 const struct dm_connector_state *old_dm_state)
....@@ -3941,13 +6826,53 @@
39416826 return false;
39426827 }
39436828
6829
+#ifdef CONFIG_DRM_AMD_DC_HDCP
6830
+static bool is_content_protection_different(struct drm_connector_state *state,
6831
+ const struct drm_connector_state *old_state,
6832
+ const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6833
+{
6834
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6835
+
6836
+ if (old_state->hdcp_content_type != state->hdcp_content_type &&
6837
+ state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6838
+ state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6839
+ return true;
6840
+ }
6841
+
6842
+ /* CP is being re enabled, ignore this */
6843
+ if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6844
+ state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6845
+ state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6846
+ return false;
6847
+ }
6848
+
6849
+ /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6850
+ if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6851
+ state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6852
+ state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6853
+
6854
+ /* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
6855
+ * hot-plug, headless s3, dpms
6856
+ */
6857
+ if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6858
+ aconnector->dc_sink != NULL)
6859
+ return true;
6860
+
6861
+ if (old_state->content_protection == state->content_protection)
6862
+ return false;
6863
+
6864
+ if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6865
+ return true;
6866
+
6867
+ return false;
6868
+}
6869
+
6870
+#endif
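Only the driver may report ENABLED; userspace toggles between DESIRED and UNDESIRED. A loose distillation of the two commented transitions above, not the driver's full logic:

enum cp { UNDESIRED, DESIRED, ENABLED };

static enum cp filter_cp_request(enum cp old, enum cp new)
{
	/* CP is being re-enabled: collapse back to ENABLED instead of
	 * kicking off a fresh HDCP bring-up. */
	if (old == ENABLED && new == DESIRED)
		return ENABLED;
	/* S3 resume: old resets to UNDESIRED while the restored state
	 * says ENABLED, so downgrade to DESIRED and renegotiate. */
	if (old == UNDESIRED && new == ENABLED)
		return DESIRED;
	return new;
}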
39446871 static void remove_stream(struct amdgpu_device *adev,
39456872 struct amdgpu_crtc *acrtc,
39466873 struct dc_stream_state *stream)
39476874 {
39486875 /* this is the update mode case */
3949
- if (adev->dm.freesync_module)
3950
- mod_freesync_remove_stream(adev->dm.freesync_module, stream);
39516876
39526877 acrtc->otg_inst = -1;
39536878 acrtc->enabled = false;
....@@ -3960,12 +6885,8 @@
39606885 int x, y;
39616886 int xorigin = 0, yorigin = 0;
39626887
3963
- if (!crtc || !plane->state->fb) {
3964
- position->enable = false;
3965
- position->x = 0;
3966
- position->y = 0;
6888
+ if (!crtc || !plane->state->fb)
39676889 return 0;
3968
- }
39696890
39706891 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
39716892 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
....@@ -3978,9 +6899,11 @@
39786899
39796900 x = plane->state->crtc_x;
39806901 y = plane->state->crtc_y;
3981
- /* avivo cursor are offset into the total surface */
3982
- x += crtc->primary->state->src_x >> 16;
3983
- y += crtc->primary->state->src_y >> 16;
6902
+
6903
+ if (x <= -amdgpu_crtc->max_cursor_width ||
6904
+ y <= -amdgpu_crtc->max_cursor_height)
6905
+ return 0;
6906
+
39846907 if (x < 0) {
39856908 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
39866909 x = 0;
....@@ -3990,6 +6913,7 @@
39906913 y = 0;
39916914 }
39926915 position->enable = true;
6916
+ position->translate_by_source = true;
39936917 position->x = x;
39946918 position->y = y;
39956919 position->x_hotspot = xorigin;
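With the new clamping, a cursor hanging off the left or top edge keeps its position at 0 and shifts the hotspot instead, and one that is entirely off-screen is left disabled. A runnable sketch of the x-axis arithmetic for a hypothetical 64-wide cursor:

#include <stdio.h>

int main(void)
{
	int x = -10, max_w = 64, xorigin = 0;

	if (x <= -max_w)
		return 0; /* fully off-screen: position stays disabled */
	if (x < 0) {
		xorigin = (-x < max_w - 1) ? -x : max_w - 1; /* min() */
		x = 0;
	}
	printf("x=%d xorigin=%d\n", x, xorigin); /* prints x=0 xorigin=10 */
	return 0;
}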
....@@ -4001,12 +6925,13 @@
40016925 static void handle_cursor_update(struct drm_plane *plane,
40026926 struct drm_plane_state *old_plane_state)
40036927 {
6928
+ struct amdgpu_device *adev = drm_to_adev(plane->dev);
40046929 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
40056930 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
40066931 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
40076932 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
40086933 uint64_t address = afb ? afb->address : 0;
4009
- struct dc_cursor_position position;
6934
+ struct dc_cursor_position position = {0};
40106935 struct dc_cursor_attributes attributes;
40116936 int ret;
40126937
....@@ -4025,9 +6950,12 @@
40256950
40266951 if (!position.enable) {
40276952 /* turn off cursor */
4028
- if (crtc_state && crtc_state->stream)
6953
+ if (crtc_state && crtc_state->stream) {
6954
+ mutex_lock(&adev->dm.dc_lock);
40296955 dc_stream_set_cursor_position(crtc_state->stream,
40306956 &position);
6957
+ mutex_unlock(&adev->dm.dc_lock);
6958
+ }
40316959 return;
40326960 }
40336961
....@@ -4043,9 +6971,17 @@
40436971 attributes.rotation_angle = 0;
40446972 attributes.attribute_flags.value = 0;
40456973
6974
+ /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM
6975
+ * legacy gamma setup.
6976
+ */
6977
+ if (crtc_state->cm_is_degamma_srgb &&
6978
+ adev->dm.dc->caps.color.dpp.gamma_corr)
6979
+ attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
6980
+
40466981 attributes.pitch = attributes.width;
40476982
40486983 if (crtc_state->stream) {
6984
+ mutex_lock(&adev->dm.dc_lock);
40496985 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
40506986 &attributes))
40516987 DRM_ERROR("DC failed to set cursor attributes\n");
....@@ -4053,6 +6989,7 @@
40536989 if (!dc_stream_set_cursor_position(crtc_state->stream,
40546990 &position))
40556991 DRM_ERROR("DC failed to set cursor position\n");
6992
+ mutex_unlock(&adev->dm.dc_lock);
40566993 }
40576994 }
40586995
....@@ -4074,221 +7011,241 @@
40747011 acrtc->crtc_id);
40757012 }
40767013
4077
-/*
4078
- * Executes flip
4079
- *
4080
- * Waits on all BO's fences and for proper vblank count
4081
- */
4082
-static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
4083
- struct drm_framebuffer *fb,
4084
- uint32_t target,
4085
- struct dc_state *state)
7014
+static void update_freesync_state_on_stream(
7015
+ struct amdgpu_display_manager *dm,
7016
+ struct dm_crtc_state *new_crtc_state,
7017
+ struct dc_stream_state *new_stream,
7018
+ struct dc_plane_state *surface,
7019
+ u32 flip_timestamp_in_us)
40867020 {
7021
+ struct mod_vrr_params vrr_params;
7022
+ struct dc_info_packet vrr_infopacket = {0};
7023
+ struct amdgpu_device *adev = dm->adev;
7024
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
40877025 unsigned long flags;
4088
- uint32_t target_vblank;
4089
- int r, vpos, hpos;
4090
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4091
- struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
4092
- struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
4093
- struct amdgpu_device *adev = crtc->dev->dev_private;
4094
- bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
4095
- struct dc_flip_addrs addr = { {0} };
4096
- /* TODO eliminate or rename surface_update */
4097
- struct dc_surface_update surface_updates[1] = { {0} };
4098
- struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
40997026
7027
+ if (!new_stream)
7028
+ return;
41007029
4101
- /* Prepare wait for target vblank early - before the fence-waits */
4102
- target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
4103
- amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
4104
-
4105
- /* TODO This might fail and hence better not used, wait
4106
- * explicitly on fences instead
4107
- * and in general should be called for
4108
- * blocking commit to as per framework helpers
7030
+ /*
7031
+ * TODO: Determine why min/max totals and vrefresh can be 0 here.
7032
+ * For now it's sufficient to just guard against these conditions.
41097033 */
4110
- r = amdgpu_bo_reserve(abo, true);
4111
- if (unlikely(r != 0)) {
4112
- DRM_ERROR("failed to reserve buffer before flip\n");
4113
- WARN_ON(1);
7034
+
7035
+ if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7036
+ return;
7037
+
7038
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7039
+ vrr_params = acrtc->dm_irq_params.vrr_params;
7040
+
7041
+ if (surface) {
7042
+ mod_freesync_handle_preflip(
7043
+ dm->freesync_module,
7044
+ surface,
7045
+ new_stream,
7046
+ flip_timestamp_in_us,
7047
+ &vrr_params);
7048
+
7049
+ if (adev->family < AMDGPU_FAMILY_AI &&
7050
+ amdgpu_dm_vrr_active(new_crtc_state)) {
7051
+ mod_freesync_handle_v_update(dm->freesync_module,
7052
+ new_stream, &vrr_params);
7053
+
7054
+ /* Need to call this before the frame ends. */
7055
+ dc_stream_adjust_vmin_vmax(dm->dc,
7056
+ new_crtc_state->stream,
7057
+ &vrr_params.adjust);
7058
+ }
41147059 }
41157060
4116
- /* Wait for all fences on this FB */
4117
- WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
4118
- MAX_SCHEDULE_TIMEOUT) < 0);
7061
+ mod_freesync_build_vrr_infopacket(
7062
+ dm->freesync_module,
7063
+ new_stream,
7064
+ &vrr_params,
7065
+ PACKET_TYPE_VRR,
7066
+ TRANSFER_FUNC_UNKNOWN,
7067
+ &vrr_infopacket);
41197068
4120
- amdgpu_bo_unreserve(abo);
7069
+ new_crtc_state->freesync_timing_changed |=
7070
+ (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7071
+ &vrr_params.adjust,
7072
+ sizeof(vrr_params.adjust)) != 0);
41217073
4122
- /* Wait until we're out of the vertical blank period before the one
4123
- * targeted by the flip
4124
- */
4125
- while ((acrtc->enabled &&
4126
- (amdgpu_display_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id,
4127
- 0, &vpos, &hpos, NULL,
4128
- NULL, &crtc->hwmode)
4129
- & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
4130
- (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
4131
- (int)(target_vblank -
4132
- amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
4133
- usleep_range(1000, 1100);
4134
- }
7074
+ new_crtc_state->freesync_vrr_info_changed |=
7075
+ (memcmp(&new_crtc_state->vrr_infopacket,
7076
+ &vrr_infopacket,
7077
+ sizeof(vrr_infopacket)) != 0);
41357078
4136
- /* Flip */
4137
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
7079
+ acrtc->dm_irq_params.vrr_params = vrr_params;
7080
+ new_crtc_state->vrr_infopacket = vrr_infopacket;
41387081
4139
- WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
4140
- WARN_ON(!acrtc_state->stream);
7082
+ new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7083
+ new_stream->vrr_infopacket = vrr_infopacket;
41417084
4142
- addr.address.grph.addr.low_part = lower_32_bits(afb->address);
4143
- addr.address.grph.addr.high_part = upper_32_bits(afb->address);
4144
- addr.flip_immediate = async_flip;
7085
+ if (new_crtc_state->freesync_vrr_info_changed)
7086
+ DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7087
+ new_crtc_state->base.crtc->base.id,
7088
+ (int)new_crtc_state->base.vrr_enabled,
7089
+ (int)vrr_params.state);
41457090
4146
-
4147
- if (acrtc->base.state->event)
4148
- prepare_flip_isr(acrtc);
4149
-
4150
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
4151
-
4152
- surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
4153
- surface_updates->flip_addr = &addr;
4154
-
4155
- dc_commit_updates_for_stream(adev->dm.dc,
4156
- surface_updates,
4157
- 1,
4158
- acrtc_state->stream,
4159
- NULL,
4160
- &surface_updates->surface,
4161
- state);
4162
-
4163
- DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
4164
- __func__,
4165
- addr.address.grph.addr.high_part,
4166
- addr.address.grph.addr.low_part);
7091
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
41677092 }
41687093
4169
-/*
4170
- * TODO this whole function needs to go
4171
- *
4172
- * dc_surface_update is needlessly complex. See if we can just replace this
4173
- * with a dc_plane_state and follow the atomic model a bit more closely here.
4174
- */
4175
-static bool commit_planes_to_stream(
4176
- struct dc *dc,
4177
- struct dc_plane_state **plane_states,
4178
- uint8_t new_plane_count,
4179
- struct dm_crtc_state *dm_new_crtc_state,
4180
- struct dm_crtc_state *dm_old_crtc_state,
4181
- struct dc_state *state)
7094
+static void update_stream_irq_parameters(
7095
+ struct amdgpu_display_manager *dm,
7096
+ struct dm_crtc_state *new_crtc_state)
41827097 {
4183
- /* no need to dynamically allocate this. it's pretty small */
4184
- struct dc_surface_update updates[MAX_SURFACES];
4185
- struct dc_flip_addrs *flip_addr;
4186
- struct dc_plane_info *plane_info;
4187
- struct dc_scaling_info *scaling_info;
7098
+ struct dc_stream_state *new_stream = new_crtc_state->stream;
7099
+ struct mod_vrr_params vrr_params;
7100
+ struct mod_freesync_config config = new_crtc_state->freesync_config;
7101
+ struct amdgpu_device *adev = dm->adev;
7102
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7103
+ unsigned long flags;
7104
+
7105
+ if (!new_stream)
7106
+ return;
7107
+
7108
+ /*
7109
+ * TODO: Determine why min/max totals and vrefresh can be 0 here.
7110
+ * For now it's sufficient to just guard against these conditions.
7111
+ */
7112
+ if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7113
+ return;
7114
+
7115
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7116
+ vrr_params = acrtc->dm_irq_params.vrr_params;
7117
+
7118
+ if (new_crtc_state->vrr_supported &&
7119
+ config.min_refresh_in_uhz &&
7120
+ config.max_refresh_in_uhz) {
7121
+ config.state = new_crtc_state->base.vrr_enabled ?
7122
+ VRR_STATE_ACTIVE_VARIABLE :
7123
+ VRR_STATE_INACTIVE;
7124
+ } else {
7125
+ config.state = VRR_STATE_UNSUPPORTED;
7126
+ }
7127
+
7128
+ mod_freesync_build_vrr_params(dm->freesync_module,
7129
+ new_stream,
7130
+ &config, &vrr_params);
7131
+
7132
+ new_crtc_state->freesync_timing_changed |=
7133
+ (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7134
+ &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7135
+
7136
+ new_crtc_state->freesync_config = config;
7137
+ /* Copy state for access from DM IRQ handler */
7138
+ acrtc->dm_irq_params.freesync_config = config;
7139
+ acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7140
+ acrtc->dm_irq_params.vrr_params = vrr_params;
7141
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7142
+}
7143
+
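The /* Copy state for access from DM IRQ handler */ step above is one half of a locking contract: the vblank/vupdate handlers read dm_irq_params under the same event_lock. A minimal sketch of the reader side, assuming a hypothetical handler name (illustrative only, not the driver's actual interrupt code):

	/* Hypothetical IRQ-side reader of dm_irq_params (illustrative only). */
	static void example_vupdate_reader(struct amdgpu_device *adev,
					   struct amdgpu_crtc *acrtc)
	{
		struct mod_vrr_params vrr;
		unsigned long flags;

		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
		vrr = acrtc->dm_irq_params.vrr_params;	/* coherent snapshot */
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

		/* vrr.adjust can now be used without racing the commit path. */
	}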
7144
+static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7145
+ struct dm_crtc_state *new_state)
7146
+{
7147
+ bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7148
+ bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7149
+
7150
+ if (!old_vrr_active && new_vrr_active) {
7151
+ /* Transition VRR inactive -> active:
7152
+ * While VRR is active, we must not disable vblank irq, as a
7153
+ * re-enable after disable would compute bogus vblank/pflip
7154
+ * timestamps if the re-enable happens inside the display front-porch.
7155
+ *
7156
+ * We also need vupdate irq for the actual core vblank handling
7157
+ * at end of vblank.
7158
+ */
7159
+ dm_set_vupdate_irq(new_state->base.crtc, true);
7160
+ drm_crtc_vblank_get(new_state->base.crtc);
7161
+ DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7162
+ __func__, new_state->base.crtc->base.id);
7163
+ } else if (old_vrr_active && !new_vrr_active) {
7164
+ /* Transition VRR active -> inactive:
7165
+ * Allow vblank irq disable again for fixed refresh rate.
7166
+ */
7167
+ dm_set_vupdate_irq(new_state->base.crtc, false);
7168
+ drm_crtc_vblank_put(new_state->base.crtc);
7169
+ DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7170
+ __func__, new_state->base.crtc->base.id);
7171
+ }
7172
+}
7173
+
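Because the get/put above must stay balanced across an arbitrary sequence of commits, the invariant is easy to state: exactly one vblank reference is held while VRR is active, zero otherwise. A standalone model in plain C (commit sequence assumed, not driver code):

	#include <assert.h>

	int main(void)
	{
		int refs = 0, vrr_on = 0;
		const int commits[] = { 1, 1, 0, 1, 0 };   /* assumed vrr_active per commit */

		for (unsigned int i = 0; i < sizeof(commits) / sizeof(commits[0]); i++) {
			if (!vrr_on && commits[i])
				refs++;			/* models drm_crtc_vblank_get() */
			else if (vrr_on && !commits[i])
				refs--;			/* models drm_crtc_vblank_put() */
			vrr_on = commits[i];
			assert(refs == vrr_on);		/* one ref held iff VRR active */
		}
		return 0;
	}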
7174
+static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7175
+{
7176
+ struct drm_plane *plane;
7177
+ struct drm_plane_state *old_plane_state, *new_plane_state;
41887178 int i;
4189
- struct dc_stream_state *dc_stream = dm_new_crtc_state->stream;
4190
- struct dc_stream_update *stream_update =
4191
- kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
41927179
4193
- if (!stream_update) {
4194
- BREAK_TO_DEBUGGER();
4195
- return false;
4196
- }
4197
-
4198
- flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
4199
- GFP_KERNEL);
4200
- plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
4201
- GFP_KERNEL);
4202
- scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
4203
- GFP_KERNEL);
4204
-
4205
- if (!flip_addr || !plane_info || !scaling_info) {
4206
- kfree(flip_addr);
4207
- kfree(plane_info);
4208
- kfree(scaling_info);
4209
- kfree(stream_update);
4210
- return false;
4211
- }
4212
-
4213
- memset(updates, 0, sizeof(updates));
4214
-
4215
- stream_update->src = dc_stream->src;
4216
- stream_update->dst = dc_stream->dst;
4217
- stream_update->out_transfer_func = dc_stream->out_transfer_func;
4218
-
4219
- for (i = 0; i < new_plane_count; i++) {
4220
- updates[i].surface = plane_states[i];
4221
- updates[i].gamma =
4222
- (struct dc_gamma *)plane_states[i]->gamma_correction;
4223
- updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
4224
- flip_addr[i].address = plane_states[i]->address;
4225
- flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
4226
- plane_info[i].color_space = plane_states[i]->color_space;
4227
- plane_info[i].format = plane_states[i]->format;
4228
- plane_info[i].plane_size = plane_states[i]->plane_size;
4229
- plane_info[i].rotation = plane_states[i]->rotation;
4230
- plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
4231
- plane_info[i].stereo_format = plane_states[i]->stereo_format;
4232
- plane_info[i].tiling_info = plane_states[i]->tiling_info;
4233
- plane_info[i].visible = plane_states[i]->visible;
4234
- plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
4235
- plane_info[i].dcc = plane_states[i]->dcc;
4236
- scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
4237
- scaling_info[i].src_rect = plane_states[i]->src_rect;
4238
- scaling_info[i].dst_rect = plane_states[i]->dst_rect;
4239
- scaling_info[i].clip_rect = plane_states[i]->clip_rect;
4240
-
4241
- updates[i].flip_addr = &flip_addr[i];
4242
- updates[i].plane_info = &plane_info[i];
4243
- updates[i].scaling_info = &scaling_info[i];
4244
- }
4245
-
4246
- dc_commit_updates_for_stream(
4247
- dc,
4248
- updates,
4249
- new_plane_count,
4250
- dc_stream, stream_update, plane_states, state);
4251
-
4252
- kfree(flip_addr);
4253
- kfree(plane_info);
4254
- kfree(scaling_info);
4255
- kfree(stream_update);
4256
- return true;
7180
+ /*
7181
+ * TODO: Make this per-stream so we don't issue redundant updates for
7182
+ * commits with multiple streams.
7183
+ */
7184
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7185
+ new_plane_state, i)
7186
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
7187
+ handle_cursor_update(plane, old_plane_state);
42577188 }
42587189
42597190 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7191
+ struct dc_state *dc_state,
42607192 struct drm_device *dev,
42617193 struct amdgpu_display_manager *dm,
42627194 struct drm_crtc *pcrtc,
4263
- bool *wait_for_vblank)
7195
+ bool wait_for_vblank)
42647196 {
42657197 uint32_t i;
7198
+ uint64_t timestamp_ns;
42667199 struct drm_plane *plane;
42677200 struct drm_plane_state *old_plane_state, *new_plane_state;
4268
- struct dc_stream_state *dc_stream_attach;
4269
- struct dc_plane_state *plane_states_constructed[MAX_SURFACES];
42707201 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
42717202 struct drm_crtc_state *new_pcrtc_state =
42727203 drm_atomic_get_new_crtc_state(state, pcrtc);
42737204 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
42747205 struct dm_crtc_state *dm_old_crtc_state =
42757206 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
4276
- struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4277
- int planes_count = 0;
7207
+ int planes_count = 0, vpos, hpos;
7208
+ long r;
42787209 unsigned long flags;
7210
+ struct amdgpu_bo *abo;
7211
+ uint32_t target_vblank, last_flip_vblank;
7212
+ bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7213
+ bool pflip_present = false;
7214
+ struct {
7215
+ struct dc_surface_update surface_updates[MAX_SURFACES];
7216
+ struct dc_plane_info plane_infos[MAX_SURFACES];
7217
+ struct dc_scaling_info scaling_infos[MAX_SURFACES];
7218
+ struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7219
+ struct dc_stream_update stream_update;
7220
+ } *bundle;
7221
+
7222
+ bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7223
+
7224
+ if (!bundle) {
7225
+ dm_error("Failed to allocate update bundle\n");
7226
+ goto cleanup;
7227
+ }
7228
+
7229
+ /*
7230
+ * Disable the cursor first if we're disabling all the planes.
7231
+ * It'll remain on the screen after the planes are re-enabled
7232
+ * if we don't.
7233
+ */
7234
+ if (acrtc_state->active_planes == 0)
7235
+ amdgpu_dm_commit_cursors(state);
42797236
42807237 /* update planes when needed */
42817238 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
42827239 struct drm_crtc *crtc = new_plane_state->crtc;
42837240 struct drm_crtc_state *new_crtc_state;
42847241 struct drm_framebuffer *fb = new_plane_state->fb;
4285
- bool pflip_needed;
7242
+ bool plane_needs_flip;
7243
+ struct dc_plane_state *dc_plane;
42867244 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
42877245
4288
- if (plane->type == DRM_PLANE_TYPE_CURSOR) {
4289
- handle_cursor_update(plane, old_plane_state);
7246
+ /* Cursor plane is handled after stream updates */
7247
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
42907248 continue;
4291
- }
42927249
42937250 if (!fb || !crtc || pcrtc != crtc)
42947251 continue;
....@@ -4297,74 +7254,335 @@
42977254 if (!new_crtc_state->active)
42987255 continue;
42997256
4300
- pflip_needed = !state->allow_modeset;
7257
+ dc_plane = dm_new_plane_state->dc_state;
7258
+ if (!dc_plane)
7259
+ continue;
43017260
4302
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
4303
- if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) {
4304
- DRM_ERROR("%s: acrtc %d, already busy\n",
4305
- __func__,
4306
- acrtc_attach->crtc_id);
4307
- /* In commit tail framework this cannot happen */
4308
- WARN_ON(1);
7261
+ bundle->surface_updates[planes_count].surface = dc_plane;
7262
+ if (new_pcrtc_state->color_mgmt_changed) {
7263
+ bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7264
+ bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7265
+ bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
43097266 }
4310
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
43117267
4312
- if (!pflip_needed || plane->type == DRM_PLANE_TYPE_OVERLAY) {
4313
- WARN_ON(!dm_new_plane_state->dc_state);
7268
+ fill_dc_scaling_info(new_plane_state,
7269
+ &bundle->scaling_infos[planes_count]);
43147270
4315
- plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;
7271
+ bundle->surface_updates[planes_count].scaling_info =
7272
+ &bundle->scaling_infos[planes_count];
43167273
4317
- dc_stream_attach = acrtc_state->stream;
4318
- planes_count++;
7274
+ plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
43197275
4320
- } else if (new_crtc_state->planes_changed) {
4321
- /* Assume even ONE crtc with immediate flip means
4322
- * entire can't wait for VBLANK
4323
- * TODO Check if it's correct
4324
- */
4325
- *wait_for_vblank =
4326
- new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ?
4327
- false : true;
7276
+ pflip_present = pflip_present || plane_needs_flip;
43287277
4329
- /* TODO: Needs rework for multiplane flip */
4330
- if (plane->type == DRM_PLANE_TYPE_PRIMARY)
4331
- drm_crtc_vblank_get(crtc);
4332
-
4333
- amdgpu_dm_do_flip(
4334
- crtc,
4335
- fb,
4336
- (uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank,
4337
- dm_state->context);
7278
+ if (!plane_needs_flip) {
7279
+ planes_count += 1;
7280
+ continue;
43387281 }
7282
+
7283
+ abo = gem_to_amdgpu_bo(fb->obj[0]);
7284
+
7285
+ /*
7286
+ * Wait for all fences on this FB. Do limited wait to avoid
7287
+ * deadlock during GPU reset when this fence will not signal
7288
+ * but we hold reservation lock for the BO.
7289
+ */
7290
+ r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7291
+ false,
7292
+ msecs_to_jiffies(5000));
7293
+ if (unlikely(r <= 0))
7294
+ DRM_ERROR("Waiting for fences timed out!");
7295
+
7296
+ fill_dc_plane_info_and_addr(
7297
+ dm->adev, new_plane_state,
7298
+ dm_new_plane_state->tiling_flags,
7299
+ &bundle->plane_infos[planes_count],
7300
+ &bundle->flip_addrs[planes_count].address,
7301
+ dm_new_plane_state->tmz_surface, false);
7302
+
7303
+ DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7304
+ new_plane_state->plane->index,
7305
+ bundle->plane_infos[planes_count].dcc.enable);
7306
+
7307
+ bundle->surface_updates[planes_count].plane_info =
7308
+ &bundle->plane_infos[planes_count];
7309
+
7310
+ /*
7311
+ * Only allow immediate flips for fast updates that don't
7312
+ * change FB pitch, DCC state, rotation or mirroring.
7313
+ */
7314
+ bundle->flip_addrs[planes_count].flip_immediate =
7315
+ crtc->state->async_flip &&
7316
+ acrtc_state->update_type == UPDATE_TYPE_FAST;
7317
+
7318
+ timestamp_ns = ktime_get_ns();
7319
+ bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7320
+ bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7321
+ bundle->surface_updates[planes_count].surface = dc_plane;
7322
+
7323
+ if (!bundle->surface_updates[planes_count].surface) {
7324
+ DRM_ERROR("No surface for CRTC: id=%d\n",
7325
+ acrtc_attach->crtc_id);
7326
+ continue;
7327
+ }
7328
+
7329
+ if (plane == pcrtc->primary)
7330
+ update_freesync_state_on_stream(
7331
+ dm,
7332
+ acrtc_state,
7333
+ acrtc_state->stream,
7334
+ dc_plane,
7335
+ bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7336
+
7337
+ DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7338
+ __func__,
7339
+ bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7340
+ bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7341
+
7342
+ planes_count += 1;
43397343
43407344 }
43417345
4342
- if (planes_count) {
4343
- unsigned long flags;
4344
-
4345
- if (new_pcrtc_state->event) {
4346
-
4347
- drm_crtc_vblank_get(pcrtc);
4348
-
7346
+ if (pflip_present) {
7347
+ if (!vrr_active) {
7348
+ /* Use old throttling in non-vrr fixed refresh rate mode
7349
+ * to keep flip scheduling based on target vblank counts
7350
+ * working in a backwards compatible way, e.g., for
7351
+ * clients using the GLX_OML_sync_control extension or
7352
+ * DRI3/Present extension with defined target_msc.
7353
+ */
7354
+ last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7355
+ }
7356
+ else {
7357
+ /* For variable refresh rate mode only:
7358
+ * Get vblank of last completed flip to avoid > 1 vrr
7359
+ * flips per video frame by use of throttling, but allow
7360
+ * flip programming anywhere in the possibly large
7361
+ * variable vrr vblank interval for fine-grained flip
7362
+ * timing control and more opportunity to avoid stutter
7363
+ * on late submission of flips.
7364
+ */
43497365 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
4350
- prepare_flip_isr(acrtc_attach);
7366
+ last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
43517367 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
43527368 }
43537369
7370
+ target_vblank = last_flip_vblank + wait_for_vblank;
43547371
4355
- if (false == commit_planes_to_stream(dm->dc,
4356
- plane_states_constructed,
4357
- planes_count,
4358
- acrtc_state,
4359
- dm_old_crtc_state,
4360
- dm_state->context))
4361
- dm_error("%s: Failed to attach plane!\n", __func__);
4362
- } else {
4363
- /*TODO BUG Here should go disable planes on CRTC. */
7372
+ /*
7373
+ * Wait until we're out of the vertical blank period before the one
7374
+ * targeted by the flip
7375
+ */
7376
+ while ((acrtc_attach->enabled &&
7377
+ (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7378
+ 0, &vpos, &hpos, NULL,
7379
+ NULL, &pcrtc->hwmode)
7380
+ & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7381
+ (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7382
+ (int)(target_vblank -
7383
+ amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7384
+ usleep_range(1000, 1100);
7385
+ }
7386
+
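The loop condition's (int)(target_vblank - counter) > 0 test is the standard wrap-safe counter comparison: the subtraction happens in 32-bit unsigned arithmetic, and reinterpreting the difference as signed keeps the ordering correct even when the vblank counter rolls over. A self-contained illustration with assumed values:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	static bool target_not_reached(uint32_t target, uint32_t now)
	{
		return (int32_t)(target - now) > 0;	/* wrap-safe ordering test */
	}

	int main(void)
	{
		/* Counter about to wrap: from 0xfffffffe, four more vblanks reach 2. */
		assert(target_not_reached(2u, 0xfffffffeu));	/* 4 to go: keep waiting */
		assert(!target_not_reached(2u, 2u));		/* reached: stop */
		assert(!target_not_reached(2u, 7u));		/* passed: stop */
		return 0;
	}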
7387
+ /**
7388
+ * Prepare the flip event for the pageflip interrupt to handle.
7389
+ *
7390
+ * This only works in the case where we've already turned on the
7391
+ * appropriate hardware blocks (e.g. HUBP) so in the transition case
7392
+ * from 0 -> n planes we have to skip a hardware generated event
7393
+ * and rely on sending it from software.
7394
+ */
7395
+ if (acrtc_attach->base.state->event &&
7396
+ acrtc_state->active_planes > 0) {
7397
+ drm_crtc_vblank_get(pcrtc);
7398
+
7399
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7400
+
7401
+ WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7402
+ prepare_flip_isr(acrtc_attach);
7403
+
7404
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7405
+ }
7406
+
7407
+ if (acrtc_state->stream) {
7408
+ if (acrtc_state->freesync_vrr_info_changed)
7409
+ bundle->stream_update.vrr_infopacket =
7410
+ &acrtc_state->stream->vrr_infopacket;
7411
+ }
7412
+ }
7413
+
7414
+ /* Update the planes if changed or disable if we don't have any. */
7415
+ if ((planes_count || acrtc_state->active_planes == 0) &&
7416
+ acrtc_state->stream) {
7417
+ bundle->stream_update.stream = acrtc_state->stream;
7418
+ if (new_pcrtc_state->mode_changed) {
7419
+ bundle->stream_update.src = acrtc_state->stream->src;
7420
+ bundle->stream_update.dst = acrtc_state->stream->dst;
7421
+ }
7422
+
7423
+ if (new_pcrtc_state->color_mgmt_changed) {
7424
+ /*
7425
+ * TODO: This isn't fully correct since we've actually
7426
+ * already modified the stream in place.
7427
+ */
7428
+ bundle->stream_update.gamut_remap =
7429
+ &acrtc_state->stream->gamut_remap_matrix;
7430
+ bundle->stream_update.output_csc_transform =
7431
+ &acrtc_state->stream->csc_color_matrix;
7432
+ bundle->stream_update.out_transfer_func =
7433
+ acrtc_state->stream->out_transfer_func;
7434
+ }
7435
+
7436
+ acrtc_state->stream->abm_level = acrtc_state->abm_level;
7437
+ if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7438
+ bundle->stream_update.abm_level = &acrtc_state->abm_level;
7439
+
7440
+ mutex_lock(&dm->dc_lock);
7441
+ if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7442
+ acrtc_state->stream->link->psr_settings.psr_allow_active)
7443
+ amdgpu_dm_psr_disable(acrtc_state->stream);
7444
+ mutex_unlock(&dm->dc_lock);
7445
+
7446
+ /*
7447
+ * If FreeSync state on the stream has changed then we need to
7448
+ * re-adjust the min/max bounds now that DC doesn't handle this
7449
+ * as part of commit.
7450
+ */
7451
+ if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7452
+ amdgpu_dm_vrr_active(acrtc_state)) {
7453
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7454
+ dc_stream_adjust_vmin_vmax(
7455
+ dm->dc, acrtc_state->stream,
7456
+ &acrtc_attach->dm_irq_params.vrr_params.adjust);
7457
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7458
+ }
7459
+ mutex_lock(&dm->dc_lock);
7460
+
7461
+ dc_commit_updates_for_stream(dm->dc,
7462
+ bundle->surface_updates,
7463
+ planes_count,
7464
+ acrtc_state->stream,
7465
+ &bundle->stream_update,
7466
+ dc_state);
7467
+
7468
+ /**
7469
+ * Enable or disable the interrupts on the backend.
7470
+ *
7471
+ * Most pipes are put into power gating when unused.
7472
+ *
7473
+ * When power gating is enabled on a pipe we lose the
7474
+ * interrupt enablement state when power gating is disabled.
7475
+ *
7476
+ * So we need to update the IRQ control state in hardware
7477
+ * whenever the pipe turns on (since it could be previously
7478
+ * power gated) or off (since some pipes can't be power gated
7479
+ * on some ASICs).
7480
+ */
7481
+ if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7482
+ dm_update_pflip_irq_state(drm_to_adev(dev),
7483
+ acrtc_attach);
7484
+
7485
+ if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7486
+ acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7487
+ !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7488
+ amdgpu_dm_link_setup_psr(acrtc_state->stream);
7489
+ else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7490
+ acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7491
+ !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7492
+ amdgpu_dm_psr_enable(acrtc_state->stream);
7493
+ }
7494
+
7495
+ mutex_unlock(&dm->dc_lock);
7496
+ }
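Untangling the two PSR branches above: a slow (non-FAST) update performs one-time PSR setup when the panel supports it, while a FAST update enables PSR once the feature is set up but not yet active. A hedged restatement as a standalone predicate (conditions paraphrased from this diff; simplified, not the driver's API):

	#include <stdbool.h>

	enum psr_action { PSR_NONE, PSR_SETUP, PSR_ENABLE };

	/* Simplified model of the PSR gating above (illustrative only). */
	static enum psr_action psr_action_for(bool fast_update, bool version_supported,
					      bool feature_enabled, bool allow_active)
	{
		if (!fast_update && version_supported && !feature_enabled)
			return PSR_SETUP;	/* cf. amdgpu_dm_link_setup_psr() */
		if (fast_update && feature_enabled && !allow_active)
			return PSR_ENABLE;	/* cf. amdgpu_dm_psr_enable() */
		return PSR_NONE;
	}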
7497
+
7498
+ /*
7499
+ * Update cursor state *after* programming all the planes.
7500
+ * This avoids redundant programming in the case where we're going
7501
+ * to be disabling a single plane - those pipes are being disabled.
7502
+ */
7503
+ if (acrtc_state->active_planes)
7504
+ amdgpu_dm_commit_cursors(state);
7505
+
7506
+cleanup:
7507
+ kfree(bundle);
7508
+}
7509
+
7510
+static void amdgpu_dm_commit_audio(struct drm_device *dev,
7511
+ struct drm_atomic_state *state)
7512
+{
7513
+ struct amdgpu_device *adev = drm_to_adev(dev);
7514
+ struct amdgpu_dm_connector *aconnector;
7515
+ struct drm_connector *connector;
7516
+ struct drm_connector_state *old_con_state, *new_con_state;
7517
+ struct drm_crtc_state *new_crtc_state;
7518
+ struct dm_crtc_state *new_dm_crtc_state;
7519
+ const struct dc_stream_status *status;
7520
+ int i, inst;
7521
+
7522
+ /* Notify device removals. */
7523
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7524
+ if (old_con_state->crtc != new_con_state->crtc) {
7525
+ /* CRTC changes require notification. */
7526
+ goto notify;
7527
+ }
7528
+
7529
+ if (!new_con_state->crtc)
7530
+ continue;
7531
+
7532
+ new_crtc_state = drm_atomic_get_new_crtc_state(
7533
+ state, new_con_state->crtc);
7534
+
7535
+ if (!new_crtc_state)
7536
+ continue;
7537
+
7538
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7539
+ continue;
7540
+
7541
+ notify:
7542
+ aconnector = to_amdgpu_dm_connector(connector);
7543
+
7544
+ mutex_lock(&adev->dm.audio_lock);
7545
+ inst = aconnector->audio_inst;
7546
+ aconnector->audio_inst = -1;
7547
+ mutex_unlock(&adev->dm.audio_lock);
7548
+
7549
+ amdgpu_dm_audio_eld_notify(adev, inst);
7550
+ }
7551
+
7552
+ /* Notify audio device additions. */
7553
+ for_each_new_connector_in_state(state, connector, new_con_state, i) {
7554
+ if (!new_con_state->crtc)
7555
+ continue;
7556
+
7557
+ new_crtc_state = drm_atomic_get_new_crtc_state(
7558
+ state, new_con_state->crtc);
7559
+
7560
+ if (!new_crtc_state)
7561
+ continue;
7562
+
7563
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7564
+ continue;
7565
+
7566
+ new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7567
+ if (!new_dm_crtc_state->stream)
7568
+ continue;
7569
+
7570
+ status = dc_stream_get_status(new_dm_crtc_state->stream);
7571
+ if (!status)
7572
+ continue;
7573
+
7574
+ aconnector = to_amdgpu_dm_connector(connector);
7575
+
7576
+ mutex_lock(&adev->dm.audio_lock);
7577
+ inst = status->audio_inst;
7578
+ aconnector->audio_inst = inst;
7579
+ mutex_unlock(&adev->dm.audio_lock);
7580
+
7581
+ amdgpu_dm_audio_eld_notify(adev, inst);
43647582 }
43657583 }
43667584
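The goto notify in the removal loop above jumps forward within the same iteration: a connector whose CRTC changed skips the remaining filters but still reaches the notification code, while filtered connectors continue past it. A self-contained illustration of that control flow (values hypothetical):

	#include <stdio.h>

	int main(void)
	{
		for (int i = 0; i < 3; i++) {
			if (i == 0)
				goto notify;	/* like a CRTC change: notify unconditionally */
			if (i == 1)
				continue;	/* filtered out: no notification */
	notify:
			printf("notify %d\n", i);	/* runs for i == 0 and i == 2 */
		}
		return 0;
	}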
4367
-/**
7585
+/*
43687586 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
43697587 * @crtc_state: the DRM CRTC state
43707588 * @stream_state: the DC stream state.
....@@ -4382,51 +7600,31 @@
43827600 struct drm_atomic_state *state,
43837601 bool nonblock)
43847602 {
4385
- struct drm_crtc *crtc;
4386
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4387
- struct amdgpu_device *adev = dev->dev_private;
4388
- int i;
4389
-
43907603 /*
4391
- * We evade vblanks and pflips on crtc that
4392
- * should be changed. We do it here to flush & disable
4393
- * interrupts before drm_swap_state is called in drm_atomic_helper_commit
4394
- * it will update crtc->dm_crtc_state->stream pointer which is used in
4395
- * the ISRs.
7604
+ * Add check here for SoC's that support hardware cursor plane, to
7605
+ * unset legacy_cursor_update
43967606 */
4397
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4398
- struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4399
- struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4400
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4401
-
4402
- if (drm_atomic_crtc_needs_modeset(new_crtc_state)
4403
- && dm_old_crtc_state->stream) {
4404
- /*
4405
- * CRC capture was enabled but not disabled.
4406
- * Release the vblank reference.
4407
- */
4408
- if (dm_new_crtc_state->crc_enabled) {
4409
- drm_crtc_vblank_put(crtc);
4410
- dm_new_crtc_state->crc_enabled = false;
4411
- }
4412
-
4413
- manage_dm_interrupts(adev, acrtc, false);
4414
- }
4415
- }
4416
- /* Add check here for SoC's that support hardware cursor plane, to
4417
- * unset legacy_cursor_update */
44187607
44197608 return drm_atomic_helper_commit(dev, state, nonblock);
44207609
44217610 /*TODO Handle EINTR, reenable IRQ*/
44227611 }
44237612
7613
+/**
7614
+ * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7615
+ * @state: The atomic state to commit
7616
+ *
7617
+ * This will tell DC to commit the constructed DC state from atomic_check,
7618
+ * programming the hardware. Any failure here implies a hardware failure, since
7619
+ * atomic check should have filtered anything non-kosher.
7620
+ */
44247621 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
44257622 {
44267623 struct drm_device *dev = state->dev;
4427
- struct amdgpu_device *adev = dev->dev_private;
7624
+ struct amdgpu_device *adev = drm_to_adev(dev);
44287625 struct amdgpu_display_manager *dm = &adev->dm;
44297626 struct dm_atomic_state *dm_state;
7627
+ struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
44307628 uint32_t i, j;
44317629 struct drm_crtc *crtc;
44327630 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
....@@ -4436,10 +7634,36 @@
44367634 struct drm_connector_state *old_con_state, *new_con_state;
44377635 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
44387636 int crtc_disable_count = 0;
7637
+ bool mode_set_reset_required = false;
44397638
44407639 drm_atomic_helper_update_legacy_modeset_state(dev, state);
44417640
4442
- dm_state = to_dm_atomic_state(state);
7641
+ dm_state = dm_atomic_get_new_state(state);
7642
+ if (dm_state && dm_state->context) {
7643
+ dc_state = dm_state->context;
7644
+ } else {
7645
+ /* No state changes, retain current state. */
7646
+ dc_state_temp = dc_create_state(dm->dc);
7647
+ ASSERT(dc_state_temp);
7648
+ dc_state = dc_state_temp;
7649
+ dc_resource_state_copy_construct_current(dm->dc, dc_state);
7650
+ }
7651
+
7652
+ for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
7653
+ new_crtc_state, i) {
7654
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7655
+
7656
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7657
+
7658
+ if (old_crtc_state->active &&
7659
+ (!new_crtc_state->active ||
7660
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7661
+ manage_dm_interrupts(adev, acrtc, false);
7662
+ dc_stream_release(dm_old_crtc_state->stream);
7663
+ }
7664
+ }
7665
+
7666
+ drm_atomic_helper_calc_timestamping_constants(state);
44437667
44447668 /* update changed items */
44457669 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
....@@ -4479,8 +7703,8 @@
44797703 * this could happen because of issues with
44807704 * userspace notifications delivery.
44817705 * In this case userspace tries to set mode on
4482
- * display which is disconnect in fact.
4483
- * dc_sink in NULL in this case on aconnector.
7706
+ * display which is disconnected in fact.
7707
+ * dc_sink is NULL in this case on aconnector.
44847708 * We expect reset mode will come soon.
44857709 *
44867710 * This can also happen when unplug is done
....@@ -4503,74 +7727,25 @@
45037727 acrtc->enabled = true;
45047728 acrtc->hw_mode = new_crtc_state->mode;
45057729 crtc->hwmode = new_crtc_state->mode;
7730
+ mode_set_reset_required = true;
45067731 } else if (modereset_required(new_crtc_state)) {
45077732 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
4508
-
45097733 /* i.e. reset mode */
45107734 if (dm_old_crtc_state->stream)
45117735 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7736
+ mode_set_reset_required = true;
45127737 }
45137738 } /* for_each_crtc_in_state() */
45147739
4515
- /*
4516
- * Add streams after required streams from new and replaced streams
4517
- * are removed from freesync module
4518
- */
4519
- if (adev->dm.freesync_module) {
4520
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
4521
- new_crtc_state, i) {
4522
- struct amdgpu_dm_connector *aconnector = NULL;
4523
- struct dm_connector_state *dm_new_con_state = NULL;
4524
- struct amdgpu_crtc *acrtc = NULL;
4525
- bool modeset_needed;
7740
+ if (dc_state) {
7741
+ /* if there is a mode set or reset, disable eDP PSR */
7742
+ if (mode_set_reset_required)
7743
+ amdgpu_dm_psr_disable_all(dm);
45267744
4527
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4528
- dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4529
- modeset_needed = modeset_required(
4530
- new_crtc_state,
4531
- dm_new_crtc_state->stream,
4532
- dm_old_crtc_state->stream);
4533
- /* We add stream to freesync if:
4534
- * 1. Said stream is not null, and
4535
- * 2. A modeset is requested. This means that the
4536
- * stream was removed previously, and needs to be
4537
- * replaced.
4538
- */
4539
- if (dm_new_crtc_state->stream == NULL ||
4540
- !modeset_needed)
4541
- continue;
4542
-
4543
- acrtc = to_amdgpu_crtc(crtc);
4544
-
4545
- aconnector =
4546
- amdgpu_dm_find_first_crtc_matching_connector(
4547
- state, crtc);
4548
- if (!aconnector) {
4549
- DRM_DEBUG_DRIVER("Atomic commit: Failed to "
4550
- "find connector for acrtc "
4551
- "id:%d skipping freesync "
4552
- "init\n",
4553
- acrtc->crtc_id);
4554
- continue;
4555
- }
4556
-
4557
- mod_freesync_add_stream(adev->dm.freesync_module,
4558
- dm_new_crtc_state->stream,
4559
- &aconnector->caps);
4560
- new_con_state = drm_atomic_get_new_connector_state(
4561
- state, &aconnector->base);
4562
- dm_new_con_state = to_dm_connector_state(new_con_state);
4563
-
4564
- mod_freesync_set_user_enable(adev->dm.freesync_module,
4565
- &dm_new_crtc_state->stream,
4566
- 1,
4567
- &dm_new_con_state->user_enable);
4568
- }
4569
- }
4570
-
4571
- if (dm_state->context) {
4572
- dm_enable_per_frame_crtc_master_sync(dm_state->context);
4573
- WARN_ON(!dc_commit_state(dm->dc, dm_state->context));
7745
+ dm_enable_per_frame_crtc_master_sync(dc_state);
7746
+ mutex_lock(&dm->dc_lock);
7747
+ WARN_ON(!dc_commit_state(dm->dc, dc_state));
7748
+ mutex_unlock(&dm->dc_lock);
45747749 }
45757750
45767751 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
....@@ -4583,18 +7758,56 @@
45837758 dc_stream_get_status(dm_new_crtc_state->stream);
45847759
45857760 if (!status)
7761
+ status = dc_stream_get_status_from_state(dc_state,
7762
+ dm_new_crtc_state->stream);
7763
+ if (!status)
45867764 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
45877765 else
45887766 acrtc->otg_inst = status->primary_otg_inst;
45897767 }
45907768 }
7769
+#ifdef CONFIG_DRM_AMD_DC_HDCP
7770
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7771
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7772
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7773
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
45917774
4592
- /* Handle scaling and underscan changes*/
7775
+ new_crtc_state = NULL;
7776
+
7777
+ if (acrtc)
7778
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7779
+
7780
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7781
+
7782
+ if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7783
+ connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7784
+ hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7785
+ new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7786
+ continue;
7787
+ }
7788
+
7789
+ if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7790
+ hdcp_update_display(
7791
+ adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7792
+ new_con_state->hdcp_content_type,
7793
+ new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7794
+ : false);
7795
+ }
7796
+#endif
7797
+
7798
+ /* Handle connector state changes */
45937799 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
45947800 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
45957801 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
45967802 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7803
+ struct dc_surface_update dummy_updates[MAX_SURFACES];
7804
+ struct dc_stream_update stream_update;
7805
+ struct dc_info_packet hdr_packet;
45977806 struct dc_stream_status *status = NULL;
7807
+ bool abm_changed, hdr_changed, scaling_changed;
7808
+
7809
+ memset(&dummy_updates, 0, sizeof(dummy_updates));
7810
+ memset(&stream_update, 0, sizeof(stream_update));
45987811
45997812 if (acrtc) {
46007813 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
....@@ -4605,76 +7818,136 @@
46057818 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
46067819 continue;
46077820
4608
- /* Skip any thing not scale or underscan changes */
4609
- if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
4610
- continue;
4611
-
46127821 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7822
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
46137823
4614
- update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
4615
- dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
7824
+ scaling_changed = is_scaling_state_different(dm_new_con_state,
7825
+ dm_old_con_state);
46167826
4617
- if (!dm_new_crtc_state->stream)
7827
+ abm_changed = dm_new_crtc_state->abm_level !=
7828
+ dm_old_crtc_state->abm_level;
7829
+
7830
+ hdr_changed =
7831
+ is_hdr_metadata_different(old_con_state, new_con_state);
7832
+
7833
+ if (!scaling_changed && !abm_changed && !hdr_changed)
46187834 continue;
7835
+
7836
+ stream_update.stream = dm_new_crtc_state->stream;
7837
+ if (scaling_changed) {
7838
+ update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7839
+ dm_new_con_state, dm_new_crtc_state->stream);
7840
+
7841
+ stream_update.src = dm_new_crtc_state->stream->src;
7842
+ stream_update.dst = dm_new_crtc_state->stream->dst;
7843
+ }
7844
+
7845
+ if (abm_changed) {
7846
+ dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7847
+
7848
+ stream_update.abm_level = &dm_new_crtc_state->abm_level;
7849
+ }
7850
+
7851
+ if (hdr_changed) {
7852
+ fill_hdr_info_packet(new_con_state, &hdr_packet);
7853
+ stream_update.hdr_static_metadata = &hdr_packet;
7854
+ }
46197855
46207856 status = dc_stream_get_status(dm_new_crtc_state->stream);
46217857 WARN_ON(!status);
46227858 WARN_ON(!status->plane_count);
46237859
4624
- /*TODO How it works with MPO ?*/
4625
- if (!commit_planes_to_stream(
4626
- dm->dc,
4627
- status->plane_states,
4628
- status->plane_count,
4629
- dm_new_crtc_state,
4630
- to_dm_crtc_state(old_crtc_state),
4631
- dm_state->context))
4632
- dm_error("%s: Failed to update stream scaling!\n", __func__);
7860
+ /*
7861
+ * TODO: DC refuses to perform stream updates without a dc_surface_update.
7862
+ * Here we create an empty update on each plane.
7863
+ * To fix this, DC should permit updating only stream properties.
7864
+ */
7865
+ for (j = 0; j < status->plane_count; j++)
7866
+ dummy_updates[j].surface = status->plane_states[0];
7867
+
7868
+
7869
+ mutex_lock(&dm->dc_lock);
7870
+ dc_commit_updates_for_stream(dm->dc,
7871
+ dummy_updates,
7872
+ status->plane_count,
7873
+ dm_new_crtc_state->stream,
7874
+ &stream_update,
7875
+ dc_state);
7876
+ mutex_unlock(&dm->dc_lock);
46337877 }
46347878
7879
+ /* Count number of newly disabled CRTCs for dropping PM refs later. */
46357880 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
4636
- new_crtc_state, i) {
4637
- /*
4638
- * loop to enable interrupts on newly arrived crtc
4639
- */
4640
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4641
- bool modeset_needed;
4642
-
7881
+ new_crtc_state, i) {
46437882 if (old_crtc_state->active && !new_crtc_state->active)
46447883 crtc_disable_count++;
46457884
46467885 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
46477886 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4648
- modeset_needed = modeset_required(
4649
- new_crtc_state,
4650
- dm_new_crtc_state->stream,
4651
- dm_old_crtc_state->stream);
46527887
4653
- if (dm_new_crtc_state->stream == NULL || !modeset_needed)
4654
- continue;
7888
+ /* For freesync config update on crtc state and params for irq */
7889
+ update_stream_irq_parameters(dm, dm_new_crtc_state);
46557890
4656
- if (adev->dm.freesync_module)
4657
- mod_freesync_notify_mode_change(
4658
- adev->dm.freesync_module,
4659
- &dm_new_crtc_state->stream, 1);
4660
-
4661
- manage_dm_interrupts(adev, acrtc, true);
7891
+ /* Handle vrr on->off / off->on transitions */
7892
+ amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7893
+ dm_new_crtc_state);
46627894 }
7895
+
7896
+ /**
7897
+ * Enable interrupts for CRTCs that are newly enabled or went through
7898
+ * a modeset. It was intentionally deferred until after the front end
7899
+ * state was modified to wait until the OTG was on and so the IRQ
7900
+ * handlers didn't access stale or invalid state.
7901
+ */
7902
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7903
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7904
+
7905
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7906
+
7907
+ if (new_crtc_state->active &&
7908
+ (!old_crtc_state->active ||
7909
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7910
+ dc_stream_retain(dm_new_crtc_state->stream);
7911
+ acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
7912
+ manage_dm_interrupts(adev, acrtc, true);
7913
+
7914
+#ifdef CONFIG_DEBUG_FS
7915
+ /**
7916
+ * Frontend may have changed so reapply the CRC capture
7917
+ * settings for the stream.
7918
+ */
7919
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7920
+
7921
+ if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7922
+ amdgpu_dm_crtc_configure_crc_source(
7923
+ crtc, dm_new_crtc_state,
7924
+ dm_new_crtc_state->crc_src);
7925
+ }
7926
+#endif
7927
+ }
7928
+ }
7929
+
7930
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7931
+ if (new_crtc_state->async_flip)
7932
+ wait_for_vblank = false;
46637933
46647934 /* update planes when needed per crtc*/
46657935 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
46667936 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
46677937
46687938 if (dm_new_crtc_state->stream)
4669
- amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank);
7939
+ amdgpu_dm_commit_planes(state, dc_state, dev,
7940
+ dm, crtc, wait_for_vblank);
46707941 }
46717942
7943
+ /* Update audio instances for each connector. */
7944
+ amdgpu_dm_commit_audio(dev, state);
46727945
46737946 /*
46747947 * send vblank event on all events not handled in flip and
46757948 * mark consumed event for drm_atomic_helper_commit_hw_done
46767949 */
4677
- spin_lock_irqsave(&adev->ddev->event_lock, flags);
7950
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
46787951 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
46797952
46807953 if (new_crtc_state->event)
....@@ -4682,29 +7955,27 @@
46827955
46837956 new_crtc_state->event = NULL;
46847957 }
4685
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7958
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
46867959
7960
+ /* Signal HW programming completion */
7961
+ drm_atomic_helper_commit_hw_done(state);
46877962
46887963 if (wait_for_vblank)
46897964 drm_atomic_helper_wait_for_flip_done(dev, state);
46907965
4691
- /*
4692
- * FIXME:
4693
- * Delay hw_done() until flip_done() is signaled. This is to block
4694
- * another commit from freeing the CRTC state while we're still
4695
- * waiting on flip_done.
4696
- */
4697
- drm_atomic_helper_commit_hw_done(state);
4698
-
46997966 drm_atomic_helper_cleanup_planes(dev, state);
47007967
4701
- /* Finally, drop a runtime PM reference for each newly disabled CRTC,
7968
+ /*
7969
+ * Finally, drop a runtime PM reference for each newly disabled CRTC,
47027970 * so we can put the GPU into runtime suspend if we're not driving any
47037971 * displays anymore
47047972 */
47057973 for (i = 0; i < crtc_disable_count; i++)
47067974 pm_runtime_put_autosuspend(dev->dev);
47077975 pm_runtime_mark_last_busy(dev->dev);
7976
+
7977
+ if (dc_state_temp)
7978
+ dc_release_state(dc_state_temp);
47087979 }
47097980
47107981
....@@ -4764,9 +8035,9 @@
47648035 }
47658036
47668037 /*
4767
- * This functions handle all cases when set mode does not come upon hotplug.
4768
- * This include when the same display is unplugged then plugged back into the
4769
- * same port and when we are running without usermode desktop manager supprot
8038
+ * This function handles all cases when set mode does not come upon hotplug.
8039
+ * This includes when a display is unplugged then plugged back into the
8040
+ * same port and when running without usermode desktop manager support
47708041 */
47718042 void dm_restore_drm_connector_state(struct drm_device *dev,
47728043 struct drm_connector *connector)
....@@ -4795,7 +8066,7 @@
47958066 dm_force_atomic_commit(&aconnector->base);
47968067 }
47978068
4798
-/*`
8069
+/*
47998070 * Grabs all modesetting locks to serialize against any blocking commits,
48008071 * Waits for completion of all non blocking commits.
48018072 */
....@@ -4806,7 +8077,8 @@
48068077 struct drm_crtc_commit *commit;
48078078 long ret;
48088079
4809
- /* Adding all modeset locks to aquire_ctx will
8080
+ /*
8081
+ * Adding all modeset locks to acquire_ctx will
48108082 * ensure that when the framework releases it, the
48118083 * extra locks we are locking here will get released too
48128084 */
....@@ -4825,7 +8097,8 @@
48258097 if (!commit)
48268098 continue;
48278099
4828
- /* Make sure all pending HW programming completed and
8100
+ /*
8101
+ * Make sure all pending HW programming completed and
48298102 * page flips done
48308103 */
48318104 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
....@@ -4844,197 +8117,273 @@
48448117 return ret < 0 ? ret : 0;
48458118 }
48468119
4847
-static int dm_update_crtcs_state(struct dc *dc,
4848
- struct drm_atomic_state *state,
4849
- bool enable,
4850
- bool *lock_and_validation_needed)
8120
+static void get_freesync_config_for_crtc(
8121
+ struct dm_crtc_state *new_crtc_state,
8122
+ struct dm_connector_state *new_con_state)
48518123 {
4852
- struct drm_crtc *crtc;
4853
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4854
- int i;
8124
+ struct mod_freesync_config config = {0};
8125
+ struct amdgpu_dm_connector *aconnector =
8126
+ to_amdgpu_dm_connector(new_con_state->base.connector);
8127
+ struct drm_display_mode *mode = &new_crtc_state->base.mode;
8128
+ int vrefresh = drm_mode_vrefresh(mode);
8129
+
8130
+ new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8131
+ vrefresh >= aconnector->min_vfreq &&
8132
+ vrefresh <= aconnector->max_vfreq;
8133
+
8134
+ if (new_crtc_state->vrr_supported) {
8135
+ new_crtc_state->stream->ignore_msa_timing_param = true;
8136
+ config.state = new_crtc_state->base.vrr_enabled ?
8137
+ VRR_STATE_ACTIVE_VARIABLE :
8138
+ VRR_STATE_INACTIVE;
8139
+ config.min_refresh_in_uhz =
8140
+ aconnector->min_vfreq * 1000000;
8141
+ config.max_refresh_in_uhz =
8142
+ aconnector->max_vfreq * 1000000;
8143
+ config.vsif_supported = true;
8144
+ config.btr = true;
8145
+ }
8146
+
8147
+ new_crtc_state->freesync_config = config;
8148
+}
8149
+
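The Hz-to-microhertz scaling above deserves a concrete example, since mod_freesync works in uHz while the connector stores whole Hz. With an assumed 48-144 Hz FreeSync panel (values illustrative, not from the source):

	/* Assumed EDID-derived range; mod_freesync expects micro-hertz. */
	unsigned int min_vfreq = 48, max_vfreq = 144;	/* Hz */
	unsigned int min_uhz = min_vfreq * 1000000;	/*  48,000,000 uHz */
	unsigned int max_uhz = max_vfreq * 1000000;	/* 144,000,000 uHz */

vrr_supported then reduces to checking that the mode's nominal vrefresh falls inside [min_vfreq, max_vfreq].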
8150
+static void reset_freesync_config_for_crtc(
8151
+ struct dm_crtc_state *new_crtc_state)
8152
+{
8153
+ new_crtc_state->vrr_supported = false;
8154
+
8155
+ memset(&new_crtc_state->vrr_infopacket, 0,
8156
+ sizeof(new_crtc_state->vrr_infopacket));
8157
+}
8158
+
8159
+static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8160
+ struct drm_atomic_state *state,
8161
+ struct drm_crtc *crtc,
8162
+ struct drm_crtc_state *old_crtc_state,
8163
+ struct drm_crtc_state *new_crtc_state,
8164
+ bool enable,
8165
+ bool *lock_and_validation_needed)
8166
+{
8167
+ struct dm_atomic_state *dm_state = NULL;
48558168 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
4856
- struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
48578169 struct dc_stream_state *new_stream;
48588170 int ret = 0;
48598171
4860
- /*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
4861
- /* update changed items */
4862
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4863
- struct amdgpu_crtc *acrtc = NULL;
4864
- struct amdgpu_dm_connector *aconnector = NULL;
4865
- struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
4866
- struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
4867
- struct drm_plane_state *new_plane_state = NULL;
8172
+ /*
8173
+ * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8174
+ * update changed items
8175
+ */
8176
+ struct amdgpu_crtc *acrtc = NULL;
8177
+ struct amdgpu_dm_connector *aconnector = NULL;
8178
+ struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8179
+ struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
48688180
4869
- new_stream = NULL;
8181
+ new_stream = NULL;
48708182
4871
- dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4872
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4873
- acrtc = to_amdgpu_crtc(crtc);
8183
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8184
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8185
+ acrtc = to_amdgpu_crtc(crtc);
8186
+ aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
48748187
4875
- new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
8188
+ /* TODO This hack should go away */
8189
+ if (aconnector && enable) {
8190
+ /* Make sure fake sink is created in plug-in scenario */
8191
+ drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8192
+ &aconnector->base);
8193
+ drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8194
+ &aconnector->base);
48768195
4877
- if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
8196
+ if (IS_ERR(drm_new_conn_state)) {
8197
+ ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8198
+ goto fail;
8199
+ }
8200
+
8201
+ dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8202
+ dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8203
+
8204
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8205
+ goto skip_modeset;
8206
+
8207
+ new_stream = create_validate_stream_for_sink(aconnector,
8208
+ &new_crtc_state->mode,
8209
+ dm_new_conn_state,
8210
+ dm_old_crtc_state->stream);
8211
+
8212
+ /*
8213
+ * we can have no stream on ACTION_SET if a display
8214
+ * was disconnected during S3, in this case it is not an
8215
+ * error, the OS will be updated after detection, and
8216
+ * will do the right thing on next atomic commit
8217
+ */
8218
+
8219
+ if (!new_stream) {
8220
+ DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8221
+ __func__, acrtc->base.base.id);
8222
+ ret = -ENOMEM;
8223
+ goto fail;
8224
+ }
8225
+
8226
+ /*
8227
+ * TODO: Check VSDB bits to decide whether this should
8228
+ * be enabled or not.
8229
+ */
8230
+ new_stream->triggered_crtc_reset.enabled =
8231
+ dm->force_timing_sync;
8232
+
8233
+ dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8234
+
8235
+ ret = fill_hdr_info_packet(drm_new_conn_state,
8236
+ &new_stream->hdr_static_metadata);
8237
+ if (ret)
8238
+ goto fail;
8239
+
8240
+ /*
8241
+ * If we already removed the old stream from the context
8242
+ * (and set the new stream to NULL) then we can't reuse
8243
+ * the old stream even if the stream and scaling are unchanged.
8244
+ * We'll hit the BUG_ON and black screen.
8245
+ *
8246
+ * TODO: Refactor this function to allow this check to work
8247
+ * in all conditions.
8248
+ */
8249
+ if (dm_new_crtc_state->stream &&
8250
+ dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8251
+ dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8252
+ new_crtc_state->mode_changed = false;
8253
+ DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8254
+ new_crtc_state->mode_changed);
8255
+ }
8256
+ }
8257
+
8258
+ /* mode_changed flag may get updated above, need to check again */
8259
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8260
+ goto skip_modeset;
8261
+
8262
+ DRM_DEBUG_DRIVER(
8263
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8264
+ "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8265
+ "connectors_changed:%d\n",
8266
+ acrtc->crtc_id,
8267
+ new_crtc_state->enable,
8268
+ new_crtc_state->active,
8269
+ new_crtc_state->planes_changed,
8270
+ new_crtc_state->mode_changed,
8271
+ new_crtc_state->active_changed,
8272
+ new_crtc_state->connectors_changed);
8273
+
8274
+ /* Remove stream for any changed/disabled CRTC */
8275
+ if (!enable) {
8276
+
8277
+ if (!dm_old_crtc_state->stream)
8278
+ goto skip_modeset;
8279
+
8280
+ ret = dm_atomic_get_state(state, &dm_state);
8281
+ if (ret)
8282
+ goto fail;
8283
+
8284
+ DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8285
+ crtc->base.id);
8286
+
8287
+ /* i.e. reset mode */
8288
+ if (dc_remove_stream_from_ctx(
8289
+ dm->dc,
8290
+ dm_state->context,
8291
+ dm_old_crtc_state->stream) != DC_OK) {
48788292 ret = -EINVAL;
48798293 goto fail;
48808294 }
48818295
4882
- aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8296
+ dc_stream_release(dm_old_crtc_state->stream);
8297
+ dm_new_crtc_state->stream = NULL;
48838298
4884
- /* TODO This hack should go away */
4885
- if (aconnector && enable) {
4886
- // Make sure fake sink is created in plug-in scenario
4887
- drm_new_conn_state = drm_atomic_get_new_connector_state(state,
4888
- &aconnector->base);
4889
- drm_old_conn_state = drm_atomic_get_old_connector_state(state,
4890
- &aconnector->base);
8299
+ reset_freesync_config_for_crtc(dm_new_crtc_state);
48918300
4892
- if (IS_ERR(drm_new_conn_state)) {
4893
- ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
4894
- break;
4895
- }
8301
+ *lock_and_validation_needed = true;
48968302
4897
- dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
4898
- dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8303
+ } else {/* Add stream for any updated/enabled CRTC */
8304
+ /*
8305
+ * Quick fix to prevent NULL pointer on new_stream when
8306
+ * added MST connectors not found in existing crtc_state in the chained mode
8307
+ * TODO: need to dig out the root cause of that
8308
+ */
8309
+ if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8310
+ goto skip_modeset;
48998311
4900
- new_stream = create_stream_for_sink(aconnector,
4901
- &new_crtc_state->mode,
4902
- dm_new_conn_state);
8312
+ if (modereset_required(new_crtc_state))
8313
+ goto skip_modeset;
49038314
4904
- /*
4905
- * we can have no stream on ACTION_SET if a display
4906
- * was disconnected during S3, in this case it not and
4907
- * error, the OS will be updated after detection, and
4908
- * do the right thing on next atomic commit
4909
- */
8315
+ if (modeset_required(new_crtc_state, new_stream,
8316
+ dm_old_crtc_state->stream)) {
49108317
4911
- if (!new_stream) {
4912
- DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
4913
- __func__, acrtc->base.base.id);
4914
- break;
4915
- }
8318
+ WARN_ON(dm_new_crtc_state->stream);
49168319
4917
- if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4918
- dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
4919
- new_crtc_state->mode_changed = false;
4920
- DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
4921
- new_crtc_state->mode_changed);
4922
- }
4923
- }
8320
+ ret = dm_atomic_get_state(state, &dm_state);
8321
+ if (ret)
8322
+ goto fail;
49248323
4925
- if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
4926
- goto next_crtc;
8324
+ dm_new_crtc_state->stream = new_stream;
49278325
4928
- DRM_DEBUG_DRIVER(
4929
- "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4930
- "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4931
- "connectors_changed:%d\n",
4932
- acrtc->crtc_id,
4933
- new_crtc_state->enable,
4934
- new_crtc_state->active,
4935
- new_crtc_state->planes_changed,
4936
- new_crtc_state->mode_changed,
4937
- new_crtc_state->active_changed,
4938
- new_crtc_state->connectors_changed);
8326
+ dc_stream_retain(new_stream);
49398327
4940
- /* Remove stream for any changed/disabled CRTC */
4941
- if (!enable) {
8328
+ DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8329
+ crtc->base.id);
49428330
4943
- if (!dm_old_crtc_state->stream)
4944
- goto next_crtc;
4945
-
4946
- DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
4947
- crtc->base.id);
4948
-
4949
- /* i.e. reset mode */
4950
- if (dc_remove_stream_from_ctx(
4951
- dc,
8331
+ if (dc_add_stream_to_ctx(
8332
+ dm->dc,
49528333 dm_state->context,
4953
- dm_old_crtc_state->stream) != DC_OK) {
8334
+ dm_new_crtc_state->stream) != DC_OK) {
49548335 ret = -EINVAL;
49558336 goto fail;
49568337 }
49578338
4958
- dc_stream_release(dm_old_crtc_state->stream);
4959
- dm_new_crtc_state->stream = NULL;
4960
-
49618339 *lock_and_validation_needed = true;
4962
-
4963
- } else {/* Add stream for any updated/enabled CRTC */
4964
- /*
4965
- * Quick fix to prevent NULL pointer on new_stream when
4966
- * added MST connectors not found in existing crtc_state in the chained mode
4967
- * TODO: need to dig out the root cause of that
4968
- */
4969
- if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
4970
- goto next_crtc;
4971
-
4972
- if (modereset_required(new_crtc_state))
4973
- goto next_crtc;
4974
-
4975
- if (modeset_required(new_crtc_state, new_stream,
4976
- dm_old_crtc_state->stream)) {
4977
-
4978
- WARN_ON(dm_new_crtc_state->stream);
4979
-
4980
- dm_new_crtc_state->stream = new_stream;
4981
-
4982
- dc_stream_retain(new_stream);
4983
-
4984
- DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
4985
- crtc->base.id);
4986
-
4987
- if (dc_add_stream_to_ctx(
4988
- dc,
4989
- dm_state->context,
4990
- dm_new_crtc_state->stream) != DC_OK) {
4991
- ret = -EINVAL;
4992
- goto fail;
4993
- }
4994
-
4995
- *lock_and_validation_needed = true;
4996
- }
4997
- }
4998
-
4999
-next_crtc:
5000
- /* Release extra reference */
5001
- if (new_stream)
5002
- dc_stream_release(new_stream);
5003
-
5004
- /*
5005
- * We want to do dc stream updates that do not require a
5006
- * full modeset below.
5007
- */
5008
- if (!(enable && aconnector && new_crtc_state->enable &&
5009
- new_crtc_state->active))
5010
- continue;
5011
- /*
5012
- * Given above conditions, the dc state cannot be NULL because:
5013
- * 1. We're in the process of enabling CRTCs (just been added
5014
- * to the dc context, or already is on the context)
5015
- * 2. Has a valid connector attached, and
5016
- * 3. Is currently active and enabled.
5017
- * => The dc stream state currently exists.
5018
- */
5019
- BUG_ON(dm_new_crtc_state->stream == NULL);
5020
-
5021
- /* Scaling or underscan settings */
5022
- if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
5023
- update_stream_scaling_settings(
5024
- &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
5025
-
5026
- /*
5027
- * Color management settings. We also update color properties
5028
- * when a modeset is needed, to ensure it gets reprogrammed.
5029
- */
5030
- if (dm_new_crtc_state->base.color_mgmt_changed ||
5031
- drm_atomic_crtc_needs_modeset(new_crtc_state)) {
5032
- ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
5033
- if (ret)
5034
- goto fail;
5035
- amdgpu_dm_set_ctm(dm_new_crtc_state);
50368340 }
50378341 }
8342
+
8343
+skip_modeset:
8344
+ /* Release extra reference */
8345
+ if (new_stream)
8346
+ dc_stream_release(new_stream);
8347
+
8348
+ /*
8349
+ * We want to do dc stream updates that do not require a
8350
+ * full modeset below.
8351
+ */
8352
+ if (!(enable && aconnector && new_crtc_state->active))
8353
+ return 0;
8354
+ /*
8355
+ * Given above conditions, the dc state cannot be NULL because:
8356
+ * 1. We're in the process of enabling CRTCs (just been added
8357
+ * to the dc context, or already is on the context)
8358
+ * 2. Has a valid connector attached, and
8359
+ * 3. Is currently active and enabled.
8360
+ * => The dc stream state currently exists.
8361
+ */
8362
+ BUG_ON(dm_new_crtc_state->stream == NULL);
8363
+
8364
+ /* Scaling or underscan settings */
8365
+ if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
8366
+ drm_atomic_crtc_needs_modeset(new_crtc_state))
8367
+ update_stream_scaling_settings(
8368
+ &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8369
+
8370
+ /* ABM settings */
8371
+ dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8372
+
8373
+ /*
8374
+ * Color management settings. We also update color properties
8375
+ * when a modeset is needed, to ensure it gets reprogrammed.
8376
+ */
8377
+ if (dm_new_crtc_state->base.color_mgmt_changed ||
8378
+ drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8379
+ ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8380
+ if (ret)
8381
+ goto fail;
8382
+ }
8383
+
8384
+ /* Update Freesync settings. */
8385
+ get_freesync_config_for_crtc(dm_new_crtc_state,
8386
+ dm_new_conn_state);
50388387
50398388 return ret;
50408389
....@@ -5044,166 +8393,436 @@
50448393 return ret;
50458394 }
50468395
5047
-static int dm_update_planes_state(struct dc *dc,
5048
- struct drm_atomic_state *state,
5049
- bool enable,
5050
- bool *lock_and_validation_needed)
8396
+static bool should_reset_plane(struct drm_atomic_state *state,
8397
+ struct drm_plane *plane,
8398
+ struct drm_plane_state *old_plane_state,
8399
+ struct drm_plane_state *new_plane_state)
50518400 {
8401
+ struct drm_plane *other;
8402
+ struct drm_plane_state *old_other_state, *new_other_state;
8403
+ struct drm_crtc_state *new_crtc_state;
8404
+ int i;
8405
+
8406
+ /*
8407
+ * TODO: Remove this hack once the checks below are sufficient
8408
+ * to determine when we need to reset all the planes on
8409
+ * the stream.
8410
+ */
8411
+ if (state->allow_modeset)
8412
+ return true;
8413
+
8414
+ /* Exit early if we know that we're adding or removing the plane. */
8415
+ if (old_plane_state->crtc != new_plane_state->crtc)
8416
+ return true;
8417
+
8418
+ /* old_crtc == new_crtc == NULL, plane not in context. */
8419
+ if (!new_plane_state->crtc)
8420
+ return false;
8421
+
8422
+ new_crtc_state =
8423
+ drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8424
+
8425
+ if (!new_crtc_state)
8426
+ return true;
8427
+
8428
+ /* CRTC Degamma changes currently require us to recreate planes. */
8429
+ if (new_crtc_state->color_mgmt_changed)
8430
+ return true;
8431
+
8432
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8433
+ return true;
8434
+
8435
+ /*
8436
+ * If there are any new primary or overlay planes being added or
8437
+ * removed then the z-order can potentially change. To ensure
8438
+ * correct z-order and pipe acquisition the current DC architecture
8439
+ * requires us to remove and recreate all existing planes.
8440
+ *
8441
+ * TODO: Come up with a more elegant solution for this.
8442
+ */
8443
+ for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8444
+ struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;
8445
+
8446
+ if (other->type == DRM_PLANE_TYPE_CURSOR)
8447
+ continue;
8448
+
8449
+ if (old_other_state->crtc != new_plane_state->crtc &&
8450
+ new_other_state->crtc != new_plane_state->crtc)
8451
+ continue;
8452
+
8453
+ if (old_other_state->crtc != new_other_state->crtc)
8454
+ return true;
8455
+
8456
+ /* Src/dst size and scaling updates. */
8457
+ if (old_other_state->src_w != new_other_state->src_w ||
8458
+ old_other_state->src_h != new_other_state->src_h ||
8459
+ old_other_state->crtc_w != new_other_state->crtc_w ||
8460
+ old_other_state->crtc_h != new_other_state->crtc_h)
8461
+ return true;
8462
+
8463
+ /* Rotation / mirroring updates. */
8464
+ if (old_other_state->rotation != new_other_state->rotation)
8465
+ return true;
8466
+
8467
+ /* Blending updates. */
8468
+ if (old_other_state->pixel_blend_mode !=
8469
+ new_other_state->pixel_blend_mode)
8470
+ return true;
8471
+
8472
+ /* Alpha updates. */
8473
+ if (old_other_state->alpha != new_other_state->alpha)
8474
+ return true;
8475
+
8476
+ /* Colorspace changes. */
8477
+ if (old_other_state->color_range != new_other_state->color_range ||
8478
+ old_other_state->color_encoding != new_other_state->color_encoding)
8479
+ return true;
8480
+
8481
+ /* Framebuffer checks fall at the end. */
8482
+ if (!old_other_state->fb || !new_other_state->fb)
8483
+ continue;
8484
+
8485
+ /* Pixel format changes can require bandwidth updates. */
8486
+ if (old_other_state->fb->format != new_other_state->fb->format)
8487
+ return true;
8488
+
8489
+ old_dm_plane_state = to_dm_plane_state(old_other_state);
8490
+ new_dm_plane_state = to_dm_plane_state(new_other_state);
8491
+
8492
+ /* Tiling and DCC changes also require bandwidth updates. */
8493
+ if (old_dm_plane_state->tiling_flags !=
8494
+ new_dm_plane_state->tiling_flags)
8495
+ return true;
8496
+ }
8497
+
8498
+ return false;
8499
+}
8500
+
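should_reset_plane() boils down to a field-by-field comparison of sibling plane states. A compact standalone sketch of the same idea, covering a subset of the checks above (hypothetical struct, not the DRM one):

#include <stdbool.h>
#include <stdint.h>

struct sketch_plane_state {
	uint32_t src_w, src_h;     /* source size */
	uint32_t crtc_w, crtc_h;   /* destination size */
	unsigned int rotation;
	uint16_t alpha;
	const void *format;        /* stands in for the fb format pointer */
};

/* Any difference in size, scaling, rotation, alpha or format means
 * the plane set has to be rebuilt, matching the checks above. */
static bool sibling_plane_changed(const struct sketch_plane_state *old_st,
				  const struct sketch_plane_state *new_st)
{
	return old_st->src_w != new_st->src_w ||
	       old_st->src_h != new_st->src_h ||
	       old_st->crtc_w != new_st->crtc_w ||
	       old_st->crtc_h != new_st->crtc_h ||
	       old_st->rotation != new_st->rotation ||
	       old_st->alpha != new_st->alpha ||
	       old_st->format != new_st->format;
}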
8501
+static int dm_update_plane_state(struct dc *dc,
8502
+ struct drm_atomic_state *state,
8503
+ struct drm_plane *plane,
8504
+ struct drm_plane_state *old_plane_state,
8505
+ struct drm_plane_state *new_plane_state,
8506
+ bool enable,
8507
+ bool *lock_and_validation_needed)
8508
+{
8509
+
8510
+ struct dm_atomic_state *dm_state = NULL;
50528511 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
50538512 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
5054
- struct drm_plane *plane;
5055
- struct drm_plane_state *old_plane_state, *new_plane_state;
50568513 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
5057
- struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
50588514 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
5059
- int i ;
5060
- /* TODO return page_flip_needed() function */
5061
- bool pflip_needed = !state->allow_modeset;
8515
+ struct amdgpu_crtc *new_acrtc;
8516
+ bool needs_reset;
50628517 int ret = 0;
50638518
50648519
5065
- /* Add new planes, in reverse order as DC expectation */
5066
- for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
5067
- new_plane_crtc = new_plane_state->crtc;
5068
- old_plane_crtc = old_plane_state->crtc;
5069
- dm_new_plane_state = to_dm_plane_state(new_plane_state);
5070
- dm_old_plane_state = to_dm_plane_state(old_plane_state);
8520
+ new_plane_crtc = new_plane_state->crtc;
8521
+ old_plane_crtc = old_plane_state->crtc;
8522
+ dm_new_plane_state = to_dm_plane_state(new_plane_state);
8523
+ dm_old_plane_state = to_dm_plane_state(old_plane_state);
50718524
5072
- /*TODO Implement atomic check for cursor plane */
5073
- if (plane->type == DRM_PLANE_TYPE_CURSOR)
5074
- continue;
8525
+ /* TODO: Implement a better atomic check for the cursor plane */
8526
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8527
+ if (!enable || !new_plane_crtc ||
8528
+ drm_atomic_plane_disabling(plane->state, new_plane_state))
8529
+ return 0;
50758530
5076
- /* Remove any changed/removed planes */
5077
- if (!enable) {
5078
- if (pflip_needed &&
5079
- plane->type != DRM_PLANE_TYPE_OVERLAY)
5080
- continue;
8531
+ new_acrtc = to_amdgpu_crtc(new_plane_crtc);
50818532
5082
- if (!old_plane_crtc)
5083
- continue;
5084
-
5085
- old_crtc_state = drm_atomic_get_old_crtc_state(
5086
- state, old_plane_crtc);
5087
- dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5088
-
5089
- if (!dm_old_crtc_state->stream)
5090
- continue;
5091
-
5092
- DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
5093
- plane->base.id, old_plane_crtc->base.id);
5094
-
5095
- if (!dc_remove_plane_from_context(
5096
- dc,
5097
- dm_old_crtc_state->stream,
5098
- dm_old_plane_state->dc_state,
5099
- dm_state->context)) {
5100
-
5101
- ret = EINVAL;
5102
- return ret;
5103
- }
5104
-
5105
-
5106
- dc_plane_state_release(dm_old_plane_state->dc_state);
5107
- dm_new_plane_state->dc_state = NULL;
5108
-
5109
- *lock_and_validation_needed = true;
5110
-
5111
- } else { /* Add new planes */
5112
- struct dc_plane_state *dc_new_plane_state;
5113
-
5114
- if (drm_atomic_plane_disabling(plane->state, new_plane_state))
5115
- continue;
5116
-
5117
- if (!new_plane_crtc)
5118
- continue;
5119
-
5120
- new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
5121
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5122
-
5123
- if (!dm_new_crtc_state->stream)
5124
- continue;
5125
-
5126
- if (pflip_needed &&
5127
- plane->type != DRM_PLANE_TYPE_OVERLAY)
5128
- continue;
5129
-
5130
- WARN_ON(dm_new_plane_state->dc_state);
5131
-
5132
- dc_new_plane_state = dc_create_plane_state(dc);
5133
- if (!dc_new_plane_state)
5134
- return -ENOMEM;
5135
-
5136
- DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
5137
- plane->base.id, new_plane_crtc->base.id);
5138
-
5139
- ret = fill_plane_attributes(
5140
- new_plane_crtc->dev->dev_private,
5141
- dc_new_plane_state,
5142
- new_plane_state,
5143
- new_crtc_state);
5144
- if (ret) {
5145
- dc_plane_state_release(dc_new_plane_state);
5146
- return ret;
5147
- }
5148
-
5149
- /*
5150
- * Any atomic check errors that occur after this will
5151
- * not need a release. The plane state will be attached
5152
- * to the stream, and therefore part of the atomic
5153
- * state. It'll be released when the atomic state is
5154
- * cleaned.
5155
- */
5156
- if (!dc_add_plane_to_context(
5157
- dc,
5158
- dm_new_crtc_state->stream,
5159
- dc_new_plane_state,
5160
- dm_state->context)) {
5161
-
5162
- dc_plane_state_release(dc_new_plane_state);
5163
- return -EINVAL;
5164
- }
5165
-
5166
- dm_new_plane_state->dc_state = dc_new_plane_state;
5167
-
5168
- /* Tell DC to do a full surface update every time there
5169
- * is a plane change. Inefficient, but works for now.
5170
- */
5171
- dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
5172
-
5173
- *lock_and_validation_needed = true;
8533
+ if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8534
+ (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8535
+ DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8536
+ new_plane_state->crtc_w, new_plane_state->crtc_h);
8537
+ return -EINVAL;
51748538 }
8539
+
8540
+ return 0;
8541
+ }
8542
+
8543
+ needs_reset = should_reset_plane(state, plane, old_plane_state,
8544
+ new_plane_state);
8545
+
8546
+ /* Remove any changed/removed planes */
8547
+ if (!enable) {
8548
+ if (!needs_reset)
8549
+ return 0;
8550
+
8551
+ if (!old_plane_crtc)
8552
+ return 0;
8553
+
8554
+ old_crtc_state = drm_atomic_get_old_crtc_state(
8555
+ state, old_plane_crtc);
8556
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8557
+
8558
+ if (!dm_old_crtc_state->stream)
8559
+ return 0;
8560
+
8561
+ DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8562
+ plane->base.id, old_plane_crtc->base.id);
8563
+
8564
+ ret = dm_atomic_get_state(state, &dm_state);
8565
+ if (ret)
8566
+ return ret;
8567
+
8568
+ if (!dc_remove_plane_from_context(
8569
+ dc,
8570
+ dm_old_crtc_state->stream,
8571
+ dm_old_plane_state->dc_state,
8572
+ dm_state->context)) {
8573
+
8574
+ return -EINVAL;
8575
+ }
8576
+
8577
+ if (dm_old_plane_state->dc_state)
8578
+ dc_plane_state_release(dm_old_plane_state->dc_state);
8579
+
8580
+ dm_new_plane_state->dc_state = NULL;
8581
+
8582
+ *lock_and_validation_needed = true;
8583
+
8584
+ } else { /* Add new planes */
8585
+ struct dc_plane_state *dc_new_plane_state;
8586
+
8587
+ if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8588
+ return 0;
8589
+
8590
+ if (!new_plane_crtc)
8591
+ return 0;
8592
+
8593
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8594
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8595
+
8596
+ if (!dm_new_crtc_state->stream)
8597
+ return 0;
8598
+
8599
+ if (!needs_reset)
8600
+ return 0;
8601
+
8602
+ ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8603
+ if (ret)
8604
+ return ret;
8605
+
8606
+ WARN_ON(dm_new_plane_state->dc_state);
8607
+
8608
+ dc_new_plane_state = dc_create_plane_state(dc);
8609
+ if (!dc_new_plane_state)
8610
+ return -ENOMEM;
8611
+
8612
+ DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8613
+ plane->base.id, new_plane_crtc->base.id);
8614
+
8615
+ ret = fill_dc_plane_attributes(
8616
+ drm_to_adev(new_plane_crtc->dev),
8617
+ dc_new_plane_state,
8618
+ new_plane_state,
8619
+ new_crtc_state);
8620
+ if (ret) {
8621
+ dc_plane_state_release(dc_new_plane_state);
8622
+ return ret;
8623
+ }
8624
+
8625
+ ret = dm_atomic_get_state(state, &dm_state);
8626
+ if (ret) {
8627
+ dc_plane_state_release(dc_new_plane_state);
8628
+ return ret;
8629
+ }
8630
+
8631
+ /*
8632
+ * Any atomic check errors that occur after this will
8633
+ * not need a release. The plane state will be attached
8634
+ * to the stream, and therefore part of the atomic
8635
+ * state. It'll be released when the atomic state is
8636
+ * cleaned.
8637
+ */
8638
+ if (!dc_add_plane_to_context(
8639
+ dc,
8640
+ dm_new_crtc_state->stream,
8641
+ dc_new_plane_state,
8642
+ dm_state->context)) {
8643
+
8644
+ dc_plane_state_release(dc_new_plane_state);
8645
+ return -EINVAL;
8646
+ }
8647
+
8648
+ dm_new_plane_state->dc_state = dc_new_plane_state;
8649
+
8650
+ /* Tell DC to do a full surface update every time there
8651
+ * is a plane change. Inefficient, but works for now.
8652
+ */
8653
+ dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8654
+
8655
+ *lock_and_validation_needed = true;
51758656 }
51768657
51778658
51788659 return ret;
51798660 }
51808661
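The add branch above follows a strict create/attach/release ownership discipline: the new dc_plane_state must be released on every failure path until it is attached to the stream, after which the atomic state owns it and later errors need no release. A generic sketch of that pattern (standalone, hypothetical names):

#include <stdlib.h>

struct sketch_obj { int dummy; };

static struct sketch_obj *sketch_create(void)    { return calloc(1, sizeof(struct sketch_obj)); }
static void sketch_release(struct sketch_obj *o) { free(o); }
static int sketch_attach(struct sketch_obj *o)   { (void)o; return 0; /* may fail */ }

static int add_with_cleanup(void)
{
	struct sketch_obj *o = sketch_create();

	if (!o)
		return -1;              /* -ENOMEM in the driver */

	if (sketch_attach(o) != 0) {
		sketch_release(o);      /* still ours: release on failure */
		return -1;              /* -EINVAL in the driver */
	}

	/* Attached: ownership transferred, no release on later errors. */
	return 0;
}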
8662
+#if defined(CONFIG_DRM_AMD_DC_DCN)
8663
+static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8664
+{
8665
+ struct drm_connector *connector;
8666
+ struct drm_connector_state *conn_state, *old_conn_state;
8667
+ struct amdgpu_dm_connector *aconnector = NULL;
8668
+ int i;
8669
+ for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
8670
+ if (!conn_state->crtc)
8671
+ conn_state = old_conn_state;
8672
+
8673
+ if (conn_state->crtc != crtc)
8674
+ continue;
8675
+
8676
+ aconnector = to_amdgpu_dm_connector(connector);
8677
+ if (!aconnector->port || !aconnector->mst_port)
8678
+ aconnector = NULL;
8679
+ else
8680
+ break;
8681
+ }
8682
+
8683
+ if (!aconnector)
8684
+ return 0;
8685
+
8686
+ return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8687
+}
8688
+#endif
8689
+
8690
+static int validate_overlay(struct drm_atomic_state *state)
8691
+{
8692
+ int i;
8693
+ struct drm_plane *plane;
8694
+ struct drm_plane_state *old_plane_state, *new_plane_state;
8695
+ struct drm_plane_state *primary_state, *overlay_state = NULL;
8696
+
8697
+ /* Check if primary plane is contained inside overlay */
8698
+ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8699
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
8700
+ if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8701
+ return 0;
8702
+
8703
+ overlay_state = new_plane_state;
8704
+ continue;
8705
+ }
8706
+ }
8707
+
8708
+ /* check if we're making changes to the overlay plane */
8709
+ if (!overlay_state)
8710
+ return 0;
8711
+
8712
+ /* check if overlay plane is enabled */
8713
+ if (!overlay_state->crtc)
8714
+ return 0;
8715
+
8716
+ /* find the primary plane for the CRTC that the overlay is enabled on */
8717
+ primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
8718
+ if (IS_ERR(primary_state))
8719
+ return PTR_ERR(primary_state);
8720
+
8721
+ /* check if primary plane is enabled */
8722
+ if (!primary_state->crtc)
8723
+ return 0;
8724
+
8725
+ /* Perform the bounds check to ensure the overlay plane covers the primary */
8726
+ if (primary_state->crtc_x < overlay_state->crtc_x ||
8727
+ primary_state->crtc_y < overlay_state->crtc_y ||
8728
+ primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
8729
+ primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
8730
+ DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
8731
+ return -EINVAL;
8732
+ }
8733
+
8734
+ return 0;
8735
+}
8736
+
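The bounds test in validate_overlay() is a plain rectangle-containment check. The same predicate in standalone form (hypothetical struct, not the DRM plane state):

#include <stdbool.h>

struct sketch_rect { int x, y, w, h; };

/* True when "inner" (the primary plane) lies fully inside "outer"
 * (the overlay plane), matching the four comparisons above. */
static bool rect_contains(const struct sketch_rect *outer,
			  const struct sketch_rect *inner)
{
	return inner->x >= outer->x &&
	       inner->y >= outer->y &&
	       inner->x + inner->w <= outer->x + outer->w &&
	       inner->y + inner->h <= outer->y + outer->h;
}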
8737
+/**
8738
+ * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8739
+ * @dev: The DRM device
8740
+ * @state: The atomic state to commit
8741
+ *
8742
+ * Validate that the given atomic state is programmable by DC into hardware.
8743
+ * This involves constructing a &struct dc_state reflecting the new hardware
8744
+ * state we wish to commit, then querying DC to see if it is programmable. It's
8745
+ * important not to modify the existing DC state. Otherwise, atomic_check
8746
+ * may unexpectedly commit hardware changes.
8747
+ *
8748
+ * When validating the DC state, it's important that the right locks are
8749
+ * acquired. For the full-update case, which removes/adds/updates streams on
8749
+ * one CRTC while flipping on another, acquiring the global lock guarantees
8750
+ * that any such full-update commit will wait for completion of any outstanding
8751
+ * flip using DRM's synchronization events.
8753
+ *
8754
+ * Note that DM adds the affected connectors for all CRTCs in state, even when
8755
+ * that might not seem necessary. This is because DC stream creation requires the
8756
+ * DC sink, which is tied to the DRM connector state. Cleaning this up should
8757
+ * be possible but non-trivial - a possible TODO item.
8758
+ *
8759
+ * Return: 0 on success, or a negative error code if validation failed.
8760
+ */
51818761 static int amdgpu_dm_atomic_check(struct drm_device *dev,
51828762 struct drm_atomic_state *state)
51838763 {
5184
- struct amdgpu_device *adev = dev->dev_private;
8764
+ struct amdgpu_device *adev = drm_to_adev(dev);
8765
+ struct dm_atomic_state *dm_state = NULL;
51858766 struct dc *dc = adev->dm.dc;
5186
- struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
51878767 struct drm_connector *connector;
51888768 struct drm_connector_state *old_con_state, *new_con_state;
51898769 struct drm_crtc *crtc;
51908770 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8771
+ struct drm_plane *plane;
8772
+ struct drm_plane_state *old_plane_state, *new_plane_state;
8773
+ enum dc_status status;
51918774 int ret, i;
5192
-
5193
- /*
5194
- * This bool will be set for true for any modeset/reset
5195
- * or plane update which implies non fast surface update.
5196
- */
51978775 bool lock_and_validation_needed = false;
8776
+
8777
+ amdgpu_check_debugfs_connector_property_change(adev, state);
51988778
51998779 ret = drm_atomic_helper_check_modeset(dev, state);
52008780 if (ret)
52018781 goto fail;
52028782
8783
+ /* Check connector changes */
8784
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8785
+ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8786
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8787
+
8788
+ /* Skip connectors that are disabled or part of modeset already. */
8789
+ if (!old_con_state->crtc && !new_con_state->crtc)
8790
+ continue;
8791
+
8792
+ if (!new_con_state->crtc)
8793
+ continue;
8794
+
8795
+ new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8796
+ if (IS_ERR(new_crtc_state)) {
8797
+ ret = PTR_ERR(new_crtc_state);
8798
+ goto fail;
8799
+ }
8800
+
8801
+ if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
8802
+ dm_old_con_state->scaling != dm_new_con_state->scaling)
8803
+ new_crtc_state->connectors_changed = true;
8804
+ }
8805
+
8806
+#if defined(CONFIG_DRM_AMD_DC_DCN)
8807
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
8808
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8809
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8810
+ ret = add_affected_mst_dsc_crtcs(state, crtc);
8811
+ if (ret)
8812
+ goto fail;
8813
+ }
8814
+ }
8815
+ }
8816
+#endif
52038817 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
52048818 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
5205
- !new_crtc_state->color_mgmt_changed)
8819
+ !new_crtc_state->color_mgmt_changed &&
8820
+ old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
52068821 continue;
8822
+
8823
+ ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
8824
+ if (ret)
8825
+ goto fail;
52078826
52088827 if (!new_crtc_state->enable)
52098828 continue;
....@@ -5217,32 +8836,99 @@
52178836 goto fail;
52188837 }
52198838
5220
- dm_state->context = dc_create_state();
5221
- ASSERT(dm_state->context);
5222
- dc_resource_state_copy_construct_current(dc, dm_state->context);
8839
+ /*
8840
+ * Add all primary and overlay planes on the CRTC to the state
8841
+ * whenever a plane is enabled to maintain correct z-ordering
8842
+ * and to enable fast surface updates.
8843
+ */
8844
+ drm_for_each_crtc(crtc, dev) {
8845
+ bool modified = false;
8846
+
8847
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8848
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
8849
+ continue;
8850
+
8851
+ if (new_plane_state->crtc == crtc ||
8852
+ old_plane_state->crtc == crtc) {
8853
+ modified = true;
8854
+ break;
8855
+ }
8856
+ }
8857
+
8858
+ if (!modified)
8859
+ continue;
8860
+
8861
+ drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8862
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
8863
+ continue;
8864
+
8865
+ new_plane_state =
8866
+ drm_atomic_get_plane_state(state, plane);
8867
+
8868
+ if (IS_ERR(new_plane_state)) {
8869
+ ret = PTR_ERR(new_plane_state);
8870
+ goto fail;
8871
+ }
8872
+ }
8873
+ }
8874
+
8875
+ /* Prepass for updating tiling flags on new planes. */
8876
+ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
8877
+ struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
8878
+ struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);
8879
+
8880
+ ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
8881
+ &new_dm_plane_state->tmz_surface);
8882
+ if (ret)
8883
+ goto fail;
8884
+ }
52238885
52248886 /* Remove existing planes if they are modified */
5225
- ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
5226
- if (ret) {
5227
- goto fail;
8887
+ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8888
+ ret = dm_update_plane_state(dc, state, plane,
8889
+ old_plane_state,
8890
+ new_plane_state,
8891
+ false,
8892
+ &lock_and_validation_needed);
8893
+ if (ret)
8894
+ goto fail;
52288895 }
52298896
52308897 /* Disable all crtcs which require disable */
5231
- ret = dm_update_crtcs_state(dc, state, false, &lock_and_validation_needed);
5232
- if (ret) {
5233
- goto fail;
8898
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8899
+ ret = dm_update_crtc_state(&adev->dm, state, crtc,
8900
+ old_crtc_state,
8901
+ new_crtc_state,
8902
+ false,
8903
+ &lock_and_validation_needed);
8904
+ if (ret)
8905
+ goto fail;
52348906 }
52358907
52368908 /* Enable all crtcs which require enable */
5237
- ret = dm_update_crtcs_state(dc, state, true, &lock_and_validation_needed);
5238
- if (ret) {
5239
- goto fail;
8909
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8910
+ ret = dm_update_crtc_state(&adev->dm, state, crtc,
8911
+ old_crtc_state,
8912
+ new_crtc_state,
8913
+ true,
8914
+ &lock_and_validation_needed);
8915
+ if (ret)
8916
+ goto fail;
52408917 }
52418918
5242
- /* Add new/modified planes */
5243
- ret = dm_update_planes_state(dc, state, true, &lock_and_validation_needed);
5244
- if (ret) {
8919
+ ret = validate_overlay(state);
8920
+ if (ret)
52458921 goto fail;
8922
+
8923
+ /* Add new/modified planes */
8924
+ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8925
+ ret = dm_update_plane_state(dc, state, plane,
8926
+ old_plane_state,
8927
+ new_plane_state,
8928
+ true,
8929
+ &lock_and_validation_needed);
8930
+ if (ret)
8931
+ goto fail;
52468932 }
52478933
52488934 /* Run this here since we want to validate the streams we created */
....@@ -5250,8 +8936,28 @@
52508936 if (ret)
52518937 goto fail;
52528938
8939
+ if (state->legacy_cursor_update) {
8940
+ /*
8941
+ * This is a fast cursor update coming from the plane update
8942
+ * helper, check if it can be done asynchronously for better
8943
+ * performance.
8944
+ */
8945
+ state->async_update =
8946
+ !drm_atomic_helper_async_check(dev, state);
8947
+
8948
+ /*
8949
+ * Skip the remaining global validation if this is an async
8950
+ * update. Cursor updates can be done without affecting
8951
+ * state or bandwidth calcs and this avoids the performance
8952
+ * penalty of locking the private state object and
8953
+ * allocating a new dc_state.
8954
+ */
8955
+ if (state->async_update)
8956
+ return 0;
8957
+ }
8958
+
52538959 /* Check scaling and underscan changes */
5254
- /*TODO Removed scaling changes validation due to inability to commit
8960
+ /* TODO: Scaling-change validation was removed due to the inability to commit
52558961
 * a new stream into the context w/o causing a full reset. Need to
52578963 */
....@@ -5272,26 +8978,105 @@
52728978 lock_and_validation_needed = true;
52738979 }
52748980
5275
- /*
5276
- * For full updates case when
5277
- * removing/adding/updating streams on once CRTC while flipping
5278
- * on another CRTC,
5279
- * acquiring global lock will guarantee that any such full
5280
- * update commit
5281
- * will wait for completion of any outstanding flip using DRMs
5282
- * synchronization events.
8981
+ /*
8982
+ * Streams and planes are reset when there are changes that affect
8983
+ * bandwidth. Anything that affects bandwidth needs to go through
8984
+ * DC global validation to ensure that the configuration can be applied
8985
+ * to hardware.
8986
+ *
8987
+ * We currently have to stall here in atomic_check for outstanding
8988
+ * commits to finish in this case because our IRQ handlers reference
8989
+ * DRM state directly - we can end up disabling interrupts too early
8990
+ * if we don't.
8991
+ *
8992
+ * TODO: Remove this stall and drop DM state private objects.
52838993 */
5284
-
52858994 if (lock_and_validation_needed) {
8995
+ ret = dm_atomic_get_state(state, &dm_state);
8996
+ if (ret)
8997
+ goto fail;
52868998
52878999 ret = do_aquire_global_lock(dev, state);
52889000 if (ret)
52899001 goto fail;
52909002
5291
- if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {
9003
+#if defined(CONFIG_DRM_AMD_DC_DCN)
9004
+ if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9005
+ goto fail;
9006
+
9007
+ ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9008
+ if (ret)
9009
+ goto fail;
9010
+#endif
9011
+
9012
+ /*
9013
+ * Perform validation of MST topology in the state:
9014
+ * We need to perform MST atomic check before calling
9015
+ * dc_validate_global_state(), or there is a chance
9016
+ * to get stuck in an infinite loop and hang eventually.
9017
+ */
9018
+ ret = drm_dp_mst_atomic_check(state);
9019
+ if (ret)
9020
+ goto fail;
9021
+ status = dc_validate_global_state(dc, dm_state->context, false);
9022
+ if (status != DC_OK) {
9023
+ drm_dbg_atomic(dev,
9024
+ "DC global validation failure: %s (%d)",
9025
+ dc_status_to_str(status), status);
52929026 ret = -EINVAL;
52939027 goto fail;
52949028 }
9029
+ } else {
9030
+ /*
9031
+ * The commit is a fast update. Fast updates shouldn't change
9032
+ * the DC context, affect global validation, and can have their
9033
+ * commit work done in parallel with other commits not touching
9034
+ * the same resource. If we have a new DC context as part of
9035
+ * the DM atomic state from validation we need to free it and
9036
+ * retain the existing one instead.
9037
+ *
9038
+ * Furthermore, since the DM atomic state only contains the DC
9039
+ * context and can safely be annulled, we can free the state
9040
+ * and clear the associated private object now to free
9041
+ * some memory and avoid a possible use-after-free later.
9042
+ */
9043
+
9044
+ for (i = 0; i < state->num_private_objs; i++) {
9045
+ struct drm_private_obj *obj = state->private_objs[i].ptr;
9046
+
9047
+ if (obj->funcs == adev->dm.atomic_obj.funcs) {
9048
+ int j = state->num_private_objs-1;
9049
+
9050
+ dm_atomic_destroy_state(obj,
9051
+ state->private_objs[i].state);
9052
+
9053
+ /* If i is not at the end of the array then the
9054
+ * last element needs to be moved to where i was
9055
+ * before the array can safely be truncated.
9056
+ */
9057
+ if (i != j)
9058
+ state->private_objs[i] =
9059
+ state->private_objs[j];
9060
+
9061
+ state->private_objs[j].ptr = NULL;
9062
+ state->private_objs[j].state = NULL;
9063
+ state->private_objs[j].old_state = NULL;
9064
+ state->private_objs[j].new_state = NULL;
9065
+
9066
+ state->num_private_objs = j;
9067
+ break;
9068
+ }
9069
+ }
9070
+ }
9071
+
9072
+ /* Store the overall update type for use later in atomic check. */
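The private-object cleanup above is a classic unordered "swap remove": the last array slot is moved into the freed position, then the array is truncated by one. The same operation in isolation (generic element type, hypothetical names):

#include <stddef.h>

/* Remove arr[i] from an unordered array of length *len. */
static void swap_remove(int *arr, size_t *len, size_t i)
{
	size_t j = *len - 1;

	if (i != j)
		arr[i] = arr[j];  /* move the last element into the hole */
	*len = j;                 /* truncate; ordering is not preserved */
}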
9073
+ for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
9074
+ struct dm_crtc_state *dm_new_crtc_state =
9075
+ to_dm_crtc_state(new_crtc_state);
9076
+
9077
+ dm_new_crtc_state->update_type = lock_and_validation_needed ?
9078
+ UPDATE_TYPE_FULL :
9079
+ UPDATE_TYPE_FAST;
52959080 }
52969081
52979082 /* Must be success */
....@@ -5327,8 +9112,8 @@
53279112
53289113 return capable;
53299114 }
5330
-void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
5331
- struct edid *edid)
9115
+void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9116
+ struct edid *edid)
53329117 {
53339118 int i;
53349119 bool edid_check_required;
....@@ -5337,14 +9122,25 @@
53379122 struct detailed_data_monitor_range *range;
53389123 struct amdgpu_dm_connector *amdgpu_dm_connector =
53399124 to_amdgpu_dm_connector(connector);
5340
- struct dm_connector_state *dm_con_state;
9125
+ struct dm_connector_state *dm_con_state = NULL;
53419126
53429127 struct drm_device *dev = connector->dev;
5343
- struct amdgpu_device *adev = dev->dev_private;
9128
+ struct amdgpu_device *adev = drm_to_adev(dev);
9129
+ bool freesync_capable = false;
53449130
53459131 if (!connector->state) {
53469132 DRM_ERROR("%s - Connector has no state", __func__);
5347
- return;
9133
+ goto update;
9134
+ }
9135
+
9136
+ if (!edid) {
9137
+ dm_con_state = to_dm_connector_state(connector->state);
9138
+
9139
+ amdgpu_dm_connector->min_vfreq = 0;
9140
+ amdgpu_dm_connector->max_vfreq = 0;
9141
+ amdgpu_dm_connector->pixel_clock_mhz = 0;
9142
+
9143
+ goto update;
53489144 }
53499145
53509146 dm_con_state = to_dm_connector_state(connector->state);
....@@ -5352,10 +9148,10 @@
53529148 edid_check_required = false;
53539149 if (!amdgpu_dm_connector->dc_sink) {
53549150 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
5355
- return;
9151
+ goto update;
53569152 }
53579153 if (!adev->dm.freesync_module)
5358
- return;
9154
+ goto update;
53599155 /*
53609156 * if edid is non-zero, restrict freesync to DP and eDP only
53619157 */
....@@ -5367,7 +9163,6 @@
53679163 amdgpu_dm_connector);
53689164 }
53699165 }
5370
- dm_con_state->freesync_capable = false;
53719166 if (edid_check_required == true && (edid->version > 1 ||
53729167 (edid->version == 1 && edid->revision > 1))) {
53739168 for (i = 0; i < 4; i++) {
....@@ -5397,28 +9192,166 @@
53979192 }
53989193
53999194 if (amdgpu_dm_connector->max_vfreq -
5400
- amdgpu_dm_connector->min_vfreq > 10) {
5401
- amdgpu_dm_connector->caps.supported = true;
5402
- amdgpu_dm_connector->caps.min_refresh_in_micro_hz =
5403
- amdgpu_dm_connector->min_vfreq * 1000000;
5404
- amdgpu_dm_connector->caps.max_refresh_in_micro_hz =
5405
- amdgpu_dm_connector->max_vfreq * 1000000;
5406
- dm_con_state->freesync_capable = true;
9195
+ amdgpu_dm_connector->min_vfreq > 10) {
9196
+
9197
+ freesync_capable = true;
54079198 }
54089199 }
54099200
5410
- /*
5411
- * TODO figure out how to notify user-mode or DRM of freesync caps
5412
- * once we figure out how to deal with freesync in an upstreamable
5413
- * fashion
5414
- */
9201
+update:
9202
+ if (dm_con_state)
9203
+ dm_con_state->freesync_capable = freesync_capable;
54159204
9205
+ if (connector->vrr_capable_property)
9206
+ drm_connector_set_vrr_capable_property(connector,
9207
+ freesync_capable);
54169208 }
54179209
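After parsing the EDID monitor range, the function above only advertises FreeSync when the refresh range spans more than 10 Hz. The decision in standalone form, with a worked example:

#include <stdbool.h>

/* Mirrors: max_vfreq - min_vfreq > 10 in the code above. */
static bool freesync_range_usable(unsigned int min_vfreq,
				  unsigned int max_vfreq)
{
	return max_vfreq - min_vfreq > 10;
}

/* e.g. a 48..144 Hz panel qualifies (96 Hz span); a 58..60 Hz one
 * does not (2 Hz span), so vrr_capable stays false for it. */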
5418
-void amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector)
9210
+static void amdgpu_dm_set_psr_caps(struct dc_link *link)
54199211 {
5420
- /*
5421
- * TODO fill in once we figure out how to deal with freesync in
5422
- * an upstreamable fashion
9212
+ uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9213
+
9214
+ if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9215
+ return;
9216
+ if (link->type == dc_connection_none)
9217
+ return;
9218
+ if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9219
+ dpcd_data, sizeof(dpcd_data))) {
9220
+ link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9221
+
9222
+ if (dpcd_data[0] == 0) {
9223
+ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9224
+ link->psr_settings.psr_feature_enabled = false;
9225
+ } else {
9226
+ link->psr_settings.psr_version = DC_PSR_VERSION_1;
9227
+ link->psr_settings.psr_feature_enabled = true;
9228
+ }
9229
+
9230
+ DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9231
+ }
9232
+}
9233
+
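The DPCD interpretation above reduces to a single byte: byte 0 of the PSR capability block is the PSR version, and zero means the sink has no PSR support. A standalone sketch of that mapping (hypothetical enum names; the real values come from the sink's DPCD):

enum sketch_psr_version {
	SKETCH_PSR_UNSUPPORTED = 0,
	SKETCH_PSR_VERSION_1   = 1,
};

/* Mirrors the dpcd_data[0] check above: nonzero enables PSR v1. */
static enum sketch_psr_version psr_version_from_dpcd(unsigned char byte0)
{
	return byte0 == 0 ? SKETCH_PSR_UNSUPPORTED : SKETCH_PSR_VERSION_1;
}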
9234
+/*
9235
+ * amdgpu_dm_link_setup_psr() - configure psr link
9236
+ * @stream: stream state
9237
+ *
9238
+ * Return: true on success
9239
+ */
9240
+static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9241
+{
9242
+ struct dc_link *link = NULL;
9243
+ struct psr_config psr_config = {0};
9244
+ struct psr_context psr_context = {0};
9245
+ bool ret = false;
9246
+
9247
+ if (stream == NULL)
9248
+ return false;
9249
+
9250
+ link = stream->link;
9251
+
9252
+ psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9253
+
9254
+ if (psr_config.psr_version > 0) {
9255
+ psr_config.psr_exit_link_training_required = 0x1;
9256
+ psr_config.psr_frame_capture_indication_req = 0;
9257
+ psr_config.psr_rfb_setup_time = 0x37;
9258
+ psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9259
+ psr_config.allow_smu_optimizations = 0x0;
9260
+
9261
+ ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9262
+
9263
+ }
9264
+ DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9265
+
9266
+ return ret;
9267
+}
9268
+
9269
+/*
9270
+ * amdgpu_dm_psr_enable() - enable psr f/w
9271
+ * @stream: stream state
9272
+ *
9273
+ * Return: true on success
9274
+ */
9275
+bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9276
+{
9277
+ struct dc_link *link = stream->link;
9278
+ unsigned int vsync_rate_hz = 0;
9279
+ struct dc_static_screen_params params = {0};
9280
+ /* Calculate number of static frames before generating interrupt to
9281
+ * enter PSR.
54239282 */
9283
+ /* Fail-safe default: 2 static frames */
9284
+ unsigned int num_frames_static = 2;
9285
+
9286
+ DRM_DEBUG_DRIVER("Enabling psr...\n");
9287
+
9288
+ vsync_rate_hz = div64_u64(div64_u64((
9289
+ stream->timing.pix_clk_100hz * 100),
9290
+ stream->timing.v_total),
9291
+ stream->timing.h_total);
9292
+
9293
+ /* Round up:
9294
+ * calculate the number of frames such that at least 30 ms of time
9295
+ * has passed.
9296
+ */
9297
+ if (vsync_rate_hz != 0) {
9298
+ unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9299
+ num_frames_static = (30000 / frame_time_microsec) + 1;
9300
+ }
9301
+
9302
+ params.triggers.cursor_update = true;
9303
+ params.triggers.overlay_update = true;
9304
+ params.triggers.surface_update = true;
9305
+ params.num_frames = num_frames_static;
9306
+
9307
+ dc_stream_set_static_screen_params(link->ctx->dc,
9308
+ &stream, 1,
9309
+ &params);
9310
+
9311
+ return dc_link_set_psr_allow_active(link, true, false);
9312
+}
9313
+
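The static-frame computation above targets at least 30 ms of idle screen before PSR entry. The same arithmetic as plain C with worked values (a sketch, not the driver's 64-bit helpers):

#include <stdio.h>

static unsigned int psr_static_frames(unsigned int vsync_rate_hz)
{
	unsigned int num_frames_static = 2;   /* fail-safe default */

	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = 30000 / frame_time_microsec + 1;
	}
	return num_frames_static;
}

int main(void)
{
	/* 60 Hz: ~16666 us/frame -> 30000/16666 + 1 = 2 frames. */
	printf("%u\n", psr_static_frames(60));
	/* 144 Hz: ~6944 us/frame -> 30000/6944 + 1 = 5 frames.  */
	printf("%u\n", psr_static_frames(144));
	return 0;
}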
9314
+/*
9315
+ * amdgpu_dm_psr_disable() - disable psr f/w
9316
+ * @stream: stream state
9317
+ *
9318
+ * Return: true on success
9319
+ */
9320
+static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9321
+{
9322
+
9323
+ DRM_DEBUG_DRIVER("Disabling psr...\n");
9324
+
9325
+ return dc_link_set_psr_allow_active(stream->link, false, true);
9326
+}
9327
+
9328
+/*
9329
+ * amdgpu_dm_psr_disable_all() - disable psr f/w
9330
+ * if psr is enabled on any stream
9331
+ *
9332
+ * Return: true on success
9333
+ */
9334
+static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9335
+{
9336
+ DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9337
+ return dc_set_psr_allow_active(dm->dc, false);
9338
+}
9339
+
9340
+void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9341
+{
9342
+ struct amdgpu_device *adev = drm_to_adev(dev);
9343
+ struct dc *dc = adev->dm.dc;
9344
+ int i;
9345
+
9346
+ mutex_lock(&adev->dm.dc_lock);
9347
+ if (dc->current_state) {
9348
+ for (i = 0; i < dc->current_state->stream_count; ++i)
9349
+ dc->current_state->streams[i]
9350
+ ->triggered_crtc_reset.enabled =
9351
+ adev->dm.force_timing_sync;
9352
+
9353
+ dm_enable_per_frame_crtc_master_sync(dc->current_state);
9354
+ dc_trigger_sync(dc, dc->current_state);
9355
+ }
9356
+ mutex_unlock(&adev->dm.dc_lock);
54249357 }