forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-31 f70575805708cabdedea7498aaa3f710fde4d920
kernel/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -25,45 +25,31 @@
  */
 
 #include <acpi/video.h>
-#include <drm/drmP.h>
+
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
-
-#include <nvif/class.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
 
 #include "nouveau_fbcon.h"
-#include "dispnv04/hw.h"
 #include "nouveau_crtc.h"
-#include "nouveau_dma.h"
 #include "nouveau_gem.h"
 #include "nouveau_connector.h"
 #include "nv50_display.h"
 
-#include "nouveau_fence.h"
-
+#include <nvif/class.h>
 #include <nvif/cl0046.h>
 #include <nvif/event.h>
-
-static int
-nouveau_display_vblank_handler(struct nvif_notify *notify)
-{
-	struct nouveau_crtc *nv_crtc =
-		container_of(notify, typeof(*nv_crtc), vblank);
-	drm_crtc_handle_vblank(&nv_crtc->base);
-	return NVIF_NOTIFY_KEEP;
-}
+#include <dispnv50/crc.h>
 
 int
-nouveau_display_vblank_enable(struct drm_device *dev, unsigned int pipe)
+nouveau_display_vblank_enable(struct drm_crtc *crtc)
 {
-	struct drm_crtc *crtc;
 	struct nouveau_crtc *nv_crtc;
-
-	crtc = drm_crtc_from_index(dev, pipe);
-	if (!crtc)
-		return -EINVAL;
 
 	nv_crtc = nouveau_crtc(crtc);
 	nvif_notify_get(&nv_crtc->vblank);
@@ -72,14 +58,9 @@
 }
 
 void
-nouveau_display_vblank_disable(struct drm_device *dev, unsigned int pipe)
+nouveau_display_vblank_disable(struct drm_crtc *crtc)
 {
-	struct drm_crtc *crtc;
 	struct nouveau_crtc *nv_crtc;
-
-	crtc = drm_crtc_from_index(dev, pipe);
-	if (!crtc)
-		return;
 
 	nv_crtc = nouveau_crtc(crtc);
 	nvif_notify_put(&nv_crtc->vblank);
@@ -138,102 +119,196 @@
 }
 
 bool
-nouveau_display_scanoutpos(struct drm_device *dev, unsigned int pipe,
+nouveau_display_scanoutpos(struct drm_crtc *crtc,
 			   bool in_vblank_irq, int *vpos, int *hpos,
 			   ktime_t *stime, ktime_t *etime,
 			   const struct drm_display_mode *mode)
 {
-	struct drm_crtc *crtc;
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		if (nouveau_crtc(crtc)->index == pipe) {
-			return nouveau_display_scanoutpos_head(crtc, vpos, hpos,
-							       stime, etime);
-		}
-	}
-
-	return false;
+	return nouveau_display_scanoutpos_head(crtc, vpos, hpos,
+					       stime, etime);
 }
+
+static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
+	.destroy = drm_gem_fb_destroy,
+	.create_handle = drm_gem_fb_create_handle,
+};
 
 static void
-nouveau_display_vblank_fini(struct drm_device *dev)
+nouveau_decode_mod(struct nouveau_drm *drm,
+		   uint64_t modifier,
+		   uint32_t *tile_mode,
+		   uint8_t *kind)
 {
-	struct drm_crtc *crtc;
+	struct nouveau_display *disp = nouveau_display(drm->dev);
+	BUG_ON(!tile_mode || !kind);
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-		nvif_notify_fini(&nv_crtc->vblank);
+	if (modifier == DRM_FORMAT_MOD_LINEAR) {
+		/* tile_mode will not be used in this case */
+		*tile_mode = 0;
+		*kind = 0;
+	} else {
+		/*
+		 * Extract the block height and kind from the corresponding
+		 * modifier fields. See drm_fourcc.h for details.
+		 */
+
+		if ((modifier & (0xffull << 12)) == 0ull) {
+			/* Legacy modifier. Translate to this dev's 'kind.' */
+			modifier |= disp->format_modifiers[0] & (0xffull << 12);
+		}
+
+		*tile_mode = (uint32_t)(modifier & 0xF);
+		*kind = (uint8_t)((modifier >> 12) & 0xFF);
+
+		if (drm->client.device.info.chipset >= 0xc0)
+			*tile_mode <<= 4;
 	}
 }
 
-static int
-nouveau_display_vblank_init(struct drm_device *dev)
+void
+nouveau_framebuffer_get_layout(struct drm_framebuffer *fb,
+			       uint32_t *tile_mode,
+			       uint8_t *kind)
 {
-	struct nouveau_display *disp = nouveau_display(dev);
-	struct drm_crtc *crtc;
-	int ret;
+	if (fb->flags & DRM_MODE_FB_MODIFIERS) {
+		struct nouveau_drm *drm = nouveau_drm(fb->dev);
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-		ret = nvif_notify_init(&disp->disp.object,
-				       nouveau_display_vblank_handler, false,
-				       NV04_DISP_NTFY_VBLANK,
-				       &(struct nvif_notify_head_req_v0) {
-						.head = nv_crtc->index,
-				       },
-				       sizeof(struct nvif_notify_head_req_v0),
-				       sizeof(struct nvif_notify_head_rep_v0),
-				       &nv_crtc->vblank);
-		if (ret) {
-			nouveau_display_vblank_fini(dev);
-			return ret;
-		}
+		nouveau_decode_mod(drm, fb->modifier, tile_mode, kind);
+	} else {
+		const struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
+
+		*tile_mode = nvbo->mode;
+		*kind = nvbo->kind;
+	}
+}
+
+static const u64 legacy_modifiers[] = {
+	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
+	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
+	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
+	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
+	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
+	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
+	DRM_FORMAT_MOD_INVALID
+};
+
+static int
+nouveau_validate_decode_mod(struct nouveau_drm *drm,
+			    uint64_t modifier,
+			    uint32_t *tile_mode,
+			    uint8_t *kind)
+{
+	struct nouveau_display *disp = nouveau_display(drm->dev);
+	int mod;
+
+	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
+		return -EINVAL;
 	}
 
-	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
-	if (ret) {
-		nouveau_display_vblank_fini(dev);
-		return ret;
+	BUG_ON(!disp->format_modifiers);
+
+	for (mod = 0;
+	     (disp->format_modifiers[mod] != DRM_FORMAT_MOD_INVALID) &&
+	     (disp->format_modifiers[mod] != modifier);
+	     mod++);
+
+	if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID) {
+		for (mod = 0;
+		     (legacy_modifiers[mod] != DRM_FORMAT_MOD_INVALID) &&
+		     (legacy_modifiers[mod] != modifier);
+		     mod++);
+		if (legacy_modifiers[mod] == DRM_FORMAT_MOD_INVALID)
+			return -EINVAL;
 	}
+
+	nouveau_decode_mod(drm, modifier, tile_mode, kind);
 
 	return 0;
 }
 
-static void
-nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
+static inline uint32_t
+nouveau_get_width_in_blocks(uint32_t stride)
 {
-	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+	/* GOBs per block in the x direction is always one, and GOBs are
+	 * 64 bytes wide
+	 */
+	static const uint32_t log_block_width = 6;
 
-	if (fb->nvbo)
-		drm_gem_object_put_unlocked(&fb->nvbo->gem);
+	return (stride + (1 << log_block_width) - 1) >> log_block_width;
+}
 
-	drm_framebuffer_cleanup(drm_fb);
-	kfree(fb);
+static inline uint32_t
+nouveau_get_height_in_blocks(struct nouveau_drm *drm,
+			     uint32_t height,
+			     uint32_t log_block_height_in_gobs)
+{
+	uint32_t log_gob_height;
+	uint32_t log_block_height;
+
+	BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA);
+
+	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
+		log_gob_height = 2;
+	else
+		log_gob_height = 3;
+
+	log_block_height = log_block_height_in_gobs + log_gob_height;
+
+	return (height + (1 << log_block_height) - 1) >> log_block_height;
 }
 
 static int
-nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb,
-				       struct drm_file *file_priv,
-				       unsigned int *handle)
+nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
+		      uint32_t offset, uint32_t stride, uint32_t h,
+		      uint32_t tile_mode)
 {
-	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+	uint32_t gob_size, bw, bh;
+	uint64_t bl_size;
 
-	return drm_gem_handle_create(file_priv, &fb->nvbo->gem, handle);
+	BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA);
+
+	if (drm->client.device.info.chipset >= 0xc0) {
+		if (tile_mode & 0xF)
+			return -EINVAL;
+		tile_mode >>= 4;
+	}
+
+	if (tile_mode & 0xFFFFFFF0)
+		return -EINVAL;
+
+	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
+		gob_size = 256;
+	else
+		gob_size = 512;
+
+	bw = nouveau_get_width_in_blocks(stride);
+	bh = nouveau_get_height_in_blocks(drm, h, tile_mode);
+
+	bl_size = bw * bh * (1 << tile_mode) * gob_size;
+
+	DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%lu\n",
+		      offset, stride, h, tile_mode, bw, bh, gob_size, bl_size,
+		      nvbo->bo.mem.size);
+
+	if (bl_size + offset > nvbo->bo.mem.size)
+		return -ERANGE;
+
+	return 0;
 }
-
-static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
-	.destroy = nouveau_user_framebuffer_destroy,
-	.create_handle = nouveau_user_framebuffer_create_handle,
-};
 
 int
 nouveau_framebuffer_new(struct drm_device *dev,
 			const struct drm_mode_fb_cmd2 *mode_cmd,
-			struct nouveau_bo *nvbo,
-			struct nouveau_framebuffer **pfb)
+			struct drm_gem_object *gem,
+			struct drm_framebuffer **pfb)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_framebuffer *fb;
+	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+	struct drm_framebuffer *fb;
+	const struct drm_format_info *info;
+	unsigned int width, height, i;
+	uint32_t tile_mode;
+	uint8_t kind;
 	int ret;
 
 	/* YUV overlays have special requirements pre-NV50 */
@@ -256,13 +331,50 @@
 		return -EINVAL;
 	}
 
+	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
+		if (nouveau_validate_decode_mod(drm, mode_cmd->modifier[0],
+						&tile_mode, &kind)) {
+			DRM_DEBUG_KMS("Unsupported modifier: 0x%llx\n",
+				      mode_cmd->modifier[0]);
+			return -EINVAL;
+		}
+	} else {
+		tile_mode = nvbo->mode;
+		kind = nvbo->kind;
+	}
+
+	info = drm_get_format_info(dev, mode_cmd);
+
+	for (i = 0; i < info->num_planes; i++) {
+		width = drm_format_info_plane_width(info,
+						    mode_cmd->width,
+						    i);
+		height = drm_format_info_plane_height(info,
+						      mode_cmd->height,
+						      i);
+
+		if (kind) {
+			ret = nouveau_check_bl_size(drm, nvbo,
+						    mode_cmd->offsets[i],
+						    mode_cmd->pitches[i],
+						    height, tile_mode);
+			if (ret)
+				return ret;
+		} else {
+			uint32_t size = mode_cmd->pitches[i] * height;
+
+			if (size + mode_cmd->offsets[i] > nvbo->bo.mem.size)
+				return -ERANGE;
+		}
+	}
+
 	if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL)))
 		return -ENOMEM;
 
-	drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd);
-	fb->nvbo = nvbo;
+	drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+	fb->obj[0] = gem;
 
-	ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
+	ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
 	if (ret)
 		kfree(fb);
 	return ret;
@@ -273,21 +385,19 @@
 			    struct drm_file *file_priv,
 			    const struct drm_mode_fb_cmd2 *mode_cmd)
 {
-	struct nouveau_framebuffer *fb;
-	struct nouveau_bo *nvbo;
+	struct drm_framebuffer *fb;
 	struct drm_gem_object *gem;
 	int ret;
 
 	gem = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
 	if (!gem)
 		return ERR_PTR(-ENOENT);
-	nvbo = nouveau_gem_object(gem);
 
-	ret = nouveau_framebuffer_new(dev, mode_cmd, nvbo, &fb);
+	ret = nouveau_framebuffer_new(dev, mode_cmd, gem, &fb);
 	if (ret == 0)
-		return &fb->base;
+		return fb;
 
-	drm_gem_object_put_unlocked(gem);
+	drm_gem_object_put(gem);
 	return ERR_PTR(ret);
 }
 
@@ -347,29 +457,74 @@
 	} \
 } while(0)
 
+void
+nouveau_display_hpd_resume(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	mutex_lock(&drm->hpd_lock);
+	drm->hpd_pending = ~0;
+	mutex_unlock(&drm->hpd_lock);
+
+	schedule_work(&drm->hpd_work);
+}
+
 static void
 nouveau_display_hpd_work(struct work_struct *work)
 {
 	struct nouveau_drm *drm = container_of(work, typeof(*drm), hpd_work);
+	struct drm_device *dev = drm->dev;
+	struct drm_connector *connector;
+	struct drm_connector_list_iter conn_iter;
+	u32 pending;
+	bool changed = false;
 
-	pm_runtime_get_sync(drm->dev->dev);
+	pm_runtime_get_sync(dev->dev);
 
-	drm_helper_hpd_irq_event(drm->dev);
+	mutex_lock(&drm->hpd_lock);
+	pending = drm->hpd_pending;
+	drm->hpd_pending = 0;
+	mutex_unlock(&drm->hpd_lock);
+
+	/* Nothing to do, exit early without updating the last busy counter */
+	if (!pending)
+		goto noop;
+
+	mutex_lock(&dev->mode_config.mutex);
+	drm_connector_list_iter_begin(dev, &conn_iter);
+
+	nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
+		enum drm_connector_status old_status = connector->status;
+		u64 old_epoch_counter = connector->epoch_counter;
+
+		if (!(pending & drm_connector_mask(connector)))
+			continue;
+
+		connector->status = drm_helper_probe_detect(connector, NULL,
+							    false);
+		if (old_epoch_counter == connector->epoch_counter)
+			continue;
+
+		changed = true;
+		drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
+			    connector->base.id, connector->name,
+			    drm_get_connector_status_name(old_status),
+			    drm_get_connector_status_name(connector->status),
+			    old_epoch_counter, connector->epoch_counter);
+	}
+
+	drm_connector_list_iter_end(&conn_iter);
+	mutex_unlock(&dev->mode_config.mutex);
+
+	if (changed)
+		drm_kms_helper_hotplug_event(dev);
 
 	pm_runtime_mark_last_busy(drm->dev->dev);
-	pm_runtime_put_sync(drm->dev->dev);
+noop:
+	pm_runtime_put_autosuspend(dev->dev);
 }
 
 #ifdef CONFIG_ACPI
-
-/*
- * Hans de Goede: This define belongs in acpi/video.h, I've submitted a patch
- * to the acpi subsys to move it there from drivers/acpi/acpi_video.c .
- * This should be dropped once that is merged.
- */
-#ifndef ACPI_VIDEO_NOTIFY_PROBE
-#define ACPI_VIDEO_NOTIFY_PROBE 0x81
-#endif
 
 static int
 nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
@@ -388,13 +543,12 @@
 			 * it's own hotplug events.
 			 */
 			pm_runtime_put_autosuspend(drm->dev->dev);
-		} else if (ret == 0) {
-			/* This may be the only indication we receive
-			 * of a connector hotplug on a runtime
-			 * suspended GPU, schedule hpd_work to check.
+		} else if (ret == 0 || ret == -EINPROGRESS) {
+			/* We've started resuming the GPU already, so
+			 * it will handle scheduling a full reprobe
+			 * itself
 			 */
 			NV_DEBUG(drm, "ACPI requested connector reprobe\n");
-			schedule_work(&drm->hpd_work);
 			pm_runtime_put_noidle(drm->dev->dev);
 		} else {
 			NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n",
@@ -411,15 +565,25 @@
 #endif
 
 int
-nouveau_display_init(struct drm_device *dev)
+nouveau_display_init(struct drm_device *dev, bool resume, bool runtime)
 {
 	struct nouveau_display *disp = nouveau_display(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_connector *connector;
 	struct drm_connector_list_iter conn_iter;
 	int ret;
 
-	ret = disp->init(dev);
+	/*
+	 * Enable hotplug interrupts (done as early as possible, since we need
+	 * them for MST)
+	 */
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
+		struct nouveau_connector *conn = nouveau_connector(connector);
+		nvif_notify_get(&conn->hpd);
+	}
+	drm_connector_list_iter_end(&conn_iter);
+
+	ret = disp->init(dev, resume, runtime);
 	if (ret)
 		return ret;
 
@@ -428,16 +592,6 @@
 	 */
 	drm_kms_helper_poll_enable(dev);
 
-	/* enable hotplug interrupts */
-	drm_connector_list_iter_begin(dev, &conn_iter);
-	nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
-		struct nouveau_connector *conn = nouveau_connector(connector);
-		nvif_notify_get(&conn->hpd);
-	}
-	drm_connector_list_iter_end(&conn_iter);
-
-	/* enable flip completion events */
-	nvif_notify_get(&drm->flip);
 	return ret;
 }
 
@@ -453,11 +607,8 @@
 		if (drm_drv_uses_atomic_modeset(dev))
 			drm_atomic_helper_shutdown(dev);
 		else
-			drm_crtc_force_disable_all(dev);
+			drm_helper_force_disable_all(dev);
 	}
-
-	/* disable flip completion events */
-	nvif_notify_put(&drm->flip);
 
 	/* disable hotplug interrupts */
 	drm_connector_list_iter_begin(dev, &conn_iter);
@@ -471,7 +622,7 @@
 	cancel_work_sync(&drm->hpd_work);
 
 	drm_kms_helper_poll_disable(dev);
-	disp->fini(dev);
+	disp->fini(dev, runtime, suspend);
 }
 
 static void
@@ -559,7 +710,8 @@
 	drm_kms_helper_poll_disable(dev);
 
 	if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
-		ret = nvif_disp_ctor(&drm->client.device, 0, &disp->disp);
+		ret = nvif_disp_ctor(&drm->client.device, "kmsDisp", 0,
+				     &disp->disp);
 		if (ret == 0) {
 			nouveau_display_create_properties(dev);
 			if (disp->disp.object.oclass < NV50_DISP)
@@ -577,13 +729,16 @@
 	drm_mode_config_reset(dev);
 
 	if (dev->mode_config.num_crtc) {
-		ret = nouveau_display_vblank_init(dev);
+		ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
 		if (ret)
 			goto vblank_err;
+
+		if (disp->disp.object.oclass >= NV50_DISP)
+			nv50_crc_init(dev);
 	}
 
-	nouveau_backlight_init(dev);
 	INIT_WORK(&drm->hpd_work, nouveau_display_hpd_work);
+	mutex_init(&drm->hpd_lock);
 #ifdef CONFIG_ACPI
 	drm->acpi_nb.notifier_call = nouveau_display_acpi_ntfy;
 	register_acpi_notifier(&drm->acpi_nb);
@@ -603,12 +758,11 @@
 nouveau_display_destroy(struct drm_device *dev)
 {
 	struct nouveau_display *disp = nouveau_display(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
 #ifdef CONFIG_ACPI
-	unregister_acpi_notifier(&nouveau_drm(dev)->acpi_nb);
+	unregister_acpi_notifier(&drm->acpi_nb);
 #endif
-	nouveau_backlight_exit(dev);
-	nouveau_display_vblank_fini(dev);
 
 	drm_kms_helper_poll_fini(dev);
 	drm_mode_config_cleanup(dev);
@@ -619,6 +773,7 @@
 	nvif_disp_dtor(&disp->disp);
 
 	nouveau_drm(dev)->display = NULL;
+	mutex_destroy(&drm->hpd_lock);
 	kfree(disp);
 }
 
@@ -626,7 +781,6 @@
 nouveau_display_suspend(struct drm_device *dev, bool runtime)
 {
 	struct nouveau_display *disp = nouveau_display(dev);
-	struct drm_crtc *crtc;
 
 	if (drm_drv_uses_atomic_modeset(dev)) {
 		if (!runtime) {
@@ -637,32 +791,9 @@
 				return ret;
 			}
 		}
-
-		nouveau_display_fini(dev, true, runtime);
-		return 0;
 	}
 
 	nouveau_display_fini(dev, true, runtime);
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct nouveau_framebuffer *nouveau_fb;
-
-		nouveau_fb = nouveau_framebuffer(crtc->primary->fb);
-		if (!nouveau_fb || !nouveau_fb->nvbo)
-			continue;
-
-		nouveau_bo_unpin(nouveau_fb->nvbo);
-	}
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-		if (nv_crtc->cursor.nvbo) {
-			if (nv_crtc->cursor.set_offset)
-				nouveau_bo_unmap(nv_crtc->cursor.nvbo);
-			nouveau_bo_unpin(nv_crtc->cursor.nvbo);
-		}
-	}
-
 	return 0;
 }
 
@@ -670,275 +801,16 @@
 nouveau_display_resume(struct drm_device *dev, bool runtime)
 {
 	struct nouveau_display *disp = nouveau_display(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct drm_crtc *crtc;
-	int ret;
+
+	nouveau_display_init(dev, true, runtime);
 
 	if (drm_drv_uses_atomic_modeset(dev)) {
-		nouveau_display_init(dev);
 		if (disp->suspend) {
 			drm_atomic_helper_resume(dev, disp->suspend);
 			disp->suspend = NULL;
 		}
 		return;
 	}
-
-	/* re-pin fb/cursors */
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct nouveau_framebuffer *nouveau_fb;
-
-		nouveau_fb = nouveau_framebuffer(crtc->primary->fb);
-		if (!nouveau_fb || !nouveau_fb->nvbo)
-			continue;
-
-		ret = nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM, true);
-		if (ret)
-			NV_ERROR(drm, "Could not pin framebuffer\n");
-	}
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-		if (!nv_crtc->cursor.nvbo)
-			continue;
-
-		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, true);
-		if (!ret && nv_crtc->cursor.set_offset)
-			ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
-		if (ret)
-			NV_ERROR(drm, "Could not pin/map cursor.\n");
-	}
-
-	nouveau_display_init(dev);
-
-	/* Force CLUT to get re-loaded during modeset */
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
-		nv_crtc->lut.depth = 0;
-	}
-
-	/* This should ensure we don't hit a locking problem when someone
-	 * wakes us up via a connector. We should never go into suspend
-	 * while the display is on anyways.
-	 */
-	if (runtime)
-		return;
-
-	drm_helper_resume_force_mode(dev);
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
-		if (!nv_crtc->cursor.nvbo)
-			continue;
-
-		if (nv_crtc->cursor.set_offset)
-			nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset);
-		nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
-					nv_crtc->cursor_saved_y);
-	}
-}
-
-static int
-nouveau_page_flip_emit(struct nouveau_channel *chan,
-		       struct nouveau_bo *old_bo,
-		       struct nouveau_bo *new_bo,
-		       struct nouveau_page_flip_state *s,
-		       struct nouveau_fence **pfence)
-{
-	struct nouveau_fence_chan *fctx = chan->fence;
-	struct nouveau_drm *drm = chan->drm;
-	struct drm_device *dev = drm->dev;
-	unsigned long flags;
-	int ret;
-
-	/* Queue it to the pending list */
-	spin_lock_irqsave(&dev->event_lock, flags);
-	list_add_tail(&s->head, &fctx->flip);
-	spin_unlock_irqrestore(&dev->event_lock, flags);
-
-	/* Synchronize with the old framebuffer */
-	ret = nouveau_fence_sync(old_bo, chan, false, false);
-	if (ret)
-		goto fail;
-
-	/* Emit the pageflip */
-	ret = RING_SPACE(chan, 2);
-	if (ret)
-		goto fail;
-
-	BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
-	OUT_RING (chan, 0x00000000);
-	FIRE_RING (chan);
-
-	ret = nouveau_fence_new(chan, false, pfence);
-	if (ret)
-		goto fail;
-
-	return 0;
-fail:
-	spin_lock_irqsave(&dev->event_lock, flags);
-	list_del(&s->head);
-	spin_unlock_irqrestore(&dev->event_lock, flags);
-	return ret;
-}
-
-int
-nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
-		       struct drm_pending_vblank_event *event, u32 flags,
-		       struct drm_modeset_acquire_ctx *ctx)
-{
-	const int swap_interval = (flags & DRM_MODE_PAGE_FLIP_ASYNC) ? 0 : 1;
-	struct drm_device *dev = crtc->dev;
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->primary->fb)->nvbo;
-	struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
-	struct nouveau_page_flip_state *s;
-	struct nouveau_channel *chan;
-	struct nouveau_cli *cli;
-	struct nouveau_fence *fence;
-	struct nv04_display *dispnv04 = nv04_display(dev);
-	int head = nouveau_crtc(crtc)->index;
-	int ret;
-
-	chan = drm->channel;
-	if (!chan)
-		return -ENODEV;
-	cli = (void *)chan->user.client;
-
-	s = kzalloc(sizeof(*s), GFP_KERNEL);
-	if (!s)
-		return -ENOMEM;
-
-	if (new_bo != old_bo) {
-		ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM, true);
-		if (ret)
-			goto fail_free;
-	}
-
-	mutex_lock(&cli->mutex);
-	ret = ttm_bo_reserve(&new_bo->bo, true, false, NULL);
-	if (ret)
-		goto fail_unpin;
-
-	/* synchronise rendering channel with the kernel's channel */
-	ret = nouveau_fence_sync(new_bo, chan, false, true);
-	if (ret) {
-		ttm_bo_unreserve(&new_bo->bo);
-		goto fail_unpin;
-	}
-
-	if (new_bo != old_bo) {
-		ttm_bo_unreserve(&new_bo->bo);
-
-		ret = ttm_bo_reserve(&old_bo->bo, true, false, NULL);
-		if (ret)
-			goto fail_unpin;
-	}
-
-	/* Initialize a page flip struct */
-	*s = (struct nouveau_page_flip_state)
-		{ { }, event, crtc, fb->format->cpp[0] * 8, fb->pitches[0],
-		  new_bo->bo.offset };
-
-	/* Keep vblanks on during flip, for the target crtc of this flip */
-	drm_crtc_vblank_get(crtc);
-
-	/* Emit a page flip */
-	if (swap_interval) {
-		ret = RING_SPACE(chan, 8);
-		if (ret)
-			goto fail_unreserve;
-
-		BEGIN_NV04(chan, NvSubImageBlit, 0x012c, 1);
-		OUT_RING (chan, 0);
-		BEGIN_NV04(chan, NvSubImageBlit, 0x0134, 1);
-		OUT_RING (chan, head);
-		BEGIN_NV04(chan, NvSubImageBlit, 0x0100, 1);
-		OUT_RING (chan, 0);
-		BEGIN_NV04(chan, NvSubImageBlit, 0x0130, 1);
-		OUT_RING (chan, 0);
-	}
-
-	nouveau_bo_ref(new_bo, &dispnv04->image[head]);
-
-	ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
-	if (ret)
-		goto fail_unreserve;
-	mutex_unlock(&cli->mutex);
-
-	/* Update the crtc struct and cleanup */
-	crtc->primary->fb = fb;
-
-	nouveau_bo_fence(old_bo, fence, false);
-	ttm_bo_unreserve(&old_bo->bo);
-	if (old_bo != new_bo)
-		nouveau_bo_unpin(old_bo);
-	nouveau_fence_unref(&fence);
-	return 0;
-
-fail_unreserve:
-	drm_crtc_vblank_put(crtc);
-	ttm_bo_unreserve(&old_bo->bo);
-fail_unpin:
-	mutex_unlock(&cli->mutex);
-	if (old_bo != new_bo)
-		nouveau_bo_unpin(new_bo);
-fail_free:
-	kfree(s);
-	return ret;
-}
-
-int
-nouveau_finish_page_flip(struct nouveau_channel *chan,
-			 struct nouveau_page_flip_state *ps)
-{
-	struct nouveau_fence_chan *fctx = chan->fence;
-	struct nouveau_drm *drm = chan->drm;
-	struct drm_device *dev = drm->dev;
-	struct nouveau_page_flip_state *s;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev->event_lock, flags);
-
-	if (list_empty(&fctx->flip)) {
-		NV_ERROR(drm, "unexpected pageflip\n");
-		spin_unlock_irqrestore(&dev->event_lock, flags);
-		return -EINVAL;
-	}
-
-	s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
-	if (s->event) {
-		drm_crtc_arm_vblank_event(s->crtc, s->event);
-	} else {
-		/* Give up ownership of vblank for page-flipped crtc */
-		drm_crtc_vblank_put(s->crtc);
-	}
-
-	list_del(&s->head);
-	if (ps)
-		*ps = *s;
-	kfree(s);
-
-	spin_unlock_irqrestore(&dev->event_lock, flags);
-	return 0;
-}
-
-int
-nouveau_flip_complete(struct nvif_notify *notify)
-{
-	struct nouveau_drm *drm = container_of(notify, typeof(*drm), flip);
-	struct nouveau_channel *chan = drm->channel;
-	struct nouveau_page_flip_state state;
-
-	if (!nouveau_finish_page_flip(chan, &state)) {
-		nv_set_crtc_base(drm->dev, drm_crtc_index(state.crtc),
-				 state.offset + state.crtc->y *
-				 state.pitch + state.crtc->x *
-				 state.bpp / 8);
-	}
-
-	return NVIF_NOTIFY_KEEP;
 }
 
 int
@@ -964,8 +836,8 @@
 	if (ret)
 		return ret;
 
-	ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
-	drm_gem_object_put_unlocked(&bo->gem);
+	ret = drm_gem_handle_create(file_priv, &bo->bo.base, &args->handle);
+	drm_gem_object_put(&bo->bo.base);
 	return ret;
 }
 
@@ -979,8 +851,8 @@
 	gem = drm_gem_object_lookup(file_priv, handle);
 	if (gem) {
 		struct nouveau_bo *bo = nouveau_gem_object(gem);
-		*poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
-		drm_gem_object_put_unlocked(gem);
+		*poffset = drm_vma_node_offset_addr(&bo->bo.base.vma_node);
+		drm_gem_object_put(gem);
 		return 0;
 	}
 