forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-11 04dd17822334871b23ea2862f7798fb0e0007777
kernel/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -1,23 +1,54 @@
 // SPDX-License-Identifier: GPL-2.0+
 
-#include "vkms_drv.h"
+#include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "vkms_drv.h"
 
 static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
 {
         struct vkms_output *output = container_of(timer, struct vkms_output,
                                                   vblank_hrtimer);
         struct drm_crtc *crtc = &output->crtc;
-        int ret_overrun;
+        struct vkms_crtc_state *state;
+        u64 ret_overrun;
         bool ret;
 
+        ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
+                                          output->period_ns);
+        if (ret_overrun != 1)
+                pr_warn("%s: vblank timer overrun\n", __func__);
+
+        spin_lock(&output->lock);
         ret = drm_crtc_handle_vblank(crtc);
         if (!ret)
                 DRM_ERROR("vkms failure on handling vblank");
 
-        ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
-                                          output->period_ns);
+        state = output->composer_state;
+        spin_unlock(&output->lock);
+
+        if (state && output->composer_enabled) {
+                u64 frame = drm_crtc_accurate_vblank_count(crtc);
+
+                /* update frame_start only if a queued vkms_composer_worker()
+                 * has read the data
+                 */
+                spin_lock(&output->composer_lock);
+                if (!state->crc_pending)
+                        state->frame_start = frame;
+                else
+                        DRM_DEBUG_DRIVER("crc worker falling behind, frame_start: %llu, frame_end: %llu\n",
+                                         state->frame_start, frame);
+                state->frame_end = frame;
+                state->crc_pending = true;
+                spin_unlock(&output->composer_lock);
+
+                ret = queue_work(output->composer_workq, &state->composer_work);
+                if (!ret)
+                        DRM_DEBUG_DRIVER("Composer worker already queued\n");
+        }
 
         return HRTIMER_RESTART;
 }
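Note the reordering in this hunk: the handler now calls hrtimer_forward_now() before handling the vblank, mirroring how real hardware raises the interrupt only after its vblank registers are updated. The return value counts the whole periods consumed, so anything other than 1 means the simulated vblank fired late. A minimal userspace sketch of that contract, with a loop standing in for the kernel's division-based implementation (all names below are illustrative):

    /* Sketch of the hrtimer_forward_now() contract relied on above:
     * push the expiry past "now" in whole periods and report how many
     * periods were consumed. 1 == on time, >1 == the timer overran.
     */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t timer_forward(uint64_t *expires_ns, uint64_t now_ns,
                                  uint64_t period_ns)
    {
            uint64_t overrun = 0;

            while (*expires_ns <= now_ns) {
                    *expires_ns += period_ns;
                    overrun++;
            }
            return overrun;
    }

    int main(void)
    {
            uint64_t expires = 16666667;    /* one 60 Hz frame, for illustration */

            /* handler runs just after the third expiry: expect an overrun of 3 */
            printf("overrun = %llu\n", (unsigned long long)
                   timer_forward(&expires, 50000002, 16666667));
            return 0;
    }

The forwarded expiry is the same value vkms_get_vblank_timestamp() reads back in the next hunk, which is why that function must subtract one period.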
@@ -46,31 +77,142 @@
         hrtimer_cancel(&out->vblank_hrtimer);
 }
 
-bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
-                               int *max_error, ktime_t *vblank_time,
-                               bool in_vblank_irq)
+static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc,
+                                      int *max_error, ktime_t *vblank_time,
+                                      bool in_vblank_irq)
 {
+        struct drm_device *dev = crtc->dev;
+        unsigned int pipe = crtc->index;
         struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
         struct vkms_output *output = &vkmsdev->output;
+        struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 
-        *vblank_time = output->vblank_hrtimer.node.expires;
+        if (!READ_ONCE(vblank->enabled)) {
+                *vblank_time = ktime_get();
+                return true;
+        }
 
-        if (!in_vblank_irq)
-                *vblank_time -= output->period_ns;
+        *vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires);
+
+        if (WARN_ON(*vblank_time == vblank->time))
+                return true;
+
+        /*
+         * To prevent races we roll the hrtimer forward before we do any
+         * interrupt processing - this is how real hw works (the interrupt is
+         * only generated after all the vblank registers are updated) and what
+         * the vblank core expects. Therefore we need to always correct the
+         * timestamp by one frame.
+         */
+        *vblank_time -= output->period_ns;
 
         return true;
+}
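Because the timer is forwarded before drm_crtc_handle_vblank() runs, node.expires always names the next vblank, one period after the event being timestamped; that is why the subtraction above became unconditional instead of being gated on in_vblank_irq. The arithmetic, as a standalone sketch (the 60 Hz period and all names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const int64_t period_ns = 16666667;     /* 60 Hz, for illustration */

            /* the timer was already forwarded: its expiry is the *next* vblank */
            int64_t next_expiry_ns = 5 * period_ns;

            /* the vblank that just fired happened exactly one period earlier */
            int64_t vblank_ts_ns = next_expiry_ns - period_ns;

            printf("timestamp = %lld ns (expiry %lld ns)\n",
                   (long long)vblank_ts_ns, (long long)next_expiry_ns);
            return 0;
    }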
+
+static struct drm_crtc_state *
+vkms_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+        struct vkms_crtc_state *vkms_state;
+
+        if (WARN_ON(!crtc->state))
+                return NULL;
+
+        vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
+        if (!vkms_state)
+                return NULL;
+
+        __drm_atomic_helper_crtc_duplicate_state(crtc, &vkms_state->base);
+
+        INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);
+
+        return &vkms_state->base;
+}
+
+static void vkms_atomic_crtc_destroy_state(struct drm_crtc *crtc,
+                                           struct drm_crtc_state *state)
+{
+        struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state);
+
+        __drm_atomic_helper_crtc_destroy_state(state);
+
+        WARN_ON(work_pending(&vkms_state->composer_work));
+        kfree(vkms_state->active_planes);
+        kfree(vkms_state);
+}
+
+static void vkms_atomic_crtc_reset(struct drm_crtc *crtc)
+{
+        struct vkms_crtc_state *vkms_state =
+                kzalloc(sizeof(*vkms_state), GFP_KERNEL);
+
+        if (crtc->state)
+                vkms_atomic_crtc_destroy_state(crtc, crtc->state);
+
+        __drm_atomic_helper_crtc_reset(crtc, &vkms_state->base);
+        if (vkms_state)
+                INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);
 }
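The three callbacks above implement the standard pattern for subclassing atomic state: a driver struct embeds drm_crtc_state as its first member, the helpers duplicate or free only the embedded base, and the driver re-initializes its private fields (here the composer work item) on every copy. A self-contained caricature of the pattern, with hypothetical stand-ins for the DRM types:

    #include <stdio.h>
    #include <stdlib.h>

    /* invented stand-ins for drm_crtc_state / vkms_crtc_state */
    struct base_state { int refcount; };

    struct priv_state {
            struct base_state base; /* must stay first for the downcast below */
            int *active_planes;     /* private: never shared between copies */
    };

    /* like __drm_atomic_helper_crtc_duplicate_state(): copy only the base,
     * then start the private fields fresh (cf. INIT_WORK above) */
    static struct base_state *dup_state(struct base_state *old)
    {
            struct priv_state *s = calloc(1, sizeof(*s));

            if (!s)
                    return NULL;
            s->base = *old;
            s->active_planes = NULL;
            return &s->base;
    }

    static void destroy_state(struct base_state *state)
    {
            /* recover the wrapper, like to_vkms_crtc_state() */
            struct priv_state *s = (struct priv_state *)state;

            free(s->active_planes);
            free(s);
    }

    int main(void)
    {
            struct priv_state first = { .base = { .refcount = 1 } };
            struct base_state *copy = dup_state(&first.base);

            printf("copied refcount = %d\n", copy->refcount);
            destroy_state(copy);
            return 0;
    }

Keeping the base as the first member is what makes the to_vkms_crtc_state()-style downcast legal.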
 
 static const struct drm_crtc_funcs vkms_crtc_funcs = {
         .set_config = drm_atomic_helper_set_config,
         .destroy = drm_crtc_cleanup,
         .page_flip = drm_atomic_helper_page_flip,
-        .reset = drm_atomic_helper_crtc_reset,
-        .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
-        .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+        .reset = vkms_atomic_crtc_reset,
+        .atomic_duplicate_state = vkms_atomic_crtc_duplicate_state,
+        .atomic_destroy_state = vkms_atomic_crtc_destroy_state,
         .enable_vblank = vkms_enable_vblank,
         .disable_vblank = vkms_disable_vblank,
+        .get_vblank_timestamp = vkms_get_vblank_timestamp,
+        .get_crc_sources = vkms_get_crc_sources,
+        .set_crc_source = vkms_set_crc_source,
+        .verify_crc_source = vkms_verify_crc_source,
 };
+
+static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
+                                  struct drm_crtc_state *state)
+{
+        struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state);
+        struct drm_plane *plane;
+        struct drm_plane_state *plane_state;
+        int i = 0, ret;
+
+        if (vkms_state->active_planes)
+                return 0;
+
+        ret = drm_atomic_add_affected_planes(state->state, crtc);
+        if (ret < 0)
+                return ret;
+
+        drm_for_each_plane_mask(plane, crtc->dev, state->plane_mask) {
+                plane_state = drm_atomic_get_existing_plane_state(state->state,
+                                                                  plane);
+                WARN_ON(!plane_state);
+
+                if (!plane_state->visible)
+                        continue;
+
+                i++;
+        }
+
+        vkms_state->active_planes = kcalloc(i, sizeof(plane), GFP_KERNEL);
+        if (!vkms_state->active_planes)
+                return -ENOMEM;
+        vkms_state->num_active_planes = i;
+
+        i = 0;
+        drm_for_each_plane_mask(plane, crtc->dev, state->plane_mask) {
+                plane_state = drm_atomic_get_existing_plane_state(state->state,
+                                                                  plane);
+
+                if (!plane_state->visible)
+                        continue;
+
+                vkms_state->active_planes[i++] =
+                        to_vkms_plane_state(plane_state);
+        }
+
+        return 0;
+}
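vkms_crtc_atomic_check() uses a conventional two-pass shape: walk the plane mask once to count visible planes, allocate an array of exactly that size, then walk again to fill it. Compressed into a userspace sketch, with a hypothetical visibility array in place of real plane state:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            /* stand-in for plane_state->visible across the plane mask */
            bool visible[] = { true, false, true, true };
            int n = sizeof(visible) / sizeof(visible[0]);
            int i, j, count = 0;

            for (i = 0; i < n; i++)         /* pass 1: size the array */
                    if (visible[i])
                            count++;

            int *active = calloc(count, sizeof(*active));   /* like kcalloc() */
            if (!active)
                    return 1;

            for (i = 0, j = 0; i < n; i++)  /* pass 2: populate */
                    if (visible[i])
                            active[j++] = i;

            printf("%d active planes\n", j);
            free(active);
            return 0;
    }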
 
 static void vkms_crtc_atomic_enable(struct drm_crtc *crtc,
                                     struct drm_crtc_state *old_state)
@@ -84,26 +226,43 @@
         drm_crtc_vblank_off(crtc);
 }
 
+static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
+                                   struct drm_crtc_state *old_crtc_state)
+{
+        struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);
+
+        /* This lock is held across the atomic commit to block vblank timer
+         * from scheduling vkms_composer_worker until the composer is updated
+         */
+        spin_lock_irq(&vkms_output->lock);
+}
+
 static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
                                    struct drm_crtc_state *old_crtc_state)
 {
-        unsigned long flags;
+        struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);
 
         if (crtc->state->event) {
-                spin_lock_irqsave(&crtc->dev->event_lock, flags);
+                spin_lock(&crtc->dev->event_lock);
 
                 if (drm_crtc_vblank_get(crtc) != 0)
                         drm_crtc_send_vblank_event(crtc, crtc->state->event);
                 else
                         drm_crtc_arm_vblank_event(crtc, crtc->state->event);
 
-                spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+                spin_unlock(&crtc->dev->event_lock);
 
                 crtc->state->event = NULL;
         }
+
+        vkms_output->composer_state = to_vkms_crtc_state(crtc->state);
+
+        spin_unlock_irq(&vkms_output->lock);
 }
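The begin/flush pair brackets the whole atomic commit with vkms_output->lock, so the vblank timer, which takes the same lock, can never observe a half-updated composer_state. It also explains the event_lock change: spin_lock_irq() in atomic_begin() already disabled interrupts, so the nested acquisition can drop the irqsave variant. A pthread caricature of holding a lock across a multi-step update (mutexes standing in for spinlocks; every name here is invented):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t output_lock = PTHREAD_MUTEX_INITIALIZER;
    static int composer_state;  /* stands in for output->composer_state */

    static void atomic_begin(void)
    {
            pthread_mutex_lock(&output_lock);       /* like spin_lock_irq() */
    }

    static void atomic_flush(int new_state)
    {
            composer_state = new_state;     /* publish while still locked */
            pthread_mutex_unlock(&output_lock);     /* like spin_unlock_irq() */
    }

    static void *vblank_timer(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&output_lock);       /* like spin_lock(&output->lock) */
            printf("timer sees state %d\n", composer_state); /* old or new, never torn */
            pthread_mutex_unlock(&output_lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            atomic_begin();                 /* commit starts: timer now blocks */
            pthread_create(&t, NULL, vblank_timer, NULL);
            atomic_flush(42);               /* multi-step update, then publish */
            pthread_join(t, NULL);
            return 0;
    }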
 
 static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
+        .atomic_check = vkms_crtc_atomic_check,
+        .atomic_begin = vkms_crtc_atomic_begin,
         .atomic_flush = vkms_crtc_atomic_flush,
         .atomic_enable = vkms_crtc_atomic_enable,
         .atomic_disable = vkms_crtc_atomic_disable,
@@ -112,6 +271,7 @@
 int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
                    struct drm_plane *primary, struct drm_plane *cursor)
 {
+        struct vkms_output *vkms_out = drm_crtc_to_vkms_output(crtc);
         int ret;
 
         ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
@@ -123,5 +283,12 @@
 
         drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);
 
+        spin_lock_init(&vkms_out->lock);
+        spin_lock_init(&vkms_out->composer_lock);
+
+        vkms_out->composer_workq = alloc_ordered_workqueue("vkms_composer", 0);
+        if (!vkms_out->composer_workq)
+                return -ENOMEM;
+
         return ret;
 }
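The init hunk completes the picture: composer_workq is an ordered workqueue, so at most one composer work item runs at a time, and queue_work() on an item that is still queued is a no-op returning false. That is why the vblank handler above only logs "Composer worker already queued" and lets crc_pending plus frame_end absorb the extra frames. A toy model of that return-value contract (illustrative only, not the kernel implementation):

    #include <stdbool.h>
    #include <stdio.h>

    struct work { bool pending; };

    static bool queue_work_sketch(struct work *w)
    {
            if (w->pending)
                    return false;   /* already queued: caller just logs */
            w->pending = true;      /* the worker clears this when it runs */
            return true;
    }

    int main(void)
    {
            struct work composer = { false };

            printf("%d %d\n", queue_work_sketch(&composer),
                   queue_work_sketch(&composer));   /* prints "1 0" */
            return 0;
    }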