forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/gpu/drm/drm_lock.c
@@ -1,4 +1,4 @@
-/**
+/*
  * \file drm_lock.c
  * IOCTLs for locking
  *
@@ -36,13 +36,17 @@
 #include <linux/export.h>
 #include <linux/sched/signal.h>
 
-#include <drm/drmP.h>
-#include "drm_legacy.h"
+#include <drm/drm.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+
 #include "drm_internal.h"
+#include "drm_legacy.h"
 
 static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
 
-/**
+/*
  * Take the heavyweight lock.
  *
  * \param lock lock pointer.
@@ -89,7 +93,7 @@
 	return 0;
 }
 
-/**
+/*
  * This takes a lock forcibly and hands it to context. Should ONLY be used
  * inside *_unlock to give lock to kernel before calling *_dma_schedule.
  *
@@ -146,7 +150,7 @@
 	return 0;
 }
 
-/**
+/*
  * Lock ioctl.
  *
  * \param inode device inode.
@@ -166,7 +170,7 @@
 	int ret = 0;
 
 	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return -EINVAL;
+		return -EOPNOTSUPP;
 
 	++file_priv->lock_count;
 
@@ -239,7 +243,7 @@
 	return 0;
 }
 
-/**
+/*
  * Unlock ioctl.
  *
  * \param inode device inode.
@@ -256,7 +260,7 @@
 	struct drm_master *master = file_priv->master;
 
 	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return -EINVAL;
+		return -EOPNOTSUPP;
 
 	if (lock->context == DRM_KERNEL_CONTEXT) {
 		DRM_ERROR("Process %d using kernel context %d\n",
@@ -271,7 +275,7 @@
 	return 0;
 }
 
-/**
+/*
  * This function returns immediately and takes the hw lock
  * with the kernel context if it is free, otherwise it gets the highest priority when and if
  * it is eventually released.
@@ -283,7 +287,6 @@
  * This should be sufficient to wait for GPU idle without
  * having to worry about starvation.
  */
-
 void drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
 {
 	int ret;
@@ -327,6 +330,7 @@
 			  struct drm_file *file_priv)
 {
 	struct drm_master *master = file_priv->master;
+
 	return (file_priv->lock_count && master->lock.hw_lock &&
 		_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
 		master->lock.file_priv == file_priv);
@@ -347,3 +351,23 @@
 			  _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
 	}
 }
+
+void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master)
+{
+	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
+		return;
+
+	/*
+	 * Since the master is disappearing, so is the
+	 * possibility to lock.
+	 */
+	mutex_lock(&dev->struct_mutex);
+	if (master->lock.hw_lock) {
+		if (dev->sigdata.lock == master->lock.hw_lock)
+			dev->sigdata.lock = NULL;
+		master->lock.hw_lock = NULL;
+		master->lock.file_priv = NULL;
+		wake_up_interruptible_all(&master->lock.lock_queue);
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
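
Note: the new drm_legacy_lock_master_cleanup() helper tears down the legacy heavyweight-lock state owned by a master. As a rough illustration only, a callsite would typically sit in the master teardown path; the sketch below assumes a drm_master_destroy() kref release function (not part of this diff), and the surrounding code is simplified.

	/* Sketch of a hypothetical callsite (assumption, not in this diff):
	 * when the last reference to a master is dropped, release any legacy
	 * lock it still holds so waiters on lock_queue are woken and
	 * dev->sigdata.lock no longer points at freed state.
	 */
	static void drm_master_destroy(struct kref *kref)
	{
		struct drm_master *master = container_of(kref, struct drm_master, refcount);
		struct drm_device *dev = master->dev;
	
		drm_legacy_lock_master_cleanup(dev, master);
	
		/* ... remaining master teardown ... */
		kfree(master);
	}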