From f70575805708cabdedea7498aaa3f710fde4d920 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 31 Jan 2024 03:29:01 +0000
Subject: [PATCH] drm/lock: modernize legacy lock code, add drm_legacy_lock_master_cleanup()

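Bring the legacy locking code in drm_lock.c in line with current drm
core conventions:

- Turn the file and function header comments from kernel-doc ("/**")
  into plain block comments, since none of them are actual kernel-doc.
- Replace the removed drmP.h mega-header with the specific headers
  that are actually used (drm.h, drm_drv.h, drm_file.h, drm_print.h)
  and reorder the local drm_internal.h/drm_legacy.h includes.
- Return -EOPNOTSUPP instead of -EINVAL from the lock/unlock ioctls
  when the driver does not set DRIVER_LEGACY.
- Add drm_legacy_lock_master_cleanup(), which drops the legacy
  hardware lock, clears dev->sigdata.lock if it points at it, and
  wakes up any waiters when a master goes away.
- Minor whitespace cleanups.

The call site for the new helper (and its prototype, presumably in
drm_legacy.h) is not part of this diff; it is expected to be invoked
from master teardown. A minimal sketch of the intended usage follows,
with example_master_teardown() being a made-up name for illustration:

	/* Sketch only: hypothetical teardown path, not in this patch. */
	static void example_master_teardown(struct drm_device *dev,
					    struct drm_master *master)
	{
		/*
		 * Drops the legacy HW lock and wakes waiters; returns
		 * immediately on drivers without DRIVER_LEGACY.
		 */
		drm_legacy_lock_master_cleanup(dev, master);
	}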
---
 kernel/drivers/gpu/drm/drm_lock.c |   46 +++++++++++++++++++++++++++++++++++-----------
 1 file changed, 35 insertions(+), 11 deletions(-)

diff --git a/kernel/drivers/gpu/drm/drm_lock.c b/kernel/drivers/gpu/drm/drm_lock.c
index 96bb6ba..1efbd53 100644
--- a/kernel/drivers/gpu/drm/drm_lock.c
+++ b/kernel/drivers/gpu/drm/drm_lock.c
@@ -1,4 +1,4 @@
-/**
+/*
  * \file drm_lock.c
  * IOCTLs for locking
  *
@@ -36,13 +36,17 @@
 #include <linux/export.h>
 #include <linux/sched/signal.h>
 
-#include <drm/drmP.h>
-#include "drm_legacy.h"
+#include <drm/drm.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+
 #include "drm_internal.h"
+#include "drm_legacy.h"
 
 static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
 
-/**
+/*
  * Take the heavyweight lock.
  *
  * \param lock lock pointer.
@@ -89,7 +93,7 @@
 	return 0;
 }
 
-/**
+/*
  * This takes a lock forcibly and hands it to context.	Should ONLY be used
  * inside *_unlock to give lock to kernel before calling *_dma_schedule.
  *
@@ -146,7 +150,7 @@
 	return 0;
 }
 
-/**
+/*
  * Lock ioctl.
  *
  * \param inode device inode.
@@ -166,7 +170,7 @@
 	int ret = 0;
 
 	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return -EINVAL;
+		return -EOPNOTSUPP;
 
 	++file_priv->lock_count;
 
@@ -239,7 +243,7 @@
 	return 0;
 }
 
-/**
+/*
  * Unlock ioctl.
  *
  * \param inode device inode.
@@ -256,7 +260,7 @@
 	struct drm_master *master = file_priv->master;
 
 	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return -EINVAL;
+		return -EOPNOTSUPP;
 
 	if (lock->context == DRM_KERNEL_CONTEXT) {
 		DRM_ERROR("Process %d using kernel context %d\n",
@@ -271,7 +275,7 @@
 	return 0;
 }
 
-/**
+/*
  * This function returns immediately and takes the hw lock
  * with the kernel context if it is free, otherwise it gets the highest priority when and if
  * it is eventually released.
@@ -283,7 +287,6 @@
  * This should be sufficient to wait for GPU idle without
  * having to worry about starvation.
  */
-
 void drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
 {
 	int ret;
@@ -327,6 +330,7 @@
 				     struct drm_file *file_priv)
 {
 	struct drm_master *master = file_priv->master;
+
 	return (file_priv->lock_count && master->lock.hw_lock &&
 		_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
 		master->lock.file_priv == file_priv);
@@ -347,3 +351,23 @@
 				     _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
 	}
 }
+
+void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master)
+{
+	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
+		return;
+
+	/*
+	 * Since the master is disappearing, so is the
+	 * possibility to lock.
+	 */
+	mutex_lock(&dev->struct_mutex);
+	if (master->lock.hw_lock) {
+		if (dev->sigdata.lock == master->lock.hw_lock)
+			dev->sigdata.lock = NULL;
+		master->lock.hw_lock = NULL;
+		master->lock.file_priv = NULL;
+		wake_up_interruptible_all(&master->lock.lock_queue);
+	}
+	mutex_unlock(&dev->struct_mutex);
+}

--
Gitblit v1.6.2