From 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 13 May 2024 10:30:14 +0000
Subject: [PATCH] drm/i915: adapt i915_sysfs.c to the reworked GT interfaces

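Sync i915_sysfs.c with the reworked GT code: read RC6 residency and RPS
frequencies through the intel_rc6/intel_rps helpers, take runtime PM
references with with_intel_runtime_pm(), serialize frequency updates with
rps->lock instead of pcu_lock, guard the L3 parity remap tables with
gem.contexts.lock rather than struct_mutex, read and clear error state via
the i915_gpu_coredump interface, move log messages to drm_dbg()/drm_err(),
and register per-engine sysfs nodes through intel_engines_add_sysfs().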
---
 kernel/drivers/gpu/drm/i915/i915_sysfs.c |  291 ++++++++++++++++++++++++++++-----------------------------
 1 file changed, 143 insertions(+), 148 deletions(-)

diff --git a/kernel/drivers/gpu/drm/i915/i915_sysfs.c b/kernel/drivers/gpu/drm/i915/i915_sysfs.c
index e5e6f6b..ac40a95 100644
--- a/kernel/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/kernel/drivers/gpu/drm/i915/i915_sysfs.c
@@ -29,8 +29,15 @@
 #include <linux/module.h>
 #include <linux/stat.h>
 #include <linux/sysfs.h>
-#include "intel_drv.h"
+
+#include "gt/intel_rc6.h"
+#include "gt/intel_rps.h"
+#include "gt/sysfs_engines.h"
+
 #include "i915_drv.h"
+#include "i915_sysfs.h"
+#include "intel_pm.h"
+#include "intel_sideband.h"
 
 static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
 {
@@ -42,11 +49,11 @@
 static u32 calc_residency(struct drm_i915_private *dev_priv,
 			  i915_reg_t reg)
 {
-	u64 res;
+	intel_wakeref_t wakeref;
+	u64 res = 0;
 
-	intel_runtime_pm_get(dev_priv);
-	res = intel_rc6_residency_us(dev_priv, reg);
-	intel_runtime_pm_put(dev_priv);
+	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
+		res = intel_rc6_residency_us(&dev_priv->gt.rc6, reg);
 
 	return DIV_ROUND_CLOSEST_ULL(res, 1000);
 }
@@ -139,12 +146,12 @@
 };
 #endif
 
-static int l3_access_valid(struct drm_i915_private *dev_priv, loff_t offset)
+static int l3_access_valid(struct drm_i915_private *i915, loff_t offset)
 {
-	if (!HAS_L3_DPF(dev_priv))
+	if (!HAS_L3_DPF(i915))
 		return -EPERM;
 
-	if (offset % 4 != 0)
+	if (!IS_ALIGNED(offset, sizeof(u32)))
 		return -EINVAL;
 
 	if (offset >= GEN7_L3LOG_SIZE)
@@ -159,31 +166,24 @@
 	     loff_t offset, size_t count)
 {
 	struct device *kdev = kobj_to_dev(kobj);
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
 	int slice = (int)(uintptr_t)attr->private;
 	int ret;
 
-	count = round_down(count, 4);
-
-	ret = l3_access_valid(dev_priv, offset);
+	ret = l3_access_valid(i915, offset);
 	if (ret)
 		return ret;
 
+	count = round_down(count, sizeof(u32));
 	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
+	memset(buf, 0, count);
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
-
-	if (dev_priv->l3_parity.remap_info[slice])
+	spin_lock(&i915->gem.contexts.lock);
+	if (i915->l3_parity.remap_info[slice])
 		memcpy(buf,
-		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
+		       i915->l3_parity.remap_info[slice] + offset / sizeof(u32),
 		       count);
-	else
-		memset(buf, 0, count);
-
-	mutex_unlock(&dev->struct_mutex);
+	spin_unlock(&i915->gem.contexts.lock);
 
 	return count;
 }
@@ -194,46 +194,49 @@
 	      loff_t offset, size_t count)
 {
 	struct device *kdev = kobj_to_dev(kobj);
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct drm_device *dev = &dev_priv->drm;
-	struct i915_gem_context *ctx;
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
 	int slice = (int)(uintptr_t)attr->private;
-	u32 **remap_info;
+	u32 *remap_info, *freeme = NULL;
+	struct i915_gem_context *ctx;
 	int ret;
 
-	ret = l3_access_valid(dev_priv, offset);
+	ret = l3_access_valid(i915, offset);
 	if (ret)
 		return ret;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
+	if (count < sizeof(u32))
+		return -EINVAL;
 
-	remap_info = &dev_priv->l3_parity.remap_info[slice];
-	if (!*remap_info) {
-		*remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
-		if (!*remap_info) {
-			ret = -ENOMEM;
-			goto out;
-		}
+	remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
+	if (!remap_info)
+		return -ENOMEM;
+
+	spin_lock(&i915->gem.contexts.lock);
+
+	if (i915->l3_parity.remap_info[slice]) {
+		freeme = remap_info;
+		remap_info = i915->l3_parity.remap_info[slice];
+	} else {
+		i915->l3_parity.remap_info[slice] = remap_info;
 	}
 
-	/* TODO: Ideally we really want a GPU reset here to make sure errors
+	count = round_down(count, sizeof(u32));
+	memcpy(remap_info + offset / sizeof(u32), buf, count);
+
+	/* NB: We defer the remapping until we switch to the context */
+	list_for_each_entry(ctx, &i915->gem.contexts.list, link)
+		ctx->remap_slice |= BIT(slice);
+
+	spin_unlock(&i915->gem.contexts.lock);
+	kfree(freeme);
+
+	/*
+	 * TODO: Ideally we really want a GPU reset here to make sure errors
 	 * aren't propagated. Since I cannot find a stable way to reset the GPU
 	 * at this point it is left as a TODO.
 	*/
-	memcpy(*remap_info + (offset/4), buf, count);
 
-	/* NB: We defer the remapping until we switch to the context */
-	list_for_each_entry(ctx, &dev_priv->contexts.list, link)
-		ctx->remap_slice |= (1<<slice);
-
-	ret = count;
-
-out:
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
+	return count;
 }
 
 static const struct bin_attribute dpf_attrs = {
@@ -257,45 +260,30 @@
 static ssize_t gt_act_freq_mhz_show(struct device *kdev,
 				    struct device_attribute *attr, char *buf)
 {
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	int ret;
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &i915->gt.rps;
 
-	intel_runtime_pm_get(dev_priv);
-
-	mutex_lock(&dev_priv->pcu_lock);
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		u32 freq;
-		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-		ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
-	} else {
-		ret = intel_gpu_freq(dev_priv,
-				     intel_get_cagf(dev_priv,
-						    I915_READ(GEN6_RPSTAT1)));
-	}
-	mutex_unlock(&dev_priv->pcu_lock);
-
-	intel_runtime_pm_put(dev_priv);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			intel_rps_read_actual_frequency(rps));
 }
 
 static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 				    struct device_attribute *attr, char *buf)
 {
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &i915->gt.rps;
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
-			intel_gpu_freq(dev_priv,
-				       dev_priv->gt_pm.rps.cur_freq));
+			intel_gpu_freq(rps, rps->cur_freq));
 }
 
 static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &i915->gt.rps;
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
-			intel_gpu_freq(dev_priv,
-				       dev_priv->gt_pm.rps.boost_freq));
+			intel_gpu_freq(rps, rps->boost_freq));
 }
 
 static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
@@ -303,7 +291,7 @@
 				       const char *buf, size_t count)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &dev_priv->gt.rps;
 	bool boost = false;
 	ssize_t ret;
 	u32 val;
@@ -313,16 +301,16 @@
 		return ret;
 
 	/* Validate against (static) hardware limits */
-	val = intel_freq_opcode(dev_priv, val);
+	val = intel_freq_opcode(rps, val);
 	if (val < rps->min_freq || val > rps->max_freq)
 		return -EINVAL;
 
-	mutex_lock(&dev_priv->pcu_lock);
+	mutex_lock(&rps->lock);
 	if (val != rps->boost_freq) {
 		rps->boost_freq = val;
 		boost = atomic_read(&rps->num_waiters);
 	}
-	mutex_unlock(&dev_priv->pcu_lock);
+	mutex_unlock(&rps->lock);
 	if (boost)
 		schedule_work(&rps->work);
 
@@ -333,19 +321,19 @@
 				     struct device_attribute *attr, char *buf)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &dev_priv->gt.rps;
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
-			intel_gpu_freq(dev_priv,
-				       dev_priv->gt_pm.rps.efficient_freq));
+			intel_gpu_freq(rps, rps->efficient_freq));
 }
 
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &dev_priv->gt.rps;
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
-			intel_gpu_freq(dev_priv,
-				       dev_priv->gt_pm.rps.max_freq_softlimit));
+			intel_gpu_freq(rps, rps->max_freq_softlimit));
 }
 
 static ssize_t gt_max_freq_mhz_store(struct device *kdev,
@@ -353,31 +341,27 @@
 				     const char *buf, size_t count)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	u32 val;
+	struct intel_rps *rps = &dev_priv->gt.rps;
 	ssize_t ret;
+	u32 val;
 
 	ret = kstrtou32(buf, 0, &val);
 	if (ret)
 		return ret;
 
-	intel_runtime_pm_get(dev_priv);
+	mutex_lock(&rps->lock);
 
-	mutex_lock(&dev_priv->pcu_lock);
-
-	val = intel_freq_opcode(dev_priv, val);
-
+	val = intel_freq_opcode(rps, val);
 	if (val < rps->min_freq ||
 	    val > rps->max_freq ||
 	    val < rps->min_freq_softlimit) {
-		mutex_unlock(&dev_priv->pcu_lock);
-		intel_runtime_pm_put(dev_priv);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto unlock;
 	}
 
 	if (val > rps->rp0_freq)
 		DRM_DEBUG("User requested overclocking to %d\n",
-			  intel_gpu_freq(dev_priv, val));
+			  intel_gpu_freq(rps, val));
 
 	rps->max_freq_softlimit = val;
 
@@ -385,14 +369,15 @@
 		      rps->min_freq_softlimit,
 		      rps->max_freq_softlimit);
 
-	/* We still need *_set_rps to process the new max_delay and
+	/*
+	 * We still need *_set_rps to process the new max_delay and
 	 * update the interrupt limits and PMINTRMSK even though
-	 * frequency request may be unchanged. */
-	ret = intel_set_rps(dev_priv, val);
+	 * frequency request may be unchanged.
+	 */
+	intel_rps_set(rps, val);
 
-	mutex_unlock(&dev_priv->pcu_lock);
-
-	intel_runtime_pm_put(dev_priv);
+unlock:
+	mutex_unlock(&rps->lock);
 
 	return ret ?: count;
 }
@@ -400,10 +385,10 @@
 static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &dev_priv->gt.rps;
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
-			intel_gpu_freq(dev_priv,
-				       dev_priv->gt_pm.rps.min_freq_softlimit));
+			intel_gpu_freq(rps, rps->min_freq_softlimit));
 }
 
 static ssize_t gt_min_freq_mhz_store(struct device *kdev,
@@ -411,26 +396,22 @@
 				     const char *buf, size_t count)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	u32 val;
+	struct intel_rps *rps = &dev_priv->gt.rps;
 	ssize_t ret;
+	u32 val;
 
 	ret = kstrtou32(buf, 0, &val);
 	if (ret)
 		return ret;
 
-	intel_runtime_pm_get(dev_priv);
+	mutex_lock(&rps->lock);
 
-	mutex_lock(&dev_priv->pcu_lock);
-
-	val = intel_freq_opcode(dev_priv, val);
-
+	val = intel_freq_opcode(rps, val);
 	if (val < rps->min_freq ||
 	    val > rps->max_freq ||
 	    val > rps->max_freq_softlimit) {
-		mutex_unlock(&dev_priv->pcu_lock);
-		intel_runtime_pm_put(dev_priv);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto unlock;
 	}
 
 	rps->min_freq_softlimit = val;
@@ -439,14 +420,15 @@
 		      rps->min_freq_softlimit,
 		      rps->max_freq_softlimit);
 
-	/* We still need *_set_rps to process the new min_delay and
+	/*
+	 * We still need *_set_rps to process the new min_delay and
 	 * update the interrupt limits and PMINTRMSK even though
-	 * frequency request may be unchanged. */
-	ret = intel_set_rps(dev_priv, val);
+	 * frequency request may be unchanged.
+	 */
+	intel_rps_set(rps, val);
 
-	mutex_unlock(&dev_priv->pcu_lock);
-
-	intel_runtime_pm_put(dev_priv);
+unlock:
+	mutex_unlock(&rps->lock);
 
 	return ret ?: count;
 }
@@ -468,22 +450,22 @@
 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &dev_priv->gt.rps;
 	u32 val;
 
 	if (attr == &dev_attr_gt_RP0_freq_mhz)
-		val = intel_gpu_freq(dev_priv, rps->rp0_freq);
+		val = intel_gpu_freq(rps, rps->rp0_freq);
 	else if (attr == &dev_attr_gt_RP1_freq_mhz)
-		val = intel_gpu_freq(dev_priv, rps->rp1_freq);
+		val = intel_gpu_freq(rps, rps->rp1_freq);
 	else if (attr == &dev_attr_gt_RPn_freq_mhz)
-		val = intel_gpu_freq(dev_priv, rps->min_freq);
+		val = intel_gpu_freq(rps, rps->min_freq);
 	else
 		BUG();
 
 	return snprintf(buf, PAGE_SIZE, "%d\n", val);
 }
 
-static const struct attribute *gen6_attrs[] = {
+static const struct attribute * const gen6_attrs[] = {
 	&dev_attr_gt_act_freq_mhz.attr,
 	&dev_attr_gt_cur_freq_mhz.attr,
 	&dev_attr_gt_boost_freq_mhz.attr,
@@ -495,7 +477,7 @@
 	NULL,
 };
 
-static const struct attribute *vlv_attrs[] = {
+static const struct attribute * const vlv_attrs[] = {
 	&dev_attr_gt_act_freq_mhz.attr,
 	&dev_attr_gt_cur_freq_mhz.attr,
 	&dev_attr_gt_boost_freq_mhz.attr,
@@ -516,26 +498,32 @@
 {
 
 	struct device *kdev = kobj_to_dev(kobj);
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct drm_i915_error_state_buf error_str;
-	struct i915_gpu_state *gpu;
-	ssize_t ret;
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+	struct i915_gpu_coredump *gpu;
+	ssize_t ret = 0;
 
-	ret = i915_error_state_buf_init(&error_str, dev_priv, count, off);
-	if (ret)
-		return ret;
+	/*
+	 * FIXME: Concurrent clients triggering resets and reading + clearing
+	 * dumps can cause inconsistent sysfs reads when a user calls in with a
+	 * non-zero offset to complete a prior partial read but the
+	 * gpu_coredump has been cleared or replaced.
+	 */
 
-	gpu = i915_first_error_state(dev_priv);
-	ret = i915_error_state_to_str(&error_str, gpu);
-	if (ret)
-		goto out;
+	gpu = i915_first_error_state(i915);
+	if (IS_ERR(gpu)) {
+		ret = PTR_ERR(gpu);
+	} else if (gpu) {
+		ret = i915_gpu_coredump_copy_to_buffer(gpu, buf, off, count);
+		i915_gpu_coredump_put(gpu);
+	} else {
+		const char *str = "No error state collected\n";
+		size_t len = strlen(str);
 
-	ret = count < error_str.bytes ? count : error_str.bytes;
-	memcpy(buf, error_str.buf, ret);
-
-out:
-	i915_gpu_state_put(gpu);
-	i915_error_state_buf_release(&error_str);
+		if (off < len) {
+			ret = min_t(size_t, count, len - off);
+			memcpy(buf, str + off, ret);
+		}
+	}
 
 	return ret;
 }
@@ -547,7 +535,7 @@
 	struct device *kdev = kobj_to_dev(kobj);
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 
-	DRM_DEBUG_DRIVER("Resetting error state\n");
+	drm_dbg(&dev_priv->drm, "Resetting error state\n");
 	i915_reset_error_state(dev_priv);
 
 	return count;
@@ -586,31 +574,36 @@
 		ret = sysfs_merge_group(&kdev->kobj,
 					&rc6_attr_group);
 		if (ret)
-			DRM_ERROR("RC6 residency sysfs setup failed\n");
+			drm_err(&dev_priv->drm,
+				"RC6 residency sysfs setup failed\n");
 	}
 	if (HAS_RC6p(dev_priv)) {
 		ret = sysfs_merge_group(&kdev->kobj,
 					&rc6p_attr_group);
 		if (ret)
-			DRM_ERROR("RC6p residency sysfs setup failed\n");
+			drm_err(&dev_priv->drm,
+				"RC6p residency sysfs setup failed\n");
 	}
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		ret = sysfs_merge_group(&kdev->kobj,
 					&media_rc6_attr_group);
 		if (ret)
-			DRM_ERROR("Media RC6 residency sysfs setup failed\n");
+			drm_err(&dev_priv->drm,
+				"Media RC6 residency sysfs setup failed\n");
 	}
 #endif
 	if (HAS_L3_DPF(dev_priv)) {
 		ret = device_create_bin_file(kdev, &dpf_attrs);
 		if (ret)
-			DRM_ERROR("l3 parity sysfs setup failed\n");
+			drm_err(&dev_priv->drm,
+				"l3 parity sysfs setup failed\n");
 
 		if (NUM_L3_SLICES(dev_priv) > 1) {
 			ret = device_create_bin_file(kdev,
 						     &dpf_attrs_1);
 			if (ret)
-				DRM_ERROR("l3 parity slice 1 setup failed\n");
+				drm_err(&dev_priv->drm,
+					"l3 parity slice 1 setup failed\n");
 		}
 	}
 
@@ -620,9 +613,11 @@
 	else if (INTEL_GEN(dev_priv) >= 6)
 		ret = sysfs_create_files(&kdev->kobj, gen6_attrs);
 	if (ret)
-		DRM_ERROR("RPS sysfs setup failed\n");
+		drm_err(&dev_priv->drm, "RPS sysfs setup failed\n");
 
 	i915_setup_error_capture(kdev);
+
+	intel_engines_add_sysfs(dev_priv);
 }
 
 void i915_teardown_sysfs(struct drm_i915_private *dev_priv)

--
Gitblit v1.6.2