From d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 02:45:28 +0000
Subject: [PATCH] drm/amdgpu: rework amdgpu_sync fence selection and switch to dma_resv

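Rework how amdgpu_sync selects the fences to synchronize to:

- Drop the unused drmP.h include; remove the explicit flag from
  struct amdgpu_sync_entry, the adev and explicit parameters of
  amdgpu_sync_fence(), and the explicit output of
  amdgpu_sync_get_fence().
- Add amdgpu_sync_vm_fence(), which records the fence as the last
  VM update in addition to adding it to the sync object.
- Replace the explicit_sync boolean of amdgpu_sync_resv() with an
  amdgpu_sync_mode enum (AMDGPU_SYNC_ALWAYS, AMDGPU_SYNC_NE_OWNER,
  AMDGPU_SYNC_EQ_OWNER, AMDGPU_SYNC_EXPLICIT) so callers state how
  the fence owner affects which shared fences are added.
- Always sync to move fences, skip KFD eviction and VM update
  fences for defined owners, and warn when an eviction fence is
  added while debug_evictions is set.
- Convert from reservation_object to the dma_resv API.

As an illustrative sketch of the interface change (the job and resv
variables here are hypothetical, not part of this patch), a caller
that used to pass a boolean:

	r = amdgpu_sync_resv(adev, &job->sync, resv, owner, false);

now states a sync mode explicitly:

	r = amdgpu_sync_resv(adev, &job->sync, resv,
			     AMDGPU_SYNC_NE_OWNER, owner);
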
---
 kernel/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c |  119 +++++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 71 insertions(+), 48 deletions(-)

diff --git a/kernel/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/kernel/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 2d6f5ec..8ea6c49 100644
--- a/kernel/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/kernel/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -28,7 +28,6 @@
  *    Christian König <christian.koenig@amd.com>
  */
 
-#include <drm/drmP.h>
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 #include "amdgpu_amdkfd.h"
@@ -36,7 +35,6 @@
 struct amdgpu_sync_entry {
 	struct hlist_node	node;
 	struct dma_fence	*fence;
-	bool	explicit;
 };
 
 static struct kmem_cache *amdgpu_sync_slab;
@@ -130,7 +128,7 @@
  * Tries to add the fence to an existing hash entry. Returns true when an entry
  * was found, false otherwise.
  */
-static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f, bool explicit)
+static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
 {
 	struct amdgpu_sync_entry *e;
 
@@ -139,10 +137,6 @@
 			continue;
 
 		amdgpu_sync_keep_later(&e->fence, f);
-
-		/* Preserve eplicit flag to not loose pipe line sync */
-		e->explicit |= explicit;
-
 		return true;
 	}
 	return false;
@@ -152,28 +146,23 @@
  * amdgpu_sync_fence - remember to sync to this fence
  *
  * @sync: sync object to add fence to
- * @fence: fence to sync to
+ * @f: fence to sync to
  *
+ * Add the fence to the sync object.
  */
-int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-		      struct dma_fence *f, bool explicit)
+int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f)
 {
 	struct amdgpu_sync_entry *e;
 
 	if (!f)
 		return 0;
-	if (amdgpu_sync_same_dev(adev, f) &&
-	    amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
-		amdgpu_sync_keep_later(&sync->last_vm_update, f);
 
-	if (amdgpu_sync_add_later(sync, f, explicit))
+	if (amdgpu_sync_add_later(sync, f))
 		return 0;
 
 	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
 	if (!e)
 		return -ENOMEM;
-
-	e->explicit = explicit;
 
 	hash_add(sync->fences, &e->node, f->context);
 	e->fence = dma_fence_get(f);
@@ -181,22 +170,38 @@
 }
 
 /**
+ * amdgpu_sync_vm_fence - remember to sync to this VM fence
+ *
+ * @sync: sync object to add fence to
+ * @fence: the VM fence to add
+ *
+ * Add the fence to the sync object and remember it as VM update.
+ */
+int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
+{
+	if (!fence)
+		return 0;
+
+	amdgpu_sync_keep_later(&sync->last_vm_update, fence);
+	return amdgpu_sync_fence(sync, fence);
+}
+
+/**
  * amdgpu_sync_resv - sync to a reservation object
  *
  * @sync: sync object to add fences from reservation object to
  * @resv: reservation object with embedded fence
- * @explicit_sync: true if we should only sync to the exclusive fence
+ * @mode: how owner affects which fences we sync to
+ * @owner: owner of the planned job submission
  *
  * Sync to the fence
  */
-int amdgpu_sync_resv(struct amdgpu_device *adev,
-		     struct amdgpu_sync *sync,
-		     struct reservation_object *resv,
-		     void *owner, bool explicit_sync)
+int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+		     struct dma_resv *resv, enum amdgpu_sync_mode mode,
+		     void *owner)
 {
-	struct reservation_object_list *flist;
+	struct dma_resv_list *flist;
 	struct dma_fence *f;
-	void *fence_owner;
 	unsigned i;
 	int r = 0;
 
@@ -204,43 +209,64 @@
 		return -EINVAL;
 
 	/* always sync to the exclusive fence */
-	f = reservation_object_get_excl(resv);
-	r = amdgpu_sync_fence(adev, sync, f, false);
+	f = dma_resv_get_excl(resv);
+	r = amdgpu_sync_fence(sync, f);
 
-	flist = reservation_object_get_list(resv);
+	flist = dma_resv_get_list(resv);
 	if (!flist || r)
 		return r;
 
 	for (i = 0; i < flist->shared_count; ++i) {
+		void *fence_owner;
+
 		f = rcu_dereference_protected(flist->shared[i],
-					      reservation_object_held(resv));
+					      dma_resv_held(resv));
+
+		fence_owner = amdgpu_sync_get_owner(f);
+
+		/* Always sync to moves, no matter what */
+		if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED) {
+			r = amdgpu_sync_fence(sync, f);
+			if (r)
+				break;
+		}
+
 		/* We only want to trigger KFD eviction fences on
 		 * evict or move jobs. Skip KFD fences otherwise.
 		 */
-		fence_owner = amdgpu_sync_get_owner(f);
 		if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
 		    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
 			continue;
 
-		if (amdgpu_sync_same_dev(adev, f)) {
-			/* VM updates are only interesting
-			 * for other VM updates and moves.
-			 */
-			if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
-			    (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
-			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
-			     (fence_owner == AMDGPU_FENCE_OWNER_VM)))
-				continue;
+		/* Never sync to VM updates either. */
+		if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
+		    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
+			continue;
 
-			/* Ignore fence from the same owner and explicit one as
-			 * long as it isn't undefined.
-			 */
-			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
-			    (fence_owner == owner || explicit_sync))
+		/* Ignore fences depending on the sync mode */
+		switch (mode) {
+		case AMDGPU_SYNC_ALWAYS:
+			break;
+
+		case AMDGPU_SYNC_NE_OWNER:
+			if (amdgpu_sync_same_dev(adev, f) &&
+			    fence_owner == owner)
 				continue;
+			break;
+
+		case AMDGPU_SYNC_EQ_OWNER:
+			if (amdgpu_sync_same_dev(adev, f) &&
+			    fence_owner != owner)
+				continue;
+			break;
+
+		case AMDGPU_SYNC_EXPLICIT:
+			continue;
 		}
 
-		r = amdgpu_sync_fence(adev, sync, f, false);
+		WARN(debug_evictions && fence_owner == AMDGPU_FENCE_OWNER_KFD,
+		     "Adding eviction fence to sync obj");
+		r = amdgpu_sync_fence(sync, f);
 		if (r)
 			break;
 	}
@@ -295,11 +321,10 @@
  * amdgpu_sync_get_fence - get the next fence from the sync object
  *
  * @sync: sync object to use
- * @explicit: true if the next fence is explicit
  *
  * Get and removes the next fence from the sync object not signaled yet.
  */
-struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit)
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
 {
 	struct amdgpu_sync_entry *e;
 	struct hlist_node *tmp;
@@ -308,8 +333,6 @@
 	hash_for_each_safe(sync->fences, i, tmp, e, node) {
 
 		f = e->fence;
-		if (explicit)
-			*explicit = e->explicit;
 
 		hash_del(&e->node);
 		kmem_cache_free(amdgpu_sync_slab, e);
@@ -341,7 +364,7 @@
 	hash_for_each_safe(source->fences, i, tmp, e, node) {
 		f = e->fence;
 		if (!dma_fence_is_signaled(f)) {
-			r = amdgpu_sync_fence(NULL, clone, f, e->explicit);
+			r = amdgpu_sync_fence(clone, f);
 			if (r)
 				return r;
 		} else {

--
Gitblit v1.6.2