2024-05-10 cde9070d9970eef1f7ec2360586c802a16230ad8
--- a/kernel/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/kernel/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -39,17 +39,7 @@
 	list_for_each_entry_continue_reverse(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-		reservation_object_unlock(bo->resv);
-	}
-}
-
-static void ttm_eu_del_from_lru_locked(struct list_head *list)
-{
-	struct ttm_validate_buffer *entry;
-
-	list_for_each_entry(entry, list, head) {
-		struct ttm_buffer_object *bo = entry->bo;
-		ttm_bo_del_from_lru(bo);
+		dma_resv_unlock(bo->base.resv);
 	}
 }
 
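Note: this hunk is part of the reservation_object -> dma_resv rename. The
reservation object now lives in the GEM base object, so bo->resv becomes
bo->base.resv, and ttm_eu_del_from_lru_locked() disappears because reserved
buffers now stay on the LRU (see the later hunks). A minimal sketch of the
renamed lock/unlock pair, illustrative only and not part of this patch:

	#include <linux/dma-resv.h>
	#include <drm/ttm/ttm_bo_api.h>

	/* Take one BO's reservation lock under a ww ticket, then drop it.
	 * dma_resv_lock() can return -EDEADLK; callers resolve that via
	 * the slowpath shown further down. */
	static int example_hold_resv(struct ttm_buffer_object *bo,
				     struct ww_acquire_ctx *ticket)
	{
		int ret = dma_resv_lock(bo->base.resv, ticket);

		if (ret)
			return ret;
		/* ... validate or fence the buffer here ... */
		dma_resv_unlock(bo->base.resv);
		return 0;
	}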
@@ -57,22 +47,18 @@
 				struct list_head *list)
 {
 	struct ttm_validate_buffer *entry;
-	struct ttm_bo_global *glob;
 
 	if (list_empty(list))
 		return;
 
-	entry = list_first_entry(list, struct ttm_validate_buffer, head);
-	glob = entry->bo->bdev->glob;
-
-	spin_lock(&glob->lru_lock);
+	spin_lock(&ttm_bo_glob.lru_lock);
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-		ttm_bo_add_to_lru(bo);
-		reservation_object_unlock(bo->resv);
+		ttm_bo_move_to_lru_tail(bo, NULL);
+		dma_resv_unlock(bo->base.resv);
 	}
-	spin_unlock(&glob->lru_lock);
+	spin_unlock(&ttm_bo_glob.lru_lock);
 
 	if (ticket)
 		ww_acquire_fini(ticket);
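Note: ttm_bo_glob is now a single global instance, so the old pointer chase
through entry->bo->bdev->glob is gone and its lru_lock is taken directly.
Buffers are also no longer re-added to the LRU on release; they stayed on it
all along and are merely bumped to the tail. A hedged sketch of that release
idiom for one BO:

	/* lru_lock must be held around the move; NULL means the BO is
	 * not part of a bulk-move section. */
	spin_lock(&ttm_bo_glob.lru_lock);
	ttm_bo_move_to_lru_tail(bo, NULL);
	spin_unlock(&ttm_bo_glob.lru_lock);
	dma_resv_unlock(bo->base.resv);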
@@ -95,15 +81,11 @@
 			   struct list_head *list, bool intr,
 			   struct list_head *dups)
 {
-	struct ttm_bo_global *glob;
 	struct ttm_validate_buffer *entry;
 	int ret;
 
 	if (list_empty(list))
 		return 0;
-
-	entry = list_first_entry(list, struct ttm_validate_buffer, head);
-	glob = entry->bo->bdev->glob;
 
 	if (ticket)
 		ww_acquire_init(ticket, &reservation_ww_class);
@@ -111,13 +93,8 @@
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
-		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-			reservation_object_unlock(bo->resv);
-
-			ret = -EBUSY;
-
-		} else if (ret == -EALREADY && dups) {
+		ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
+		if (ret == -EALREADY && dups) {
 			struct ttm_validate_buffer *safe = entry;
 			entry = list_prev_entry(entry, head);
 			list_del(&safe->head);
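Note: the internal __ttm_bo_reserve() call becomes the public ttm_bo_reserve(),
and the -EBUSY path vanishes along with TTM's cpu_writers bookkeeping. On
-EALREADY with a ticket, the same context already holds the lock, so the entry
is moved onto the dups list instead of failing. A simplified sketch of what
ttm_bo_reserve(bo, intr, no_wait, ticket) boils down to here (the real helper
also handles the no_wait trylock case):

	if (intr)
		ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
	else
		ret = dma_resv_lock(bo->base.resv, ticket);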
@@ -126,10 +103,11 @@
 		}
 
 		if (!ret) {
-			if (!entry->shared)
+			if (!entry->num_shared)
 				continue;
 
-			ret = reservation_object_reserve_shared(bo->resv);
+			ret = dma_resv_reserve_shared(bo->base.resv,
+						      entry->num_shared);
 			if (!ret)
 				continue;
 		}
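Note: the old entry->shared bool becomes an entry->num_shared count, so a
caller can pre-allocate exactly as many shared-fence slots as it intends to
fill; dma_resv_reserve_shared() now takes that count. A hypothetical caller
setting up one validation entry (names below are placeholders, not from this
patch):

	struct ttm_validate_buffer val = {};

	val.bo = bo;
	val.num_shared = 2;	/* 2 shared-fence slots; 0 = exclusive fence */
	list_add_tail(&val.head, &validate_list);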
@@ -141,21 +119,14 @@
 		ttm_eu_backoff_reservation_reverse(list, entry);
 
 		if (ret == -EDEADLK) {
-			if (intr) {
-				ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
-								       ticket);
-			} else {
-				ww_mutex_lock_slow(&bo->resv->lock, ticket);
-				ret = 0;
-			}
+			ret = ttm_bo_reserve_slowpath(bo, intr, ticket);
 		}
 
-		if (!ret && entry->shared)
-			ret = reservation_object_reserve_shared(bo->resv);
+		if (!ret && entry->num_shared)
+			ret = dma_resv_reserve_shared(bo->base.resv,
+						      entry->num_shared);
 
 		if (unlikely(ret != 0)) {
-			if (ret == -EINTR)
-				ret = -ERESTARTSYS;
 			if (ticket) {
 				ww_acquire_done(ticket);
 				ww_acquire_fini(ticket);
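Note: the open-coded ww_mutex_lock_slow()/ww_mutex_lock_slow_interruptible()
dance collapses into ttm_bo_reserve_slowpath(), which maps -EINTR to
-ERESTARTSYS itself; that is why the explicit translation is dropped here.
The retry shape, roughly (illustrative sketch):

	ret = ttm_bo_reserve(bo, intr, false, ticket);
	if (ret == -EDEADLK) {
		/* drop everything already held, then sleep on the
		 * contended lock before retrying the list */
		ttm_eu_backoff_reservation_reverse(list, entry);
		ret = ttm_bo_reserve_slowpath(bo, intr, ticket);
	}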
@@ -170,11 +141,6 @@
 		list_add(&entry->head, list);
 	}
 
-	if (ticket)
-		ww_acquire_done(ticket);
-	spin_lock(&glob->lru_lock);
-	ttm_eu_del_from_lru_locked(list);
-	spin_unlock(&glob->lru_lock);
 	return 0;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -184,31 +150,22 @@
 				 struct dma_fence *fence)
 {
 	struct ttm_validate_buffer *entry;
-	struct ttm_buffer_object *bo;
-	struct ttm_bo_global *glob;
-	struct ttm_bo_device *bdev;
-	struct ttm_bo_driver *driver;
 
 	if (list_empty(list))
 		return;
 
-	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
-	bdev = bo->bdev;
-	driver = bdev->driver;
-	glob = bo->bdev->glob;
-
-	spin_lock(&glob->lru_lock);
-
+	spin_lock(&ttm_bo_glob.lru_lock);
 	list_for_each_entry(entry, list, head) {
-		bo = entry->bo;
-		if (entry->shared)
-			reservation_object_add_shared_fence(bo->resv, fence);
+		struct ttm_buffer_object *bo = entry->bo;
+
+		if (entry->num_shared)
+			dma_resv_add_shared_fence(bo->base.resv, fence);
 		else
-			reservation_object_add_excl_fence(bo->resv, fence);
-		ttm_bo_add_to_lru(bo);
-		reservation_object_unlock(bo->resv);
+			dma_resv_add_excl_fence(bo->base.resv, fence);
+		ttm_bo_move_to_lru_tail(bo, NULL);
+		dma_resv_unlock(bo->base.resv);
 	}
-	spin_unlock(&glob->lru_lock);
+	spin_unlock(&ttm_bo_glob.lru_lock);
 	if (ticket)
 		ww_acquire_fini(ticket);
 }
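Note: taken together, the execbuf-util flow after this patch is
reserve -> work -> fence -> release. A minimal caller sketch under that
assumption; my_run_job() is a placeholder, not from this patch:

	#include <drm/ttm/ttm_execbuf_util.h>

	static int submit(struct list_head *list, struct ww_acquire_ctx *ticket)
	{
		struct dma_fence *fence;
		int ret;

		/* Locks every BO on the list, backing off on -EDEADLK. */
		ret = ttm_eu_reserve_buffers(ticket, list, true, NULL);
		if (ret)
			return ret;	/* nothing is held on error */

		fence = my_run_job();	/* placeholder for the GPU job */
		if (IS_ERR(fence)) {
			ttm_eu_backoff_reservation(ticket, list);
			return PTR_ERR(fence);
		}

		/* Attaches fence (shared or exclusive per num_shared),
		 * bumps each BO to the LRU tail, unlocks, ends the ticket. */
		ttm_eu_fence_buffer_objects(ticket, list, fence);
		dma_fence_put(fence);
		return 0;
	}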