forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/gpu/drm/qxl/qxl_object.c
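
This change tracks the upstream DRM/TTM rework of the qxl buffer-object code:
the standalone gem_base GEM object is replaced by the GEM object embedded in
the TTM buffer object (bo->tbo.base), GEM callbacks move into a per-object
drm_gem_object_funcs table, TTM placements carry an explicit mem_type instead
of per-type placement flags, kernel mappings become refcounted through
map_count, and qxl_bo_pin()/qxl_bo_reserve() lose their extra parameters.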
@@ -33,13 +33,14 @@
 	struct qxl_device *qdev;
 
 	bo = to_qxl_bo(tbo);
-	qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+	qdev = to_qxl(bo->tbo.base.dev);
 
 	qxl_surface_evict(qdev, bo, false);
+	WARN_ON_ONCE(bo->map_count > 0);
 	mutex_lock(&qdev->gem.mutex);
 	list_del_init(&bo->list);
 	mutex_unlock(&qdev->gem.mutex);
-	drm_gem_object_release(&bo->gem_base);
+	drm_gem_object_release(&bo->tbo.base);
 	kfree(bo);
 }
 
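The new WARN_ON_ONCE fires if a BO is destroyed while a kernel mapping is
still outstanding; map_count is the mapping refcount maintained by
qxl_bo_kmap()/qxl_bo_kunmap() in the hunks below.
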
@@ -53,19 +54,34 @@
 void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
 {
 	u32 c = 0;
-	u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
-	unsigned i;
+	u32 pflag = 0;
+	unsigned int i;
+
+	if (pinned)
+		pflag |= TTM_PL_FLAG_NO_EVICT;
+	if (qbo->tbo.base.size <= PAGE_SIZE)
+		pflag |= TTM_PL_FLAG_TOPDOWN;
 
 	qbo->placement.placement = qbo->placements;
 	qbo->placement.busy_placement = qbo->placements;
-	if (domain == QXL_GEM_DOMAIN_VRAM)
-		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
-	if (domain == QXL_GEM_DOMAIN_SURFACE)
-		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV | pflag;
-	if (domain == QXL_GEM_DOMAIN_CPU)
-		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
-	if (!c)
-		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	if (domain == QXL_GEM_DOMAIN_VRAM) {
+		qbo->placements[c].mem_type = TTM_PL_VRAM;
+		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
+	}
+	if (domain == QXL_GEM_DOMAIN_SURFACE) {
+		qbo->placements[c].mem_type = TTM_PL_PRIV;
+		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
+		qbo->placements[c].mem_type = TTM_PL_VRAM;
+		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
+	}
+	if (domain == QXL_GEM_DOMAIN_CPU) {
+		qbo->placements[c].mem_type = TTM_PL_SYSTEM;
+		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | pflag;
+	}
+	if (!c) {
+		qbo->placements[c].mem_type = TTM_PL_SYSTEM;
+		qbo->placements[c++].flags = TTM_PL_MASK_CACHING;
+	}
 	qbo->placement.num_placement = c;
 	qbo->placement.num_busy_placement = c;
 	for (i = 0; i < c; ++i) {
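
With this TTM generation the memory type is no longer OR'ed into the flags
word as TTM_PL_FLAG_VRAM and friends; it lives in a dedicated mem_type field
of struct ttm_place, and the flags word keeps only attributes such as caching
and the TOPDOWN hint (set above for BOs of at most one page). As a sketch,
one entry of qbo->placements[] filled in above looks like this (field layout
assumed from the ttm_place of this kernel generation):

	struct ttm_place place = {
		.fpfn     = 0,			/* no address-range restriction */
		.lpfn     = 0,
		.mem_type = TTM_PL_VRAM,	/* where the BO may reside */
		.flags    = TTM_PL_FLAG_CACHED |	/* caching attribute only */
			    TTM_PL_FLAG_TOPDOWN,
	};

Note also that QXL_GEM_DOMAIN_SURFACE now lists TTM_PL_VRAM as a second
acceptable placement, giving the allocator a fallback when surface memory
(TTM_PL_PRIV) is exhausted.
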
@@ -74,9 +90,21 @@
 	}
 }
 
+static const struct drm_gem_object_funcs qxl_object_funcs = {
+	.free = qxl_gem_object_free,
+	.open = qxl_gem_object_open,
+	.close = qxl_gem_object_close,
+	.pin = qxl_gem_prime_pin,
+	.unpin = qxl_gem_prime_unpin,
+	.get_sg_table = qxl_gem_prime_get_sg_table,
+	.vmap = qxl_gem_prime_vmap,
+	.vunmap = qxl_gem_prime_vunmap,
+	.mmap = drm_gem_ttm_mmap,
+	.print_info = drm_gem_ttm_print_info,
+};
 
-int qxl_bo_create(struct qxl_device *qdev,
-		  unsigned long size, bool kernel, bool pinned, u32 domain,
+int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
+		  bool kernel, bool pinned, u32 domain, u32 priority,
 		  struct qxl_surface *surf,
 		  struct qxl_bo **bo_ptr)
 {
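
GEM object callbacks (free, open/close, the PRIME pin/vmap hooks, mmap) are
now supplied per object through drm_gem_object_funcs rather than through
fields of struct drm_driver; the table is attached in qxl_bo_create() below
via bo->tbo.base.funcs. drm_gem_ttm_mmap() and drm_gem_ttm_print_info() are
the generic helpers from the drm_gem_ttm_helper library.
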
@@ -93,11 +121,12 @@
 	if (bo == NULL)
 		return -ENOMEM;
 	size = roundup(size, PAGE_SIZE);
-	r = drm_gem_object_init(&qdev->ddev, &bo->gem_base, size);
+	r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);
 	if (unlikely(r)) {
 		kfree(bo);
 		return r;
 	}
+	bo->tbo.base.funcs = &qxl_object_funcs;
 	bo->type = domain;
 	bo->pin_count = pinned ? 1 : 0;
 	bo->surface_id = 0;
@@ -108,6 +137,7 @@
 
 	qxl_ttm_placement_from_domain(bo, domain, pinned);
 
+	bo->tbo.priority = priority;
 	r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
 			&bo->placement, 0, !kernel, size,
 			NULL, NULL, &qxl_ttm_bo_destroy);
@@ -130,6 +160,7 @@
 	if (bo->kptr) {
 		if (ptr)
 			*ptr = bo->kptr;
+		bo->map_count++;
 		return 0;
 	}
 	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
@@ -138,13 +169,14 @@
 	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
 	if (ptr)
 		*ptr = bo->kptr;
+	bo->map_count = 1;
 	return 0;
 }
 
 void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
 			      struct qxl_bo *bo, int page_offset)
 {
-	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
+	unsigned long offset;
 	void *rptr;
 	int ret;
 	struct io_mapping *map;
@@ -156,11 +188,8 @@
 	else
 		goto fallback;
 
-	(void) ttm_mem_io_lock(man, false);
-	ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
-	ttm_mem_io_unlock(man);
-
-	return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
+	offset = bo->tbo.mem.start << PAGE_SHIFT;
+	return io_mapping_map_atomic_wc(map, offset + page_offset);
 fallback:
 	if (bo->kptr) {
 		rptr = bo->kptr + (page_offset * PAGE_SIZE);
@@ -179,6 +208,9 @@
 {
 	if (bo->kptr == NULL)
 		return;
+	bo->map_count--;
+	if (bo->map_count > 0)
+		return;
 	bo->kptr = NULL;
 	ttm_bo_kunmap(&bo->kmap);
 }
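
qxl_bo_kmap()/qxl_bo_kunmap() pairs now nest: the first kmap creates the
mapping, later ones reuse bo->kptr, and only the kunmap that drops map_count
to zero calls ttm_bo_kunmap(). A usage sketch (error handling elided):

	void *ptr;

	qxl_bo_kmap(bo, &ptr);	/* first map: ttm_bo_kmap(), map_count = 1    */
	qxl_bo_kmap(bo, &ptr);	/* nested map: reuses bo->kptr, map_count = 2 */

	qxl_bo_kunmap(bo);	/* map_count -> 1, mapping kept               */
	qxl_bo_kunmap(bo);	/* map_count -> 0, ttm_bo_kunmap() runs       */
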
@@ -186,22 +218,12 @@
 void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
 			       struct qxl_bo *bo, void *pmap)
 {
-	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
-	struct io_mapping *map;
-
-	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
-		map = qdev->vram_mapping;
-	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
-		map = qdev->surface_mapping;
-	else
+	if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
+	    (bo->tbo.mem.mem_type != TTM_PL_PRIV))
 		goto fallback;
 
 	io_mapping_unmap_atomic(pmap);
-
-	(void) ttm_mem_io_lock(man, false);
-	ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
-	ttm_mem_io_unlock(man);
-	return ;
+	return;
 fallback:
 	qxl_bo_kunmap(bo);
 }
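
The atomic-unmap path no longer reserves and frees the I/O region around each
access (the ttm_mem_io_lock()/ttm_mem_io_reserve()/ttm_mem_io_free() sequence
is gone from this TTM generation), so the function only needs to distinguish
the atomically mapped cases (TTM_PL_VRAM, TTM_PL_PRIV) from the kmap
fallback; the io_mapping lookup, whose result was never used, is dropped.
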
@@ -211,34 +233,30 @@
 	if ((*bo) == NULL)
 		return;
 
-	drm_gem_object_put_unlocked(&(*bo)->gem_base);
+	drm_gem_object_put(&(*bo)->tbo.base);
 	*bo = NULL;
 }
 
 struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
 {
-	drm_gem_object_get(&bo->gem_base);
+	drm_gem_object_get(&bo->tbo.base);
 	return bo;
 }
 
-static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
+static int __qxl_bo_pin(struct qxl_bo *bo)
 {
 	struct ttm_operation_ctx ctx = { false, false };
-	struct drm_device *ddev = bo->gem_base.dev;
+	struct drm_device *ddev = bo->tbo.base.dev;
 	int r;
 
 	if (bo->pin_count) {
 		bo->pin_count++;
-		if (gpu_addr)
-			*gpu_addr = qxl_bo_gpu_offset(bo);
 		return 0;
 	}
-	qxl_ttm_placement_from_domain(bo, domain, true);
+	qxl_ttm_placement_from_domain(bo, bo->type, true);
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (likely(r == 0)) {
 		bo->pin_count = 1;
-		if (gpu_addr != NULL)
-			*gpu_addr = qxl_bo_gpu_offset(bo);
 	}
 	if (unlikely(r != 0))
 		dev_err(ddev->dev, "%p pin failed\n", bo);
@@ -248,7 +266,7 @@
 static int __qxl_bo_unpin(struct qxl_bo *bo)
 {
 	struct ttm_operation_ctx ctx = { false, false };
-	struct drm_device *ddev = bo->gem_base.dev;
+	struct drm_device *ddev = bo->tbo.base.dev;
 	int r, i;
 
 	if (!bo->pin_count) {
@@ -266,21 +284,20 @@
 	return r;
 }
 
-
 /*
  * Reserve the BO before pinning the object. If the BO was reserved
  * beforehand, use the internal version directly __qxl_bo_pin.
  *
  */
-int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
+int qxl_bo_pin(struct qxl_bo *bo)
 {
 	int r;
 
-	r = qxl_bo_reserve(bo, false);
+	r = qxl_bo_reserve(bo);
 	if (r)
 		return r;
 
-	r = __qxl_bo_pin(bo, bo->type, NULL);
+	r = __qxl_bo_pin(bo);
 	qxl_bo_unreserve(bo);
 	return r;
 }
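
Callers that previously received the GPU address through the out parameter
now query it after pinning. A hypothetical call-site migration, using the
existing qxl_bo_gpu_offset() helper that the old code called internally:

	/* before */
	u64 gpu_addr;
	r = qxl_bo_pin(bo, bo->type, &gpu_addr);

	/* after */
	r = qxl_bo_pin(bo);
	if (!r)
		gpu_addr = qxl_bo_gpu_offset(bo);

The domain argument is gone as well: __qxl_bo_pin() now always pins into the
BO's own domain, bo->type.
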
@@ -294,7 +311,7 @@
 {
 	int r;
 
-	r = qxl_bo_reserve(bo, false);
+	r = qxl_bo_reserve(bo);
 	if (r)
 		return r;
 
@@ -312,13 +329,13 @@
 		dev_err(qdev->ddev.dev, "Userspace still has active objects !\n");
 	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
 		dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
-			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
-			*((unsigned long *)&bo->gem_base.refcount));
+			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
+			*((unsigned long *)&bo->tbo.base.refcount));
 		mutex_lock(&qdev->gem.mutex);
 		list_del_init(&bo->list);
 		mutex_unlock(&qdev->gem.mutex);
 		/* this should unref the ttm bo */
-		drm_gem_object_put_unlocked(&bo->gem_base);
+		drm_gem_object_put(&bo->tbo.base);
 	}
 }
@@ -335,13 +352,14 @@
 int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
 {
 	int ret;
+
 	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
 		/* allocate a surface id for this surface now */
 		ret = qxl_surface_id_alloc(qdev, bo);
 		if (ret)
 			return ret;
 
-		ret = qxl_hw_surface_alloc(qdev, bo, NULL);
+		ret = qxl_hw_surface_alloc(qdev, bo);
 		if (ret)
 			return ret;
 	}
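
qxl_hw_surface_alloc() loses its third argument, which was always NULL at
this call site.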