
hc
2024-01-31 f70575805708cabdedea7498aaa3f710fde4d920
kernel/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -24,72 +24,61 @@
 
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
+#include <linux/iommu.h>
 
-#include "mock_engine.h"
-#include "mock_context.h"
+#include <drm/drm_managed.h>
+
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
+#include "gt/mock_engine.h"
+#include "intel_memory_region.h"
+
 #include "mock_request.h"
 #include "mock_gem_device.h"
-#include "mock_gem_object.h"
 #include "mock_gtt.h"
 #include "mock_uncore.h"
+#include "mock_region.h"
+
+#include "gem/selftests/mock_context.h"
+#include "gem/selftests/mock_gem_object.h"
 
 void mock_device_flush(struct drm_i915_private *i915)
 {
+	struct intel_gt *gt = &i915->gt;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
-	lockdep_assert_held(&i915->drm.struct_mutex);
-
-	for_each_engine(engine, i915, id)
-		mock_engine_flush(engine);
-
-	i915_retire_requests(i915);
-	GEM_BUG_ON(i915->gt.active_requests);
+	do {
+		for_each_engine(engine, gt, id)
+			mock_engine_flush(engine);
+	} while (intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT));
 }
 
 static void mock_device_release(struct drm_device *dev)
 {
 	struct drm_i915_private *i915 = to_i915(dev);
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
 
-	mutex_lock(&i915->drm.struct_mutex);
+	if (!i915->do_release)
+		goto out;
+
 	mock_device_flush(i915);
-	i915_gem_contexts_lost(i915);
-	mutex_unlock(&i915->drm.struct_mutex);
+	intel_gt_driver_remove(&i915->gt);
 
-	cancel_delayed_work_sync(&i915->gt.retire_work);
-	cancel_delayed_work_sync(&i915->gt.idle_work);
+	i915_gem_driver_release__contexts(i915);
+
 	i915_gem_drain_workqueue(i915);
-
-	mutex_lock(&i915->drm.struct_mutex);
-	for_each_engine(engine, i915, id)
-		mock_engine_free(engine);
-	i915_gem_contexts_fini(i915);
-	mutex_unlock(&i915->drm.struct_mutex);
-
-	drain_workqueue(i915->wq);
 	i915_gem_drain_freed_objects(i915);
 
-	mutex_lock(&i915->drm.struct_mutex);
-	mock_fini_ggtt(i915);
-	mutex_unlock(&i915->drm.struct_mutex);
-	WARN_ON(!list_empty(&i915->gt.timelines));
-
+	mock_fini_ggtt(&i915->ggtt);
 	destroy_workqueue(i915->wq);
 
-	kmem_cache_destroy(i915->priorities);
-	kmem_cache_destroy(i915->dependencies);
-	kmem_cache_destroy(i915->requests);
-	kmem_cache_destroy(i915->vmas);
-	kmem_cache_destroy(i915->objects);
-
-	i915_gemfs_fini(i915);
+	intel_gt_driver_late_release(&i915->gt);
+	intel_memory_regions_driver_release(i915);
 
 	drm_mode_config_cleanup(&i915->drm);
 
-	drm_dev_fini(&i915->drm);
-	put_device(&i915->drm.pdev->dev);
+out:
+	i915_params_free(&i915->params);
 }
 
 static struct drm_driver mock_driver = {
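
(Annotation, not part of the patch.) The rewritten mock_device_release() above returns early through the new i915->do_release guard; that flag is only set at the very end of mock_gem_device() (see the last hunk), so a partially constructed mock device is unwound by the error labels in mock_gem_device() and by mock_destroy_device() rather than by this release callback.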
@@ -106,14 +95,6 @@
 	struct pci_dev *pdev = to_pci_dev(dev);
 
 	kfree(pdev);
-}
-
-static void mock_retire_work_handler(struct work_struct *work)
-{
-}
-
-static void mock_idle_work_handler(struct work_struct *work)
-{
 }
 
 static int pm_domain_resume(struct device *dev)
@@ -135,24 +116,42 @@
 
 struct drm_i915_private *mock_gem_device(void)
 {
+#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
+	static struct dev_iommu fake_iommu = { .priv = (void *)-1 };
+#endif
 	struct drm_i915_private *i915;
 	struct pci_dev *pdev;
-	int err;
 
-	pdev = kzalloc(sizeof(*pdev) + sizeof(*i915), GFP_KERNEL);
+	pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
 	if (!pdev)
-		goto err;
-
+		return NULL;
 	device_initialize(&pdev->dev);
 	pdev->class = PCI_BASE_CLASS_DISPLAY << 16;
 	pdev->dev.release = release_dev;
 	dev_set_name(&pdev->dev, "mock");
-	dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 
 #if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
-	/* hack to disable iommu for the fake device; force identity mapping */
-	pdev->dev.archdata.iommu = (void *)-1;
+	/* HACK to disable iommu for the fake device; force identity mapping */
+	pdev->dev.iommu = &fake_iommu;
 #endif
+	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
+		put_device(&pdev->dev);
+		return NULL;
+	}
+
+	i915 = devm_drm_dev_alloc(&pdev->dev, &mock_driver,
+				  struct drm_i915_private, drm);
+	if (IS_ERR(i915)) {
+		pr_err("Failed to allocate mock GEM device: err=%ld\n", PTR_ERR(i915));
+		devres_release_group(&pdev->dev, NULL);
+		put_device(&pdev->dev);
+
+		return NULL;
+	}
+
+	pci_set_drvdata(pdev, i915);
+	i915->drm.pdev = pdev;
 
 	dev_pm_domain_set(&pdev->dev, &pm_domain);
 	pm_runtime_enable(&pdev->dev);
@@ -160,16 +159,10 @@
 	if (pm_runtime_enabled(&pdev->dev))
 		WARN_ON(pm_runtime_get_sync(&pdev->dev));
 
-	i915 = (struct drm_i915_private *)(pdev + 1);
-	pci_set_drvdata(pdev, i915);
 
-	err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev);
-	if (err) {
-		pr_err("Failed to initialise mock GEM device: err=%d\n", err);
-		goto put_device;
-	}
-	i915->drm.pdev = pdev;
-	i915->drm.dev_private = i915;
+	i915_params_copy(&i915->params, &i915_modparams);
+
+	intel_runtime_pm_init_early(&i915->runtime_pm);
 
 	/* Using the global GTT may ask questions about KMS users, so prepare */
 	drm_mode_config_init(&i915->drm);
@@ -181,11 +174,15 @@
 		I915_GTT_PAGE_SIZE_64K |
 		I915_GTT_PAGE_SIZE_2M;
 
-	mock_uncore_init(i915);
-	i915_gem_init__mm(i915);
+	mkwrite_device_info(i915)->memory_regions = REGION_SMEM;
+	intel_memory_regions_hw_probe(i915);
 
-	init_waitqueue_head(&i915->gpu_error.wait_queue);
-	init_waitqueue_head(&i915->gpu_error.reset_queue);
+	mock_uncore_init(&i915->uncore, i915);
+
+	i915_gem_init__mm(i915);
+	intel_gt_init_early(&i915->gt, i915);
+	atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */
+	i915->gt.awake = -ENODEV;
 
 	i915->wq = alloc_ordered_workqueue("mock", 0);
 	if (!i915->wq)
@@ -193,79 +190,43 @@
 
 	mock_init_contexts(i915);
 
-	INIT_DELAYED_WORK(&i915->gt.retire_work, mock_retire_work_handler);
-	INIT_DELAYED_WORK(&i915->gt.idle_work, mock_idle_work_handler);
+	mock_init_ggtt(i915, &i915->ggtt);
+	i915->gt.vm = i915_vm_get(&i915->ggtt.vm);
 
-	i915->gt.awake = true;
+	mkwrite_device_info(i915)->platform_engine_mask = BIT(0);
+	i915->gt.info.engine_mask = BIT(0);
 
-	i915->objects = KMEM_CACHE(mock_object, SLAB_HWCACHE_ALIGN);
-	if (!i915->objects)
-		goto err_wq;
-
-	i915->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
-	if (!i915->vmas)
-		goto err_objects;
-
-	i915->requests = KMEM_CACHE(mock_request,
-				    SLAB_HWCACHE_ALIGN |
-				    SLAB_RECLAIM_ACCOUNT |
-				    SLAB_TYPESAFE_BY_RCU);
-	if (!i915->requests)
-		goto err_vmas;
-
-	i915->dependencies = KMEM_CACHE(i915_dependency,
-					SLAB_HWCACHE_ALIGN |
-					SLAB_RECLAIM_ACCOUNT);
-	if (!i915->dependencies)
-		goto err_requests;
-
-	i915->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
-	if (!i915->priorities)
-		goto err_dependencies;
-
-	INIT_LIST_HEAD(&i915->gt.timelines);
-	INIT_LIST_HEAD(&i915->gt.active_rings);
-	INIT_LIST_HEAD(&i915->gt.closed_vma);
-
-	mutex_lock(&i915->drm.struct_mutex);
-
-	mock_init_ggtt(i915);
-
-	mkwrite_device_info(i915)->ring_mask = BIT(0);
-	i915->kernel_context = mock_context(i915, NULL);
-	if (!i915->kernel_context)
+	i915->gt.engine[RCS0] = mock_engine(i915, "mock", RCS0);
+	if (!i915->gt.engine[RCS0])
 		goto err_unlock;
 
-	i915->engine[RCS] = mock_engine(i915, "mock", RCS);
-	if (!i915->engine[RCS])
+	if (mock_engine_init(i915->gt.engine[RCS0]))
 		goto err_context;
 
-	mutex_unlock(&i915->drm.struct_mutex);
+	__clear_bit(I915_WEDGED, &i915->gt.reset.flags);
+	intel_engines_driver_register(i915);
 
-	WARN_ON(i915_gemfs_init(i915));
+	i915->do_release = true;
 
 	return i915;
 
 err_context:
-	i915_gem_contexts_fini(i915);
+	intel_gt_driver_remove(&i915->gt);
 err_unlock:
-	mutex_unlock(&i915->drm.struct_mutex);
-	kmem_cache_destroy(i915->priorities);
-err_dependencies:
-	kmem_cache_destroy(i915->dependencies);
-err_requests:
-	kmem_cache_destroy(i915->requests);
-err_vmas:
-	kmem_cache_destroy(i915->vmas);
-err_objects:
-	kmem_cache_destroy(i915->objects);
-err_wq:
 	destroy_workqueue(i915->wq);
 err_drv:
+	intel_gt_driver_late_release(&i915->gt);
+	intel_memory_regions_driver_release(i915);
 	drm_mode_config_cleanup(&i915->drm);
-	drm_dev_fini(&i915->drm);
-put_device:
-	put_device(&pdev->dev);
-err:
+	mock_destroy_device(i915);
+
 	return NULL;
 }
+
+void mock_destroy_device(struct drm_i915_private *i915)
+{
+	struct device *dev = i915->drm.dev;
+
+	devres_release_group(dev, NULL);
+	put_device(dev);
+}
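
(Annotation, not part of the patch.) With this change the mock device is allocated with devm_drm_dev_alloc() inside a devres group, and teardown moves from drm_dev_fini()/put_device() in the old error and release paths to the new mock_destroy_device() helper added above. A minimal usage sketch under that assumption; the selftest function name below is hypothetical and only illustrates the expected pairing:

/* Hypothetical caller, for illustration only. */
#include "mock_gem_device.h"

static int mock_device_smoke(void)
{
	struct drm_i915_private *i915;

	i915 = mock_gem_device();	/* pdev + devres group + drm device */
	if (!i915)
		return -ENOMEM;

	/* ... exercise the mock GT, engines and GGTT here ... */

	mock_destroy_device(i915);	/* devres_release_group() + put_device() */
	return 0;
}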