forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-31 f70575805708cabdedea7498aaa3f710fde4d920
kernel/drivers/gpu/drm/i915/i915_query.c
@@ -7,15 +7,34 @@
 #include <linux/nospec.h>
 
 #include "i915_drv.h"
+#include "i915_perf.h"
 #include "i915_query.h"
 #include <uapi/drm/i915_drm.h>
 
+static int copy_query_item(void *query_hdr, size_t query_sz,
+			   u32 total_length,
+			   struct drm_i915_query_item *query_item)
+{
+	if (query_item->length == 0)
+		return total_length;
+
+	if (query_item->length < total_length)
+		return -EINVAL;
+
+	if (copy_from_user(query_hdr, u64_to_user_ptr(query_item->data_ptr),
+			   query_sz))
+		return -EFAULT;
+
+	return 0;
+}
+
 static int query_topology_info(struct drm_i915_private *dev_priv,
 			       struct drm_i915_query_item *query_item)
 {
-	const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+	const struct sseu_dev_info *sseu = &dev_priv->gt.info.sseu;
 	struct drm_i915_query_topology_info topo;
 	u32 slice_length, subslice_length, eu_length, total_length;
+	int ret;
 
 	if (query_item->flags != 0)
 		return -EINVAL;
@@ -26,30 +45,18 @@
 	BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));
 
 	slice_length = sizeof(sseu->slice_mask);
-	subslice_length = sseu->max_slices *
-		DIV_ROUND_UP(sseu->max_subslices,
-			     sizeof(sseu->subslice_mask[0]) * BITS_PER_BYTE);
-	eu_length = sseu->max_slices * sseu->max_subslices *
-		DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE);
+	subslice_length = sseu->max_slices * sseu->ss_stride;
+	eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride;
+	total_length = sizeof(topo) + slice_length + subslice_length +
+		       eu_length;
 
-	total_length = sizeof(topo) + slice_length + subslice_length + eu_length;
-
-	if (query_item->length == 0)
-		return total_length;
-
-	if (query_item->length < total_length)
-		return -EINVAL;
-
-	if (copy_from_user(&topo, u64_to_user_ptr(query_item->data_ptr),
-			   sizeof(topo)))
-		return -EFAULT;
+	ret = copy_query_item(&topo, sizeof(topo), total_length,
+			      query_item);
+	if (ret != 0)
+		return ret;
 
 	if (topo.flags != 0)
 		return -EINVAL;
-
-	if (!access_ok(VERIFY_WRITE, u64_to_user_ptr(query_item->data_ptr),
-		       total_length))
-		return -EFAULT;
 
 	memset(&topo, 0, sizeof(topo));
 	topo.max_slices = sseu->max_slices;
@@ -57,25 +64,24 @@
 	topo.max_eus_per_subslice = sseu->max_eus_per_subslice;
 
 	topo.subslice_offset = slice_length;
-	topo.subslice_stride = DIV_ROUND_UP(sseu->max_subslices, BITS_PER_BYTE);
+	topo.subslice_stride = sseu->ss_stride;
 	topo.eu_offset = slice_length + subslice_length;
-	topo.eu_stride =
-		DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE);
+	topo.eu_stride = sseu->eu_stride;
 
-	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr),
+	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
 			   &topo, sizeof(topo)))
 		return -EFAULT;
 
-	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
+	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
 			   &sseu->slice_mask, slice_length))
 		return -EFAULT;
 
-	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
+	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
 					   sizeof(topo) + slice_length),
 			   sseu->subslice_mask, subslice_length))
 		return -EFAULT;
 
-	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
+	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
 					   sizeof(topo) +
 					   slice_length + subslice_length),
 			   sseu->eu_mask, eu_length))
@@ -84,9 +90,340 @@
 	return total_length;
 }
 
+static int
+query_engine_info(struct drm_i915_private *i915,
+		  struct drm_i915_query_item *query_item)
+{
+	struct drm_i915_query_engine_info __user *query_ptr =
+		u64_to_user_ptr(query_item->data_ptr);
+	struct drm_i915_engine_info __user *info_ptr;
+	struct drm_i915_query_engine_info query;
+	struct drm_i915_engine_info info = { };
+	unsigned int num_uabi_engines = 0;
+	struct intel_engine_cs *engine;
+	int len, ret;
+
+	if (query_item->flags)
+		return -EINVAL;
+
+	for_each_uabi_engine(engine, i915)
+		num_uabi_engines++;
+
+	len = struct_size(query_ptr, engines, num_uabi_engines);
+
+	ret = copy_query_item(&query, sizeof(query), len, query_item);
+	if (ret != 0)
+		return ret;
+
+	if (query.num_engines || query.rsvd[0] || query.rsvd[1] ||
+	    query.rsvd[2])
+		return -EINVAL;
+
+	info_ptr = &query_ptr->engines[0];
+
+	for_each_uabi_engine(engine, i915) {
+		info.engine.engine_class = engine->uabi_class;
+		info.engine.engine_instance = engine->uabi_instance;
+		info.capabilities = engine->uabi_capabilities;
+
+		if (copy_to_user(info_ptr, &info, sizeof(info)))
+			return -EFAULT;
+
+		query.num_engines++;
+		info_ptr++;
+	}
+
+	if (copy_to_user(query_ptr, &query, sizeof(query)))
+		return -EFAULT;
+
+	return len;
+}
+
+static int can_copy_perf_config_registers_or_number(u32 user_n_regs,
+						    u64 user_regs_ptr,
+						    u32 kernel_n_regs)
+{
+	/*
+	 * We'll just put the number of registers, and won't copy the
+	 * register.
+	 */
+	if (user_n_regs == 0)
+		return 0;
+
+	if (user_n_regs < kernel_n_regs)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel_regs,
+						u32 kernel_n_regs,
+						u64 user_regs_ptr,
+						u32 *user_n_regs)
+{
+	u32 __user *p = u64_to_user_ptr(user_regs_ptr);
+	u32 r;
+
+	if (*user_n_regs == 0) {
+		*user_n_regs = kernel_n_regs;
+		return 0;
+	}
+
+	*user_n_regs = kernel_n_regs;
+
+	if (!user_write_access_begin(p, 2 * sizeof(u32) * kernel_n_regs))
+		return -EFAULT;
+
+	for (r = 0; r < kernel_n_regs; r++, p += 2) {
+		unsafe_put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
+				p, Efault);
+		unsafe_put_user(kernel_regs[r].value, p + 1, Efault);
+	}
+	user_write_access_end();
+	return 0;
+Efault:
+	user_write_access_end();
+	return -EFAULT;
+}
+
+static int query_perf_config_data(struct drm_i915_private *i915,
+				  struct drm_i915_query_item *query_item,
+				  bool use_uuid)
+{
+	struct drm_i915_query_perf_config __user *user_query_config_ptr =
+		u64_to_user_ptr(query_item->data_ptr);
+	struct drm_i915_perf_oa_config __user *user_config_ptr =
+		u64_to_user_ptr(query_item->data_ptr +
+				sizeof(struct drm_i915_query_perf_config));
+	struct drm_i915_perf_oa_config user_config;
+	struct i915_perf *perf = &i915->perf;
+	struct i915_oa_config *oa_config;
+	char uuid[UUID_STRING_LEN + 1];
+	u64 config_id;
+	u32 flags, total_size;
+	int ret;
+
+	if (!perf->i915)
+		return -ENODEV;
+
+	total_size =
+		sizeof(struct drm_i915_query_perf_config) +
+		sizeof(struct drm_i915_perf_oa_config);
+
+	if (query_item->length == 0)
+		return total_size;
+
+	if (query_item->length < total_size) {
+		DRM_DEBUG("Invalid query config data item size=%u expected=%u\n",
+			  query_item->length, total_size);
+		return -EINVAL;
+	}
+
+	if (get_user(flags, &user_query_config_ptr->flags))
+		return -EFAULT;
+
+	if (flags != 0)
+		return -EINVAL;
+
+	if (use_uuid) {
+		struct i915_oa_config *tmp;
+		int id;
+
+		BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid));
+
+		memset(&uuid, 0, sizeof(uuid));
+		if (copy_from_user(uuid, user_query_config_ptr->uuid,
+				   sizeof(user_query_config_ptr->uuid)))
+			return -EFAULT;
+
+		oa_config = NULL;
+		rcu_read_lock();
+		idr_for_each_entry(&perf->metrics_idr, tmp, id) {
+			if (!strcmp(tmp->uuid, uuid)) {
+				oa_config = i915_oa_config_get(tmp);
+				break;
+			}
+		}
+		rcu_read_unlock();
+	} else {
+		if (get_user(config_id, &user_query_config_ptr->config))
+			return -EFAULT;
+
+		oa_config = i915_perf_get_oa_config(perf, config_id);
+	}
+	if (!oa_config)
+		return -ENOENT;
+
+	if (copy_from_user(&user_config, user_config_ptr, sizeof(user_config))) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	ret = can_copy_perf_config_registers_or_number(user_config.n_boolean_regs,
+						       user_config.boolean_regs_ptr,
+						       oa_config->b_counter_regs_len);
+	if (ret)
+		goto out;
+
+	ret = can_copy_perf_config_registers_or_number(user_config.n_flex_regs,
+						       user_config.flex_regs_ptr,
+						       oa_config->flex_regs_len);
+	if (ret)
+		goto out;
+
+	ret = can_copy_perf_config_registers_or_number(user_config.n_mux_regs,
+						       user_config.mux_regs_ptr,
+						       oa_config->mux_regs_len);
+	if (ret)
+		goto out;
+
+	ret = copy_perf_config_registers_or_number(oa_config->b_counter_regs,
+						   oa_config->b_counter_regs_len,
+						   user_config.boolean_regs_ptr,
+						   &user_config.n_boolean_regs);
+	if (ret)
+		goto out;
+
+	ret = copy_perf_config_registers_or_number(oa_config->flex_regs,
+						   oa_config->flex_regs_len,
+						   user_config.flex_regs_ptr,
+						   &user_config.n_flex_regs);
+	if (ret)
+		goto out;
+
+	ret = copy_perf_config_registers_or_number(oa_config->mux_regs,
+						   oa_config->mux_regs_len,
+						   user_config.mux_regs_ptr,
+						   &user_config.n_mux_regs);
+	if (ret)
+		goto out;
+
+	memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid));
+
+	if (copy_to_user(user_config_ptr, &user_config, sizeof(user_config))) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	ret = total_size;
+
+out:
+	i915_oa_config_put(oa_config);
+	return ret;
+}
+
+static size_t sizeof_perf_config_list(size_t count)
+{
+	return sizeof(struct drm_i915_query_perf_config) + sizeof(u64) * count;
+}
+
+static size_t sizeof_perf_metrics(struct i915_perf *perf)
+{
+	struct i915_oa_config *tmp;
+	size_t i;
+	int id;
+
+	i = 1;
+	rcu_read_lock();
+	idr_for_each_entry(&perf->metrics_idr, tmp, id)
+		i++;
+	rcu_read_unlock();
+
+	return sizeof_perf_config_list(i);
+}
+
+static int query_perf_config_list(struct drm_i915_private *i915,
+				  struct drm_i915_query_item *query_item)
+{
+	struct drm_i915_query_perf_config __user *user_query_config_ptr =
+		u64_to_user_ptr(query_item->data_ptr);
+	struct i915_perf *perf = &i915->perf;
+	u64 *oa_config_ids = NULL;
+	int alloc, n_configs;
+	u32 flags;
+	int ret;
+
+	if (!perf->i915)
+		return -ENODEV;
+
+	if (query_item->length == 0)
+		return sizeof_perf_metrics(perf);
+
+	if (get_user(flags, &user_query_config_ptr->flags))
+		return -EFAULT;
+
+	if (flags != 0)
+		return -EINVAL;
+
+	n_configs = 1;
+	do {
+		struct i915_oa_config *tmp;
+		u64 *ids;
+		int id;
+
+		ids = krealloc(oa_config_ids,
+			       n_configs * sizeof(*oa_config_ids),
+			       GFP_KERNEL);
+		if (!ids)
+			return -ENOMEM;
+
+		alloc = fetch_and_zero(&n_configs);
+
+		ids[n_configs++] = 1ull; /* reserved for test_config */
+		rcu_read_lock();
+		idr_for_each_entry(&perf->metrics_idr, tmp, id) {
+			if (n_configs < alloc)
+				ids[n_configs] = id;
+			n_configs++;
+		}
+		rcu_read_unlock();
+
+		oa_config_ids = ids;
+	} while (n_configs > alloc);
+
+	if (query_item->length < sizeof_perf_config_list(n_configs)) {
+		DRM_DEBUG("Invalid query config list item size=%u expected=%zu\n",
+			  query_item->length,
+			  sizeof_perf_config_list(n_configs));
+		kfree(oa_config_ids);
+		return -EINVAL;
+	}
+
+	if (put_user(n_configs, &user_query_config_ptr->config)) {
+		kfree(oa_config_ids);
+		return -EFAULT;
+	}
+
+	ret = copy_to_user(user_query_config_ptr + 1,
+			   oa_config_ids,
+			   n_configs * sizeof(*oa_config_ids));
+	kfree(oa_config_ids);
+	if (ret)
+		return -EFAULT;
+
+	return sizeof_perf_config_list(n_configs);
+}
+
+static int query_perf_config(struct drm_i915_private *i915,
+			     struct drm_i915_query_item *query_item)
+{
+	switch (query_item->flags) {
+	case DRM_I915_QUERY_PERF_CONFIG_LIST:
+		return query_perf_config_list(i915, query_item);
+	case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID:
+		return query_perf_config_data(i915, query_item, true);
+	case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID:
+		return query_perf_config_data(i915, query_item, false);
+	default:
+		return -EINVAL;
+	}
+}
+
 static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
 					struct drm_i915_query_item *query_item) = {
 	query_topology_info,
+	query_engine_info,
+	query_perf_config,
 };
 
 int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
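
All three query callbacks above share the uAPI contract that copy_query_item() implements: submitting a drm_i915_query_item with length == 0 returns the required buffer size, and a second submission with a correctly sized, zero-initialized buffer gets it filled in. Below is a minimal userspace sketch of that two-call pattern for DRM_I915_QUERY_ENGINE_INFO; the query_engines() helper is hypothetical (not part of this patch) and assumes an already-open i915 DRM fd plus the libdrm-installed <drm/i915_drm.h> uAPI header, with error handling abbreviated.

/* Hypothetical userspace sketch -- not part of this patch. Enumerates
 * engines via the two-call pattern served by copy_query_item() above. */
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static struct drm_i915_query_engine_info *query_engines(int i915_fd)
{
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_ENGINE_INFO,
	};
	struct drm_i915_query query = {
		.num_items = 1,
		.items_ptr = (uintptr_t)&item,
	};
	struct drm_i915_query_engine_info *info;

	/* Pass 1: length == 0 makes the kernel report the required size. */
	if (ioctl(i915_fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0)
		return NULL;

	/* Pass 2: num_engines and rsvd[] must be zero on input (the kernel
	 * rejects anything else with -EINVAL), hence calloc(). */
	info = calloc(1, item.length);
	if (!info)
		return NULL;

	item.data_ptr = (uintptr_t)info;
	if (ioctl(i915_fd, DRM_IOCTL_I915_QUERY, &query) || item.length < 0) {
		free(info);
		return NULL;
	}

	return info; /* caller frees; info->num_engines entries are valid */
}

Note that i915 reports per-item failures by writing a negative error code back into item.length rather than failing the ioctl itself, which is why the sketch checks both the ioctl return value and item.length.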