forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/gpu/drm/i915/i915_utils.h
@@ -25,6 +25,17 @@
 #ifndef __I915_UTILS_H
 #define __I915_UTILS_H
 
+#include <linux/list.h>
+#include <linux/overflow.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+struct drm_i915_private;
+struct timer_list;
+
+#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs"
+
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
 #if 0
@@ -43,18 +54,61 @@
 #define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
			      __stringify(x), (long)(x))
 
-#if defined(GCC_VERSION) && GCC_VERSION >= 70000
-#define add_overflows(A, B) \
-	__builtin_add_overflow_p((A), (B), (typeof((A) + (B)))0)
+void __printf(3, 4)
+__i915_printk(struct drm_i915_private *dev_priv, const char *level,
+	      const char *fmt, ...);
+
+#define i915_report_error(dev_priv, fmt, ...) \
+	__i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+
+int __i915_inject_probe_error(struct drm_i915_private *i915, int err,
+			      const char *func, int line);
+#define i915_inject_probe_error(_i915, _err) \
+	__i915_inject_probe_error((_i915), (_err), __func__, __LINE__)
+bool i915_error_injected(void);
+
 #else
-#define add_overflows(A, B) ({ \
+
+#define i915_inject_probe_error(i915, e) ({ BUILD_BUG_ON_INVALID(i915); 0; })
+#define i915_error_injected() false
+
+#endif
+
+#define i915_inject_probe_failure(i915) i915_inject_probe_error((i915), -ENODEV)
+
+#define i915_probe_error(i915, fmt, ...) \
+	__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
+		      fmt, ##__VA_ARGS__)
+
+#if defined(GCC_VERSION) && GCC_VERSION >= 70000
+#define add_overflows_t(T, A, B) \
+	__builtin_add_overflow_p((A), (B), (T)0)
+#else
+#define add_overflows_t(T, A, B) ({ \
 	typeof(A) a = (A); \
 	typeof(B) b = (B); \
-	a + b < a; \
+	(T)(a + b) < a; \
 })
 #endif
 
+#define add_overflows(A, B) \
+	add_overflows_t(typeof((A) + (B)), (A), (B))
+
 #define range_overflows(start, size, max) ({ \
+	typeof(start) start__ = (start); \
+	typeof(size) size__ = (size); \
+	typeof(max) max__ = (max); \
+	(void)(&start__ == &size__); \
+	(void)(&start__ == &max__); \
+	start__ >= max__ || size__ > max__ - start__; \
+})
+
+#define range_overflows_t(type, start, size, max) \
+	range_overflows((type)(start), (type)(size), (type)(max))
+
+#define range_overflows_end(start, size, max) ({ \
 	typeof(start) start__ = (start); \
 	typeof(size) size__ = (size); \
 	typeof(max) max__ = (max); \
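
Usage sketch (not part of the patch): the overflow helpers introduced above replace open-coded "a + b < a" checks. The caller below is hypothetical and only illustrates how add_overflows_t() and range_overflows_t() are meant to be invoked.

/* Hypothetical caller, for illustration only. */
static int example_validate_range(u32 offset, u32 length, u32 object_size)
{
	/* True if offset + length wraps a u32. */
	if (add_overflows_t(u32, offset, length))
		return -EINVAL;

	/* True if [offset, offset + length) does not fit inside object_size. */
	if (range_overflows_t(u32, offset, length, object_size))
		return -EINVAL;

	return 0;
}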
@@ -63,12 +117,45 @@
 	start__ > max__ || size__ > max__ - start__; \
 })
 
-#define range_overflows_t(type, start, size, max) \
-	range_overflows((type)(start), (type)(size), (type)(max))
+#define range_overflows_end_t(type, start, size, max) \
+	range_overflows_end((type)(start), (type)(size), (type)(max))
 
 /* Note we don't consider signbits :| */
 #define overflows_type(x, T) \
-	(sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
+	(sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))
+
+static inline bool
+__check_struct_size(size_t base, size_t arr, size_t count, size_t *size)
+{
+	size_t sz;
+
+	if (check_mul_overflow(count, arr, &sz))
+		return false;
+
+	if (check_add_overflow(sz, base, &sz))
+		return false;
+
+	*size = sz;
+	return true;
+}
+
+/**
+ * check_struct_size() - Calculate size of structure with trailing array.
+ * @p: Pointer to the structure.
+ * @member: Name of the array member.
+ * @n: Number of elements in the array.
+ * @sz: Total size of structure and array
+ *
+ * Calculates size of memory needed for structure @p followed by an
+ * array of @n @member elements, like struct_size() but reports
+ * whether it overflowed, and the resultant size in @sz
+ *
+ * Return: false if the calculation overflowed.
+ */
+#define check_struct_size(p, member, n, sz) \
+	likely(__check_struct_size(sizeof(*(p)), \
+				   sizeof(*(p)->member) + __must_be_array((p)->member), \
+				   n, sz))
 
 #define ptr_mask_bits(ptr, n) ({ \
 	unsigned long __v = (unsigned long)(ptr); \
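
Usage sketch (not part of the patch): check_struct_size() is meant for sizing an allocation that ends in a flexible array. The structure and allocation below are hypothetical and only show the calling convention.

struct example_obj {			/* hypothetical structure */
	unsigned int count;
	u64 entries[];			/* trailing flexible array */
};

static struct example_obj *example_obj_alloc(unsigned int n)
{
	struct example_obj *obj;
	size_t size;

	/* Fails cleanly instead of wrapping when n is untrusted. */
	if (!check_struct_size(obj, entries, n, &size))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);	/* needs <linux/slab.h> */
	if (obj)
		obj->count = n;

	return obj;
}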
@@ -89,10 +176,22 @@
 	((typeof(ptr))((unsigned long)(ptr) | __bits)); \
 })
 
+#define ptr_dec(ptr) ({ \
+	unsigned long __v = (unsigned long)(ptr); \
+	(typeof(ptr))(__v - 1); \
+})
+
+#define ptr_inc(ptr) ({ \
+	unsigned long __v = (unsigned long)(ptr); \
+	(typeof(ptr))(__v + 1); \
+})
+
 #define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT)
 #define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT)
 #define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT)
 #define page_unpack_bits(ptr, bits) ptr_unpack_bits(ptr, bits, PAGE_SHIFT)
+
+#define struct_member(T, member) (((T *)0)->member)
 
 #define ptr_offset(ptr, member) offsetof(typeof(*(ptr)), member)
 
@@ -100,6 +199,37 @@
 	typeof(*ptr) __T = *(ptr); \
 	*(ptr) = (typeof(*ptr))0; \
 	__T; \
+})
+
+/*
+ * container_of_user: Extract the superclass from a pointer to a member.
+ *
+ * Exactly like container_of() with the exception that it plays nicely
+ * with sparse for __user @ptr.
+ */
+#define container_of_user(ptr, type, member) ({ \
+	void __user *__mptr = (void __user *)(ptr); \
+	BUILD_BUG_ON_MSG(!__same_type(*(ptr), struct_member(type, member)) && \
+			 !__same_type(*(ptr), void), \
+			 "pointer type mismatch in container_of()"); \
+	((type __user *)(__mptr - offsetof(type, member))); })
+
+/*
+ * check_user_mbz: Check that a user value exists and is zero
+ *
+ * Frequently in our uABI we reserve space for future extensions, and
+ * two ensure that userspace is prepared we enforce that space must
+ * be zero. (Then any future extension can safely assume a default value
+ * of 0.)
+ *
+ * check_user_mbz() combines checking that the user pointer is accessible
+ * and that the contained value is zero.
+ *
+ * Returns: -EFAULT if not accessible, -EINVAL if !zero, or 0 on success.
+ */
+#define check_user_mbz(U) ({ \
+	typeof(*(U)) mbz__; \
+	get_user(mbz__, (U)) ? -EFAULT : mbz__ ? -EINVAL : 0; \
 })
 
 static inline u64 ptr_to_u64(const void *ptr)
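
Usage sketch (not part of the patch): check_user_mbz() is typically applied to reserved must-be-zero fields of a user-supplied ioctl argument. The uAPI structure below is hypothetical.

struct example_ioctl_arg {		/* hypothetical uAPI structure */
	__u32 handle;
	__u32 rsvd;			/* reserved, must be zero */
};

static int example_validate(struct example_ioctl_arg __user *arg)
{
	/* -EFAULT if the pointer faults, -EINVAL if rsvd is non-zero. */
	return check_user_mbz(&arg->rsvd);
}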
@@ -118,12 +248,9 @@
 	__idx; \
 })
 
-#include <linux/list.h>
-
-static inline int list_is_first(const struct list_head *list,
-				const struct list_head *head)
+static inline bool is_power_of_2_u64(u64 n)
 {
-	return head->next == list;
+	return (n != 0 && ((n & (n - 1)) == 0));
 }
 
 static inline void __list_del_many(struct list_head *head,
@@ -133,18 +260,153 @@
 	WRITE_ONCE(head->next, first);
 }
 
-/*
- * Wait until the work is finally complete, even if it tries to postpone
- * by requeueing itself. Note, that if the worker never cancels itself,
- * we will spin forever.
- */
-static inline void drain_delayed_work(struct delayed_work *dw)
+static inline int list_is_last_rcu(const struct list_head *list,
+				   const struct list_head *head)
 {
-	do {
-		while (flush_delayed_work(dw))
-			;
-	} while (delayed_work_pending(dw));
+	return READ_ONCE(list->next) == head;
 }
+
+static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
+{
+	unsigned long j = msecs_to_jiffies(m);
+
+	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
+}
+
+/*
+ * If you need to wait X milliseconds between events A and B, but event B
+ * doesn't happen exactly after event A, you record the timestamp (jiffies) of
+ * when event A happened, then just before event B you call this function and
+ * pass the timestamp as the first argument, and X as the second argument.
+ */
+static inline void
+wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
+{
+	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
+
+	/*
+	 * Don't re-read the value of "jiffies" every time since it may change
+	 * behind our back and break the math.
+	 */
+	tmp_jiffies = jiffies;
+	target_jiffies = timestamp_jiffies +
+			 msecs_to_jiffies_timeout(to_wait_ms);
+
+	if (time_after(target_jiffies, tmp_jiffies)) {
+		remaining_jiffies = target_jiffies - tmp_jiffies;
+		while (remaining_jiffies)
+			remaining_jiffies =
+				schedule_timeout_uninterruptible(remaining_jiffies);
+	}
+}
+
+/**
+ * __wait_for - magic wait macro
+ *
+ * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
+ * important that we check the condition again after having timed out, since the
+ * timeout could be due to preemption or similar and we've never had a chance to
+ * check the condition before the timeout.
+ */
+#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
+	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
+	long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
+	int ret__; \
+	might_sleep(); \
+	for (;;) { \
+		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
+		OP; \
+		/* Guarantee COND check prior to timeout */ \
+		barrier(); \
+		if (COND) { \
+			ret__ = 0; \
+			break; \
+		} \
+		if (expired__) { \
+			ret__ = -ETIMEDOUT; \
+			break; \
+		} \
+		usleep_range(wait__, wait__ * 2); \
+		if (wait__ < (Wmax)) \
+			wait__ <<= 1; \
+	} \
+	ret__; \
+})
+
+#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
+						   (Wmax))
+#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
+
+/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
+#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
+# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
+#else
+# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
+#endif
+
+#define _wait_for_atomic(COND, US, ATOMIC) \
+({ \
+	int cpu, ret, timeout = (US) * 1000; \
+	u64 base; \
+	_WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
+	if (!(ATOMIC)) { \
+		preempt_disable(); \
+		cpu = smp_processor_id(); \
+	} \
+	base = local_clock(); \
+	for (;;) { \
+		u64 now = local_clock(); \
+		if (!(ATOMIC)) \
+			preempt_enable(); \
+		/* Guarantee COND check prior to timeout */ \
+		barrier(); \
+		if (COND) { \
+			ret = 0; \
+			break; \
+		} \
+		if (now - base >= timeout) { \
+			ret = -ETIMEDOUT; \
+			break; \
+		} \
+		cpu_relax(); \
+		if (!(ATOMIC)) { \
+			preempt_disable(); \
+			if (unlikely(cpu != smp_processor_id())) { \
+				timeout -= now - base; \
+				cpu = smp_processor_id(); \
+				base = local_clock(); \
+			} \
+		} \
+	} \
+	ret; \
+})
+
+#define wait_for_us(COND, US) \
+({ \
+	int ret__; \
+	BUILD_BUG_ON(!__builtin_constant_p(US)); \
+	if ((US) > 10) \
+		ret__ = _wait_for((COND), (US), 10, 10); \
+	else \
+		ret__ = _wait_for_atomic((COND), (US), 0); \
+	ret__; \
+})
+
+#define wait_for_atomic_us(COND, US) \
+({ \
+	BUILD_BUG_ON(!__builtin_constant_p(US)); \
+	BUILD_BUG_ON((US) > 50000); \
+	_wait_for_atomic((COND), (US), 1); \
+})
+
+#define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000)
+
+#define KHz(x) (1000 * (x))
+#define MHz(x) KHz(1000 * (x))
+
+#define KBps(x) (1000 * (x))
+#define MBps(x) KBps(1000 * (x))
+#define GBps(x) ((u64)1000 * MBps((x)))
 
 static inline const char *yesno(bool v)
 {
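
Usage sketch (not part of the patch): the wait_for() family wraps the poll/check/timeout pattern described above. The register and status bit below are hypothetical; only the macros come from this header.

#define EXAMPLE_READY_BIT	BIT(0)		/* hypothetical status bit */

/* Sleeping poll, up to 10 ms; returns 0 or -ETIMEDOUT. */
static int example_wait_ready(void __iomem *status)
{
	return wait_for(readl(status) & EXAMPLE_READY_BIT, 10);
}

/* Busy-wait variant for atomic context, up to 20 us. */
static int example_wait_ready_atomic(void __iomem *status)
{
	return wait_for_atomic_us(readl(status) & EXAMPLE_READY_BIT, 20);
}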
@@ -161,4 +423,37 @@
 	return v ? "enabled" : "disabled";
 }
 
+void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint);
+static inline void __add_taint_for_CI(unsigned int taint)
+{
+	/*
+	 * The system is "ok", just about surviving for the user, but
+	 * CI results are now unreliable as the HW is very suspect.
+	 * CI checks the taint state after every test and will reboot
+	 * the machine if the kernel is tainted.
+	 */
+	add_taint(taint, LOCKDEP_STILL_OK);
+}
+
+void cancel_timer(struct timer_list *t);
+void set_timer_ms(struct timer_list *t, unsigned long timeout);
+
+static inline bool timer_expired(const struct timer_list *t)
+{
+	return READ_ONCE(t->expires) && !timer_pending(t);
+}
+
+/*
+ * This is a lookalike for IS_ENABLED() that takes a kconfig value,
+ * e.g. CONFIG_DRM_I915_SPIN_REQUEST, and evaluates whether it is non-zero
+ * i.e. whether the configuration is active. Wrapping up the config inside
+ * a boolean context prevents clang and smatch from complaining about potential
+ * issues in confusing logical-&& with bitwise-& for constants.
+ *
+ * Sadly IS_ENABLED() itself does not work with kconfig values.
+ *
+ * Returns 0 if @config is 0, 1 if set to any value.
+ */
+#define IS_ACTIVE(config) ((config) != 0)
+
 #endif /* !__I915_UTILS_H */
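
Usage sketch (not part of the patch): IS_ACTIVE() is meant for integer kconfig values rather than bool/tristate options, and set_timer_ms()/timer_expired() pair naturally with such a value. The watchdog below is hypothetical, assuming the integer option CONFIG_DRM_I915_HEARTBEAT_INTERVAL (milliseconds).

/* Hypothetical watchdog, for illustration only: arm the timer only when
 * the integer kconfig timeout is non-zero, then poll for expiry. */
static void example_arm_watchdog(struct timer_list *t)
{
	if (IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
		set_timer_ms(t, CONFIG_DRM_I915_HEARTBEAT_INTERVAL);
}

static bool example_watchdog_fired(const struct timer_list *t)
{
	return timer_expired(t);
}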