/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_UTILS_H
#define __I915_UTILS_H

#include <linux/list.h>
#include <linux/overflow.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct drm_i915_private;

#undef WARN_ON
/* Many gcc versions seem to not see through this and fall over :( */
#if 0
#define WARN_ON(x) ({ \
        bool __i915_warn_cond = (x); \
        if (__builtin_constant_p(__i915_warn_cond)) \
                BUILD_BUG_ON(__i915_warn_cond); \
        WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
#endif

#undef WARN_ON_ONCE
#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")

#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
                             __stringify(x), (long)(x))

void __printf(3, 4)
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
              const char *fmt, ...);

#define i915_report_error(dev_priv, fmt, ...) \
        __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)

int __i915_inject_load_error(struct drm_i915_private *i915, int err,
                             const char *func, int line);
#define i915_inject_load_error(_i915, _err) \
        __i915_inject_load_error((_i915), (_err), __func__, __LINE__)
bool i915_error_injected(void);

#else

#define i915_inject_load_error(_i915, _err) 0
#define i915_error_injected() false

#endif

#define i915_inject_probe_failure(i915) i915_inject_load_error((i915), -ENODEV)

#define i915_probe_error(i915, fmt, ...) \
        __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
                      fmt, ##__VA_ARGS__)

#if defined(GCC_VERSION) && GCC_VERSION >= 70000
#define add_overflows_t(T, A, B) \
        __builtin_add_overflow_p((A), (B), (T)0)
#else
#define add_overflows_t(T, A, B) ({ \
        typeof(A) a = (A); \
        typeof(B) b = (B); \
        (T)(a + b) < a; \
})
#endif

#define add_overflows(A, B) \
        add_overflows_t(typeof((A) + (B)), (A), (B))
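
/*
 * Example (an illustrative sketch; @offset and @length stand in for
 * hypothetical caller-supplied values, not names from this header):
 *
 *      if (add_overflows(offset, length))
 *              return -EINVAL;
 */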

#define range_overflows(start, size, max) ({ \
        typeof(start) start__ = (start); \
        typeof(size) size__ = (size); \
        typeof(max) max__ = (max); \
        (void)(&start__ == &size__); \
        (void)(&start__ == &max__); \
        start__ > max__ || size__ > max__ - start__; \
})

#define range_overflows_t(type, start, size, max) \
        range_overflows((type)(start), (type)(size), (type)(max))

/* Note we don't consider signbits :| */
#define overflows_type(x, T) \
        (sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))

static inline bool
__check_struct_size(size_t base, size_t arr, size_t count, size_t *size)
{
        size_t sz;

        if (check_mul_overflow(count, arr, &sz))
                return false;

        if (check_add_overflow(sz, base, &sz))
                return false;

        *size = sz;
        return true;
}

/**
 * check_struct_size() - Calculate size of structure with trailing array.
 * @p: Pointer to the structure.
 * @member: Name of the array member.
 * @n: Number of elements in the array.
 * @sz: Total size of structure and array.
 *
 * Calculates size of memory needed for structure @p followed by an
 * array of @n @member elements, like struct_size() but reports
 * whether it overflowed, and the resultant size in @sz.
 *
 * Return: false if the calculation overflowed.
 */
#define check_struct_size(p, member, n, sz) \
        likely(__check_struct_size(sizeof(*(p)), \
                                   sizeof(*(p)->member) + __must_be_array((p)->member), \
                                   n, sz))
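
/*
 * Example (an illustrative sketch; struct sketch_array, @arr and @count
 * are made-up names, not part of this header):
 *
 *      struct sketch_array {
 *              unsigned int nfence;
 *              struct dma_fence *fences[];
 *      };
 *      struct sketch_array *arr;
 *      size_t size;
 *
 *      if (!check_struct_size(arr, fences, count, &size))
 *              return -EINVAL;
 *      arr = kmalloc(size, GFP_KERNEL);
 */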

#define ptr_mask_bits(ptr, n) ({ \
        unsigned long __v = (unsigned long)(ptr); \
        (typeof(ptr))(__v & -BIT(n)); \
})

#define ptr_unmask_bits(ptr, n) ((unsigned long)(ptr) & (BIT(n) - 1))

#define ptr_unpack_bits(ptr, bits, n) ({ \
        unsigned long __v = (unsigned long)(ptr); \
        *(bits) = __v & (BIT(n) - 1); \
        (typeof(ptr))(__v & -BIT(n)); \
})

#define ptr_pack_bits(ptr, bits, n) ({ \
        unsigned long __bits = (bits); \
        GEM_BUG_ON(__bits & -BIT(n)); \
        ((typeof(ptr))((unsigned long)(ptr) | __bits)); \
})

#define ptr_dec(ptr) ({ \
        unsigned long __v = (unsigned long)(ptr); \
        (typeof(ptr))(__v - 1); \
})

#define ptr_inc(ptr) ({ \
        unsigned long __v = (unsigned long)(ptr); \
        (typeof(ptr))(__v + 1); \
})

#define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT)
#define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT)
#define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT)
#define page_unpack_bits(ptr, bits) ptr_unpack_bits(ptr, bits, PAGE_SHIFT)
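
/*
 * Example (an illustrative sketch; @obj and @flags are hypothetical):
 * stash two flag bits in the low bits of a 4-byte-aligned pointer with
 * ptr_pack_bits(), and recover both again with ptr_unpack_bits().
 *
 *      unsigned long flags = 0x2;
 *      void *packed = ptr_pack_bits(obj, flags, 2);
 *      ...
 *      obj = ptr_unpack_bits(packed, &flags, 2);
 */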

#define struct_member(T, member) (((T *)0)->member)

#define ptr_offset(ptr, member) offsetof(typeof(*(ptr)), member)

#define fetch_and_zero(ptr) ({ \
        typeof(*ptr) __T = *(ptr); \
        *(ptr) = (typeof(*ptr))0; \
        __T; \
})
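
/*
 * Example (an illustrative sketch; @cache->obj is a hypothetical field):
 * take ownership of a pointer and clear the source in one expression,
 * so the old location cannot be used again by mistake.
 *
 *      struct drm_i915_gem_object *obj = fetch_and_zero(&cache->obj);
 */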

/*
 * container_of_user: Extract the superclass from a pointer to a member.
 *
 * Exactly like container_of() with the exception that it plays nicely
 * with sparse for __user @ptr.
 */
#define container_of_user(ptr, type, member) ({ \
        void __user *__mptr = (void __user *)(ptr); \
        BUILD_BUG_ON_MSG(!__same_type(*(ptr), struct_member(type, member)) && \
                         !__same_type(*(ptr), void), \
                         "pointer type mismatch in container_of()"); \
        ((type __user *)(__mptr - offsetof(type, member))); })
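
/*
 * Example (an illustrative sketch; struct sketch_ext is a made-up
 * extension struct, not part of this header):
 *
 *      struct sketch_ext {
 *              struct i915_user_extension base;
 *              __u32 value;
 *      };
 *      struct i915_user_extension __user *ext = ...;
 *      struct sketch_ext __user *ux =
 *              container_of_user(ext, struct sketch_ext, base);
 */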

/*
 * check_user_mbz: Check that a user value exists and is zero
 *
 * Frequently in our uABI we reserve space for future extensions, and
 * to ensure that userspace is prepared we enforce that space must
 * be zero. (Then any future extension can safely assume a default value
 * of 0.)
 *
 * check_user_mbz() combines checking that the user pointer is accessible
 * and that the contained value is zero.
 *
 * Returns: -EFAULT if not accessible, -EINVAL if !zero, or 0 on success.
 */
#define check_user_mbz(U) ({ \
        typeof(*(U)) mbz__; \
        get_user(mbz__, (U)) ? -EFAULT : mbz__ ? -EINVAL : 0; \
})
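
/*
 * Example (an illustrative sketch; @arg is a hypothetical __user pointer
 * to a uAPI struct with reserved fields that must be zero):
 *
 *      int err;
 *
 *      err = check_user_mbz(&arg->rsvd0);
 *      if (err)
 *              return err;
 *      err = check_user_mbz(&arg->rsvd1);
 *      if (err)
 *              return err;
 */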

static inline u64 ptr_to_u64(const void *ptr)
{
        return (uintptr_t)ptr;
}

#define u64_to_ptr(T, x) ({ \
        typecheck(u64, x); \
        (T *)(uintptr_t)(x); \
})

#define __mask_next_bit(mask) ({ \
        int __idx = ffs(mask) - 1; \
        mask &= ~BIT(__idx); \
        __idx; \
})
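
/*
 * Example (an illustrative sketch; do_something() is a placeholder):
 * consume each set bit of a mask in turn, lowest first. Note that
 * __mask_next_bit() modifies @mask in place, so the loop below yields
 * bit 1 and then bit 3 before the mask empties.
 *
 *      unsigned int mask = 0b1010;
 *
 *      while (mask) {
 *              int bit = __mask_next_bit(mask);
 *              do_something(bit);
 *      }
 */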

static inline void __list_del_many(struct list_head *head,
                                   struct list_head *first)
{
        first->prev = head;
        WRITE_ONCE(head->next, first);
}

/*
 * Wait until the work is finally complete, even if it tries to postpone
 * by requeueing itself. Note that if the worker never cancels itself,
 * we will spin forever.
 */
static inline void drain_delayed_work(struct delayed_work *dw)
{
        do {
                while (flush_delayed_work(dw))
                        ;
        } while (delayed_work_pending(dw));
}

static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
        unsigned long j = msecs_to_jiffies(m);

        return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

/*
 * If you need to wait X milliseconds between events A and B, but event B
 * doesn't happen exactly after event A, you record the timestamp (jiffies) of
 * when event A happened, then just before event B you call this function and
 * pass the timestamp as the first argument, and X as the second argument.
 */
static inline void
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
{
        unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;

        /*
         * Don't re-read the value of "jiffies" every time since it may change
         * behind our back and break the math.
         */
        tmp_jiffies = jiffies;
        target_jiffies = timestamp_jiffies +
                         msecs_to_jiffies_timeout(to_wait_ms);

        if (time_after(target_jiffies, tmp_jiffies)) {
                remaining_jiffies = target_jiffies - tmp_jiffies;
                while (remaining_jiffies)
                        remaining_jiffies =
                            schedule_timeout_uninterruptible(remaining_jiffies);
        }
}
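
/*
 * Example (an illustrative sketch; @panel_off_jiffies, panel_on() and
 * the 100ms delay are made-up): guarantee that at least 100ms pass
 * between switching a panel off (event A) and back on (event B).
 *
 *      panel_off_jiffies = jiffies;    (record event A)
 *      ...
 *      wait_remaining_ms_from_jiffies(panel_off_jiffies, 100);
 *      panel_on();                     (event B)
 */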

/**
 * __wait_for - magic wait macro
 *
 * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
 * important that we check the condition again after having timed out, since the
 * timeout could be due to preemption or similar and we've never had a chance to
 * check the condition before the timeout.
 */
#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
        const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
        long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
        int ret__; \
        might_sleep(); \
        for (;;) { \
                const bool expired__ = ktime_after(ktime_get_raw(), end__); \
                OP; \
                /* Guarantee COND check prior to timeout */ \
                barrier(); \
                if (COND) { \
                        ret__ = 0; \
                        break; \
                } \
                if (expired__) { \
                        ret__ = -ETIMEDOUT; \
                        break; \
                } \
                usleep_range(wait__, wait__ * 2); \
                if (wait__ < (Wmax)) \
                        wait__ <<= 1; \
        } \
        ret__; \
})

#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
                                                   (Wmax))
#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)

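/*
 * Example (an illustrative sketch; @regs, STATUS and STATUS_READY are
 * hypothetical): sleep-wait for a status bit, giving up after 10ms.
 * wait_for() may sleep, so it must not be used in atomic context; on
 * timeout it evaluates to -ETIMEDOUT.
 *
 *      int err;
 *
 *      err = wait_for(readl(regs + STATUS) & STATUS_READY, 10);
 *      if (err)
 *              return err;
 */
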
/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
#else
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
#endif

#define _wait_for_atomic(COND, US, ATOMIC) \
({ \
        int cpu, ret, timeout = (US) * 1000; \
        u64 base; \
        _WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
        if (!(ATOMIC)) { \
                preempt_disable(); \
                cpu = smp_processor_id(); \
        } \
        base = local_clock(); \
        for (;;) { \
                u64 now = local_clock(); \
                if (!(ATOMIC)) \
                        preempt_enable(); \
                /* Guarantee COND check prior to timeout */ \
                barrier(); \
                if (COND) { \
                        ret = 0; \
                        break; \
                } \
                if (now - base >= timeout) { \
                        ret = -ETIMEDOUT; \
                        break; \
                } \
                cpu_relax(); \
                if (!(ATOMIC)) { \
                        preempt_disable(); \
                        if (unlikely(cpu != smp_processor_id())) { \
                                timeout -= now - base; \
                                cpu = smp_processor_id(); \
                                base = local_clock(); \
                        } \
                } \
        } \
        ret; \
})

#define wait_for_us(COND, US) \
({ \
        int ret__; \
        BUILD_BUG_ON(!__builtin_constant_p(US)); \
        if ((US) > 10) \
                ret__ = _wait_for((COND), (US), 10, 10); \
        else \
                ret__ = _wait_for_atomic((COND), (US), 0); \
        ret__; \
})

#define wait_for_atomic_us(COND, US) \
({ \
        BUILD_BUG_ON(!__builtin_constant_p(US)); \
        BUILD_BUG_ON((US) > 50000); \
        _wait_for_atomic((COND), (US), 1); \
})

#define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000)
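
/*
 * Example (an illustrative sketch; @lock, @regs, ACK_REG and ACK are
 * hypothetical): unlike wait_for(), the atomic variants busy-wait
 * without sleeping, so they are safe under a spinlock; the timeout must
 * be a compile-time constant of at most 50ms.
 *
 *      spin_lock_irq(&lock);
 *      err = wait_for_atomic_us(readl(regs + ACK_REG) & ACK, 100);
 *      spin_unlock_irq(&lock);
 */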

#define KHz(x) (1000 * (x))
#define MHz(x) KHz(1000 * (x))

#define KBps(x) (1000 * (x))
#define MBps(x) KBps(1000 * (x))
#define GBps(x) ((u64)1000 * MBps((x)))

static inline const char *yesno(bool v)
{
        return v ? "yes" : "no";
}

static inline const char *onoff(bool v)
{
        return v ? "on" : "off";
}

static inline const char *enableddisabled(bool v)
{
        return v ? "enabled" : "disabled";
}

static inline void add_taint_for_CI(unsigned int taint)
{
        /*
         * The system is "ok", just about surviving for the user, but
         * CI results are now unreliable as the HW is very suspect.
         * CI checks the taint state after every test and will reboot
         * the machine if the kernel is tainted.
         */
        add_taint(taint, LOCKDEP_STILL_OK);
}

#endif /* !__I915_UTILS_H */