Lines matching full:ref in drivers/gpu/drm/i915/i915_active.c

15 #define BKL(ref) (&(ref)->i915->drm.struct_mutex)  argument
31 struct i915_active *ref; member
78 struct i915_active *ref = addr; in active_debug_hint() local
80 return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref; in active_debug_hint()
88 static void debug_active_init(struct i915_active *ref) in debug_active_init() argument
90 debug_object_init(ref, &active_debug_desc); in debug_active_init()
93 static void debug_active_activate(struct i915_active *ref) in debug_active_activate() argument
95 debug_object_activate(ref, &active_debug_desc); in debug_active_activate()
98 static void debug_active_deactivate(struct i915_active *ref) in debug_active_deactivate() argument
100 debug_object_deactivate(ref, &active_debug_desc); in debug_active_deactivate()
103 static void debug_active_fini(struct i915_active *ref) in debug_active_fini() argument
105 debug_object_free(ref, &active_debug_desc); in debug_active_fini()
108 static void debug_active_assert(struct i915_active *ref) in debug_active_assert() argument
110 debug_object_assert_init(ref, &active_debug_desc); in debug_active_assert()
115 static inline void debug_active_init(struct i915_active *ref) { } in debug_active_init() argument
116 static inline void debug_active_activate(struct i915_active *ref) { } in debug_active_activate() argument
117 static inline void debug_active_deactivate(struct i915_active *ref) { } in debug_active_deactivate() argument
118 static inline void debug_active_fini(struct i915_active *ref) { } in debug_active_fini() argument
119 static inline void debug_active_assert(struct i915_active *ref) { } in debug_active_assert() argument
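
The debug_active_*() helpers above hand the i915_active object to the kernel's debug-object tracking, and the empty static inline stubs that follow are the fallback used when that tracking is configured out, so callers never carry their own #ifdefs. A minimal userspace sketch of the same compile-out pattern, with MY_DEBUG standing in (purely hypothetically) for the real config switch:

/* Sketch of the compile-out pattern behind the debug_active_*() hooks.
 * MY_DEBUG is a hypothetical stand-in for the real kernel config option. */
#include <stdio.h>

struct my_active { int dummy; };

#ifdef MY_DEBUG
static void debug_init(struct my_active *ref)
{
	printf("now tracking %p\n", (void *)ref);	/* real tracking would go here */
}
#else
static inline void debug_init(struct my_active *ref) { }	/* compiles to nothing */
#endif

int main(void)
{
	struct my_active a;
	debug_init(&a);		/* the call site never carries an #ifdef */
	return 0;
}
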
124 __active_retire(struct i915_active *ref) in __active_retire() argument
130 lockdep_assert_held(&ref->mutex); in __active_retire()
133 if (atomic_dec_and_test(&ref->count)) { in __active_retire()
134 debug_active_deactivate(ref); in __active_retire()
135 root = ref->tree; in __active_retire()
136 ref->tree = RB_ROOT; in __active_retire()
137 ref->cache = NULL; in __active_retire()
141 mutex_unlock(&ref->mutex); in __active_retire()
151 if (ref->retire) in __active_retire()
152 ref->retire(ref); in __active_retire()
156 active_retire(struct i915_active *ref) in active_retire() argument
158 GEM_BUG_ON(!atomic_read(&ref->count)); in active_retire()
159 if (atomic_add_unless(&ref->count, -1, 1)) in active_retire()
163 mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING); in active_retire()
164 __active_retire(ref); in active_retire()
170 active_retire(node_from_active(base)->ref); in node_retire()
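
active_retire() is the reference drop: the fast path refuses to take ref->count to zero without the lock, and only the final drop acquires ref->mutex, lets __active_retire() reset the tracking tree, and then runs the ->retire() callback outside the lock. A standalone sketch of that last-reference teardown, with C11 atomics and pthreads standing in for the kernel primitives:

/* Standalone sketch of the "last reference does the teardown" retire path.
 * C11 atomics and pthreads stand in for the kernel's atomic_t and mutex. */
#include <pthread.h>
#include <stdatomic.h>

struct my_active {
	atomic_int count;
	pthread_mutex_t mutex;
	void (*retire)(struct my_active *ref);
};

static void my_active_retire(struct my_active *ref)
{
	int old = atomic_load(&ref->count);

	/* Fast path: drop a reference as long as it is not the last one. */
	while (old > 1)
		if (atomic_compare_exchange_weak(&ref->count, &old, old - 1))
			return;

	/* Slow path: serialise the final drop against concurrent users. */
	pthread_mutex_lock(&ref->mutex);
	if (atomic_fetch_sub(&ref->count, 1) != 1) {
		pthread_mutex_unlock(&ref->mutex);
		return;			/* raced with another reference holder */
	}
	/* ...reset the per-object tracking (the rbtree in the real code)... */
	pthread_mutex_unlock(&ref->mutex);

	if (ref->retire)
		ref->retire(ref);	/* final callback runs without the lock held */
}
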
174 active_instance(struct i915_active *ref, struct intel_timeline *tl) in active_instance() argument
187 node = READ_ONCE(ref->cache); in active_instance()
196 mutex_lock(&ref->mutex); in active_instance()
197 GEM_BUG_ON(i915_active_is_idle(ref)); in active_instance()
200 p = &ref->tree.rb_node; in active_instance()
218 node->ref = ref; in active_instance()
222 rb_insert_color(&node->node, &ref->tree); in active_instance()
225 ref->cache = node; in active_instance()
226 mutex_unlock(&ref->mutex); in active_instance()
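
active_instance() resolves the per-timeline tracking node: it first peeks at ref->cache without any lock, and only on a miss takes ref->mutex to search the rbtree and insert a new node keyed by the timeline. A simplified sketch of that "cached hit, else locked lookup-or-insert" shape, with a singly linked list standing in for the rbtree:

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

struct my_node {
	struct my_node *next;
	uint64_t key;			/* the timeline index in the real code */
};

struct my_active {
	pthread_mutex_t mutex;
	struct my_node *nodes;		/* stands in for ref->tree */
	struct my_node *cache;		/* last node handed out */
};

static struct my_node *my_instance(struct my_active *ref, uint64_t key)
{
	struct my_node *node = ref->cache;	/* unlocked peek (READ_ONCE in the real code) */

	if (node && node->key == key)
		return node;

	pthread_mutex_lock(&ref->mutex);
	for (node = ref->nodes; node; node = node->next)
		if (node->key == key)
			goto out;

	node = calloc(1, sizeof(*node));	/* miss: allocate and link a new tracker */
	if (node) {
		node->key = key;
		node->next = ref->nodes;
		ref->nodes = node;
	}
out:
	if (node)
		ref->cache = node;		/* remember the hit for the next caller */
	pthread_mutex_unlock(&ref->mutex);
	return node;
}
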
233 struct i915_active *ref, in __i915_active_init() argument
234 int (*active)(struct i915_active *ref), in __i915_active_init() argument
235 void (*retire)(struct i915_active *ref), in __i915_active_init() argument
238 debug_active_init(ref); in __i915_active_init()
240 ref->i915 = i915; in __i915_active_init()
241 ref->flags = 0; in __i915_active_init()
242 ref->active = active; in __i915_active_init()
243 ref->retire = retire; in __i915_active_init()
244 ref->tree = RB_ROOT; in __i915_active_init()
245 ref->cache = NULL; in __i915_active_init()
246 init_llist_head(&ref->preallocated_barriers); in __i915_active_init()
247 atomic_set(&ref->count, 0); in __i915_active_init()
248 __mutex_init(&ref->mutex, "i915_active", key); in __i915_active_init()
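
__i915_active_init() records the driver pointer and the two callbacks, empties the tree, cache and preallocated-barrier list, and starts the reference count at zero; the object only becomes busy once the first acquire arrives. A standalone analogue over the same fields, with userspace types standing in for the kernel ones:

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct my_active {
	atomic_int count;			/* 0 == idle */
	pthread_mutex_t mutex;			/* guards the tracking tree */
	void *tree, *cache;			/* rbtree + last lookup in the real code */
	int (*active)(struct my_active *ref);	/* called on the first acquire */
	void (*retire)(struct my_active *ref);	/* called on the last release */
};

static void my_active_init(struct my_active *ref,
			   int (*active)(struct my_active *),
			   void (*retire)(struct my_active *))
{
	ref->active = active;
	ref->retire = retire;
	ref->tree = NULL;			/* RB_ROOT in the real code */
	ref->cache = NULL;
	atomic_init(&ref->count, 0);
	pthread_mutex_init(&ref->mutex, NULL);
}
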
251 static bool ____active_del_barrier(struct i915_active *ref, in ____active_del_barrier() argument
294 __active_del_barrier(struct i915_active *ref, struct active_node *node) in __active_del_barrier() argument
296 return ____active_del_barrier(ref, node, barrier_to_engine(node)); in __active_del_barrier()
299 int i915_active_ref(struct i915_active *ref, in i915_active_ref() argument
309 err = i915_active_acquire(ref); in i915_active_ref()
313 active = active_instance(ref, tl); in i915_active_ref()
325 __active_del_barrier(ref, node_from_active(active)); in i915_active_ref()
330 atomic_inc(&ref->count); in i915_active_ref()
332 GEM_BUG_ON(!atomic_read(&ref->count)); in i915_active_ref()
336 i915_active_release(ref); in i915_active_ref()
340 int i915_active_acquire(struct i915_active *ref) in i915_active_acquire() argument
344 debug_active_assert(ref); in i915_active_acquire()
345 if (atomic_add_unless(&ref->count, 1, 0)) in i915_active_acquire()
348 err = mutex_lock_interruptible(&ref->mutex); in i915_active_acquire()
352 if (!atomic_read(&ref->count) && ref->active) in i915_active_acquire()
353 err = ref->active(ref); in i915_active_acquire()
355 debug_active_activate(ref); in i915_active_acquire()
356 atomic_inc(&ref->count); in i915_active_acquire()
359 mutex_unlock(&ref->mutex); in i915_active_acquire()
364 void i915_active_release(struct i915_active *ref) in i915_active_release() argument
366 debug_active_assert(ref); in i915_active_release()
367 active_retire(ref); in i915_active_release()
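
i915_active_acquire() mirrors the retire path: the fast path bumps an already non-zero count without the lock, and only the 0 -> 1 transition takes ref->mutex so the optional ->active() callback runs exactly once per busy cycle; i915_active_release() then simply retires the reference. A sketch of the first-acquire path under the same userspace stand-ins as above:

#include <pthread.h>
#include <stdatomic.h>

struct my_active {
	atomic_int count;
	pthread_mutex_t mutex;
	int (*active)(struct my_active *ref);	/* optional 0 -> 1 callback */
};

static int my_active_acquire(struct my_active *ref)
{
	int err = 0;
	int old = atomic_load(&ref->count);

	/* Fast path: already active, just take another reference. */
	while (old > 0)
		if (atomic_compare_exchange_weak(&ref->count, &old, old + 1))
			return 0;

	/* Slow path: serialise the 0 -> 1 transition with the retire path. */
	pthread_mutex_lock(&ref->mutex);
	if (atomic_load(&ref->count) == 0 && ref->active)
		err = ref->active(ref);		/* runs once per idle -> busy cycle */
	if (!err)
		atomic_fetch_add(&ref->count, 1);
	pthread_mutex_unlock(&ref->mutex);

	return err;
}

The matching release would be the my_active_retire() sketch shown after the retire path above.
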
370 static void __active_ungrab(struct i915_active *ref) in __active_ungrab() argument
372 clear_and_wake_up_bit(I915_ACTIVE_GRAB_BIT, &ref->flags); in __active_ungrab()
375 bool i915_active_trygrab(struct i915_active *ref) in i915_active_trygrab() argument
377 debug_active_assert(ref); in i915_active_trygrab()
379 if (test_and_set_bit(I915_ACTIVE_GRAB_BIT, &ref->flags)) in i915_active_trygrab()
382 if (!atomic_add_unless(&ref->count, 1, 0)) { in i915_active_trygrab()
383 __active_ungrab(ref); in i915_active_trygrab()
390 void i915_active_ungrab(struct i915_active *ref) in i915_active_ungrab() argument
392 GEM_BUG_ON(!test_bit(I915_ACTIVE_GRAB_BIT, &ref->flags)); in i915_active_ungrab()
394 active_retire(ref); in i915_active_ungrab()
395 __active_ungrab(ref); in i915_active_ungrab()
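
i915_active_trygrab() claims the object by setting I915_ACTIVE_GRAB_BIT before taking an extra reference, and backs out (clearing the bit) if the object has already gone idle; i915_active_ungrab() drops the reference and then clears the bit with a wake-up, which is what i915_active_wait() later sleeps on. A much-simplified sketch of that flag-plus-refcount handshake, with an atomic_bool standing in for the wait-bit machinery (no wake-up shown):

#include <stdatomic.h>
#include <stdbool.h>

struct my_active {
	atomic_int count;
	atomic_bool grabbed;		/* stands in for I915_ACTIVE_GRAB_BIT */
};

static bool my_trygrab(struct my_active *ref)
{
	if (atomic_exchange(&ref->grabbed, true))
		return false;		/* somebody else already holds the grab */

	/* Only keep the grab if the object is still active. */
	int old = atomic_load(&ref->count);
	do {
		if (old == 0) {
			atomic_store(&ref->grabbed, false);	/* it went idle, undo */
			return false;
		}
	} while (!atomic_compare_exchange_weak(&ref->count, &old, old + 1));

	return true;
}

static void my_ungrab(struct my_active *ref)
{
	atomic_fetch_sub(&ref->count, 1);	/* active_retire() in the real code */
	atomic_store(&ref->grabbed, false);	/* clear_and_wake_up_bit() there */
}
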
398 int i915_active_wait(struct i915_active *ref) in i915_active_wait() argument
404 might_lock(&ref->mutex); in i915_active_wait()
406 if (i915_active_is_idle(ref)) in i915_active_wait()
409 err = mutex_lock_interruptible(&ref->mutex); in i915_active_wait()
413 if (!atomic_add_unless(&ref->count, 1, 0)) { in i915_active_wait()
414 mutex_unlock(&ref->mutex); in i915_active_wait()
418 rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { in i915_active_wait()
424 err = i915_active_request_retire(&it->base, BKL(ref)); in i915_active_wait()
429 __active_retire(ref); in i915_active_wait()
433 if (wait_on_bit(&ref->flags, I915_ACTIVE_GRAB_BIT, TASK_KILLABLE)) in i915_active_wait()
436 if (!i915_active_is_idle(ref)) in i915_active_wait()
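
i915_active_wait() pins the object with a temporary reference so it cannot retire mid-walk, waits for and retires every tracked request in the tree, drops its pin through __active_retire() so the ->retire() callback can finally run, and then waits out any trygrab holder on I915_ACTIVE_GRAB_BIT. A very rough sketch of that pin/drain/unpin shape; the fixed-size array and my_node_wait() helper are inventions for the sketch only:

#include <stdatomic.h>
#include <stdbool.h>

struct my_node { bool busy; };

struct my_active {
	atomic_int count;
	struct my_node nodes[8];	/* stands in for the rbtree of trackers */
};

static void my_node_wait(struct my_node *node)
{
	node->busy = false;		/* "wait for the request, then retire it" */
}

static int my_active_wait(struct my_active *ref)
{
	/* Pin the object so it cannot retire while we walk the trackers. */
	int old = atomic_load(&ref->count);
	do {
		if (old == 0)
			return 0;	/* already idle, nothing to wait for */
	} while (!atomic_compare_exchange_weak(&ref->count, &old, old + 1));

	for (int i = 0; i < 8; i++)
		my_node_wait(&ref->nodes[i]);	/* drain every tracked request */

	/* Drop the pin; if it was the last reference the retire path runs now. */
	atomic_fetch_sub(&ref->count, 1);
	return 0;
}
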
451 int i915_request_await_active(struct i915_request *rq, struct i915_active *ref) in i915_request_await_active() argument
456 if (RB_EMPTY_ROOT(&ref->tree)) in i915_request_await_active()
460 err = i915_active_acquire(ref); in i915_request_await_active()
464 mutex_lock(&ref->mutex); in i915_request_await_active()
465 rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { in i915_request_await_active()
470 mutex_unlock(&ref->mutex); in i915_request_await_active()
472 i915_active_release(ref); in i915_request_await_active()
477 void i915_active_fini(struct i915_active *ref) in i915_active_fini() argument
479 debug_active_fini(ref); in i915_active_fini()
480 GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree)); in i915_active_fini()
481 GEM_BUG_ON(atomic_read(&ref->count)); in i915_active_fini()
482 mutex_destroy(&ref->mutex); in i915_active_fini()
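
i915_active_fini() requires the object to be idle again: the GEM_BUG_ON()s insist that the tree is empty and the count has returned to zero before the mutex is destroyed. The same invariants in the standalone sketch:

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct my_active {
	atomic_int count;
	pthread_mutex_t mutex;
	void *tree;			/* must be empty again before fini */
};

static void my_active_fini(struct my_active *ref)
{
	assert(ref->tree == NULL);		/* GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree)) */
	assert(atomic_load(&ref->count) == 0);	/* GEM_BUG_ON(atomic_read(&ref->count)) */
	pthread_mutex_destroy(&ref->mutex);
}
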
491 static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx) in reuse_idle_barrier() argument
495 if (RB_EMPTY_ROOT(&ref->tree)) in reuse_idle_barrier()
498 mutex_lock(&ref->mutex); in reuse_idle_barrier()
499 GEM_BUG_ON(i915_active_is_idle(ref)); in reuse_idle_barrier()
508 if (ref->cache && is_idle_barrier(ref->cache, idx)) { in reuse_idle_barrier()
509 p = &ref->cache->node; in reuse_idle_barrier()
514 p = ref->tree.rb_node; in reuse_idle_barrier()
559 ____active_del_barrier(ref, node, engine)) in reuse_idle_barrier()
563 mutex_unlock(&ref->mutex); in reuse_idle_barrier()
568 rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */ in reuse_idle_barrier()
569 if (p == &ref->cache->node) in reuse_idle_barrier()
570 ref->cache = NULL; in reuse_idle_barrier()
571 mutex_unlock(&ref->mutex); in reuse_idle_barrier()
576 int i915_active_acquire_preallocate_barrier(struct i915_active *ref, in i915_active_acquire_preallocate_barrier() argument
584 GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers)); in i915_active_acquire_preallocate_barrier()
596 node = reuse_idle_barrier(ref, idx); in i915_active_acquire_preallocate_barrier()
611 node->ref = ref; in i915_active_acquire_preallocate_barrier()
626 atomic_inc(&ref->count); in i915_active_acquire_preallocate_barrier()
630 llist_add(barrier_to_ll(node), &ref->preallocated_barriers); in i915_active_acquire_preallocate_barrier()
637 llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) { in i915_active_acquire_preallocate_barrier()
640 atomic_dec(&ref->count); in i915_active_acquire_preallocate_barrier()
648 void i915_active_acquire_barrier(struct i915_active *ref) in i915_active_acquire_barrier() argument
652 GEM_BUG_ON(i915_active_is_idle(ref)); in i915_active_acquire_barrier()
660 mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING); in i915_active_acquire_barrier()
661 llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) { in i915_active_acquire_barrier()
667 p = &ref->tree.rb_node; in i915_active_acquire_barrier()
680 rb_insert_color(&node->node, &ref->tree); in i915_active_acquire_barrier()
685 mutex_unlock(&ref->mutex); in i915_active_acquire_barrier()
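
The barrier path is split in two so that allocation failures are handled early: i915_active_acquire_preallocate_barrier() allocates (or reclaims, via reuse_idle_barrier()) one node per engine, takes a ref->count for each, and parks them on the lockless ref->preallocated_barriers list; i915_active_acquire_barrier() later drains that list and links every node into ref->tree under a nested ref->mutex lock, a step that can no longer fail. A simplified sketch of that "preallocate unlocked, publish under the lock" split, with singly linked lists standing in for both the llist and the rbtree:

#include <pthread.h>
#include <stdlib.h>

struct my_node {
	struct my_node *next;
};

struct my_active {
	pthread_mutex_t mutex;
	struct my_node *tree;		/* stands in for ref->tree */
	struct my_node *prealloc;	/* stands in for ref->preallocated_barriers */
};

/* Step 1: allocate everything up front, where failing and unwinding is easy. */
static int my_preallocate_barriers(struct my_active *ref, int count)
{
	for (int i = 0; i < count; i++) {
		struct my_node *node = calloc(1, sizeof(*node));

		if (!node)
			return -1;	/* the real code unwinds what it queued */
		node->next = ref->prealloc;
		ref->prealloc = node;
	}
	return 0;
}

/* Step 2: publish the preallocated nodes under the lock; this cannot fail. */
static void my_acquire_barrier(struct my_active *ref)
{
	pthread_mutex_lock(&ref->mutex);
	while (ref->prealloc) {
		struct my_node *node = ref->prealloc;

		ref->prealloc = node->next;
		node->next = ref->tree;	/* rb_link_node()/rb_insert_color() in the real code */
		ref->tree = node;
	}
	pthread_mutex_unlock(&ref->mutex);
}
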