/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_H__
#define __INTEL_CONTEXT_H__

#include <linux/lockdep.h>

#include "i915_active.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
#include "intel_timeline_types.h"

void intel_context_init(struct intel_context *ce,
                        struct i915_gem_context *ctx,
                        struct intel_engine_cs *engine);
void intel_context_fini(struct intel_context *ce);

struct intel_context *
intel_context_create(struct i915_gem_context *ctx,
                     struct intel_engine_cs *engine);

void intel_context_free(struct intel_context *ce);

/**
 * intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
 * @ce: the context
 *
 * Acquire a lock on the pinned status of the HW context, such that the context
 * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
 * intel_context_is_pinned() remains stable.
 */
static inline int intel_context_lock_pinned(struct intel_context *ce)
        __acquires(ce->pin_mutex)
{
        return mutex_lock_interruptible(&ce->pin_mutex);
}

/**
 * intel_context_is_pinned - Reports the 'pinned' status
 * @ce: the context
 *
 * While in use by the GPU, the context, along with its ring and page
 * tables, is pinned into memory and the GTT.
 *
 * Returns: true if the context is currently pinned for use by the GPU.
 */
static inline bool
intel_context_is_pinned(struct intel_context *ce)
{
        return atomic_read(&ce->pin_count);
}

/**
 * intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
 * @ce: the context
 *
 * Releases the lock earlier acquired by intel_context_lock_pinned().
 */
static inline void intel_context_unlock_pinned(struct intel_context *ce)
        __releases(ce->pin_mutex)
{
        mutex_unlock(&ce->pin_mutex);
}

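/*
 * Illustrative usage sketch (not part of the header; the surrounding
 * function and error handling are hypothetical): take the pin-status
 * lock so that intel_context_is_pinned() stays stable while inspecting
 * the context.
 *
 *	err = intel_context_lock_pinned(ce);
 *	if (err)
 *		return err;
 *
 *	if (intel_context_is_pinned(ce)) {
 *		... safe to inspect the pinned state ...
 *	}
 *
 *	intel_context_unlock_pinned(ce);
 */
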
int __intel_context_do_pin(struct intel_context *ce);

static inline int intel_context_pin(struct intel_context *ce)
{
        /* Fast path: already pinned, just add our pin to the count */
        if (likely(atomic_inc_not_zero(&ce->pin_count)))
                return 0;

        return __intel_context_do_pin(ce);
}

static inline void __intel_context_pin(struct intel_context *ce)
{
        /* Caller must already hold at least one pin; this cannot fail */
        GEM_BUG_ON(!intel_context_is_pinned(ce));
        atomic_inc(&ce->pin_count);
}

void intel_context_unpin(struct intel_context *ce);

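/*
 * Illustrative pinning sketch (hypothetical caller): hold a pin while
 * the context is being used for submission and drop it afterwards. The
 * first pin does the expensive setup via __intel_context_do_pin();
 * subsequent pins are a cheap atomic increment.
 *
 *	err = intel_context_pin(ce);
 *	if (err)
 *		return err;
 *
 *	... build and submit requests on ce ...
 *
 *	intel_context_unpin(ce);
 */
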
void intel_context_enter_engine(struct intel_context *ce);
void intel_context_exit_engine(struct intel_context *ce);

static inline void intel_context_enter(struct intel_context *ce)
{
        lockdep_assert_held(&ce->timeline->mutex);
        if (!ce->active_count++)
                ce->ops->enter(ce);
}

static inline void intel_context_mark_active(struct intel_context *ce)
{
        lockdep_assert_held(&ce->timeline->mutex);
        ++ce->active_count;
}

static inline void intel_context_exit(struct intel_context *ce)
{
        lockdep_assert_held(&ce->timeline->mutex);
        GEM_BUG_ON(!ce->active_count);
        if (!--ce->active_count)
                ce->ops->exit(ce);
}

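/*
 * Illustrative sketch of active tracking (hypothetical caller): the
 * enter/exit calls must be balanced and made under the timeline mutex;
 * only the first enter and the last exit invoke the backend ops.
 *
 *	mutex_lock(&ce->timeline->mutex);
 *	intel_context_enter(ce);
 *	... the context is now accounted as active ...
 *	intel_context_exit(ce);
 *	mutex_unlock(&ce->timeline->mutex);
 */
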
int intel_context_active_acquire(struct intel_context *ce);
void intel_context_active_release(struct intel_context *ce);

static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
        kref_get(&ce->ref);
        return ce;
}

static inline void intel_context_put(struct intel_context *ce)
{
        kref_put(&ce->ref, ce->ops->destroy);
}

static inline struct intel_timeline *__must_check
intel_context_timeline_lock(struct intel_context *ce)
        __acquires(&ce->timeline->mutex)
{
        struct intel_timeline *tl = ce->timeline;
        int err;

        err = mutex_lock_interruptible(&tl->mutex);
        if (err)
                return ERR_PTR(err);

        return tl;
}

static inline void intel_context_timeline_unlock(struct intel_timeline *tl)
        __releases(&tl->mutex)
{
        mutex_unlock(&tl->mutex);
}

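/*
 * Illustrative sketch (hypothetical caller): the lock helper returns
 * the timeline on success or an ERR_PTR if the wait was interrupted,
 * so check with IS_ERR() before using the result.
 *
 *	struct intel_timeline *tl;
 *
 *	tl = intel_context_timeline_lock(ce);
 *	if (IS_ERR(tl))
 *		return PTR_ERR(tl);
 *
 *	... operate on the timeline ...
 *
 *	intel_context_timeline_unlock(tl);
 */
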
int intel_context_prepare_remote_request(struct intel_context *ce,
                                         struct i915_request *rq);

struct i915_request *intel_context_create_request(struct intel_context *ce);

static inline struct intel_ring *__intel_context_ring_size(u64 sz)
{
        /* Encode the requested ring size as a stand-in pointer value */
        return u64_to_ptr(struct intel_ring, sz);
}

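/*
 * Illustrative sketch (hypothetical size value): stash the desired ring
 * size in the ring pointer before the context is pinned; the backing
 * ring itself is only allocated later, when the context is first set up.
 *
 *	ce->ring = __intel_context_ring_size(SZ_16K);
 */
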
#endif /* __INTEL_CONTEXT_H__ */