/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_GEM_CONTEXT_H__
#define __I915_GEM_CONTEXT_H__

#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>

#include "i915_gem.h"
#include "i915_scheduler.h"

struct pid;

struct drm_device;
struct drm_file;

struct drm_i915_private;
struct drm_i915_file_private;
struct i915_hw_ppgtt;
struct i915_request;
struct i915_vma;
struct intel_ring;

#define DEFAULT_CONTEXT_HANDLE 0

struct intel_context;

struct intel_context_ops {
	void (*unpin)(struct intel_context *ce);
	void (*destroy)(struct intel_context *ce);
};
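
/*
 * The ops table above is the backend vtable for the per-engine context state
 * embedded in struct i915_gem_context below. Within this header,
 * intel_context_unpin() invokes ->unpin() once the local pin count drops to
 * zero; ->destroy() is presumably called when the per-engine state itself is
 * released (the caller is not visible in this file).
 */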

/**
 * struct i915_gem_context - client state
 *
 * The struct i915_gem_context represents the combined view of the driver and
 * logical hardware state for a particular client.
 */
struct i915_gem_context {
	/** i915: i915 device backpointer */
	struct drm_i915_private *i915;

	/** file_priv: owning file descriptor */
	struct drm_i915_file_private *file_priv;

	/**
	 * @ppgtt: unique address space (GTT)
	 *
	 * In full-ppgtt mode, each context has its own address space, ensuring
	 * complete separation of one client from all others.
	 *
	 * In other modes, this is a NULL pointer with the expectation that
	 * the caller uses the shared global GTT.
	 */
	struct i915_hw_ppgtt *ppgtt;
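
	/*
	 * Illustrative sketch only (not a quote of driver code): callers that
	 * need an address space are expected to prefer the per-context PPGTT
	 * and fall back to the device-wide GTT when it is absent, e.g.
	 *
	 *	if (ctx->ppgtt)
	 *		... bind into the context's private address space ...
	 *	else
	 *		... bind into the shared global GTT ...
	 */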

	/**
	 * @pid: process id of creator
	 *
	 * Note that the creator of the context may not be its principal user,
	 * as the context may be shared across a local socket. However,
	 * that should only affect the default context; all contexts created
	 * explicitly by the client are expected to be isolated.
	 */
	struct pid *pid;

	/**
	 * @name: arbitrary name
	 *
	 * A name is constructed for the context from the creator's process
	 * name, pid and user handle in order to uniquely identify the
	 * context in messages.
	 */
	const char *name;

	/** link: place within &drm_i915_private.context_list */
	struct list_head link;
	struct llist_node free_link;

	/**
	 * @ref: reference count
	 *
	 * A reference to a context is held both by the client who created it
	 * and by each request submitted to the hardware using its state (to
	 * ensure the hardware has access to the state until it has finished
	 * all pending writes). See i915_gem_context_get() and
	 * i915_gem_context_put() for access.
	 */
	struct kref ref;

	/**
	 * @rcu: rcu_head for deferred freeing.
	 */
	struct rcu_head rcu;

	/**
	 * @flags: small set of booleans
	 */
	unsigned long flags;
#define CONTEXT_NO_ZEROMAP		BIT(0)
#define CONTEXT_NO_ERROR_CAPTURE	1
#define CONTEXT_CLOSED			2
#define CONTEXT_BANNABLE		3
#define CONTEXT_BANNED			4
#define CONTEXT_FORCE_SINGLE_SUBMISSION	5
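
	/*
	 * Note the mixed styles above: CONTEXT_NO_ZEROMAP is defined as a mask
	 * (BIT(0)), while the remaining values are bit numbers passed to
	 * test_bit()/__set_bit() by the helpers later in this header, e.g.
	 * (illustrative):
	 *
	 *	if (test_bit(CONTEXT_CLOSED, &ctx->flags))
	 *		return -ENOENT;
	 */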

	/**
	 * @hw_id: unique identifier for the context
	 *
	 * The hardware needs to uniquely identify the context for a few
	 * functions like fault reporting, PASID, scheduling. The
	 * &drm_i915_private.context_hw_ida is used to assign a unique
	 * id for the lifetime of the context.
	 */
	unsigned int hw_id;

	/**
	 * @user_handle: userspace identifier
	 *
	 * A unique per-file identifier is generated from
	 * &drm_i915_file_private.contexts.
	 */
	u32 user_handle;

	struct i915_sched_attr sched;

	/** ggtt_offset_bias: placement restriction for context objects */
	u32 ggtt_offset_bias;

	/** engine: per-engine logical HW state */
	struct intel_context {
		struct i915_gem_context *gem_context;
		struct i915_vma *state;
		struct intel_ring *ring;
		u32 *lrc_reg_state;
		u64 lrc_desc;
		int pin_count;

		const struct intel_context_ops *ops;
	} __engine[I915_NUM_ENGINES];

	/** ring_size: size for allocating the per-engine ring buffer */
	u32 ring_size;
	/** desc_template: invariant fields for the HW context descriptor */
	u32 desc_template;

	/** guilty_count: How many times this context has caused a GPU hang. */
	atomic_t guilty_count;
	/**
	 * @active_count: How many times this context was active during a GPU
	 * hang, but did not cause it.
	 */
	atomic_t active_count;

#define CONTEXT_SCORE_GUILTY		10
#define CONTEXT_SCORE_BAN_THRESHOLD	40
	/** ban_score: Accumulated score of all hangs caused by this context. */
	atomic_t ban_score;
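
	/*
	 * Ban-scoring sketch (an assumption based on the constants above, not
	 * a quote of the implementation): each hang caused by a bannable
	 * context is expected to add CONTEXT_SCORE_GUILTY to @ban_score, and
	 * once the total reaches CONTEXT_SCORE_BAN_THRESHOLD the context is
	 * marked banned, roughly:
	 *
	 *	if (atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score) >=
	 *	    CONTEXT_SCORE_BAN_THRESHOLD)
	 *		i915_gem_context_set_banned(ctx);
	 */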

	/** remap_slice: Bitmask of L3 slices that need remapping */
	u8 remap_slice;

	/** handles_vma: radix tree to look up our context-specific obj/vma for
	 * the user handle. (user handles are per fd, but the binding is
	 * per vm, which may be one per context or shared with the global GTT)
	 */
	struct radix_tree_root handles_vma;
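
	/*
	 * Illustrative lookup: a user handle resolves to its per-context
	 * binding with a plain radix tree lookup, e.g.
	 *
	 *	struct i915_vma *vma =
	 *		radix_tree_lookup(&ctx->handles_vma, handle);
	 */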

	/** handles_list: reverse list of all the radix tree entries in use for
	 * this context, which allows us to free all the allocations on
	 * context close.
	 */
	struct list_head handles_list;
};

static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_CLOSED, &ctx->flags);
}

static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(i915_gem_context_is_closed(ctx));
	__set_bit(CONTEXT_CLOSED, &ctx->flags);
}

static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
}

static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx)
{
	__set_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
}

static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx)
{
	__clear_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
}

static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_BANNABLE, &ctx->flags);
}

static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx)
{
	__set_bit(CONTEXT_BANNABLE, &ctx->flags);
}

static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
{
	__clear_bit(CONTEXT_BANNABLE, &ctx->flags);
}

static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_BANNED, &ctx->flags);
}

static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx)
{
	__set_bit(CONTEXT_BANNED, &ctx->flags);
}

static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
}

static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx)
{
	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
}

static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
{
	return c->user_handle == DEFAULT_CONTEXT_HANDLE;
}

static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
{
	return !ctx->file_priv;
}

static inline struct intel_context *
to_intel_context(struct i915_gem_context *ctx,
		 const struct intel_engine_cs *engine)
{
	return &ctx->__engine[engine->id];
}

static inline struct intel_context *
intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	return engine->context_pin(engine, ctx);
}

static inline void __intel_context_pin(struct intel_context *ce)
{
	GEM_BUG_ON(!ce->pin_count);
	ce->pin_count++;
}

static inline void intel_context_unpin(struct intel_context *ce)
{
	GEM_BUG_ON(!ce->pin_count);
	if (--ce->pin_count)
		return;

	GEM_BUG_ON(!ce->ops);
	ce->ops->unpin(ce);
}
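
/*
 * Typical pin/unpin pairing (a sketch; it assumes the engine backend returns
 * an ERR_PTR-encoded pointer on failure):
 *
 *	struct intel_context *ce;
 *
 *	ce = intel_context_pin(ctx, engine);
 *	if (IS_ERR(ce))
 *		return PTR_ERR(ce);
 *
 *	... emit commands into ce->ring ...
 *
 *	intel_context_unpin(ce);
 *
 * A caller that already holds a pin may take a nested reference with
 * __intel_context_pin(), which only increments the existing pin_count.
 */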

/* i915_gem_context.c */
int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);

int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file);
void i915_gem_context_close(struct drm_file *file);

int i915_switch_context(struct i915_request *rq);
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);

void i915_gem_context_release(struct kref *ctx_ref);
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev);

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file);

struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio);

static inline struct i915_gem_context *
i915_gem_context_get(struct i915_gem_context *ctx)
{
	kref_get(&ctx->ref);
	return ctx;
}

static inline void i915_gem_context_put(struct i915_gem_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_release);
}
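
/*
 * Reference-handling sketch (illustrative): every user that needs the context
 * to stay alive takes a reference and drops it when done; the final put frees
 * the context through i915_gem_context_release() (deferred via @rcu):
 *
 *	ctx = i915_gem_context_get(ctx);
 *	... submit requests against ctx ...
 *	i915_gem_context_put(ctx);
 */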

#endif /* !__I915_GEM_CONTEXT_H__ */