1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #ifndef I915_TIMELINE_H
26 #define I915_TIMELINE_H
27 
28 #include <linux/list.h>
29 #include <linux/kref.h>
30 
31 #include "i915_request.h"
32 #include "i915_syncmap.h"
33 #include "i915_utils.h"
34 
35 struct i915_timeline {
36 	u64 fence_context;
37 	u32 seqno;
38 
39 	spinlock_t lock;
40 #define TIMELINE_CLIENT 0 /* default subclass */
41 #define TIMELINE_ENGINE 1
42 
43 	/**
44 	 * List of breadcrumbs associated with GPU requests currently
45 	 * outstanding.
46 	 */
47 	struct list_head requests;
48 
49 	/* Contains an RCU guarded pointer to the last request. No reference is
50 	 * held to the request, users must carefully acquire a reference to
51 	 * the request using i915_gem_active_get_request_rcu(), or hold the
52 	 * struct_mutex.
53 	 */
54 	struct i915_gem_active last_request;
55 
56 	/**
57 	 * We track the most recent seqno that we wait on in every context so
58 	 * that we only have to emit a new await and dependency on a more
59 	 * recent sync point. As the contexts may be executed out-of-order, we
60 	 * have to track each individually and can not rely on an absolute
61 	 * global_seqno. When we know that all tracked fences are completed
62 	 * (i.e. when the driver is idle), we know that the syncmap is
63 	 * redundant and we can discard it without loss of generality.
64 	 */
65 	struct i915_syncmap *sync;
66 	/**
67 	 * Separately to the inter-context seqno map above, we track the last
68 	 * barrier (e.g. semaphore wait) to the global engine timelines. Note
69 	 * that this tracks global_seqno rather than the context.seqno, and
70 	 * so it is subject to the limitations of hw wraparound and that we
71 	 * may need to revoke global_seqno (on pre-emption).
72 	 */
73 	u32 global_sync[I915_NUM_ENGINES];
74 
75 	struct list_head link;
76 	const char *name;
77 
78 	struct kref kref;
79 };
80 
81 void i915_timeline_init(struct drm_i915_private *i915,
82 			struct i915_timeline *tl,
83 			const char *name);
84 void i915_timeline_fini(struct i915_timeline *tl);
85 
86 struct i915_timeline *
87 i915_timeline_create(struct drm_i915_private *i915, const char *name);
88 
89 static inline struct i915_timeline *
i915_timeline_get(struct i915_timeline * timeline)90 i915_timeline_get(struct i915_timeline *timeline)
91 {
92 	kref_get(&timeline->kref);
93 	return timeline;
94 }
95 
96 void __i915_timeline_free(struct kref *kref);
i915_timeline_put(struct i915_timeline * timeline)97 static inline void i915_timeline_put(struct i915_timeline *timeline)
98 {
99 	kref_put(&timeline->kref, __i915_timeline_free);
100 }
101 
/**
 * __i915_timeline_sync_set - record a sync point in the timeline's syncmap
 * @tl: the timeline to update
 * @context: the fence context of the sync point
 * @seqno: the seqno of the sync point
 *
 * Returns: the result of i915_syncmap_set() (0 on success, negative
 * error code on failure).
 */
static inline int __i915_timeline_sync_set(struct i915_timeline *tl,
					   u64 context, u32 seqno)
{
	struct i915_syncmap **map = &tl->sync;

	return i915_syncmap_set(map, context, seqno);
}
107 
i915_timeline_sync_set(struct i915_timeline * tl,const struct dma_fence * fence)108 static inline int i915_timeline_sync_set(struct i915_timeline *tl,
109 					 const struct dma_fence *fence)
110 {
111 	return __i915_timeline_sync_set(tl, fence->context, fence->seqno);
112 }
113 
__i915_timeline_sync_is_later(struct i915_timeline * tl,u64 context,u32 seqno)114 static inline bool __i915_timeline_sync_is_later(struct i915_timeline *tl,
115 						 u64 context, u32 seqno)
116 {
117 	return i915_syncmap_is_later(&tl->sync, context, seqno);
118 }
119 
i915_timeline_sync_is_later(struct i915_timeline * tl,const struct dma_fence * fence)120 static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl,
121 					       const struct dma_fence *fence)
122 {
123 	return __i915_timeline_sync_is_later(tl, fence->context, fence->seqno);
124 }
125 
126 void i915_timelines_park(struct drm_i915_private *i915);
127 
128 #endif
129