/*
 * Header file for reservations for dma-buf and ttm
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Copyright (C) 2012-2013 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef _LINUX_RESERVATION_H
#define _LINUX_RESERVATION_H

#include <linux/ww_mutex.h>
#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/rcupdate.h>

extern struct ww_class reservation_ww_class;
extern struct lock_class_key reservation_seqcount_class;
extern const char reservation_seqcount_string[];
/**
 * struct dma_resv_list - a list of shared fences
 * @rcu: for internal use
 * @shared_count: number of fences in the shared table
 * @shared_max: allocated size of the shared table, for growing it
 * @shared: shared fence table
 */
struct dma_resv_list {
	struct rcu_head rcu;
	u32 shared_count, shared_max;
	struct dma_fence __rcu *shared[];
};

/**
 * struct dma_resv - a reservation object manages fences for a buffer
 * @lock: update side lock
 * @seq: sequence count for managing RCU read-side synchronization
 * @fence_excl: the exclusive fence, if there is one currently
 * @fence: list of current shared fences
 */
struct dma_resv {
	struct ww_mutex lock;
	seqcount_t seq;

	struct dma_fence __rcu *fence_excl;
	struct dma_resv_list __rcu *fence;
};

#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)

/**
 * dma_resv_get_list - get the reservation object's
 * shared fence list, with update-side lock held
 * @obj: the reservation object
 *
 * Returns the shared fence list.  Does NOT take references to
 * the fences.  obj->lock must be held.
 */
static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
{
	return rcu_dereference_protected(obj->fence,
					 dma_resv_held(obj));
}
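
/*
 * Example (a minimal sketch, not part of the original header): walking
 * the shared list under the lock. Holding obj->lock makes it safe to
 * dereference the RCU-protected entries without taking extra fence
 * references.
 *
 *	struct dma_resv_list *list;
 *	unsigned int i, signaled = 0;
 *
 *	dma_resv_lock(obj, NULL);
 *	list = dma_resv_get_list(obj);
 *	for (i = 0; list && i < list->shared_count; ++i) {
 *		struct dma_fence *fence =
 *			rcu_dereference_protected(list->shared[i],
 *						  dma_resv_held(obj));
 *
 *		if (dma_fence_is_signaled(fence))
 *			signaled++;
 *	}
 *	dma_resv_unlock(obj);
 */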

/**
 * dma_resv_lock - lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Locks the reservation object for exclusive access and modification. Note
 * that the lock is only against other writers; readers will run concurrently
 * with a writer under RCU. The seqlock is used to notify readers if they
 * overlap with a writer.
 *
 * As the reservation object may be locked by multiple parties in an
 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
 * object may be locked by itself by passing NULL as @ctx.
 */
static inline int dma_resv_lock(struct dma_resv *obj,
				struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock(&obj->lock, ctx);
}

/**
 * dma_resv_lock_interruptible - lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Locks the reservation object interruptibly for exclusive access and
 * modification. Note that the lock is only against other writers; readers
 * will run concurrently with a writer under RCU. The seqlock is used to
 * notify readers if they overlap with a writer.
 *
 * As the reservation object may be locked by multiple parties in an
 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
 * object may be locked by itself by passing NULL as @ctx.
 */
static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
					      struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock_interruptible(&obj->lock, ctx);
}

/**
 * dma_resv_lock_slow - slowpath lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Acquires the reservation object after a previous attempt in the same
 * context returned -EDEADLK (the "die" case). This function will sleep
 * until the lock becomes available. See dma_resv_lock() as well.
 */
static inline void dma_resv_lock_slow(struct dma_resv *obj,
				      struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_slow(&obj->lock, ctx);
}

/**
 * dma_resv_lock_slow_interruptible - slowpath lock the reservation
 * object, interruptible
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Acquires the reservation object interruptibly after a previous attempt
 * in the same context returned -EDEADLK (the "die" case). This function
 * will sleep until the lock becomes available. See
 * dma_resv_lock_interruptible() as well.
 */
static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
						   struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock_slow_interruptible(&obj->lock, ctx);
}
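
/*
 * Example (a minimal sketch, not part of the original header; obj_a and
 * obj_b are hypothetical reservation objects, and only a single backoff
 * round is shown, whereas a robust caller loops until -EDEADLK stops
 * occurring): the first lock in a fresh context does not deadlock, but a
 * later -EDEADLK means all held locks must be dropped and the contended
 * one reacquired on the slowpath before retrying.
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &reservation_ww_class);
 *
 *	dma_resv_lock(obj_a, &ctx);
 *	ret = dma_resv_lock(obj_b, &ctx);
 *	if (ret == -EDEADLK) {
 *		dma_resv_unlock(obj_a);
 *		dma_resv_lock_slow(obj_b, &ctx);
 *		ret = dma_resv_lock(obj_a, &ctx);
 *	}
 *	ww_acquire_done(&ctx);
 *
 *	if (!ret) {
 *		... both objects are locked, use them ...
 *		dma_resv_unlock(obj_b);
 *		dma_resv_unlock(obj_a);
 *	}
 *	ww_acquire_fini(&ctx);
 */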

/**
 * dma_resv_trylock - trylock the reservation object
 * @obj: the reservation object
 *
 * Tries to lock the reservation object for exclusive access and modification.
 * Note that the lock is only against other writers; readers will run
 * concurrently with a writer under RCU. The seqlock is used to notify readers
 * if they overlap with a writer.
 *
 * Also note that since no context is provided, no deadlock protection is
 * possible.
 *
 * Returns true if the lock was acquired, false otherwise.
 */
static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
{
	return ww_mutex_trylock(&obj->lock);
}
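
/*
 * Example (a sketch; queue_retry_work() is a hypothetical helper, not
 * part of this API): publishing a fence only when the lock happens to
 * be free, e.g. from a path that must not block.
 *
 *	if (!dma_resv_trylock(obj)) {
 *		queue_retry_work(obj, fence);
 *		return;
 *	}
 *	dma_resv_add_excl_fence(obj, fence);
 *	dma_resv_unlock(obj);
 */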

/**
 * dma_resv_is_locked - is the reservation object locked
 * @obj: the reservation object
 *
 * Returns true if the mutex is locked, false if unlocked.
 */
static inline bool dma_resv_is_locked(struct dma_resv *obj)
{
	return ww_mutex_is_locked(&obj->lock);
}

/**
 * dma_resv_locking_ctx - returns the context used to lock the object
 * @obj: the reservation object
 *
 * Returns the context used to lock a reservation object or NULL if no context
 * was used or the object is not locked at all.
 */
static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
{
	return READ_ONCE(obj->lock.ctx);
}

/**
 * dma_resv_unlock - unlock the reservation object
 * @obj: the reservation object
 *
 * Unlocks the reservation object following exclusive access.
 */
static inline void dma_resv_unlock(struct dma_resv *obj)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * Test shared fence slot reservation: reset shared_max to
	 * shared_count so that adding another shared fence without a
	 * fresh dma_resv_reserve_shared() call trips the debug checks.
	 */
	if (rcu_access_pointer(obj->fence)) {
		struct dma_resv_list *fence = dma_resv_get_list(obj);

		fence->shared_max = fence->shared_count;
	}
#endif
	ww_mutex_unlock(&obj->lock);
}

/**
 * dma_resv_get_excl - get the reservation object's
 * exclusive fence, with update-side lock held
 * @obj: the reservation object
 *
 * Returns the exclusive fence (if any).  Does NOT take a
 * reference.  Writers must hold obj->lock; readers may only
 * hold an RCU read-side lock.
 *
 * RETURNS
 * The exclusive fence or NULL
 */
static inline struct dma_fence *
dma_resv_get_excl(struct dma_resv *obj)
{
	return rcu_dereference_protected(obj->fence_excl,
					 dma_resv_held(obj));
}

/**
 * dma_resv_get_excl_rcu - get the reservation object's
 * exclusive fence, without lock held.
 * @obj: the reservation object
 *
 * If there is an exclusive fence, this atomically increments its
 * reference count and returns it.
 *
 * RETURNS
 * The exclusive fence or NULL if none
 */
static inline struct dma_fence *
dma_resv_get_excl_rcu(struct dma_resv *obj)
{
	struct dma_fence *fence;

	if (!rcu_access_pointer(obj->fence_excl))
		return NULL;

	rcu_read_lock();
	fence = dma_fence_get_rcu_safe(&obj->fence_excl);
	rcu_read_unlock();

	return fence;
}
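
/*
 * Example (a minimal sketch): waiting on the exclusive fence without
 * taking the reservation lock. Because a reference is returned, the
 * fence stays valid even if the object is concurrently updated.
 *
 *	struct dma_fence *fence = dma_resv_get_excl_rcu(obj);
 *
 *	if (fence) {
 *		dma_fence_wait(fence, true);
 *		dma_fence_put(fence);
 *	}
 */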

void dma_resv_init(struct dma_resv *obj);
void dma_resv_fini(struct dma_resv *obj);
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
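
/*
 * Example (a minimal sketch): publishing a shared fence. A slot must be
 * reserved under the lock before the fence can be added, since
 * dma_resv_add_shared_fence() itself cannot fail or allocate.
 *
 *	dma_resv_lock(obj, NULL);
 *	ret = dma_resv_reserve_shared(obj, 1);
 *	if (!ret)
 *		dma_resv_add_shared_fence(obj, fence);
 *	dma_resv_unlock(obj);
 */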

void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);

int dma_resv_get_fences_rcu(struct dma_resv *obj,
			    struct dma_fence **pfence_excl,
			    unsigned *pshared_count,
			    struct dma_fence ***pshared);
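
/*
 * Example (a minimal sketch): snapshotting all fences without holding
 * the lock. On success the caller owns a reference to each returned
 * fence and must free the shared array; dma_fence_put() ignores NULL.
 *
 *	struct dma_fence *excl, **shared;
 *	unsigned int count, i;
 *	int ret;
 *
 *	ret = dma_resv_get_fences_rcu(obj, &excl, &count, &shared);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < count; ++i)
 *		dma_fence_put(shared[i]);
 *	kfree(shared);
 *	dma_fence_put(excl);
 */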

int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);

long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr,
			       unsigned long timeout);

bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all);
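
/*
 * Example (a minimal sketch): polling, then waiting interruptibly on
 * all fences with a timeout in jiffies. The wait returns the remaining
 * timeout on success, 0 on timeout and a negative errno on error.
 *
 *	long ret;
 *
 *	if (dma_resv_test_signaled_rcu(obj, true))
 *		return 0;
 *
 *	ret = dma_resv_wait_timeout_rcu(obj, true, true,
 *					msecs_to_jiffies(100));
 *	if (ret == 0)
 *		return -ETIME;
 *	return ret < 0 ? ret : 0;
 */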

#endif /* _LINUX_RESERVATION_H */