/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_module.h>
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>
#include <linux/module.h>

#define TTM_WRITE_LOCK_PENDING (1 << 0)
#define TTM_VT_LOCK_PENDING (1 << 1)
#define TTM_SUSPEND_LOCK_PENDING (1 << 2)
#define TTM_VT_LOCK (1 << 3)
#define TTM_SUSPEND_LOCK (1 << 4)

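/*
 * Lock state lives in the rw counter and the flags word, both protected
 * by the lock->lock spinlock: rw > 0 is the number of current readers,
 * rw == -1 means the lock is held for writing and rw == 0 means no
 * reader or writer holds it. The *_PENDING flag bits hold off new
 * readers so that a waiter for an exclusive mode (write, vt or suspend)
 * is not starved, while TTM_VT_LOCK and TTM_SUSPEND_LOCK mark a held
 * exclusive mode.
 */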
void ttm_lock_init(struct ttm_lock *lock)
{
	spin_lock_init(&lock->lock);
	init_waitqueue_head(&lock->queue);
	lock->rw = 0;
	lock->flags = 0;
	lock->kill_takers = false;
	lock->signal = SIGKILL;
}
EXPORT_SYMBOL(ttm_lock_init);

void ttm_read_unlock(struct ttm_lock *lock)
{
	spin_lock(&lock->lock);
	if (--lock->rw == 0)
		wake_up_all(&lock->queue);
	spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(ttm_read_unlock);

static bool __ttm_read_lock(struct ttm_lock *lock)
{
	bool locked = false;

	spin_lock(&lock->lock);
	if (unlikely(lock->kill_takers)) {
		send_sig(lock->signal, current, 0);
		spin_unlock(&lock->lock);
		return false;
	}
	if (lock->rw >= 0 && lock->flags == 0) {
		++lock->rw;
		locked = true;
	}
	spin_unlock(&lock->lock);
	return locked;
}

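/**
 * ttm_read_lock - Take a shared read lock.
 * @lock: The lock to take.
 * @interruptible: Sleep interruptibly while waiting for the lock.
 *
 * Sleeps until no exclusive mode is held or pending, then increments the
 * reader count. Returns 0 on success and -ERESTARTSYS if an
 * interruptible wait was interrupted by a signal.
 *
 * Illustrative usage sketch (not taken from a particular caller):
 *
 *	ret = ttm_read_lock(lock, true);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... touch state protected against exclusive lockers ...
 *	ttm_read_unlock(lock);
 */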
int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
{
	int ret = 0;

	if (interruptible)
		ret = wait_event_interruptible(lock->queue,
					       __ttm_read_lock(lock));
	else
		wait_event(lock->queue, __ttm_read_lock(lock));
	return ret;
}
EXPORT_SYMBOL(ttm_read_lock);

static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
{
	bool block = true;

	*locked = false;

	spin_lock(&lock->lock);
	if (unlikely(lock->kill_takers)) {
		send_sig(lock->signal, current, 0);
		spin_unlock(&lock->lock);
		return false;
	}
	if (lock->rw >= 0 && lock->flags == 0) {
		++lock->rw;
		block = false;
		*locked = true;
	} else if (lock->flags == 0) {
		block = false;
	}
	spin_unlock(&lock->lock);

	return !block;
}

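/**
 * ttm_read_trylock - Take a read lock if it is immediately available.
 * @lock: The lock to take.
 * @interruptible: Sleep interruptibly if the lock cannot be decided on
 * immediately.
 *
 * Returns 0 if the read lock was taken and -EBUSY, without waiting, if
 * the lock is currently held for writing. Note that the call may still
 * sleep: while any flag bit is set (a pending writer or a held or
 * pending vt / suspend lock) the caller waits, so that the exclusive
 * locker is not starved by new readers. In that case an interruptible
 * wait may return -ERESTARTSYS.
 */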
int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
{
	int ret = 0;
	bool locked;

	if (interruptible)
		ret = wait_event_interruptible
			(lock->queue, __ttm_read_trylock(lock, &locked));
	else
		wait_event(lock->queue, __ttm_read_trylock(lock, &locked));

	if (unlikely(ret != 0)) {
		BUG_ON(locked);
		return ret;
	}

	return (locked) ? 0 : -EBUSY;
}

void ttm_write_unlock(struct ttm_lock *lock)
{
	spin_lock(&lock->lock);
	lock->rw = 0;
	wake_up_all(&lock->queue);
	spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(ttm_write_unlock);

static bool __ttm_write_lock(struct ttm_lock *lock)
{
	bool locked = false;

	spin_lock(&lock->lock);
	if (unlikely(lock->kill_takers)) {
		send_sig(lock->signal, current, 0);
		spin_unlock(&lock->lock);
		return false;
	}
	if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
		lock->rw = -1;
		lock->flags &= ~TTM_WRITE_LOCK_PENDING;
		locked = true;
	} else {
		lock->flags |= TTM_WRITE_LOCK_PENDING;
	}
	spin_unlock(&lock->lock);
	return locked;
}

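/**
 * ttm_write_lock - Take the lock exclusively for writing.
 * @lock: The lock to take.
 * @interruptible: Sleep interruptibly while waiting for the lock.
 *
 * Sets TTM_WRITE_LOCK_PENDING while waiting so that new readers are held
 * off, then takes the lock (rw == -1) once all readers have left.
 * Returns 0 on success and -ERESTARTSYS if an interruptible wait was
 * interrupted by a signal, in which case the pending flag is cleared
 * again and other waiters are woken up.
 */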
int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
{
	int ret = 0;

	if (interruptible) {
		ret = wait_event_interruptible(lock->queue,
					       __ttm_write_lock(lock));
		if (unlikely(ret != 0)) {
			spin_lock(&lock->lock);
			lock->flags &= ~TTM_WRITE_LOCK_PENDING;
			wake_up_all(&lock->queue);
			spin_unlock(&lock->lock);
		}
	} else
		wait_event(lock->queue, __ttm_write_lock(lock));

	return ret;
}
EXPORT_SYMBOL(ttm_write_lock);

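/*
 * The "vt" mode is an exclusive lock taken on behalf of a user-space
 * client (identified by a struct ttm_object_file). It is tied to a ttm
 * base object whose destructor releases the lock again, so that the lock
 * cannot remain held forever if the client dies while holding it.
 */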
static int __ttm_vt_unlock(struct ttm_lock *lock)
{
	int ret = 0;

	spin_lock(&lock->lock);
	if (unlikely(!(lock->flags & TTM_VT_LOCK)))
		ret = -EINVAL;
	lock->flags &= ~TTM_VT_LOCK;
	wake_up_all(&lock->queue);
	spin_unlock(&lock->lock);

	return ret;
}

static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
	int ret;

	*p_base = NULL;
	ret = __ttm_vt_unlock(lock);
	BUG_ON(ret != 0);
}

static bool __ttm_vt_lock(struct ttm_lock *lock)
{
	bool locked = false;

	spin_lock(&lock->lock);
	if (lock->rw == 0) {
		lock->flags &= ~TTM_VT_LOCK_PENDING;
		lock->flags |= TTM_VT_LOCK;
		locked = true;
	} else {
		lock->flags |= TTM_VT_LOCK_PENDING;
	}
	spin_unlock(&lock->lock);
	return locked;
}

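/**
 * ttm_vt_lock - Take the lock exclusively on behalf of a client.
 * @lock: The lock to take.
 * @interruptible: Sleep interruptibly while waiting for the lock.
 * @tfile: The ttm object file identifying the client taking the lock.
 *
 * Waits for all readers to leave and then marks the lock held with
 * TTM_VT_LOCK. A ttm base object is registered whose destructor releases
 * the lock, so the lock is dropped automatically if @tfile is destroyed
 * while holding it. Returns 0 on success, -ERESTARTSYS if an
 * interruptible wait was interrupted by a signal, or an error from
 * ttm_base_object_init(), in which case the lock is released again.
 */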
int ttm_vt_lock(struct ttm_lock *lock,
		bool interruptible,
		struct ttm_object_file *tfile)
{
	int ret = 0;

	if (interruptible) {
		ret = wait_event_interruptible(lock->queue,
					       __ttm_vt_lock(lock));
		if (unlikely(ret != 0)) {
			spin_lock(&lock->lock);
			lock->flags &= ~TTM_VT_LOCK_PENDING;
			wake_up_all(&lock->queue);
			spin_unlock(&lock->lock);
			return ret;
		}
	} else
		wait_event(lock->queue, __ttm_vt_lock(lock));

	/*
	 * Add a base-object, the destructor of which will
	 * make sure the lock is released if the client dies
	 * while holding it.
	 */

	ret = ttm_base_object_init(tfile, &lock->base, false,
				   ttm_lock_type, &ttm_vt_lock_remove, NULL);
	if (ret)
		(void)__ttm_vt_unlock(lock);
	else
		lock->vt_holder = tfile;

	return ret;
}
EXPORT_SYMBOL(ttm_vt_lock);

int ttm_vt_unlock(struct ttm_lock *lock)
{
	return ttm_ref_object_base_unref(lock->vt_holder,
					 lock->base.hash.key, TTM_REF_USAGE);
}
EXPORT_SYMBOL(ttm_vt_unlock);

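/*
 * The suspend lock is a kernel-internal exclusive mode: ttm_suspend_lock()
 * waits uninterruptibly for all readers to leave and then blocks new
 * readers until ttm_suspend_unlock() is called. Unlike the write lock it
 * does not touch the rw counter, only the TTM_SUSPEND_LOCK flag bit.
 */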
void ttm_suspend_unlock(struct ttm_lock *lock)
{
	spin_lock(&lock->lock);
	lock->flags &= ~TTM_SUSPEND_LOCK;
	wake_up_all(&lock->queue);
	spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(ttm_suspend_unlock);

static bool __ttm_suspend_lock(struct ttm_lock *lock)
{
	bool locked = false;

	spin_lock(&lock->lock);
	if (lock->rw == 0) {
		lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
		lock->flags |= TTM_SUSPEND_LOCK;
		locked = true;
	} else {
		lock->flags |= TTM_SUSPEND_LOCK_PENDING;
	}
	spin_unlock(&lock->lock);
	return locked;
}

void ttm_suspend_lock(struct ttm_lock *lock)
{
	wait_event(lock->queue, __ttm_suspend_lock(lock));
}
EXPORT_SYMBOL(ttm_suspend_lock);