1 // SPDX-License-Identifier: GPL-2.0
/* kernel/locking/rwsem.c: R/W semaphores, public implementation
3 *
4 * Written by David Howells (dhowells@redhat.com).
5 * Derived from asm-i386/semaphore.h
6 *
7 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
8 * and Michel Lespinasse <walken@google.com>
9 *
10 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
11 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
12 *
13 * Rwsem count bit fields re-definition and rwsem rearchitecture by
14 * Waiman Long <longman@redhat.com> and
15 * Peter Zijlstra <peterz@infradead.org>.
16 */
17
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/sched.h>
21 #include <linux/sched/rt.h>
22 #include <linux/sched/task.h>
23 #include <linux/sched/debug.h>
24 #include <linux/sched/wake_q.h>
25 #include <linux/sched/signal.h>
26 #include <linux/sched/clock.h>
27 #include <linux/export.h>
28 #include <linux/rwsem.h>
29 #include <linux/atomic.h>
30
31 #ifndef CONFIG_PREEMPT_RT
32 #include "lock_events.h"
33
34 /*
 * The least significant 2 bits of the owner value have the following
36 * meanings when set.
37 * - Bit 0: RWSEM_READER_OWNED - The rwsem is owned by readers
38 * - Bit 1: RWSEM_NONSPINNABLE - Cannot spin on a reader-owned lock
39 *
40 * When the rwsem is reader-owned and a spinning writer has timed out,
41 * the nonspinnable bit will be set to disable optimistic spinning.
 *
43 * When a writer acquires a rwsem, it puts its task_struct pointer
44 * into the owner field. It is cleared after an unlock.
45 *
 * When a reader acquires a rwsem, it will also put its task_struct
47 * pointer into the owner field with the RWSEM_READER_OWNED bit set.
48 * On unlock, the owner field will largely be left untouched. So
49 * for a free or reader-owned rwsem, the owner value may contain
 * information about the last reader that acquired the rwsem.
51 *
52 * That information may be helpful in debugging cases where the system
 * seems to hang on a reader-owned rwsem, especially if only one reader
54 * is involved. Ideally we would like to track all the readers that own
55 * a rwsem, but the overhead is simply too big.
56 *
57 * A fast path reader optimistic lock stealing is supported when the rwsem
58 * is previously owned by a writer and the following conditions are met:
59 * - OSQ is empty
60 * - rwsem is not currently writer owned
61 * - the handoff isn't set.
62 */
63 #define RWSEM_READER_OWNED (1UL << 0)
64 #define RWSEM_NONSPINNABLE (1UL << 1)
65 #define RWSEM_OWNER_FLAGS_MASK (RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)
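
/*
 * Illustrative sketch only (not part of the kernel build): decoding the
 * owner word described above into its task pointer and flag bits, using
 * the masks defined here. The local variable names are hypothetical.
 *
 *	unsigned long owner = atomic_long_read(&sem->owner);
 *	unsigned long flags = owner & RWSEM_OWNER_FLAGS_MASK;
 *	struct task_struct *task =
 *		(struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK);
 *	bool reader_owned = flags & RWSEM_READER_OWNED;
 *	bool nonspinnable = flags & RWSEM_NONSPINNABLE;
 *
 * This mirrors what rwsem_owner_flags() further below does.
 */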
66
67 #ifdef CONFIG_DEBUG_RWSEMS
68 # define DEBUG_RWSEMS_WARN_ON(c, sem) do { \
69 if (!debug_locks_silent && \
70 WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, magic = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
71 #c, atomic_long_read(&(sem)->count), \
72 (unsigned long) sem->magic, \
73 atomic_long_read(&(sem)->owner), (long)current, \
74 list_empty(&(sem)->wait_list) ? "" : "not ")) \
75 debug_locks_off(); \
76 } while (0)
77 #else
78 # define DEBUG_RWSEMS_WARN_ON(c, sem)
79 #endif
80
81 /*
82 * On 64-bit architectures, the bit definitions of the count are:
83 *
84 * Bit 0 - writer locked bit
85 * Bit 1 - waiters present bit
86 * Bit 2 - lock handoff bit
87 * Bits 3-7 - reserved
88 * Bits 8-62 - 55-bit reader count
89 * Bit 63 - read fail bit
90 *
91 * On 32-bit architectures, the bit definitions of the count are:
92 *
93 * Bit 0 - writer locked bit
94 * Bit 1 - waiters present bit
95 * Bit 2 - lock handoff bit
96 * Bits 3-7 - reserved
97 * Bits 8-30 - 23-bit reader count
98 * Bit 31 - read fail bit
99 *
100 * It is not likely that the most significant bit (read fail bit) will ever
101 * be set. This guard bit is still checked anyway in the down_read() fastpath
 * just in case we need to use up more of the reader bits for other purposes
103 * in the future.
104 *
105 * atomic_long_fetch_add() is used to obtain reader lock, whereas
106 * atomic_long_cmpxchg() will be used to obtain writer lock.
107 *
108 * There are three places where the lock handoff bit may be set or cleared.
109 * 1) rwsem_mark_wake() for readers.
110 * 2) rwsem_try_write_lock() for writers.
111 * 3) Error path of rwsem_down_write_slowpath().
112 *
113 * For all the above cases, wait_lock will be held. A writer must also
114 * be the first one in the wait_list to be eligible for setting the handoff
115 * bit. So concurrent setting/clearing of handoff bit is not possible.
116 */
117 #define RWSEM_WRITER_LOCKED (1UL << 0)
118 #define RWSEM_FLAG_WAITERS (1UL << 1)
119 #define RWSEM_FLAG_HANDOFF (1UL << 2)
120 #define RWSEM_FLAG_READFAIL (1UL << (BITS_PER_LONG - 1))
121
122 #define RWSEM_READER_SHIFT 8
123 #define RWSEM_READER_BIAS (1UL << RWSEM_READER_SHIFT)
124 #define RWSEM_READER_MASK (~(RWSEM_READER_BIAS - 1))
125 #define RWSEM_WRITER_MASK RWSEM_WRITER_LOCKED
126 #define RWSEM_LOCK_MASK (RWSEM_WRITER_MASK|RWSEM_READER_MASK)
127 #define RWSEM_READ_FAILED_MASK (RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS|\
128 RWSEM_FLAG_HANDOFF|RWSEM_FLAG_READFAIL)
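
/*
 * Illustrative sketch only (not part of the kernel build): splitting a
 * count value according to the bit layout above. For example, a count of
 * 0x302 means 3 readers (0x300 >> 8), waiters present (bit 1) and no
 * writer (bit 0 clear).
 *
 *	long count   = atomic_long_read(&sem->count);
 *	bool writer  = count & RWSEM_WRITER_LOCKED;
 *	bool waiters = count & RWSEM_FLAG_WAITERS;
 *	bool handoff = count & RWSEM_FLAG_HANDOFF;
 *	long readers = (count & RWSEM_READER_MASK) >> RWSEM_READER_SHIFT;
 */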
129
130 /*
131 * All writes to owner are protected by WRITE_ONCE() to make sure that
132 * store tearing can't happen as optimistic spinners may read and use
133 * the owner value concurrently without lock. Read from owner, however,
134 * may not need READ_ONCE() as long as the pointer value is only used
135 * for comparison and isn't being dereferenced.
136 */
static inline void rwsem_set_owner(struct rw_semaphore *sem)
138 {
139 atomic_long_set(&sem->owner, (long)current);
140 }
141
static inline void rwsem_clear_owner(struct rw_semaphore *sem)
143 {
144 atomic_long_set(&sem->owner, 0);
145 }
146
147 /*
148 * Test the flags in the owner field.
149 */
static inline bool rwsem_test_oflags(struct rw_semaphore *sem, long flags)
151 {
152 return atomic_long_read(&sem->owner) & flags;
153 }
154
155 /*
156 * The task_struct pointer of the last owning reader will be left in
157 * the owner field.
158 *
159 * Note that the owner value just indicates the task has owned the rwsem
 * previously; it may not be the real owner or one of the real owners
161 * anymore when that field is examined, so take it with a grain of salt.
162 *
163 * The reader non-spinnable bit is preserved.
164 */
static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
166 struct task_struct *owner)
167 {
168 unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED |
169 (atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE);
170
171 atomic_long_set(&sem->owner, val);
172 }
173
static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
175 {
176 __rwsem_set_reader_owned(sem, current);
177 }
178
179 /*
180 * Return true if the rwsem is owned by a reader.
181 */
static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
183 {
184 #ifdef CONFIG_DEBUG_RWSEMS
185 /*
186 * Check the count to see if it is write-locked.
187 */
188 long count = atomic_long_read(&sem->count);
189
190 if (count & RWSEM_WRITER_MASK)
191 return false;
192 #endif
193 return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
194 }
195
196 #ifdef CONFIG_DEBUG_RWSEMS
197 /*
198 * With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there
199 * is a task pointer in owner of a reader-owned rwsem, it will be the
200 * real owner or one of the real owners. The only exception is when the
201 * unlock is done by up_read_non_owner().
202 */
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
204 {
205 unsigned long val = atomic_long_read(&sem->owner);
206
207 while ((val & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current) {
208 if (atomic_long_try_cmpxchg(&sem->owner, &val,
209 val & RWSEM_OWNER_FLAGS_MASK))
210 return;
211 }
212 }
213 #else
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
215 {
216 }
217 #endif
218
219 /*
 * Set the RWSEM_NONSPINNABLE bit if the RWSEM_READER_OWNED flag
221 * remains set. Otherwise, the operation will be aborted.
222 */
static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
224 {
225 unsigned long owner = atomic_long_read(&sem->owner);
226
227 do {
228 if (!(owner & RWSEM_READER_OWNED))
229 break;
230 if (owner & RWSEM_NONSPINNABLE)
231 break;
232 } while (!atomic_long_try_cmpxchg(&sem->owner, &owner,
233 owner | RWSEM_NONSPINNABLE));
234 }
235
static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp)
237 {
238 *cntp = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
239
240 if (WARN_ON_ONCE(*cntp < 0))
241 rwsem_set_nonspinnable(sem);
242
243 if (!(*cntp & RWSEM_READ_FAILED_MASK)) {
244 rwsem_set_reader_owned(sem);
245 return true;
246 }
247
248 return false;
249 }
250
static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
252 {
253 long tmp = RWSEM_UNLOCKED_VALUE;
254
255 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {
256 rwsem_set_owner(sem);
257 return true;
258 }
259
260 return false;
261 }
262
263 /*
264 * Return just the real task structure pointer of the owner
265 */
static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
267 {
268 return (struct task_struct *)
269 (atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
270 }
271
272 /*
273 * Return the real task structure pointer of the owner and the embedded
274 * flags in the owner. pflags must be non-NULL.
275 */
276 static inline struct task_struct *
rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags)
278 {
279 unsigned long owner = atomic_long_read(&sem->owner);
280
281 *pflags = owner & RWSEM_OWNER_FLAGS_MASK;
282 return (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK);
283 }
284
285 /*
286 * Guide to the rw_semaphore's count field.
287 *
288 * When the RWSEM_WRITER_LOCKED bit in count is set, the lock is owned
289 * by a writer.
290 *
291 * The lock is owned by readers when
292 * (1) the RWSEM_WRITER_LOCKED isn't set in count,
293 * (2) some of the reader bits are set in count, and
 * (3) the owner field has the RWSEM_READER_OWNED bit set.
295 *
 * Having some reader bits set is not enough to guarantee a reader-owned
297 * lock as the readers may be in the process of backing out from the count
298 * and a writer has just released the lock. So another writer may steal
299 * the lock immediately after that.
300 */
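
/*
 * A hedged sketch of the reader-owned test implied by the guide above.
 * Note that the in-tree helper is_rwsem_reader_owned() checks only part
 * of this (the writer bit under CONFIG_DEBUG_RWSEMS plus the owner flag):
 *
 *	long count = atomic_long_read(&sem->count);
 *	unsigned long owner = atomic_long_read(&sem->owner);
 *	bool reader_owned = !(count & RWSEM_WRITER_LOCKED) &&
 *			    (count & RWSEM_READER_MASK) &&
 *			    (owner & RWSEM_READER_OWNED);
 *
 * As explained above, this is only a snapshot; readers may still be
 * backing out of the count when it is taken.
 */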
301
302 /*
303 * Initialize an rwsem:
304 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
306 struct lock_class_key *key)
307 {
308 #ifdef CONFIG_DEBUG_LOCK_ALLOC
309 /*
310 * Make sure we are not reinitializing a held semaphore:
311 */
312 debug_check_no_locks_freed((void *)sem, sizeof(*sem));
313 lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
314 #endif
315 #ifdef CONFIG_DEBUG_RWSEMS
316 sem->magic = sem;
317 #endif
318 atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
319 raw_spin_lock_init(&sem->wait_lock);
320 INIT_LIST_HEAD(&sem->wait_list);
321 atomic_long_set(&sem->owner, 0L);
322 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
323 osq_lock_init(&sem->osq);
324 #endif
325 }
326 EXPORT_SYMBOL(__init_rwsem);
327
328 enum rwsem_waiter_type {
329 RWSEM_WAITING_FOR_WRITE,
330 RWSEM_WAITING_FOR_READ
331 };
332
333 struct rwsem_waiter {
334 struct list_head list;
335 struct task_struct *task;
336 enum rwsem_waiter_type type;
337 unsigned long timeout;
338 };
339 #define rwsem_first_waiter(sem) \
340 list_first_entry(&sem->wait_list, struct rwsem_waiter, list)
341
342 enum rwsem_wake_type {
343 RWSEM_WAKE_ANY, /* Wake whatever's at head of wait list */
344 RWSEM_WAKE_READERS, /* Wake readers only */
345 RWSEM_WAKE_READ_OWNED /* Waker thread holds the read lock */
346 };
347
348 enum writer_wait_state {
349 WRITER_NOT_FIRST, /* Writer is not first in wait list */
350 WRITER_FIRST, /* Writer is first in wait list */
351 WRITER_HANDOFF /* Writer is first & handoff needed */
352 };
353
354 /*
355 * The typical HZ value is either 250 or 1000. So set the minimum waiting
356 * time to at least 4ms or 1 jiffy (if it is higher than 4ms) in the wait
357 * queue before initiating the handoff protocol.
358 */
359 #define RWSEM_WAIT_TIMEOUT DIV_ROUND_UP(HZ, 250)
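
/*
 * Worked example of the timeout above (illustrative arithmetic): with
 * HZ=1000, DIV_ROUND_UP(HZ, 250) is 4 jiffies, i.e. 4ms; with HZ=250 it
 * is 1 jiffy, also 4ms; with HZ=100 it is 1 jiffy, i.e. 10ms, which is
 * the "higher than 4ms" case mentioned above.
 */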
360
361 /*
362 * Magic number to batch-wakeup waiting readers, even when writers are
363 * also present in the queue. This both limits the amount of work the
364 * waking thread must do and also prevents any potential counter overflow,
365 * however unlikely.
366 */
367 #define MAX_READERS_WAKEUP 0x100
368
369 /*
 * handle the lock release when there are processes blocked on it that can now run
371 * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
372 * have been set.
373 * - there must be someone on the queue
374 * - the wait_lock must be held by the caller
375 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
376 * to actually wakeup the blocked task(s) and drop the reference count,
377 * preferably when the wait_lock is released
378 * - woken process blocks are discarded from the list after having task zeroed
379 * - writers are only marked woken if downgrading is false
380 */
static void rwsem_mark_wake(struct rw_semaphore *sem,
382 enum rwsem_wake_type wake_type,
383 struct wake_q_head *wake_q)
384 {
385 struct rwsem_waiter *waiter, *tmp;
386 long oldcount, woken = 0, adjustment = 0;
387 struct list_head wlist;
388
389 lockdep_assert_held(&sem->wait_lock);
390
391 /*
392 * Take a peek at the queue head waiter such that we can determine
393 * the wakeup(s) to perform.
394 */
395 waiter = rwsem_first_waiter(sem);
396
397 if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
398 if (wake_type == RWSEM_WAKE_ANY) {
399 /*
400 * Mark writer at the front of the queue for wakeup.
 * Until the task is actually awoken later by
402 * the caller, other writers are able to steal it.
403 * Readers, on the other hand, will block as they
404 * will notice the queued writer.
405 */
406 wake_q_add(wake_q, waiter->task);
407 lockevent_inc(rwsem_wake_writer);
408 }
409
410 return;
411 }
412
413 /*
414 * No reader wakeup if there are too many of them already.
415 */
416 if (unlikely(atomic_long_read(&sem->count) < 0))
417 return;
418
419 /*
420 * Writers might steal the lock before we grant it to the next reader.
421 * We prefer to do the first reader grant before counting readers
422 * so we can bail out early if a writer stole the lock.
423 */
424 if (wake_type != RWSEM_WAKE_READ_OWNED) {
425 struct task_struct *owner;
426
427 adjustment = RWSEM_READER_BIAS;
428 oldcount = atomic_long_fetch_add(adjustment, &sem->count);
429 if (unlikely(oldcount & RWSEM_WRITER_MASK)) {
430 /*
431 * When we've been waiting "too" long (for writers
432 * to give up the lock), request a HANDOFF to
433 * force the issue.
434 */
435 if (!(oldcount & RWSEM_FLAG_HANDOFF) &&
436 time_after(jiffies, waiter->timeout)) {
437 adjustment -= RWSEM_FLAG_HANDOFF;
438 lockevent_inc(rwsem_rlock_handoff);
439 }
440
441 atomic_long_add(-adjustment, &sem->count);
442 return;
443 }
444 /*
445 * Set it to reader-owned to give spinners an early
446 * indication that readers now have the lock.
447 * The reader nonspinnable bit seen at slowpath entry of
448 * the reader is copied over.
449 */
450 owner = waiter->task;
451 __rwsem_set_reader_owned(sem, owner);
452 }
453
454 /*
455 * Grant up to MAX_READERS_WAKEUP read locks to all the readers in the
 * queue. We know that at least one will be woken, as we accounted
457 * for above. Note we increment the 'active part' of the count by the
458 * number of readers before waking any processes up.
459 *
460 * This is an adaptation of the phase-fair R/W locks where at the
461 * reader phase (first waiter is a reader), all readers are eligible
462 * to acquire the lock at the same time irrespective of their order
463 * in the queue. The writers acquire the lock according to their
464 * order in the queue.
465 *
466 * We have to do wakeup in 2 passes to prevent the possibility that
 * the reader count may be decremented before it is incremented. This is
 * because the to-be-woken waiter may not have slept yet, so it may see
 * waiter->task cleared, finish its critical section and do an unlock
 * before the reader count is incremented.
471 *
472 * 1) Collect the read-waiters in a separate list, count them and
473 * fully increment the reader count in rwsem.
 * 2) For each waiter in the new list, clear waiter->task and
475 * put them into wake_q to be woken up later.
476 */
477 INIT_LIST_HEAD(&wlist);
478 list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
479 if (waiter->type == RWSEM_WAITING_FOR_WRITE)
480 continue;
481
482 woken++;
483 list_move_tail(&waiter->list, &wlist);
484
485 /*
486 * Limit # of readers that can be woken up per wakeup call.
487 */
488 if (woken >= MAX_READERS_WAKEUP)
489 break;
490 }
491
492 adjustment = woken * RWSEM_READER_BIAS - adjustment;
493 lockevent_cond_inc(rwsem_wake_reader, woken);
494 if (list_empty(&sem->wait_list)) {
495 /* hit end of list above */
496 adjustment -= RWSEM_FLAG_WAITERS;
497 }
498
499 /*
500 * When we've woken a reader, we no longer need to force writers
501 * to give up the lock and we can clear HANDOFF.
502 */
503 if (woken && (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF))
504 adjustment -= RWSEM_FLAG_HANDOFF;
505
506 if (adjustment)
507 atomic_long_add(adjustment, &sem->count);
508
509 /* 2nd pass */
510 list_for_each_entry_safe(waiter, tmp, &wlist, list) {
511 struct task_struct *tsk;
512
513 tsk = waiter->task;
514 get_task_struct(tsk);
515
516 /*
517 * Ensure calling get_task_struct() before setting the reader
518 * waiter to nil such that rwsem_down_read_slowpath() cannot
519 * race with do_exit() by always holding a reference count
520 * to the task to wakeup.
521 */
522 smp_store_release(&waiter->task, NULL);
523 /*
524 * Ensure issuing the wakeup (either by us or someone else)
525 * after setting the reader waiter to nil.
526 */
527 wake_q_add_safe(wake_q, tsk);
528 }
529 }
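
/*
 * A minimal sketch (hypothetical caller) of the calling convention
 * documented above rwsem_mark_wake(): mark the wakeups under wait_lock,
 * then issue them after the lock is dropped. This mirrors rwsem_wake()
 * further below.
 *
 *	DEFINE_WAKE_Q(wake_q);
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 *	if (!list_empty(&sem->wait_list))
 *		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
 *	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 *	wake_up_q(&wake_q);
 */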
530
531 /*
532 * This function must be called with the sem->wait_lock held to prevent
533 * race conditions between checking the rwsem wait list and setting the
534 * sem->count accordingly.
535 *
536 * If wstate is WRITER_HANDOFF, it will make sure that either the handoff
537 * bit is set or the lock is acquired with handoff bit cleared.
538 */
static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
540 enum writer_wait_state wstate)
541 {
542 long count, new;
543
544 lockdep_assert_held(&sem->wait_lock);
545
546 count = atomic_long_read(&sem->count);
547 do {
548 bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
549
550 if (has_handoff && wstate == WRITER_NOT_FIRST)
551 return false;
552
553 new = count;
554
555 if (count & RWSEM_LOCK_MASK) {
556 if (has_handoff || (wstate != WRITER_HANDOFF))
557 return false;
558
559 new |= RWSEM_FLAG_HANDOFF;
560 } else {
561 new |= RWSEM_WRITER_LOCKED;
562 new &= ~RWSEM_FLAG_HANDOFF;
563
564 if (list_is_singular(&sem->wait_list))
565 new &= ~RWSEM_FLAG_WAITERS;
566 }
567 } while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));
568
569 /*
570 * We have either acquired the lock with handoff bit cleared or
571 * set the handoff bit.
572 */
573 if (new & RWSEM_FLAG_HANDOFF)
574 return false;
575
576 rwsem_set_owner(sem);
577 return true;
578 }
579
580 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
581 /*
582 * Try to acquire write lock before the writer has been put on wait queue.
583 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
585 {
586 long count = atomic_long_read(&sem->count);
587
588 while (!(count & (RWSEM_LOCK_MASK|RWSEM_FLAG_HANDOFF))) {
589 if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
590 count | RWSEM_WRITER_LOCKED)) {
591 rwsem_set_owner(sem);
592 lockevent_inc(rwsem_opt_lock);
593 return true;
594 }
595 }
596 return false;
597 }
598
static inline bool owner_on_cpu(struct task_struct *owner)
600 {
601 /*
 * Due to lock holder preemption, we skip spinning if the task is not
 * running on a CPU or its CPU has been preempted.
604 */
605 return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
606 }
607
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
609 {
610 struct task_struct *owner;
611 unsigned long flags;
612 bool ret = true;
613
614 if (need_resched()) {
615 lockevent_inc(rwsem_opt_fail);
616 return false;
617 }
618
619 preempt_disable();
620 rcu_read_lock();
621 owner = rwsem_owner_flags(sem, &flags);
622 /*
623 * Don't check the read-owner as the entry may be stale.
624 */
625 if ((flags & RWSEM_NONSPINNABLE) ||
626 (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
627 ret = false;
628 rcu_read_unlock();
629 preempt_enable();
630
631 lockevent_cond_inc(rwsem_opt_fail, !ret);
632 return ret;
633 }
634
635 /*
636 * The rwsem_spin_on_owner() function returns the following 4 values
637 * depending on the lock owner state.
638 * OWNER_NULL : owner is currently NULL
639 * OWNER_WRITER: when owner changes and is a writer
640 * OWNER_READER: when owner changes and the new owner may be a reader.
641 * OWNER_NONSPINNABLE:
642 * when optimistic spinning has to stop because either the
643 * owner stops running, is unknown, or its timeslice has
644 * been used up.
645 */
646 enum owner_state {
647 OWNER_NULL = 1 << 0,
648 OWNER_WRITER = 1 << 1,
649 OWNER_READER = 1 << 2,
650 OWNER_NONSPINNABLE = 1 << 3,
651 };
652 #define OWNER_SPINNABLE (OWNER_NULL | OWNER_WRITER | OWNER_READER)
653
654 static inline enum owner_state
rwsem_owner_state(struct task_struct *owner, unsigned long flags)
656 {
657 if (flags & RWSEM_NONSPINNABLE)
658 return OWNER_NONSPINNABLE;
659
660 if (flags & RWSEM_READER_OWNED)
661 return OWNER_READER;
662
663 return owner ? OWNER_WRITER : OWNER_NULL;
664 }
665
666 static noinline enum owner_state
rwsem_spin_on_owner(struct rw_semaphore *sem)
668 {
669 struct task_struct *new, *owner;
670 unsigned long flags, new_flags;
671 enum owner_state state;
672
673 owner = rwsem_owner_flags(sem, &flags);
674 state = rwsem_owner_state(owner, flags);
675 if (state != OWNER_WRITER)
676 return state;
677
678 rcu_read_lock();
679 for (;;) {
680 /*
 * When a waiting writer sets the handoff flag, it may spin
682 * on the owner as well. Once that writer acquires the lock,
683 * we can spin on it. So we don't need to quit even when the
684 * handoff bit is set.
685 */
686 new = rwsem_owner_flags(sem, &new_flags);
687 if ((new != owner) || (new_flags != flags)) {
688 state = rwsem_owner_state(new, new_flags);
689 break;
690 }
691
692 /*
693 * Ensure we emit the owner->on_cpu, dereference _after_
694 * checking sem->owner still matches owner, if that fails,
695 * owner might point to free()d memory, if it still matches,
696 * the rcu_read_lock() ensures the memory stays valid.
697 */
698 barrier();
699
700 if (need_resched() || !owner_on_cpu(owner)) {
701 state = OWNER_NONSPINNABLE;
702 break;
703 }
704
705 cpu_relax();
706 }
707 rcu_read_unlock();
708
709 return state;
710 }
711
712 /*
713 * Calculate reader-owned rwsem spinning threshold for writer
714 *
715 * The more readers own the rwsem, the longer it will take for them to
716 * wind down and free the rwsem. So the empirical formula used to
717 * determine the actual spinning time limit here is:
718 *
719 * Spinning threshold = (10 + nr_readers/2)us
720 *
721 * The limit is capped to a maximum of 25us (30 readers). This is just
 * a heuristic and is subject to change in the future.
723 */
static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem)
725 {
726 long count = atomic_long_read(&sem->count);
727 int readers = count >> RWSEM_READER_SHIFT;
728 u64 delta;
729
730 if (readers > 30)
731 readers = 30;
732 delta = (20 + readers) * NSEC_PER_USEC / 2;
733
734 return sched_clock() + delta;
735 }
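
/*
 * Worked example of the threshold above (illustrative): with 10 readers,
 * delta = (20 + 10) * NSEC_PER_USEC / 2 = 15us, i.e. the documented
 * 10us + nr_readers/2 us; with 30 or more readers it saturates at
 * (20 + 30) / 2 = 25us.
 */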
736
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
738 {
739 bool taken = false;
740 int prev_owner_state = OWNER_NULL;
741 int loop = 0;
742 u64 rspin_threshold = 0;
743
744 preempt_disable();
745
746 /* sem->wait_lock should not be held when doing optimistic spinning */
747 if (!osq_lock(&sem->osq))
748 goto done;
749
750 /*
751 * Optimistically spin on the owner field and attempt to acquire the
752 * lock whenever the owner changes. Spinning will be stopped when:
753 * 1) the owning writer isn't running; or
754 * 2) readers own the lock and spinning time has exceeded limit.
755 */
756 for (;;) {
757 enum owner_state owner_state;
758
759 owner_state = rwsem_spin_on_owner(sem);
760 if (!(owner_state & OWNER_SPINNABLE))
761 break;
762
763 /*
764 * Try to acquire the lock
765 */
766 taken = rwsem_try_write_lock_unqueued(sem);
767
768 if (taken)
769 break;
770
771 /*
772 * Time-based reader-owned rwsem optimistic spinning
773 */
774 if (owner_state == OWNER_READER) {
775 /*
776 * Re-initialize rspin_threshold every time when
777 * the owner state changes from non-reader to reader.
778 * This allows a writer to steal the lock in between
779 * 2 reader phases and have the threshold reset at
780 * the beginning of the 2nd reader phase.
781 */
782 if (prev_owner_state != OWNER_READER) {
783 if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE))
784 break;
785 rspin_threshold = rwsem_rspin_threshold(sem);
786 loop = 0;
787 }
788
789 /*
790 * Check time threshold once every 16 iterations to
791 * avoid calling sched_clock() too frequently so
792 * as to reduce the average latency between the times
793 * when the lock becomes free and when the spinner
794 * is ready to do a trylock.
795 */
796 else if (!(++loop & 0xf) && (sched_clock() > rspin_threshold)) {
797 rwsem_set_nonspinnable(sem);
798 lockevent_inc(rwsem_opt_nospin);
799 break;
800 }
801 }
802
803 /*
804 * An RT task cannot do optimistic spinning if it cannot
805 * be sure the lock holder is running or live-lock may
806 * happen if the current task and the lock holder happen
807 * to run in the same CPU. However, aborting optimistic
808 * spinning while a NULL owner is detected may miss some
809 * opportunity where spinning can continue without causing
 * a problem.
811 *
812 * There are 2 possible cases where an RT task may be able
813 * to continue spinning.
814 *
815 * 1) The lock owner is in the process of releasing the
816 * lock, sem->owner is cleared but the lock has not
817 * been released yet.
818 * 2) The lock was free and owner cleared, but another
 * task just comes in and acquires the lock before
820 * we try to get it. The new owner may be a spinnable
821 * writer.
822 *
823 * To take advantage of two scenarios listed above, the RT
824 * task is made to retry one more time to see if it can
825 * acquire the lock or continue spinning on the new owning
826 * writer. Of course, if the time lag is long enough or the
827 * new owner is not a writer or spinnable, the RT task will
828 * quit spinning.
829 *
830 * If the owner is a writer, the need_resched() check is
831 * done inside rwsem_spin_on_owner(). If the owner is not
832 * a writer, need_resched() check needs to be done here.
833 */
834 if (owner_state != OWNER_WRITER) {
835 if (need_resched())
836 break;
837 if (rt_task(current) &&
838 (prev_owner_state != OWNER_WRITER))
839 break;
840 }
841 prev_owner_state = owner_state;
842
843 /*
844 * The cpu_relax() call is a compiler barrier which forces
845 * everything in this loop to be re-loaded. We don't need
846 * memory barriers as we'll eventually observe the right
847 * values at the cost of a few extra spins.
848 */
849 cpu_relax();
850 }
851 osq_unlock(&sem->osq);
852 done:
853 preempt_enable();
854 lockevent_cond_inc(rwsem_opt_fail, !taken);
855 return taken;
856 }
857
858 /*
859 * Clear the owner's RWSEM_NONSPINNABLE bit if it is set. This should
860 * only be called when the reader count reaches 0.
861 */
static inline void clear_nonspinnable(struct rw_semaphore *sem)
863 {
864 if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE))
865 atomic_long_andnot(RWSEM_NONSPINNABLE, &sem->owner);
866 }
867
868 #else
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
870 {
871 return false;
872 }
873
static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem)
875 {
876 return false;
877 }
878
static inline void clear_nonspinnable(struct rw_semaphore *sem) { }
880
881 static inline int
rwsem_spin_on_owner(struct rw_semaphore *sem)
883 {
884 return 0;
885 }
886 #define OWNER_NULL 1
887 #endif
888
889 /*
890 * Wait for the read lock to be granted
891 */
892 static struct rw_semaphore __sched *
rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int state)
894 {
895 long adjustment = -RWSEM_READER_BIAS;
896 long rcnt = (count >> RWSEM_READER_SHIFT);
897 struct rwsem_waiter waiter;
898 DEFINE_WAKE_Q(wake_q);
899 bool wake = false;
900
901 /*
902 * To prevent a constant stream of readers from starving a sleeping
903 * waiter, don't attempt optimistic lock stealing if the lock is
904 * currently owned by readers.
905 */
906 if ((atomic_long_read(&sem->owner) & RWSEM_READER_OWNED) &&
907 (rcnt > 1) && !(count & RWSEM_WRITER_LOCKED))
908 goto queue;
909
910 /*
911 * Reader optimistic lock stealing.
912 */
913 if (!(count & (RWSEM_WRITER_LOCKED | RWSEM_FLAG_HANDOFF))) {
914 rwsem_set_reader_owned(sem);
915 lockevent_inc(rwsem_rlock_steal);
916
917 /*
918 * Wake up other readers in the wait queue if it is
919 * the first reader.
920 */
921 if ((rcnt == 1) && (count & RWSEM_FLAG_WAITERS)) {
922 raw_spin_lock_irq(&sem->wait_lock);
923 if (!list_empty(&sem->wait_list))
924 rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
925 &wake_q);
926 raw_spin_unlock_irq(&sem->wait_lock);
927 wake_up_q(&wake_q);
928 }
929 return sem;
930 }
931
932 queue:
933 waiter.task = current;
934 waiter.type = RWSEM_WAITING_FOR_READ;
935 waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
936
937 raw_spin_lock_irq(&sem->wait_lock);
938 if (list_empty(&sem->wait_list)) {
939 /*
 * In case the wait queue is empty and the lock isn't owned
 * by a writer and doesn't have the handoff bit set, this reader can
942 * exit the slowpath and return immediately as its
943 * RWSEM_READER_BIAS has already been set in the count.
944 */
945 if (!(atomic_long_read(&sem->count) &
946 (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
947 /* Provide lock ACQUIRE */
948 smp_acquire__after_ctrl_dep();
949 raw_spin_unlock_irq(&sem->wait_lock);
950 rwsem_set_reader_owned(sem);
951 lockevent_inc(rwsem_rlock_fast);
952 return sem;
953 }
954 adjustment += RWSEM_FLAG_WAITERS;
955 }
956 list_add_tail(&waiter.list, &sem->wait_list);
957
958 /* we're now waiting on the lock, but no longer actively locking */
959 count = atomic_long_add_return(adjustment, &sem->count);
960
961 /*
962 * If there are no active locks, wake the front queued process(es).
963 *
964 * If there are no writers and we are first in the queue,
965 * wake our own waiter to join the existing active readers !
966 */
967 if (!(count & RWSEM_LOCK_MASK)) {
968 clear_nonspinnable(sem);
969 wake = true;
970 }
971 if (wake || (!(count & RWSEM_WRITER_MASK) &&
972 (adjustment & RWSEM_FLAG_WAITERS)))
973 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
974
975 raw_spin_unlock_irq(&sem->wait_lock);
976 wake_up_q(&wake_q);
977
978 /* wait to be given the lock */
979 for (;;) {
980 set_current_state(state);
981 if (!smp_load_acquire(&waiter.task)) {
982 /* Matches rwsem_mark_wake()'s smp_store_release(). */
983 break;
984 }
985 if (signal_pending_state(state, current)) {
986 raw_spin_lock_irq(&sem->wait_lock);
987 if (waiter.task)
988 goto out_nolock;
989 raw_spin_unlock_irq(&sem->wait_lock);
990 /* Ordered by sem->wait_lock against rwsem_mark_wake(). */
991 break;
992 }
993 schedule();
994 lockevent_inc(rwsem_sleep_reader);
995 }
996
997 __set_current_state(TASK_RUNNING);
998 lockevent_inc(rwsem_rlock);
999 return sem;
1000
1001 out_nolock:
1002 list_del(&waiter.list);
1003 if (list_empty(&sem->wait_list)) {
1004 atomic_long_andnot(RWSEM_FLAG_WAITERS|RWSEM_FLAG_HANDOFF,
1005 &sem->count);
1006 }
1007 raw_spin_unlock_irq(&sem->wait_lock);
1008 __set_current_state(TASK_RUNNING);
1009 lockevent_inc(rwsem_rlock_fail);
1010 return ERR_PTR(-EINTR);
1011 }
1012
1013 /*
1014 * Wait until we successfully acquire the write lock
1015 */
1016 static struct rw_semaphore *
rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
1018 {
1019 long count;
1020 enum writer_wait_state wstate;
1021 struct rwsem_waiter waiter;
1022 struct rw_semaphore *ret = sem;
1023 DEFINE_WAKE_Q(wake_q);
1024
1025 /* do optimistic spinning and steal lock if possible */
1026 if (rwsem_can_spin_on_owner(sem) && rwsem_optimistic_spin(sem)) {
1027 /* rwsem_optimistic_spin() implies ACQUIRE on success */
1028 return sem;
1029 }
1030
1031 /*
1032 * Optimistic spinning failed, proceed to the slowpath
1033 * and block until we can acquire the sem.
1034 */
1035 waiter.task = current;
1036 waiter.type = RWSEM_WAITING_FOR_WRITE;
1037 waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
1038
1039 raw_spin_lock_irq(&sem->wait_lock);
1040
1041 /* account for this before adding a new element to the list */
1042 wstate = list_empty(&sem->wait_list) ? WRITER_FIRST : WRITER_NOT_FIRST;
1043
1044 list_add_tail(&waiter.list, &sem->wait_list);
1045
1046 /* we're now waiting on the lock */
1047 if (wstate == WRITER_NOT_FIRST) {
1048 count = atomic_long_read(&sem->count);
1049
1050 /*
1051 * If there were already threads queued before us and:
1052 * 1) there are no active locks, wake the front
1053 * queued process(es) as the handoff bit might be set.
1054 * 2) there are no active writers and some readers, the lock
1055 * must be read owned; so we try to wake any read lock
1056 * waiters that were queued ahead of us.
1057 */
1058 if (count & RWSEM_WRITER_MASK)
1059 goto wait;
1060
1061 rwsem_mark_wake(sem, (count & RWSEM_READER_MASK)
1062 ? RWSEM_WAKE_READERS
1063 : RWSEM_WAKE_ANY, &wake_q);
1064
1065 if (!wake_q_empty(&wake_q)) {
1066 /*
1067 * We want to minimize wait_lock hold time especially
1068 * when a large number of readers are to be woken up.
1069 */
1070 raw_spin_unlock_irq(&sem->wait_lock);
1071 wake_up_q(&wake_q);
1072 wake_q_init(&wake_q); /* Used again, reinit */
1073 raw_spin_lock_irq(&sem->wait_lock);
1074 }
1075 } else {
1076 atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
1077 }
1078
1079 wait:
1080 /* wait until we successfully acquire the lock */
1081 set_current_state(state);
1082 for (;;) {
1083 if (rwsem_try_write_lock(sem, wstate)) {
1084 /* rwsem_try_write_lock() implies ACQUIRE on success */
1085 break;
1086 }
1087
1088 raw_spin_unlock_irq(&sem->wait_lock);
1089
1090 /*
1091 * After setting the handoff bit and failing to acquire
1092 * the lock, attempt to spin on owner to accelerate lock
 * transfer. If the previous owner is an on-cpu writer and it
1094 * has just released the lock, OWNER_NULL will be returned.
1095 * In this case, we attempt to acquire the lock again
1096 * without sleeping.
1097 */
1098 if (wstate == WRITER_HANDOFF &&
1099 rwsem_spin_on_owner(sem) == OWNER_NULL)
1100 goto trylock_again;
1101
1102 /* Block until there are no active lockers. */
1103 for (;;) {
1104 if (signal_pending_state(state, current))
1105 goto out_nolock;
1106
1107 schedule();
1108 lockevent_inc(rwsem_sleep_writer);
1109 set_current_state(state);
1110 /*
1111 * If HANDOFF bit is set, unconditionally do
1112 * a trylock.
1113 */
1114 if (wstate == WRITER_HANDOFF)
1115 break;
1116
1117 if ((wstate == WRITER_NOT_FIRST) &&
1118 (rwsem_first_waiter(sem) == &waiter))
1119 wstate = WRITER_FIRST;
1120
1121 count = atomic_long_read(&sem->count);
1122 if (!(count & RWSEM_LOCK_MASK))
1123 break;
1124
1125 /*
1126 * The setting of the handoff bit is deferred
1127 * until rwsem_try_write_lock() is called.
1128 */
1129 if ((wstate == WRITER_FIRST) && (rt_task(current) ||
1130 time_after(jiffies, waiter.timeout))) {
1131 wstate = WRITER_HANDOFF;
1132 lockevent_inc(rwsem_wlock_handoff);
1133 break;
1134 }
1135 }
1136 trylock_again:
1137 raw_spin_lock_irq(&sem->wait_lock);
1138 }
1139 __set_current_state(TASK_RUNNING);
1140 list_del(&waiter.list);
1141 raw_spin_unlock_irq(&sem->wait_lock);
1142 lockevent_inc(rwsem_wlock);
1143
1144 return ret;
1145
1146 out_nolock:
1147 __set_current_state(TASK_RUNNING);
1148 raw_spin_lock_irq(&sem->wait_lock);
1149 list_del(&waiter.list);
1150
1151 if (unlikely(wstate == WRITER_HANDOFF))
1152 atomic_long_add(-RWSEM_FLAG_HANDOFF, &sem->count);
1153
1154 if (list_empty(&sem->wait_list))
1155 atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
1156 else
1157 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1158 raw_spin_unlock_irq(&sem->wait_lock);
1159 wake_up_q(&wake_q);
1160 lockevent_inc(rwsem_wlock_fail);
1161
1162 return ERR_PTR(-EINTR);
1163 }
1164
1165 /*
1166 * handle waking up a waiter on the semaphore
1167 * - up_read/up_write has decremented the active part of count if we come here
1168 */
static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
1170 {
1171 unsigned long flags;
1172 DEFINE_WAKE_Q(wake_q);
1173
1174 raw_spin_lock_irqsave(&sem->wait_lock, flags);
1175
1176 if (!list_empty(&sem->wait_list))
1177 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1178
1179 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
1180 wake_up_q(&wake_q);
1181
1182 return sem;
1183 }
1184
1185 /*
1186 * downgrade a write lock into a read lock
1187 * - caller incremented waiting part of count and discovered it still negative
1188 * - just wake up any readers at the front of the queue
1189 */
static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
1191 {
1192 unsigned long flags;
1193 DEFINE_WAKE_Q(wake_q);
1194
1195 raw_spin_lock_irqsave(&sem->wait_lock, flags);
1196
1197 if (!list_empty(&sem->wait_list))
1198 rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
1199
1200 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
1201 wake_up_q(&wake_q);
1202
1203 return sem;
1204 }
1205
1206 /*
1207 * lock for reading
1208 */
static inline int __down_read_common(struct rw_semaphore *sem, int state)
1210 {
1211 long count;
1212
1213 if (!rwsem_read_trylock(sem, &count)) {
1214 if (IS_ERR(rwsem_down_read_slowpath(sem, count, state)))
1215 return -EINTR;
1216 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1217 }
1218 return 0;
1219 }
1220
static inline void __down_read(struct rw_semaphore *sem)
1222 {
1223 __down_read_common(sem, TASK_UNINTERRUPTIBLE);
1224 }
1225
static inline int __down_read_interruptible(struct rw_semaphore *sem)
1227 {
1228 return __down_read_common(sem, TASK_INTERRUPTIBLE);
1229 }
1230
static inline int __down_read_killable(struct rw_semaphore *sem)
1232 {
1233 return __down_read_common(sem, TASK_KILLABLE);
1234 }
1235
static inline int __down_read_trylock(struct rw_semaphore *sem)
1237 {
1238 long tmp;
1239
1240 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1241
1242 /*
1243 * Optimize for the case when the rwsem is not locked at all.
1244 */
1245 tmp = RWSEM_UNLOCKED_VALUE;
1246 do {
1247 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
1248 tmp + RWSEM_READER_BIAS)) {
1249 rwsem_set_reader_owned(sem);
1250 return 1;
1251 }
1252 } while (!(tmp & RWSEM_READ_FAILED_MASK));
1253 return 0;
1254 }
1255
1256 /*
1257 * lock for writing
1258 */
static inline int __down_write_common(struct rw_semaphore *sem, int state)
1260 {
1261 if (unlikely(!rwsem_write_trylock(sem))) {
1262 if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
1263 return -EINTR;
1264 }
1265
1266 return 0;
1267 }
1268
static inline void __down_write(struct rw_semaphore *sem)
1270 {
1271 __down_write_common(sem, TASK_UNINTERRUPTIBLE);
1272 }
1273
static inline int __down_write_killable(struct rw_semaphore *sem)
1275 {
1276 return __down_write_common(sem, TASK_KILLABLE);
1277 }
1278
static inline int __down_write_trylock(struct rw_semaphore *sem)
1280 {
1281 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1282 return rwsem_write_trylock(sem);
1283 }
1284
1285 /*
1286 * unlock after reading
1287 */
static inline void __up_read(struct rw_semaphore *sem)
1289 {
1290 long tmp;
1291
1292 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1293 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1294
1295 rwsem_clear_reader_owned(sem);
1296 tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
1297 DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
1298 if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
1299 RWSEM_FLAG_WAITERS)) {
1300 clear_nonspinnable(sem);
1301 rwsem_wake(sem);
1302 }
1303 }
1304
1305 /*
1306 * unlock after writing
1307 */
static inline void __up_write(struct rw_semaphore *sem)
1309 {
1310 long tmp;
1311
1312 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1313 /*
1314 * sem->owner may differ from current if the ownership is transferred
 * to an anonymous writer by setting the RWSEM_NONSPINNABLE bit.
1316 */
1317 DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
1318 !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);
1319
1320 rwsem_clear_owner(sem);
1321 tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
1322 if (unlikely(tmp & RWSEM_FLAG_WAITERS))
1323 rwsem_wake(sem);
1324 }
1325
1326 /*
1327 * downgrade write lock to read lock
1328 */
static inline void __downgrade_write(struct rw_semaphore *sem)
1330 {
1331 long tmp;
1332
1333 /*
1334 * When downgrading from exclusive to shared ownership,
1335 * anything inside the write-locked region cannot leak
1336 * into the read side. In contrast, anything in the
1337 * read-locked region is ok to be re-ordered into the
1338 * write side. As such, rely on RELEASE semantics.
1339 */
1340 DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
1341 tmp = atomic_long_fetch_add_release(
1342 -RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
1343 rwsem_set_reader_owned(sem);
1344 if (tmp & RWSEM_FLAG_WAITERS)
1345 rwsem_downgrade_wake(sem);
1346 }
1347
1348 #else /* !CONFIG_PREEMPT_RT */
1349
1350 #define RT_MUTEX_BUILD_MUTEX
1351 #include "rtmutex.c"
1352
1353 #define rwbase_set_and_save_current_state(state) \
1354 set_current_state(state)
1355
1356 #define rwbase_restore_current_state() \
1357 __set_current_state(TASK_RUNNING)
1358
1359 #define rwbase_rtmutex_lock_state(rtm, state) \
1360 __rt_mutex_lock(rtm, state)
1361
1362 #define rwbase_rtmutex_slowlock_locked(rtm, state) \
1363 __rt_mutex_slowlock_locked(rtm, NULL, state)
1364
1365 #define rwbase_rtmutex_unlock(rtm) \
1366 __rt_mutex_unlock(rtm)
1367
1368 #define rwbase_rtmutex_trylock(rtm) \
1369 __rt_mutex_trylock(rtm)
1370
1371 #define rwbase_signal_pending_state(state, current) \
1372 signal_pending_state(state, current)
1373
1374 #define rwbase_schedule() \
1375 schedule()
1376
1377 #include "rwbase_rt.c"
1378
void __init_rwsem(struct rw_semaphore *sem, const char *name,
1380 struct lock_class_key *key)
1381 {
1382 init_rwbase_rt(&(sem)->rwbase);
1383
1384 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1385 debug_check_no_locks_freed((void *)sem, sizeof(*sem));
1386 lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
1387 #endif
1388 }
1389 EXPORT_SYMBOL(__init_rwsem);
1390
static inline void __down_read(struct rw_semaphore *sem)
1392 {
1393 rwbase_read_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
1394 }
1395
static inline int __down_read_interruptible(struct rw_semaphore *sem)
1397 {
1398 return rwbase_read_lock(&sem->rwbase, TASK_INTERRUPTIBLE);
1399 }
1400
static inline int __down_read_killable(struct rw_semaphore *sem)
1402 {
1403 return rwbase_read_lock(&sem->rwbase, TASK_KILLABLE);
1404 }
1405
static inline int __down_read_trylock(struct rw_semaphore *sem)
1407 {
1408 return rwbase_read_trylock(&sem->rwbase);
1409 }
1410
static inline void __up_read(struct rw_semaphore *sem)
1412 {
1413 rwbase_read_unlock(&sem->rwbase, TASK_NORMAL);
1414 }
1415
static inline void __sched __down_write(struct rw_semaphore *sem)
1417 {
1418 rwbase_write_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
1419 }
1420
static inline int __sched __down_write_killable(struct rw_semaphore *sem)
1422 {
1423 return rwbase_write_lock(&sem->rwbase, TASK_KILLABLE);
1424 }
1425
static inline int __down_write_trylock(struct rw_semaphore *sem)
1427 {
1428 return rwbase_write_trylock(&sem->rwbase);
1429 }
1430
static inline void __up_write(struct rw_semaphore *sem)
1432 {
1433 rwbase_write_unlock(&sem->rwbase);
1434 }
1435
static inline void __downgrade_write(struct rw_semaphore *sem)
1437 {
1438 rwbase_write_downgrade(&sem->rwbase);
1439 }
1440
1441 /* Debug stubs for the common API */
1442 #define DEBUG_RWSEMS_WARN_ON(c, sem)
1443
static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
1445 struct task_struct *owner)
1446 {
1447 }
1448
static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
1450 {
1451 int count = atomic_read(&sem->rwbase.readers);
1452
1453 return count < 0 && count != READER_BIAS;
1454 }
1455
1456 #endif /* CONFIG_PREEMPT_RT */
1457
1458 /*
1459 * lock for reading
1460 */
void __sched down_read(struct rw_semaphore *sem)
1462 {
1463 might_sleep();
1464 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1465
1466 LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
1467 }
1468 EXPORT_SYMBOL(down_read);
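
/*
 * Illustrative usage only (hypothetical names, not part of this file):
 * a typical reader-side critical section built on the API above.
 *
 *	static DECLARE_RWSEM(my_rwsem);
 *
 *	down_read(&my_rwsem);
 *	... read data protected by my_rwsem ...
 *	up_read(&my_rwsem);
 */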
1469
int __sched down_read_interruptible(struct rw_semaphore *sem)
1471 {
1472 might_sleep();
1473 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1474
1475 if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) {
1476 rwsem_release(&sem->dep_map, _RET_IP_);
1477 return -EINTR;
1478 }
1479
1480 return 0;
1481 }
1482 EXPORT_SYMBOL(down_read_interruptible);
1483
int __sched down_read_killable(struct rw_semaphore *sem)
1485 {
1486 might_sleep();
1487 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1488
1489 if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
1490 rwsem_release(&sem->dep_map, _RET_IP_);
1491 return -EINTR;
1492 }
1493
1494 return 0;
1495 }
1496 EXPORT_SYMBOL(down_read_killable);
1497
1498 /*
1499 * trylock for reading -- returns 1 if successful, 0 if contention
1500 */
int down_read_trylock(struct rw_semaphore *sem)
1502 {
1503 int ret = __down_read_trylock(sem);
1504
1505 if (ret == 1)
1506 rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
1507 return ret;
1508 }
1509 EXPORT_SYMBOL(down_read_trylock);
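
/*
 * Illustrative usage only (hypothetical names): because the trylock
 * returns 1 on success and 0 on contention, it suits callers that must
 * not sleep waiting for the lock.
 *
 *	if (down_read_trylock(&my_rwsem)) {
 *		... read data protected by my_rwsem ...
 *		up_read(&my_rwsem);
 *	} else {
 *		... fall back without the lock ...
 *	}
 */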
1510
1511 /*
1512 * lock for writing
1513 */
void __sched down_write(struct rw_semaphore *sem)
1515 {
1516 might_sleep();
1517 rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
1518 LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1519 }
1520 EXPORT_SYMBOL(down_write);
1521
1522 /*
1523 * lock for writing
1524 */
int __sched down_write_killable(struct rw_semaphore *sem)
1526 {
1527 might_sleep();
1528 rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
1529
1530 if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
1531 __down_write_killable)) {
1532 rwsem_release(&sem->dep_map, _RET_IP_);
1533 return -EINTR;
1534 }
1535
1536 return 0;
1537 }
1538 EXPORT_SYMBOL(down_write_killable);
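
/*
 * Illustrative usage only (hypothetical names): down_write_killable()
 * returns -EINTR if a fatal signal arrives while sleeping, so the return
 * value must be checked before entering the critical section.
 *
 *	if (down_write_killable(&my_rwsem))
 *		return -EINTR;
 *	... modify data protected by my_rwsem ...
 *	up_write(&my_rwsem);
 */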
1539
1540 /*
1541 * trylock for writing -- returns 1 if successful, 0 if contention
1542 */
int down_write_trylock(struct rw_semaphore *sem)
1544 {
1545 int ret = __down_write_trylock(sem);
1546
1547 if (ret == 1)
1548 rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
1549
1550 return ret;
1551 }
1552 EXPORT_SYMBOL(down_write_trylock);
1553
1554 /*
1555 * release a read lock
1556 */
void up_read(struct rw_semaphore *sem)
1558 {
1559 rwsem_release(&sem->dep_map, _RET_IP_);
1560 __up_read(sem);
1561 }
1562 EXPORT_SYMBOL(up_read);
1563
1564 /*
1565 * release a write lock
1566 */
void up_write(struct rw_semaphore *sem)
1568 {
1569 rwsem_release(&sem->dep_map, _RET_IP_);
1570 __up_write(sem);
1571 }
1572 EXPORT_SYMBOL(up_write);
1573
1574 /*
1575 * downgrade write lock to read lock
1576 */
void downgrade_write(struct rw_semaphore *sem)
1578 {
1579 lock_downgrade(&sem->dep_map, _RET_IP_);
1580 __downgrade_write(sem);
1581 }
1582 EXPORT_SYMBOL(downgrade_write);
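
/*
 * Illustrative usage only (hypothetical names): downgrade_write() turns
 * an exclusive hold into a shared one without ever dropping the lock, so
 * updates made under the write lock stay visible while other readers are
 * let in. The lock is then released with up_read().
 *
 *	down_write(&my_rwsem);
 *	... publish an update ...
 *	downgrade_write(&my_rwsem);
 *	... keep reading alongside other readers ...
 *	up_read(&my_rwsem);
 */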
1583
1584 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1585
void down_read_nested(struct rw_semaphore *sem, int subclass)
1587 {
1588 might_sleep();
1589 rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
1590 LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
1591 }
1592 EXPORT_SYMBOL(down_read_nested);
1593
int down_read_killable_nested(struct rw_semaphore *sem, int subclass)
1595 {
1596 might_sleep();
1597 rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
1598
1599 if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
1600 rwsem_release(&sem->dep_map, _RET_IP_);
1601 return -EINTR;
1602 }
1603
1604 return 0;
1605 }
1606 EXPORT_SYMBOL(down_read_killable_nested);
1607
void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
1609 {
1610 might_sleep();
1611 rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
1612 LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1613 }
1614 EXPORT_SYMBOL(_down_write_nest_lock);
1615
void down_read_non_owner(struct rw_semaphore *sem)
1617 {
1618 might_sleep();
1619 __down_read(sem);
1620 __rwsem_set_reader_owned(sem, NULL);
1621 }
1622 EXPORT_SYMBOL(down_read_non_owner);
1623
void down_write_nested(struct rw_semaphore *sem, int subclass)
1625 {
1626 might_sleep();
1627 rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
1628 LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1629 }
1630 EXPORT_SYMBOL(down_write_nested);
1631
int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
1633 {
1634 might_sleep();
1635 rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
1636
1637 if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
1638 __down_write_killable)) {
1639 rwsem_release(&sem->dep_map, _RET_IP_);
1640 return -EINTR;
1641 }
1642
1643 return 0;
1644 }
1645 EXPORT_SYMBOL(down_write_killable_nested);
1646
void up_read_non_owner(struct rw_semaphore *sem)
1648 {
1649 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1650 __up_read(sem);
1651 }
1652 EXPORT_SYMBOL(up_read_non_owner);
1653
1654 #endif
1655