// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 *  Copyright (C) IBM Corporation, 2009
 *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 */
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/ptrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/freezer.h>
#include <linux/memblock.h>
#include <linux/fault-inject.h>
#include <linux/refcount.h>

#include <asm/futex.h>

#include "locking/rtmutex_common.h"
/*
 * READ this before attempting to hack on futexes!
 *
 * Basic futex operation and ordering guarantees
 * =============================================
 *
 * The waiter reads the futex value in user space and calls
 * futex_wait(). This function computes the hash bucket and acquires
 * the hash bucket lock. After that it reads the futex user space value
 * again and verifies that the data has not changed. If it has not changed
 * it enqueues itself into the hash bucket, releases the hash bucket lock
 * and schedules.
 *
 * The waker side modifies the user space value of the futex and calls
 * futex_wake(). This function computes the hash bucket and acquires the
 * hash bucket lock. Then it looks for waiters on that futex in the hash
 * bucket and wakes them.
 *
 * In futex wake up scenarios where no tasks are blocked on a futex, taking
 * the hb spinlock can be avoided and the syscall can simply return. In order
 * for this optimization to work, ordering guarantees must exist so that the
 * waiter being added to the list is acknowledged when the list is
 * concurrently being checked by the waker, avoiding scenarios like the
 * following:
 *
 * CPU 0                               CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *   uval = *futex;
 *                                     *futex = newval;
 *                                     sys_futex(WAKE, futex);
 *                                       futex_wake(futex);
 *                                       if (queue_empty())
 *                                         return;
 *   if (uval == val)
 *      lock(hash_bucket(futex));
 *      queue();
 *     unlock(hash_bucket(futex));
 *     schedule();
 *
 * This would cause the waiter on CPU 0 to wait forever because it
 * missed the transition of the user space value from val to newval
 * and the waker did not find the waiter in the hash bucket queue.
 *
 * The correct serialization ensures that a waiter either observes
 * the changed user space value before blocking or is woken by a
 * concurrent waker:
 *
 * CPU 0                                 CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *
 *   waiters++; (a)
 *   smp_mb(); (A) <-- paired with -.
 *                                  |
 *   lock(hash_bucket(futex));      |
 *                                  |
 *   uval = *futex;                 |
 *                                  |        *futex = newval;
 *                                  |        sys_futex(WAKE, futex);
 *                                  |          futex_wake(futex);
 *                                  |
 *                                  `--------> smp_mb(); (B)
 *   if (uval == val)
 *     queue();
 *     unlock(hash_bucket(futex));
 *     schedule();                         if (waiters)
 *                                           lock(hash_bucket(futex));
 *   else                                    wake_waiters(futex);
 *     waiters--; (b)                        unlock(hash_bucket(futex));
 *
 * Where (A) orders the waiters increment and the futex value read through
 * atomic operations (see hb_waiters_inc) and where (B) orders the write
 * to futex and the waiters read -- this is done by the barriers for both
 * shared and private futexes in get_futex_key_refs().
 *
 * This yields the following case (where X:=waiters, Y:=futex):
 *
 *	X = Y = 0
 *
 *	w[X]=1		w[Y]=1
 *	MB		MB
 *	r[Y]=y		r[X]=x
 *
 * Which guarantees that x==0 && y==0 is impossible; which translates back into
 * the guarantee that we cannot both miss the futex variable change and the
 * enqueue.
 *
 * Note that a new waiter is accounted for in (a) even though it is possible
 * that the wait call returns an error, in which case we backtrack from it
 * in (b). Refer to the comment in queue_lock().
 *
 * Similarly, in order to account for waiters being requeued to another
 * address, we always increment the waiter count of the destination bucket
 * before acquiring its lock and decrement it again after releasing it -
 * the code that actually moves the futex(es) between hash buckets
 * (requeue_futex) does the additional required waiter count housekeeping.
 * This is done in double_lock_hb() and double_unlock_hb(), respectively.
 */
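
/*
 * Illustrative sketch (not part of the kernel source): the store-buffering
 * litmus test behind barriers (A) and (B) above, written with kernel
 * primitives. With both full barriers present, r0 == 0 && r1 == 0 is
 * forbidden: at least one side must observe the other's store, so we can
 * neither miss the futex value change nor miss the enqueued waiter.
 */
#if 0 /* example only */
static int X;	/* X := waiters */
static int Y;	/* Y := futex word */

static void cpu0_waiter(void)		/* futex_wait() side */
{
	int r0;

	WRITE_ONCE(X, 1);		/* hb_waiters_inc() */
	smp_mb();			/* (A) */
	r0 = READ_ONCE(Y);		/* re-read of the futex value */
}

static void cpu1_waker(void)		/* futex_wake() side */
{
	int r1;

	WRITE_ONCE(Y, 1);		/* user space futex store */
	smp_mb();			/* (B), via get_futex_key_refs() */
	r1 = READ_ONCE(X);		/* hb_waiters_pending() */
}
#endif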

#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
#define futex_cmpxchg_enabled 1
#else
static int __read_mostly futex_cmpxchg_enabled;
#endif

/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#ifdef CONFIG_MMU
# define FLAGS_SHARED		0x01
#else
/*
 * NOMMU does not have per process address space. Let the compiler optimize
 * code away.
 */
# define FLAGS_SHARED		0x00
#endif
#define FLAGS_CLOCKRT		0x02
#define FLAGS_HAS_TIMEOUT	0x04

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	refcount_t refcount;

	union futex_key key;
} __randomize_layout;

/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @list:		priority-sorted list of tasks waiting on this futex
 * @task:		the task waiting on the futex
 * @lock_ptr:		the hash bucket lock
 * @key:		the key the futex is hashed on
 * @pi_state:		optional priority inheritance state
 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:	the requeue_pi target futex key
 * @bitset:		bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list
 * via the rt_mutex code. See unqueue_me_pi().
 */
struct futex_q {
	struct plist_node list;

	struct task_struct *task;
	spinlock_t *lock_ptr;
	union futex_key key;
	struct futex_pi_state *pi_state;
	struct rt_mutex_waiter *rt_waiter;
	union futex_key *requeue_pi_key;
	u32 bitset;
} __randomize_layout;

static const struct futex_q futex_q_init = {
	/* list gets initialized in queue_me() */
	.key = FUTEX_KEY_INIT,
	.bitset = FUTEX_BITSET_MATCH_ANY
};

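/*
 * For illustration only: the "woken" predicate described above, written as
 * a helper. No such helper exists in this file; the wait paths open-code
 * the equivalent checks.
 */
#if 0 /* example only */
static inline bool futex_q_is_woken(struct futex_q *q)
{
	/* Wakeup first empties ->list, then clears ->lock_ptr. */
	return plist_node_empty(&q->list) || !READ_ONCE(q->lock_ptr);
}
#endif
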
/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location.  Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	atomic_t waiters;
	spinlock_t lock;
	struct plist_head chain;
} ____cacheline_aligned_in_smp;

/*
 * The base of the bucket array and its size are always used together
 * (after initialization only in hash_futex()), so ensure that they
 * reside in the same cacheline.
 */
static struct {
	struct futex_hash_bucket *queues;
	unsigned long            hashsize;
} __futex_data __read_mostly __aligned(2*sizeof(long));
#define futex_queues   (__futex_data.queues)
#define futex_hashsize (__futex_data.hashsize)


/*
 * Fault injections for futexes.
 */
#ifdef CONFIG_FAIL_FUTEX

static struct {
	struct fault_attr attr;

	bool ignore_private;
} fail_futex = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_private = false,
};

static int __init setup_fail_futex(char *str)
{
	return setup_fault_attr(&fail_futex.attr, str);
}
__setup("fail_futex=", setup_fail_futex);

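/*
 * Example boot-time usage, following the generic fault_attr format
 * (see Documentation/fault-injection/fault-injection.rst):
 *
 *	fail_futex=<interval>,<probability>,<space>,<times>
 *
 * e.g. booting with "fail_futex=1,100,0,-1" makes every fault injection
 * point in this file fail until further notice.
 */
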
static bool should_fail_futex(bool fshared)
{
	if (fail_futex.ignore_private && !fshared)
		return false;

	return should_fail(&fail_futex.attr, 1);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_futex_debugfs(void)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_futex", NULL,
					&fail_futex.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	debugfs_create_bool("ignore-private", mode, dir,
			    &fail_futex.ignore_private);
	return 0;
}

late_initcall(fail_futex_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else
static inline bool should_fail_futex(bool fshared)
{
	return false;
}
#endif /* CONFIG_FAIL_FUTEX */

static inline void futex_get_mm(union futex_key *key)
{
	mmgrab(key->private.mm);
	/*
	 * Ensure futex_get_mm() implies a full barrier such that
	 * get_futex_key() implies a full barrier. This is relied upon
	 * as smp_mb(); (B), see the ordering comment above.
	 */
	smp_mb__after_atomic();
}

/*
 * Reflects a new waiter being added to the waitqueue.
 */
static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_inc(&hb->waiters);
	/*
	 * Full barrier (A), see the ordering comment above.
	 */
	smp_mb__after_atomic();
#endif
}

/*
 * Reflects a waiter being removed from the waitqueue by wakeup
 * paths.
 */
static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_dec(&hb->waiters);
#endif
}

static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	return atomic_read(&hb->waiters);
#else
	return 1;
#endif
}

/**
 * hash_futex - Return the hash bucket in the global hash
 * @key:	Pointer to the futex key for which the hash is calculated
 *
 * We hash on the keys returned from get_futex_key (see below) and return the
 * corresponding hash bucket in the global hash.
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32 *)&key->both.word,
			  (sizeof(key->both.word) + sizeof(key->both.ptr)) / 4,
			  key->both.offset);
	return &futex_queues[hash & (futex_hashsize - 1)];
}

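/*
 * Sketch of the typical lookup pattern in this file (for illustration;
 * the real call sites add error handling and the waiter accounting
 * discussed above):
 *
 *	union futex_key key = FUTEX_KEY_INIT;
 *	struct futex_hash_bucket *hb;
 *
 *	get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_READ);
 *	hb = hash_futex(&key);
 *	spin_lock(&hb->lock);
 *	...inspect or modify hb->chain...
 *	spin_unlock(&hb->lock);
 */
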
/**
 * match_futex - Check whether two futex keys are equal
 * @key1:	Pointer to key1
 * @key2:	Pointer to key2
 *
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1 && key2
		&& key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;

	/*
	 * On MMU-less systems futexes are always "private" as there is no
	 * per-process address space. We need the smp_mb() nevertheless - yes,
	 * arch/blackfin has MMU-less SMP ...
	 */
	if (!IS_ENABLED(CONFIG_MMU)) {
		smp_mb(); /* explicit smp_mb(); (B) */
		return;
	}

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		ihold(key->shared.inode); /* implies smp_mb(); (B) */
		break;
	case FUT_OFF_MMSHARED:
		futex_get_mm(key); /* implies smp_mb(); (B) */
		break;
	default:
		/*
		 * Private futexes do not hold a reference on an inode or
		 * mm, therefore the only purpose of calling get_futex_key_refs
		 * is because we need the barrier for the lockless waiter check.
		 */
		smp_mb(); /* explicit smp_mb(); (B) */
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held. This is
 * a no-op for private futexes, see comment in the get
 * counterpart.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr) {
		/* If we're here then we tried to put a key we failed to get */
		WARN_ON_ONCE(1);
		return;
	}

	if (!IS_ENABLED(CONFIG_MMU))
		return;

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}

enum futex_access {
	FUTEX_READ,
	FUTEX_WRITE
};

/**
 * futex_setup_timer - set up the sleeping hrtimer.
 * @time:	ptr to the given timeout value
 * @timeout:	the hrtimer_sleeper structure to be set up
 * @flags:	futex flags
 * @range_ns:	optional range in ns
 *
 * Return: Initialized hrtimer_sleeper structure or NULL if no timeout
 *	   value given
 */
static inline struct hrtimer_sleeper *
futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
		  int flags, u64 range_ns)
{
	if (!time)
		return NULL;

	hrtimer_init_sleeper_on_stack(timeout, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
	/*
	 * If range_ns is 0, calling hrtimer_set_expires_range_ns() is
	 * effectively the same as calling hrtimer_set_expires().
	 */
	hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);

	return timeout;
}

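/*
 * Typical caller pattern, for illustration (modelled on the wait paths
 * later in this file): arm the optional timer, block, then cancel and
 * destroy the on-stack timer.
 */
#if 0 /* example only */
	struct hrtimer_sleeper timeout, *to;

	to = futex_setup_timer(abs_time, &timeout, flags,
			       current->timer_slack_ns);
	/* ... queue the futex_q and schedule ... */
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
#endif
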
/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key:	address where result is stored.
 * @rw:		mapping needs to be read/write (values: FUTEX_READ,
 *		FUTEX_WRITE)
 *
 * Return: a negative error code or 0
 *
 * The key words are stored in @key on success.
 *
 * For shared mappings, it's (page->index, file_inode(vma->vm_file),
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, enum futex_access rw)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page, *tail;
	struct address_space *mapping;
	int err, ro = 0;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	if (unlikely(!access_ok(uaddr, sizeof(u32))))
		return -EFAULT;

	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * the virtual address, we don't even have to find the underlying vma.
	 * Note: We do have to check that 'uaddr' is a valid user address,
	 *       but access_ok() should be faster than find_vma().
	 */
	if (!fshared) {
		key->private.mm = mm;
		key->private.address = address;
		get_futex_key_refs(key);  /* implies smp_mb(); (B) */
		return 0;
	}

again:
	/* Ignore any VERIFY_READ mapping (futex common case) */
	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
	/*
	 * If write access is not required (eg. FUTEX_WAIT), try
	 * and get read-only access.
	 */
	if (err == -EFAULT && rw == FUTEX_READ) {
		err = get_user_pages_fast(address, 1, 0, &page);
		ro = 1;
	}
	if (err < 0)
		return err;
	else
		err = 0;

	/*
	 * The treatment of mapping from this point on is critical. The page
	 * lock protects many things but in this context the page lock
	 * stabilizes mapping, prevents inode freeing in the shared
	 * file-backed region case and guards against movement to swap cache.
	 *
	 * Strictly speaking the page lock is not needed in all cases being
	 * considered here and the page lock forces unnecessary serialization.
	 * From this point on, mapping will be re-verified if necessary and
	 * the page lock will be acquired only if it is unavoidable.
	 *
	 * Mapping checks require the head page for any compound page so the
	 * head page and mapping are looked up now. For anonymous pages, it
	 * does not matter if the page splits in the future as the key is
	 * based on the address. For filesystem-backed pages, the tail is
	 * required as the index of the page determines the key. For
	 * base pages, there is no tail page and tail == page.
	 */
	tail = page;
	page = compound_head(page);
	mapping = READ_ONCE(page->mapping);

	/*
	 * If page->mapping is NULL, then it cannot be a PageAnon
	 * page; but it might be the ZERO_PAGE or in the gate area or
	 * in a special mapping (all cases which we are happy to fail);
	 * or it may have been a good file page when get_user_pages_fast
	 * found it, but truncated or holepunched or subjected to
	 * invalidate_complete_page2 before we got the page lock (also
	 * cases which we are happy to fail).  And we hold a reference,
	 * so refcount care in invalidate_complete_page's remove_mapping
	 * prevents drop_caches from setting mapping to NULL beneath us.
	 *
	 * The case we do have to guard against is when memory pressure made
	 * shmem_writepage move it from filecache to swapcache beneath us:
	 * an unlikely race, but we do need to retry for page->mapping.
	 */
	if (unlikely(!mapping)) {
		int shmem_swizzled;

		/*
		 * Page lock is required to identify which special case above
		 * applies. If this is really a shmem page then the page lock
		 * will prevent unexpected transitions.
		 */
		lock_page(page);
		shmem_swizzled = PageSwapCache(page) || page->mapping;
		unlock_page(page);
		put_page(page);

		if (shmem_swizzled)
			goto again;

		return -EFAULT;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * If the futex key is stored on an anonymous page, then the associated
	 * object is the mm which is implicitly pinned by the calling process.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (PageAnon(page)) {
		/*
		 * A RO anonymous page will never change and thus doesn't make
		 * sense for futex operations.
		 */
		if (unlikely(should_fail_futex(fshared)) || ro) {
			err = -EFAULT;
			goto out;
		}

		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;

		get_futex_key_refs(key); /* implies smp_mb(); (B) */

	} else {
		struct inode *inode;

		/*
		 * The associated futex object in this case is the inode and
		 * the page->mapping must be traversed. Ordinarily this should
		 * be stabilised under page lock but it's not strictly
		 * necessary in this case as we just want to pin the inode, not
		 * update the radix tree or anything like that.
		 *
		 * The RCU read lock is taken as the inode is finally freed
		 * under RCU. If the mapping still matches expectations then the
		 * mapping->host can be safely accessed as being a valid inode.
		 */
		rcu_read_lock();

		if (READ_ONCE(page->mapping) != mapping) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		inode = READ_ONCE(mapping->host);
		if (!inode) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		/*
		 * Take a reference unless it is about to be freed. Previously
		 * this reference was taken by ihold under the page lock
		 * pinning the inode in place so i_lock was unnecessary. The
		 * only way for this check to fail is if the inode was
		 * truncated in parallel which is almost certainly an
		 * application bug. In such a case, just retry.
		 *
		 * We are not calling into get_futex_key_refs() in file-backed
		 * cases, therefore a successful atomic_inc return below will
		 * guarantee that get_futex_key() will still imply smp_mb(); (B).
		 */
		if (!atomic_inc_not_zero(&inode->i_count)) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		/* Should be impossible, but let's be paranoid for now */
		if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
			err = -EFAULT;
			rcu_read_unlock();
			iput(inode);

			goto out;
		}

		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.inode = inode;
		key->shared.pgoff = basepage_index(tail);
		rcu_read_unlock();
	}

out:
	put_page(page);
	return err;
}

static inline void put_futex_key(union futex_key *key)
{
	drop_futex_key_refs(key);
}

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE, NULL);
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;
}

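/*
 * Sketch of the fault-retry idiom built on the helpers above (for
 * illustration; the real paths also drop and reacquire futex key
 * references): atomic accesses run with pagefaults disabled, and on
 * -EFAULT we drop the hb lock, fault the page in writable and retry.
 */
#if 0 /* example only */
retry:
	spin_lock(&hb->lock);
	ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
	spin_unlock(&hb->lock);
	if (ret == -EFAULT) {
		if (fault_in_user_writeable(uaddr))
			return -EFAULT;
		goto retry;
	}
#endif
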
/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futexes' futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
					union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (match_futex(&this->key, key))
			return this;
	}
	return NULL;
}

static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
				      u32 uval, u32 newval)
{
	int ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
	pagefault_enable();

	return ret;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __get_user(*dest, from);
	pagefault_enable();

	return ret ? -EFAULT : 0;
}

/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	refcount_set(&pi_state->refcount, 1);
	pi_state->key = FUTEX_KEY_INIT;

	current->pi_state_cache = pi_state;

	return 0;
}

static struct futex_pi_state *alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

static void get_pi_state(struct futex_pi_state *pi_state)
{
	WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount));
}

/*
 * Drops a reference to the pi_state object and frees or caches it
 * when the last reference is gone.
 */
static void put_pi_state(struct futex_pi_state *pi_state)
{
	if (!pi_state)
		return;

	if (!refcount_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		struct task_struct *owner;

		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		owner = pi_state->owner;
		if (owner) {
			raw_spin_lock(&owner->pi_lock);
			list_del_init(&pi_state->list);
			raw_spin_unlock(&owner->pi_lock);
		}
		rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	}

	if (current->pi_state_cache) {
		kfree(pi_state);
	} else {
		/*
		 * pi_state->list is already empty.
		 * Clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		refcount_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}

#ifdef CONFIG_FUTEX_PI

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	if (!futex_cmpxchg_enabled)
		return;
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	raw_spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {
		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);

		/*
		 * We can race against put_pi_state() removing itself from the
		 * list (a waiter going away). put_pi_state() will first
		 * decrement the reference count and then modify the list, so
		 * it's possible to see the list entry but fail this reference
		 * acquire.
		 *
		 * In that case; drop the locks to let put_pi_state() make
		 * progress and retry the loop.
		 */
		if (!refcount_inc_not_zero(&pi_state->refcount)) {
			raw_spin_unlock_irq(&curr->pi_lock);
			cpu_relax();
			raw_spin_lock_irq(&curr->pi_lock);
			continue;
		}
		raw_spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);
		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		raw_spin_lock(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			/* retain curr->pi_lock for the loop invariant */
			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
			spin_unlock(&hb->lock);
			put_pi_state(pi_state);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;

		raw_spin_unlock(&curr->pi_lock);
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
		spin_unlock(&hb->lock);

		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);

		raw_spin_lock_irq(&curr->pi_lock);
	}
	raw_spin_unlock_irq(&curr->pi_lock);
}

#endif

/*
 * We need to check the following states:
 *
 *      Waiter | pi_state | pi->owner | uTID      | uODIED | ?
 *
 * [1]  NULL   | ---      | ---       | 0         | 0/1    | Valid
 * [2]  NULL   | ---      | ---       | >0        | 0/1    | Valid
 *
 * [3]  Found  | NULL     | --        | Any       | 0/1    | Invalid
 *
 * [4]  Found  | Found    | NULL      | 0         | 1      | Valid
 * [5]  Found  | Found    | NULL      | >0        | 1      | Invalid
 *
 * [6]  Found  | Found    | task      | 0         | 1      | Valid
 *
 * [7]  Found  | Found    | NULL      | Any       | 0      | Invalid
 *
 * [8]  Found  | Found    | task      | ==taskTID | 0/1    | Valid
 * [9]  Found  | Found    | task      | 0         | 0      | Invalid
 * [10] Found  | Found    | task      | !=taskTID | 0/1    | Invalid
 *
 * [1]	Indicates that the kernel can acquire the futex atomically. We
 *	came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
 *
 * [2]	Valid, if TID does not belong to a kernel thread. If no matching
 *	thread is found then it indicates that the owner TID has died.
 *
 * [3]	Invalid. The waiter is queued on a non-PI futex.
 *
 * [4]	Valid state after exit_robust_list(), which sets the user space
 *	value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
 *
 * [5]	The user space value got manipulated between exit_robust_list()
 *	and exit_pi_state_list().
 *
 * [6]	Valid state after exit_pi_state_list() which sets the new owner in
 *	the pi_state but cannot access the user space value.
 *
 * [7]	pi_state->owner can only be NULL when the OWNER_DIED bit is set.
 *
 * [8]	Owner and user space value match.
 *
 * [9]	There is no transient state which sets the user space TID to 0
 *	except exit_robust_list(), but this is indicated by the
 *	FUTEX_OWNER_DIED bit. See [4].
 *
 * [10] There is no transient state which leaves owner and user space
 *	TID out of sync.
 *
 *
 * Serialization and lifetime rules:
 *
 * hb->lock:
 *
 *	hb -> futex_q, relation
 *	futex_q -> pi_state, relation
 *
 *	(cannot be raw because hb can contain an arbitrary number
 *	 of futex_q's)
 *
 * pi_mutex->wait_lock:
 *
 *	{uval, pi_state}
 *
 *	(and pi_mutex 'obviously')
 *
 * p->pi_lock:
 *
 *	p->pi_state_list -> pi_state->list, relation
 *
 * pi_state->refcount:
 *
 *	pi_state lifetime
 *
 *
 * Lock order:
 *
 *   hb->lock
 *     pi_mutex->wait_lock
 *       p->pi_lock
 *
 */

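/*
 * The lock order above, spelled out as code for illustration (this is
 * the nesting that exit_pi_state_list() above follows):
 *
 *	spin_lock(&hb->lock);
 *	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
 *	raw_spin_lock(&owner->pi_lock);
 *	...
 *	raw_spin_unlock(&owner->pi_lock);
 *	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
 *	spin_unlock(&hb->lock);
 */
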
/*
 * Validate that the existing waiter has a pi_state and sanity check
 * the pi_state against the user space value. If correct, attach to
 * it.
 */
static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
			      struct futex_pi_state *pi_state,
			      struct futex_pi_state **ps)
{
	pid_t pid = uval & FUTEX_TID_MASK;
	u32 uval2;
	int ret;

	/*
	 * Userspace might have messed up non-PI and PI futexes [3]
	 */
	if (unlikely(!pi_state))
		return -EINVAL;

	/*
	 * We get here with hb->lock held, and having found a
	 * futex_top_waiter(). This means that futex_lock_pi() of said futex_q
	 * has dropped the hb->lock in between queue_me() and unqueue_me_pi(),
	 * which in turn means that futex_lock_pi() still has a reference on
	 * our pi_state.
	 *
	 * The waiter holding a reference on @pi_state also protects against
	 * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi()
	 * and futex_wait_requeue_pi() as it cannot go to 0 and consequently
	 * free pi_state before we can take a reference ourselves.
	 */
	WARN_ON(!refcount_read(&pi_state->refcount));

	/*
	 * Now that we have a pi_state, we can acquire wait_lock
	 * and do the state validation.
	 */
	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);

	/*
	 * Since {uval, pi_state} is serialized by wait_lock, and our current
	 * uval was read without holding it, it can have changed. Verify it
	 * still is what we expect it to be, otherwise retry the entire
	 * operation.
	 */
	if (get_futex_value_locked(&uval2, uaddr))
		goto out_efault;

	if (uval != uval2)
		goto out_eagain;

	/*
	 * Handle the owner died case:
	 */
	if (uval & FUTEX_OWNER_DIED) {
		/*
		 * exit_pi_state_list sets owner to NULL and wakes the
		 * topmost waiter. The task which acquires the
		 * pi_state->rt_mutex will fixup owner.
		 */
		if (!pi_state->owner) {
			/*
			 * No pi state owner, but the user space TID
			 * is not 0. Inconsistent state. [5]
			 */
			if (pid)
				goto out_einval;
			/*
			 * Take a ref on the state and return success. [4]
			 */
			goto out_attach;
		}

		/*
		 * If TID is 0, then either the dying owner has not
		 * yet executed exit_pi_state_list() or some waiter
		 * acquired the rtmutex in the pi state, but did not
		 * yet fixup the TID in user space.
		 *
		 * Take a ref on the state and return success. [6]
		 */
		if (!pid)
			goto out_attach;
	} else {
		/*
		 * If the owner died bit is not set, then the pi_state
		 * must have an owner. [7]
		 */
		if (!pi_state->owner)
			goto out_einval;
	}

	/*
	 * Bail out if user space manipulated the futex value. If pi
	 * state exists then the owner TID must be the same as the
	 * user space TID. [9/10]
	 */
	if (pid != task_pid_vnr(pi_state->owner))
		goto out_einval;

out_attach:
	get_pi_state(pi_state);
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	*ps = pi_state;
	return 0;

out_einval:
	ret = -EINVAL;
	goto out_error;

out_eagain:
	ret = -EAGAIN;
	goto out_error;

out_efault:
	ret = -EFAULT;
	goto out_error;

out_error:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	return ret;
}

static int handle_exit_race(u32 __user *uaddr, u32 uval,
			    struct task_struct *tsk)
{
	u32 uval2;

	/*
	 * If PF_EXITPIDONE is not yet set, then try again.
	 */
	if (tsk && !(tsk->flags & PF_EXITPIDONE))
		return -EAGAIN;

	/*
	 * Reread the user space value to handle the following situation:
	 *
	 * CPU0				CPU1
	 *
	 * sys_exit()			sys_futex()
	 *  do_exit()			 futex_lock_pi()
	 *                                futex_lock_pi_atomic()
	 *   exit_signals(tsk)		    No waiters:
	 *    tsk->flags |= PF_EXITING;	    *uaddr == 0x00000PID
	 *  mm_release(tsk)		    Set waiter bit
	 *   exit_robust_list(tsk) {	    *uaddr = 0x80000PID;
	 *      Set owner died		    attach_to_pi_owner() {
	 *    *uaddr = 0xC0000000;	     tsk = get_task(PID);
	 *   }				     if (!(tsk->flags & PF_EXITING)) {
	 *  ...				       attach();
	 *  tsk->flags |= PF_EXITPIDONE;     } else {
	 *				       if (!(tsk->flags & PF_EXITPIDONE))
	 *				         return -EAGAIN;
	 *				       return -ESRCH; <--- FAIL
	 *				     }
	 *
	 * Returning ESRCH unconditionally is wrong here because the
	 * user space value has been changed by the exiting task.
	 *
	 * The same logic applies to the case where the exiting task is
	 * already gone.
	 */
	if (get_futex_value_locked(&uval2, uaddr))
		return -EFAULT;

	/* If the user space value has changed, try again. */
	if (uval2 != uval)
		return -EAGAIN;

	/*
	 * The exiting task did not have a robust list, the robust list was
	 * corrupted or the user space value in *uaddr is simply bogus.
	 * Give up and tell user space.
	 */
	return -ESRCH;
}

/*
 * Lookup the task for the TID provided from user space and attach to
 * it after doing proper sanity checks.
 */
static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
			      struct futex_pi_state **ps)
{
	pid_t pid = uval & FUTEX_TID_MASK;
	struct futex_pi_state *pi_state;
	struct task_struct *p;

	/*
	 * We are the first waiter - try to look up the real owner and attach
	 * the new pi_state to it, but bail out when TID = 0 [1]
	 *
	 * The !pid check is paranoid. None of the call sites should end up
	 * with pid == 0, but better safe than sorry. Let the caller retry.
	 */
	if (!pid)
		return -EAGAIN;
	p = find_get_task_by_vpid(pid);
	if (!p)
		return handle_exit_race(uaddr, uval, NULL);

	if (unlikely(p->flags & PF_KTHREAD)) {
		put_task_struct(p);
		return -EPERM;
	}

	/*
	 * We need to look at the task state flags to figure out whether the
	 * task is exiting. To protect against the do_exit change of the task
	 * flags, we do this protected by p->pi_lock:
	 */
	raw_spin_lock_irq(&p->pi_lock);
	if (unlikely(p->flags & PF_EXITING)) {
		/*
		 * The task is on the way out. When PF_EXITPIDONE is
		 * set, we know that the task has finished the
		 * cleanup:
		 */
		int ret = handle_exit_race(uaddr, uval, p);

		raw_spin_unlock_irq(&p->pi_lock);
		put_task_struct(p);
		return ret;
	}

	/*
	 * No existing pi state. First waiter. [2]
	 *
	 * This creates pi_state, we have hb->lock held, this means nothing can
	 * observe this state, wait_lock is irrelevant.
	 */
	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make @p
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = *key;

	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);
	/*
	 * Assignment without holding pi_state->pi_mutex.wait_lock is safe
	 * because there is no concurrency as the object is not published yet.
	 */
	pi_state->owner = p;
	raw_spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	*ps = pi_state;

	return 0;
}

static int lookup_pi_state(u32 __user *uaddr, u32 uval,
			   struct futex_hash_bucket *hb,
			   union futex_key *key, struct futex_pi_state **ps)
{
	struct futex_q *top_waiter = futex_top_waiter(hb, key);

	/*
	 * If there is a waiter on that futex, validate it and
	 * attach to the pi_state when the validation succeeds.
	 */
	if (top_waiter)
		return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);

	/*
	 * We are the first waiter - try to look up the owner based on
	 * @uval and attach to it.
	 */
	return attach_to_pi_owner(uaddr, uval, key, ps);
}

static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
{
	int err;
	u32 uninitialized_var(curval);

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
	if (unlikely(err))
		return err;

	/* If user space value changed, let the caller retry */
	return curval != uval ? -EAGAIN : 0;
}

/**
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 * @uaddr:		the pi futex user address
 * @hb:			the pi futex hash bucket
 * @key:		the futex key associated with uaddr and hb
 * @ps:			the pi_state pointer where we store the result of the
 *			lookup
 * @task:		the task to perform the atomic lock work for.  This will
 *			be "current" except in the case of requeue pi.
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Return:
 *  -  0 - ready to wait;
 *  -  1 - acquired the lock;
 *  - <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
				struct task_struct *task, int set_waiters)
{
	u32 uval, newval, vpid = task_pid_vnr(task);
	struct futex_q *top_waiter;
	int ret;

	/*
	 * Read the user space value first so we can validate a few
	 * things before proceeding further.
	 */
	if (get_futex_value_locked(&uval, uaddr))
		return -EFAULT;

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	/*
	 * Detect deadlocks.
	 */
	if (unlikely((uval & FUTEX_TID_MASK) == vpid))
		return -EDEADLK;

	if (unlikely(should_fail_futex(true)))
		return -EDEADLK;

	/*
	 * Lookup existing state first. If it exists, try to attach to
	 * its pi_state.
	 */
	top_waiter = futex_top_waiter(hb, key);
	if (top_waiter)
		return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);

	/*
	 * No waiter and the user space TID is 0. We are here because the
	 * waiters bit or the owner died bit is set, we were called from
	 * requeue_cmp_pi, or something else forced the slow path into the
	 * syscall.
	 */
	if (!(uval & FUTEX_TID_MASK)) {
		/*
		 * We take over the futex. No other waiters and the user space
		 * TID is 0. We preserve the owner died bit.
		 */
		newval = uval & FUTEX_OWNER_DIED;
		newval |= vpid;

		/* The futex requeue_pi code can enforce the waiters bit */
		if (set_waiters)
			newval |= FUTEX_WAITERS;

		ret = lock_pi_update_atomic(uaddr, uval, newval);
		/* If the take over worked, return 1 */
		return ret < 0 ? ret : 1;
	}

	/*
	 * First waiter. Set the waiters bit before attaching ourselves to
	 * the owner. If the owner tries to unlock, it will be forced into
	 * the kernel and blocked on hb->lock.
	 */
	newval = uval | FUTEX_WAITERS;
	ret = lock_pi_update_atomic(uaddr, uval, newval);
	if (ret)
		return ret;
	/*
	 * If the update of the user space value succeeded, we try to
	 * attach to the owner. If that fails, no harm done, we only
	 * set the FUTEX_WAITERS bit in the user space variable.
	 */
	return attach_to_pi_owner(uaddr, newval, key, ps);
}

/**
 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
static void __unqueue_futex(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
		return;
	lockdep_assert_held(q->lock_ptr);

	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
	plist_del(&q->list, &hb->chain);
	hb_waiters_dec(hb);
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed. Callers
 * must ensure to later call wake_up_q() for the actual
 * wakeups to occur.
 */
static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
{
	struct task_struct *p = q->task;

	if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
		return;

	get_task_struct(p);
	__unqueue_futex(q);
	/*
	 * The waiting task can free the futex_q as soon as q->lock_ptr = NULL
	 * is written, without taking any locks. This is possible in the event
	 * of a spurious wakeup, for example. A memory barrier is required here
	 * to prevent the following store to lock_ptr from getting ahead of the
	 * plist_del in __unqueue_futex().
	 */
	smp_store_release(&q->lock_ptr, NULL);

	/*
	 * Queue the task for later wakeup for after we've released
	 * the hb->lock. wake_q_add_safe() consumes the reference we
	 * took above.
	 */
	wake_q_add_safe(wake_q, p);
}

/*
 * Caller must hold a reference on @pi_state.
 */
static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
{
	u32 uninitialized_var(curval), newval;
	struct task_struct *new_owner;
	bool postunlock = false;
	DEFINE_WAKE_Q(wake_q);
	int ret = 0;

	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
	if (WARN_ON_ONCE(!new_owner)) {
		/*
		 * As per the comment in futex_unlock_pi() this should not happen.
		 *
		 * When this happens, give up our locks and try again, giving
		 * the futex_lock_pi() instance time to complete, either by
		 * waiting on the rtmutex or removing itself from the futex
		 * queue.
		 */
		ret = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * We pass it to the next owner. The WAITERS bit is always kept
	 * enabled while there is PI state around. We cleanup the owner
	 * died bit, because we are the owner.
	 */
	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

	if (unlikely(should_fail_futex(true))) {
		ret = -EFAULT;
		goto out_unlock;
	}

	ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
	if (!ret && (curval != uval)) {
		/*
		 * If an unconditional UNLOCK_PI operation (user space did not
		 * try the TID->0 transition) raced with a waiter setting the
		 * FUTEX_WAITERS flag between get_user() and locking the hash
		 * bucket lock, retry the operation.
		 */
		if ((FUTEX_TID_MASK & curval) == uval)
			ret = -EAGAIN;
		else
			ret = -EINVAL;
	}

	if (ret)
		goto out_unlock;

	/*
	 * This is a point of no return; once we modify the uval there is no
	 * going back and subsequent operations must not fail.
	 */

	raw_spin_lock(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	raw_spin_unlock(&pi_state->owner->pi_lock);

	raw_spin_lock(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	raw_spin_unlock(&new_owner->pi_lock);

	postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);

out_unlock:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);

	if (postunlock)
		rt_mutex_postunlock(&wake_q);

	return ret;
}

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}

/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;
	DEFINE_WAKE_Q(wake_q);

	if (!bitset)
		return -EINVAL;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_READ);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);

	/* Make sure we really have tasks to wakeup */
	if (!hb_waiters_pending(hb))
		goto out_put_key;

	spin_lock(&hb->lock);

	plist_for_each_entry_safe(this, next, &hb->chain, list) {
		if (match_futex(&this->key, &key)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				break;
			}

			/* Check if one of the bits is set in both bitsets */
			if (!(this->bitset & bitset))
				continue;

			mark_wake_futex(&wake_q, this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&hb->lock);
	wake_up_q(&wake_q);
out_put_key:
	put_futex_key(&key);
out:
	return ret;
}

static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
{
	unsigned int op =	  (encoded_op & 0x70000000) >> 28;
	unsigned int cmp =	  (encoded_op & 0x0f000000) >> 24;
	int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
	int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
	int oldval, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
		if (oparg < 0 || oparg > 31) {
			char comm[sizeof(current->comm)];
			/*
			 * kill this print and return -EINVAL when userspace
			 * is sane again
			 */
			pr_info_ratelimited("futex_wake_op: %s tries to shift op by %d; fix this program\n",
					get_task_comm(comm, current), oparg);
			oparg &= 31;
		}
		oparg = 1 << oparg;
	}

	if (!access_ok(uaddr, sizeof(u32)))
		return -EFAULT;

	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
	if (ret)
		return ret;

	switch (cmp) {
	case FUTEX_OP_CMP_EQ:
		return oldval == cmparg;
	case FUTEX_OP_CMP_NE:
		return oldval != cmparg;
	case FUTEX_OP_CMP_LT:
		return oldval < cmparg;
	case FUTEX_OP_CMP_GE:
		return oldval >= cmparg;
	case FUTEX_OP_CMP_LE:
		return oldval <= cmparg;
	case FUTEX_OP_CMP_GT:
		return oldval > cmparg;
	default:
		return -ENOSYS;
	}
}

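/*
 * For reference, the encoding decoded above is produced by user space
 * with the FUTEX_OP() macro from the uapi <linux/futex.h>:
 *
 *	FUTEX_OP(op, oparg, cmp, cmparg) =
 *		((op & 0xf) << 28) | ((cmp & 0xf) << 24) |
 *		((oparg & 0xfff) << 12) | (cmparg & 0xfff)
 *
 * e.g. FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0) atomically adds 1 to
 * *uaddr2 and requests the nr_wake2 wakeups if the old value was > 0.
 */
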
1690 /*
1691  * Wake up all waiters hashed on the physical page that is mapped
1692  * to this virtual address:
1693  */
1694 static int
futex_wake_op(u32 __user * uaddr1,unsigned int flags,u32 __user * uaddr2,int nr_wake,int nr_wake2,int op)1695 futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
1696 	      int nr_wake, int nr_wake2, int op)
1697 {
1698 	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1699 	struct futex_hash_bucket *hb1, *hb2;
1700 	struct futex_q *this, *next;
1701 	int ret, op_ret;
1702 	DEFINE_WAKE_Q(wake_q);
1703 
1704 retry:
1705 	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ);
1706 	if (unlikely(ret != 0))
1707 		goto out;
1708 	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
1709 	if (unlikely(ret != 0))
1710 		goto out_put_key1;
1711 
1712 	hb1 = hash_futex(&key1);
1713 	hb2 = hash_futex(&key2);
1714 
1715 retry_private:
1716 	double_lock_hb(hb1, hb2);
1717 	op_ret = futex_atomic_op_inuser(op, uaddr2);
1718 	if (unlikely(op_ret < 0)) {
1719 		double_unlock_hb(hb1, hb2);
1720 
1721 		if (!IS_ENABLED(CONFIG_MMU) ||
1722 		    unlikely(op_ret != -EFAULT && op_ret != -EAGAIN)) {
1723 			/*
1724 			 * we don't get EFAULT from MMU faults if we don't have
1725 			 * an MMU, but we might get them from range checking
1726 			 */
1727 			ret = op_ret;
1728 			goto out_put_keys;
1729 		}
1730 
1731 		if (op_ret == -EFAULT) {
1732 			ret = fault_in_user_writeable(uaddr2);
1733 			if (ret)
1734 				goto out_put_keys;
1735 		}
1736 
1737 		if (!(flags & FLAGS_SHARED)) {
1738 			cond_resched();
1739 			goto retry_private;
1740 		}
1741 
1742 		put_futex_key(&key2);
1743 		put_futex_key(&key1);
1744 		cond_resched();
1745 		goto retry;
1746 	}
1747 
1748 	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
1749 		if (match_futex(&this->key, &key1)) {
1750 			if (this->pi_state || this->rt_waiter) {
1751 				ret = -EINVAL;
1752 				goto out_unlock;
1753 			}
1754 			mark_wake_futex(&wake_q, this);
1755 			if (++ret >= nr_wake)
1756 				break;
1757 		}
1758 	}
1759 
1760 	if (op_ret > 0) {
1761 		op_ret = 0;
1762 		plist_for_each_entry_safe(this, next, &hb2->chain, list) {
1763 			if (match_futex(&this->key, &key2)) {
1764 				if (this->pi_state || this->rt_waiter) {
1765 					ret = -EINVAL;
1766 					goto out_unlock;
1767 				}
1768 				mark_wake_futex(&wake_q, this);
1769 				if (++op_ret >= nr_wake2)
1770 					break;
1771 			}
1772 		}
1773 		ret += op_ret;
1774 	}
1775 
1776 out_unlock:
1777 	double_unlock_hb(hb1, hb2);
1778 	wake_up_q(&wake_q);
1779 out_put_keys:
1780 	put_futex_key(&key2);
1781 out_put_key1:
1782 	put_futex_key(&key1);
1783 out:
1784 	return ret;
1785 }
1786 
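/*
 * A hedged userspace sketch of driving futex_wake_op() via the raw
 * syscall; "cond" and "mutex" are illustrative names, and this mirrors
 * the classic glibc condvar-signal optimization rather than any fixed
 * library API:
 *
 *	// Wake up to one waiter on cond; atomically set *mutex to 0 and,
 *	// if its old value was > 1 (contended), wake one waiter on mutex
 *	// too. nr_wake2 travels in the otherwise unused timeout slot.
 *	syscall(SYS_futex, cond, FUTEX_WAKE_OP, 1, (void *)1UL, mutex,
 *		FUTEX_OP(FUTEX_OP_SET, 0, FUTEX_OP_CMP_GT, 1));
 */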
1787 /**
1788  * requeue_futex() - Requeue a futex_q from one hb to another
1789  * @q:		the futex_q to requeue
1790  * @hb1:	the source hash_bucket
1791  * @hb2:	the target hash_bucket
1792  * @key2:	the new key for the requeued futex_q
1793  */
1794 static inline
1795 void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
1796 		   struct futex_hash_bucket *hb2, union futex_key *key2)
1797 {
1798 
1799 	/*
1800 	 * If key1 and key2 hash to the same bucket, no need to
1801 	 * requeue.
1802 	 */
1803 	if (likely(&hb1->chain != &hb2->chain)) {
1804 		plist_del(&q->list, &hb1->chain);
1805 		hb_waiters_dec(hb1);
1806 		hb_waiters_inc(hb2);
1807 		plist_add(&q->list, &hb2->chain);
1808 		q->lock_ptr = &hb2->lock;
1809 	}
1810 	get_futex_key_refs(key2);
1811 	q->key = *key2;
1812 }
1813 
1814 /**
1815  * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
1816  * @q:		the futex_q
1817  * @key:	the key of the requeue target futex
1818  * @hb:		the hash_bucket of the requeue target futex
1819  *
1820  * During futex_requeue, with requeue_pi=1, it is possible to acquire the
1821  * target futex if it is uncontended or via a lock steal.  Set the futex_q key
1822  * to the requeue target futex so the waiter can detect the wakeup on the right
1823  * futex, but remove it from the hb and NULL the rt_waiter so it can detect
1824  * atomic lock acquisition.  Set the q->lock_ptr to the requeue target hb->lock
1825  * to protect access to the pi_state to fixup the owner later.  Must be called
1826  * with both q->lock_ptr and hb->lock held.
1827  */
1828 static inline
1829 void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1830 			   struct futex_hash_bucket *hb)
1831 {
1832 	get_futex_key_refs(key);
1833 	q->key = *key;
1834 
1835 	__unqueue_futex(q);
1836 
1837 	WARN_ON(!q->rt_waiter);
1838 	q->rt_waiter = NULL;
1839 
1840 	q->lock_ptr = &hb->lock;
1841 
1842 	wake_up_state(q->task, TASK_NORMAL);
1843 }
1844 
1845 /**
1846  * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
1847  * @pifutex:		the user address of the to futex
1848  * @hb1:		the from futex hash bucket, must be locked by the caller
1849  * @hb2:		the to futex hash bucket, must be locked by the caller
1850  * @key1:		the from futex key
1851  * @key2:		the to futex key
1852  * @ps:			address to store the pi_state pointer
1853  * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
1854  *
1855  * Try and get the lock on behalf of the top waiter if we can do it atomically.
1856  * Wake the top waiter if we succeed.  If the caller specified set_waiters,
1857  * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
1858  * hb1 and hb2 must be held by the caller.
1859  *
1860  * Return:
1861  *  -  0 - failed to acquire the lock atomically;
1862  *  - >0 - acquired the lock, return value is vpid of the top_waiter
1863  *  - <0 - error
1864  */
1865 static int futex_proxy_trylock_atomic(u32 __user *pifutex,
1866 				 struct futex_hash_bucket *hb1,
1867 				 struct futex_hash_bucket *hb2,
1868 				 union futex_key *key1, union futex_key *key2,
1869 				 struct futex_pi_state **ps, int set_waiters)
1870 {
1871 	struct futex_q *top_waiter = NULL;
1872 	u32 curval;
1873 	int ret, vpid;
1874 
1875 	if (get_futex_value_locked(&curval, pifutex))
1876 		return -EFAULT;
1877 
1878 	if (unlikely(should_fail_futex(true)))
1879 		return -EFAULT;
1880 
1881 	/*
1882 	 * Find the top_waiter and determine if there are additional waiters.
1883 	 * If the caller intends to requeue more than 1 waiter to pifutex,
1884 	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
1885 	 * as we have means to handle the possible fault.  If not, don't set
1886 			 * the bit unnecessarily as it will force the subsequent unlock to enter
1887 	 * the kernel.
1888 	 */
1889 	top_waiter = futex_top_waiter(hb1, key1);
1890 
1891 	/* There are no waiters, nothing for us to do. */
1892 	if (!top_waiter)
1893 		return 0;
1894 
1895 	/* Ensure we requeue to the expected futex. */
1896 	if (!match_futex(top_waiter->requeue_pi_key, key2))
1897 		return -EINVAL;
1898 
1899 	/*
1900 	 * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
1901 	 * the contended case or if set_waiters is 1.  The pi_state is returned
1902 	 * in ps in contended cases.
1903 	 */
1904 	vpid = task_pid_vnr(top_waiter->task);
1905 	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
1906 				   set_waiters);
1907 	if (ret == 1) {
1908 		requeue_pi_wake_futex(top_waiter, key2, hb2);
1909 		return vpid;
1910 	}
1911 	return ret;
1912 }
1913 
1914 /**
1915  * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
1916  * @uaddr1:	source futex user address
1917  * @flags:	futex flags (FLAGS_SHARED, etc.)
1918  * @uaddr2:	target futex user address
1919  * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
1920  * @nr_requeue:	number of waiters to requeue (0-INT_MAX)
1921  * @cmpval:	@uaddr1 expected value (or %NULL)
1922  * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a
1923  *		pi futex (pi to pi requeue is not supported)
1924  *
1925  * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
1926  * uaddr2 atomically on behalf of the top waiter.
1927  *
1928  * Return:
1929  *  - >=0 - on success, the number of tasks requeued or woken;
1930  *  -  <0 - on error
1931  */
1932 static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
1933 			 u32 __user *uaddr2, int nr_wake, int nr_requeue,
1934 			 u32 *cmpval, int requeue_pi)
1935 {
1936 	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1937 	int drop_count = 0, task_count = 0, ret;
1938 	struct futex_pi_state *pi_state = NULL;
1939 	struct futex_hash_bucket *hb1, *hb2;
1940 	struct futex_q *this, *next;
1941 	DEFINE_WAKE_Q(wake_q);
1942 
1943 	if (nr_wake < 0 || nr_requeue < 0)
1944 		return -EINVAL;
1945 
1946 	/*
1947 	 * When PI not supported: return -ENOSYS if requeue_pi is true,
1948 	 * consequently the compiler knows requeue_pi is always false past
1949 	 * this point which will optimize away all the conditional code
1950 	 * further down.
1951 	 */
1952 	if (!IS_ENABLED(CONFIG_FUTEX_PI) && requeue_pi)
1953 		return -ENOSYS;
1954 
1955 	if (requeue_pi) {
1956 		/*
1957 		 * Requeue PI only works on two distinct uaddrs. This
1958 		 * check is only valid for private futexes. See below.
1959 		 */
1960 		if (uaddr1 == uaddr2)
1961 			return -EINVAL;
1962 
1963 		/*
1964 		 * requeue_pi requires a pi_state, try to allocate it now
1965 		 * without any locks in case it fails.
1966 		 */
1967 		if (refill_pi_state_cache())
1968 			return -ENOMEM;
1969 		/*
1970 		 * requeue_pi must wake as many tasks as it can, up to nr_wake
1971 		 * + nr_requeue, since it acquires the rt_mutex prior to
1972 		 * returning to userspace, so as to not leave the rt_mutex with
1973 		 * waiters and no owner.  However, second and third wake-ups
1974 		 * cannot be predicted as they involve race conditions with the
1975 		 * first wake and a fault while looking up the pi_state.  Both
1976 		 * pthread_cond_signal() and pthread_cond_broadcast() should
1977 		 * use nr_wake=1.
1978 		 */
1979 		if (nr_wake != 1)
1980 			return -EINVAL;
1981 	}
1982 
1983 retry:
1984 	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ);
1985 	if (unlikely(ret != 0))
1986 		goto out;
1987 	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
1988 			    requeue_pi ? FUTEX_WRITE : FUTEX_READ);
1989 	if (unlikely(ret != 0))
1990 		goto out_put_key1;
1991 
1992 	/*
1993 	 * The check above which compares uaddrs is not sufficient for
1994 	 * shared futexes. We need to compare the keys:
1995 	 */
1996 	if (requeue_pi && match_futex(&key1, &key2)) {
1997 		ret = -EINVAL;
1998 		goto out_put_keys;
1999 	}
2000 
2001 	hb1 = hash_futex(&key1);
2002 	hb2 = hash_futex(&key2);
2003 
2004 retry_private:
2005 	hb_waiters_inc(hb2);
2006 	double_lock_hb(hb1, hb2);
2007 
2008 	if (likely(cmpval != NULL)) {
2009 		u32 curval;
2010 
2011 		ret = get_futex_value_locked(&curval, uaddr1);
2012 
2013 		if (unlikely(ret)) {
2014 			double_unlock_hb(hb1, hb2);
2015 			hb_waiters_dec(hb2);
2016 
2017 			ret = get_user(curval, uaddr1);
2018 			if (ret)
2019 				goto out_put_keys;
2020 
2021 			if (!(flags & FLAGS_SHARED))
2022 				goto retry_private;
2023 
2024 			put_futex_key(&key2);
2025 			put_futex_key(&key1);
2026 			goto retry;
2027 		}
2028 		if (curval != *cmpval) {
2029 			ret = -EAGAIN;
2030 			goto out_unlock;
2031 		}
2032 	}
2033 
2034 	if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
2035 		/*
2036 		 * Attempt to acquire uaddr2 and wake the top waiter. If we
2037 		 * intend to requeue waiters, force setting the FUTEX_WAITERS
2038 		 * bit.  We force this here where we are able to easily handle
2039 		 * faults rather than in the requeue loop below.
2040 		 */
2041 		ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
2042 						 &key2, &pi_state, nr_requeue);
2043 
2044 		/*
2045 		 * At this point the top_waiter has either taken uaddr2 or is
2046 		 * waiting on it.  If the former, then the pi_state will not
2047 		 * exist yet, look it up one more time to ensure we have a
2048 		 * reference to it. If the lock was taken, ret contains the
2049 		 * vpid of the top waiter task.
2050 		 * If the lock was not taken, we have pi_state and an initial
2051 		 * refcount on it. In case of an error we have nothing.
2052 		 */
2053 		if (ret > 0) {
2054 			WARN_ON(pi_state);
2055 			drop_count++;
2056 			task_count++;
2057 			/*
2058 			 * If we acquired the lock, then the user space value
2059 			 * of uaddr2 should be vpid. It cannot be changed by
2060 			 * the top waiter as it is blocked on hb2 lock if it
2061 			 * tries to do so. If something fiddled with it behind
2062 			 * our back the pi state lookup might unearth it. So
2063 			 * we'd rather use the known value than reread it and
2064 			 * hand potential crap to lookup_pi_state.
2065 			 *
2066 			 * If that call succeeds then we have pi_state and an
2067 			 * initial refcount on it.
2068 			 */
2069 			ret = lookup_pi_state(uaddr2, ret, hb2, &key2, &pi_state);
2070 		}
2071 
2072 		switch (ret) {
2073 		case 0:
2074 			/* We hold a reference on the pi state. */
2075 			break;
2076 
2077 			/* If the above failed, then pi_state is NULL */
2078 		case -EFAULT:
2079 			double_unlock_hb(hb1, hb2);
2080 			hb_waiters_dec(hb2);
2081 			put_futex_key(&key2);
2082 			put_futex_key(&key1);
2083 			ret = fault_in_user_writeable(uaddr2);
2084 			if (!ret)
2085 				goto retry;
2086 			goto out;
2087 		case -EAGAIN:
2088 			/*
2089 			 * Two reasons for this:
2090 			 * - Owner is exiting and we just wait for the
2091 			 *   exit to complete.
2092 			 * - The user space value changed.
2093 			 */
2094 			double_unlock_hb(hb1, hb2);
2095 			hb_waiters_dec(hb2);
2096 			put_futex_key(&key2);
2097 			put_futex_key(&key1);
2098 			cond_resched();
2099 			goto retry;
2100 		default:
2101 			goto out_unlock;
2102 		}
2103 	}
2104 
2105 	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
2106 		if (task_count - nr_wake >= nr_requeue)
2107 			break;
2108 
2109 		if (!match_futex(&this->key, &key1))
2110 			continue;
2111 
2112 		/*
2113 		 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
2114 		 * be paired with each other and no other futex ops.
2115 		 *
2116 		 * We should never be requeueing a futex_q with a pi_state,
2117 		 * which is awaiting a futex_unlock_pi().
2118 		 */
2119 		if ((requeue_pi && !this->rt_waiter) ||
2120 		    (!requeue_pi && this->rt_waiter) ||
2121 		    this->pi_state) {
2122 			ret = -EINVAL;
2123 			break;
2124 		}
2125 
2126 		/*
2127 		 * Wake nr_wake waiters.  For requeue_pi, if we acquired the
2128 		 * lock, we already woke the top_waiter.  If not, it will be
2129 		 * woken by futex_unlock_pi().
2130 		 */
2131 		if (++task_count <= nr_wake && !requeue_pi) {
2132 			mark_wake_futex(&wake_q, this);
2133 			continue;
2134 		}
2135 
2136 		/* Ensure we requeue to the expected futex for requeue_pi. */
2137 		if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
2138 			ret = -EINVAL;
2139 			break;
2140 		}
2141 
2142 		/*
2143 		 * Requeue nr_requeue waiters and possibly one more in the case
2144 		 * of requeue_pi if we couldn't acquire the lock atomically.
2145 		 */
2146 		if (requeue_pi) {
2147 			/*
2148 			 * Prepare the waiter to take the rt_mutex. Take a
2149 			 * refcount on the pi_state and store the pointer in
2150 			 * the futex_q object of the waiter.
2151 			 */
2152 			get_pi_state(pi_state);
2153 			this->pi_state = pi_state;
2154 			ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
2155 							this->rt_waiter,
2156 							this->task);
2157 			if (ret == 1) {
2158 				/*
2159 				 * We got the lock. We do neither drop the
2160 				 * refcount on pi_state nor clear
2161 				 * this->pi_state because the waiter needs the
2162 				 * pi_state for cleaning up the user space
2163 				 * value. It will drop the refcount after
2164 				 * doing so.
2165 				 */
2166 				requeue_pi_wake_futex(this, &key2, hb2);
2167 				drop_count++;
2168 				continue;
2169 			} else if (ret) {
2170 				/*
2171 				 * rt_mutex_start_proxy_lock() detected a
2172 				 * potential deadlock when we tried to queue
2173 				 * that waiter. Drop the pi_state reference
2174 				 * which we took above and remove the pointer
2175 				 * to the state from the waiters futex_q
2176 				 * object.
2177 				 */
2178 				this->pi_state = NULL;
2179 				put_pi_state(pi_state);
2180 				/*
2181 				 * We stop queueing more waiters and let user
2182 				 * space deal with the mess.
2183 				 */
2184 				break;
2185 			}
2186 		}
2187 		requeue_futex(this, hb1, hb2, &key2);
2188 		drop_count++;
2189 	}
2190 
2191 	/*
2192 	 * We took an extra initial reference to the pi_state either
2193 	 * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
2194 	 * need to drop it here again.
2195 	 */
2196 	put_pi_state(pi_state);
2197 
2198 out_unlock:
2199 	double_unlock_hb(hb1, hb2);
2200 	wake_up_q(&wake_q);
2201 	hb_waiters_dec(hb2);
2202 
2203 	/*
2204 	 * drop_futex_key_refs() must be called outside the spinlocks. During
2205 	 * the requeue we moved futex_q's from the hash bucket at key1 to the
2206 	 * one at key2 and updated their key pointer.  We no longer need to
2207 	 * hold the references to key1.
2208 	 */
2209 	while (--drop_count >= 0)
2210 		drop_futex_key_refs(&key1);
2211 
2212 out_put_keys:
2213 	put_futex_key(&key2);
2214 out_put_key1:
2215 	put_futex_key(&key1);
2216 out:
2217 	return ret ? ret : task_count;
2218 }
2219 
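/*
 * A hedged userspace sketch of the non-PI "broadcast" pattern this
 * implements: wake one waiter on cond and requeue the remainder onto
 * the mutex so they do not all stampede for it at once. Identifiers
 * are illustrative, not a real library API:
 *
 *	// nr_requeue rides in the timeout slot; val3 is the expected
 *	// value of *cond (cmpval above), so the call fails with -EAGAIN
 *	// if another thread changed it meanwhile.
 *	syscall(SYS_futex, cond, FUTEX_CMP_REQUEUE, 1,
 *		(void *)(long)INT_MAX, mutex, *cond);
 */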
2220 /* The key must be already stored in q->key. */
2221 static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
2222 	__acquires(&hb->lock)
2223 {
2224 	struct futex_hash_bucket *hb;
2225 
2226 	hb = hash_futex(&q->key);
2227 
2228 	/*
2229 	 * Increment the counter before taking the lock so that
2230 	 * a potential waker won't miss a to-be-slept task that is
2231 	 * waiting for the spinlock. This is safe as all queue_lock()
2232 	 * users end up calling queue_me(). Similarly, for housekeeping,
2233 	 * decrement the counter at queue_unlock() when some error has
2234 	 * occurred and we don't end up adding the task to the list.
2235 	 */
2236 	hb_waiters_inc(hb); /* implies smp_mb(); (A) */
2237 
2238 	q->lock_ptr = &hb->lock;
2239 
2240 	spin_lock(&hb->lock);
2241 	return hb;
2242 }
2243 
2244 static inline void
2245 queue_unlock(struct futex_hash_bucket *hb)
2246 	__releases(&hb->lock)
2247 {
2248 	spin_unlock(&hb->lock);
2249 	hb_waiters_dec(hb);
2250 }
2251 
2252 static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
2253 {
2254 	int prio;
2255 
2256 	/*
2257 	 * The priority used to register this element is
2258 	 * - either the real thread-priority for the real-time threads
2259 	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
2260 	 * - or MAX_RT_PRIO for non-RT threads.
2261 	 * Thus, all RT-threads are woken first in priority order, and
2262 	 * the others are woken last, in FIFO order.
2263 	 */
2264 	prio = min(current->normal_prio, MAX_RT_PRIO);
2265 
2266 	plist_node_init(&q->list, prio);
2267 	plist_add(&q->list, &hb->chain);
2268 	q->task = current;
2269 }
2270 
2271 /**
2272  * queue_me() - Enqueue the futex_q on the futex_hash_bucket
2273  * @q:	The futex_q to enqueue
2274  * @hb:	The destination hash bucket
2275  *
2276  * The hb->lock must be held by the caller, and is released here. A call to
2277  * queue_me() is typically paired with exactly one call to unqueue_me().  The
2278  * exceptions involve the PI related operations, which may use unqueue_me_pi()
2279  * or nothing if the unqueue is done as part of the wake process and the unqueue
2280  * state is implicit in the state of the woken task (see futex_wait_requeue_pi() for
2281  * an example).
2282  */
2283 static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
2284 	__releases(&hb->lock)
2285 {
2286 	__queue_me(q, hb);
2287 	spin_unlock(&hb->lock);
2288 }
2289 
2290 /**
2291  * unqueue_me() - Remove the futex_q from its futex_hash_bucket
2292  * @q:	The futex_q to unqueue
2293  *
2294  * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
2295  * be paired with exactly one earlier call to queue_me().
2296  *
2297  * Return:
2298  *  - 1 - if the futex_q was still queued (and we unqueued it);
2299  *  - 0 - if the futex_q was already removed by the waking thread
2300  */
2301 static int unqueue_me(struct futex_q *q)
2302 {
2303 	spinlock_t *lock_ptr;
2304 	int ret = 0;
2305 
2306 	/* In the common case we don't take the spinlock, which is nice. */
2307 retry:
2308 	/*
2309 	 * q->lock_ptr can change between this read and the following spin_lock.
2310 	 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
2311 	 * optimizing lock_ptr out of the logic below.
2312 	 */
2313 	lock_ptr = READ_ONCE(q->lock_ptr);
2314 	if (lock_ptr != NULL) {
2315 		spin_lock(lock_ptr);
2316 		/*
2317 		 * q->lock_ptr can change between reading it and
2318 		 * spin_lock(), causing us to take the wrong lock.  This
2319 		 * corrects the race condition.
2320 		 *
2321 		 * Reasoning goes like this: if we have the wrong lock,
2322 		 * q->lock_ptr must have changed (maybe several times)
2323 		 * between reading it and the spin_lock().  It can
2324 		 * change again after the spin_lock() but only if it was
2325 		 * already changed before the spin_lock().  It cannot,
2326 		 * however, change back to the original value.  Therefore
2327 		 * we can detect whether we acquired the correct lock.
2328 		 */
2329 		if (unlikely(lock_ptr != q->lock_ptr)) {
2330 			spin_unlock(lock_ptr);
2331 			goto retry;
2332 		}
2333 		__unqueue_futex(q);
2334 
2335 		BUG_ON(q->pi_state);
2336 
2337 		spin_unlock(lock_ptr);
2338 		ret = 1;
2339 	}
2340 
2341 	drop_futex_key_refs(&q->key);
2342 	return ret;
2343 }
2344 
2345 /*
2346  * PI futexes cannot be requeued and must remove themselves from the
2347  * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
2348  * and dropped here.
2349  */
2350 static void unqueue_me_pi(struct futex_q *q)
2351 	__releases(q->lock_ptr)
2352 {
2353 	__unqueue_futex(q);
2354 
2355 	BUG_ON(!q->pi_state);
2356 	put_pi_state(q->pi_state);
2357 	q->pi_state = NULL;
2358 
2359 	spin_unlock(q->lock_ptr);
2360 }
2361 
2362 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
2363 				struct task_struct *argowner)
2364 {
2365 	struct futex_pi_state *pi_state = q->pi_state;
2366 	u32 uval, uninitialized_var(curval), newval;
2367 	struct task_struct *oldowner, *newowner;
2368 	u32 newtid;
2369 	int ret, err = 0;
2370 
2371 	lockdep_assert_held(q->lock_ptr);
2372 
2373 	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
2374 
2375 	oldowner = pi_state->owner;
2376 
2377 	/*
2378 	 * We are here because either:
2379 	 *
2380 	 *  - we stole the lock and pi_state->owner needs updating to reflect
2381 	 *    that (@argowner == current),
2382 	 *
2383 	 * or:
2384 	 *
2385 	 *  - someone stole our lock and we need to fix things to point to the
2386 	 *    new owner (@argowner == NULL).
2387 	 *
2388 	 * Either way, we have to replace the TID in the user space variable.
2389 	 * This must be atomic as we have to preserve the owner died bit here.
2390 	 *
2391 	 * Note: We write the user space value _before_ changing the pi_state
2392 	 * because we can fault here. Imagine swapped out pages or a fork
2393 	 * that marked all the anonymous memory readonly for cow.
2394 	 *
2395 	 * Modifying pi_state _before_ the user space value would leave the
2396 	 * pi_state in an inconsistent state when we fault here, because we
2397 	 * need to drop the locks to handle the fault. This might be observed
2398 	 * in the PID check in lookup_pi_state.
2399 	 */
2400 retry:
2401 	if (!argowner) {
2402 		if (oldowner != current) {
2403 			/*
2404 			 * We raced against a concurrent self; things are
2405 			 * already fixed up. Nothing to do.
2406 			 */
2407 			ret = 0;
2408 			goto out_unlock;
2409 		}
2410 
2411 		if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
2412 			/* We got the lock after all, nothing to fix. */
2413 			ret = 0;
2414 			goto out_unlock;
2415 		}
2416 
2417 		/*
2418 		 * Since we just failed the trylock, there must be an owner.
2419 		 */
2420 		newowner = rt_mutex_owner(&pi_state->pi_mutex);
2421 		BUG_ON(!newowner);
2422 	} else {
2423 		WARN_ON_ONCE(argowner != current);
2424 		if (oldowner == current) {
2425 			/*
2426 			 * We raced against a concurrent self; things are
2427 			 * already fixed up. Nothing to do.
2428 			 */
2429 			ret = 0;
2430 			goto out_unlock;
2431 		}
2432 		newowner = argowner;
2433 	}
2434 
2435 	newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
2436 	/* Owner died? */
2437 	if (!pi_state->owner)
2438 		newtid |= FUTEX_OWNER_DIED;
2439 
2440 	err = get_futex_value_locked(&uval, uaddr);
2441 	if (err)
2442 		goto handle_err;
2443 
2444 	for (;;) {
2445 		newval = (uval & FUTEX_OWNER_DIED) | newtid;
2446 
2447 		err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
2448 		if (err)
2449 			goto handle_err;
2450 
2451 		if (curval == uval)
2452 			break;
2453 		uval = curval;
2454 	}
2455 
2456 	/*
2457 	 * We fixed up user space. Now we need to fix the pi_state
2458 	 * itself.
2459 	 */
2460 	if (pi_state->owner != NULL) {
2461 		raw_spin_lock(&pi_state->owner->pi_lock);
2462 		WARN_ON(list_empty(&pi_state->list));
2463 		list_del_init(&pi_state->list);
2464 		raw_spin_unlock(&pi_state->owner->pi_lock);
2465 	}
2466 
2467 	pi_state->owner = newowner;
2468 
2469 	raw_spin_lock(&newowner->pi_lock);
2470 	WARN_ON(!list_empty(&pi_state->list));
2471 	list_add(&pi_state->list, &newowner->pi_state_list);
2472 	raw_spin_unlock(&newowner->pi_lock);
2473 	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
2474 
2475 	return 0;
2476 
2477 	/*
2478 	 * In order to reschedule or handle a page fault, we need to drop the
2479 	 * locks here. In the case of a fault, this gives the other task
2480 	 * (either the highest priority waiter itself or the task which stole
2481 	 * the rtmutex) the chance to try the fixup of the pi_state. So once we
2482 	 * are back from handling the fault we need to check the pi_state after
2483 	 * reacquiring the locks and before trying to do another fixup. When
2484 	 * the fixup has been done already we simply return.
2485 	 *
2486 	 * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely
2487 	 * drop hb->lock since the caller owns the hb -> futex_q relation.
2488 	 * Dropping the pi_mutex->wait_lock requires revalidating the state.
2489 	 */
2490 handle_err:
2491 	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
2492 	spin_unlock(q->lock_ptr);
2493 
2494 	switch (err) {
2495 	case -EFAULT:
2496 		ret = fault_in_user_writeable(uaddr);
2497 		break;
2498 
2499 	case -EAGAIN:
2500 		cond_resched();
2501 		ret = 0;
2502 		break;
2503 
2504 	default:
2505 		WARN_ON_ONCE(1);
2506 		ret = err;
2507 		break;
2508 	}
2509 
2510 	spin_lock(q->lock_ptr);
2511 	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
2512 
2513 	/*
2514 	 * Check if someone else fixed it for us:
2515 	 */
2516 	if (pi_state->owner != oldowner) {
2517 		ret = 0;
2518 		goto out_unlock;
2519 	}
2520 
2521 	if (ret)
2522 		goto out_unlock;
2523 
2524 	goto retry;
2525 
2526 out_unlock:
2527 	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
2528 	return ret;
2529 }
2530 
2531 static long futex_wait_restart(struct restart_block *restart);
2532 
2533 /**
2534  * fixup_owner() - Post lock pi_state and corner case management
2535  * @uaddr:	user address of the futex
2536  * @q:		futex_q (contains pi_state and access to the rt_mutex)
2537  * @locked:	if the attempt to take the rt_mutex succeeded (1) or not (0)
2538  *
2539  * After attempting to lock an rt_mutex, this function is called to cleanup
2540  * the pi_state owner as well as handle race conditions that may allow us to
2541  * acquire the lock. Must be called with the hb lock held.
2542  *
2543  * Return:
2544  *  -  1 - success, lock taken;
2545  *  -  0 - success, lock not taken;
2546  *  - <0 - on error (-EFAULT)
2547  */
2548 static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
2549 {
2550 	int ret = 0;
2551 
2552 	if (locked) {
2553 		/*
2554 		 * Got the lock. We might not be the anticipated owner if we
2555 		 * did a lock-steal - fix up the PI-state in that case:
2556 		 *
2557 		 * Speculative pi_state->owner read (we don't hold wait_lock);
2558 		 * since we own the lock, pi_state->owner == current is the
2559 		 * stable state, anything else needs more attention.
2560 		 */
2561 		if (q->pi_state->owner != current)
2562 			ret = fixup_pi_state_owner(uaddr, q, current);
2563 		goto out;
2564 	}
2565 
2566 	/*
2567 	 * If we didn't get the lock; check if anybody stole it from us. In
2568 	 * that case, we need to fix up the uval to point to them instead of
2569 	 * us, otherwise bad things happen. [10]
2570 	 *
2571 	 * Another speculative read; pi_state->owner == current is unstable
2572 	 * but needs our attention.
2573 	 */
2574 	if (q->pi_state->owner == current) {
2575 		ret = fixup_pi_state_owner(uaddr, q, NULL);
2576 		goto out;
2577 	}
2578 
2579 	/*
2580 	 * Paranoia check. If we did not take the lock, then we should not be
2581 	 * the owner of the rt_mutex.
2582 	 */
2583 	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
2584 		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
2585 				"pi-state %p\n", ret,
2586 				q->pi_state->pi_mutex.owner,
2587 				q->pi_state->owner);
2588 	}
2589 
2590 out:
2591 	return ret ? ret : locked;
2592 }
2593 
2594 /**
2595  * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
2596  * @hb:		the futex hash bucket, must be locked by the caller
2597  * @q:		the futex_q to queue up on
2598  * @timeout:	the prepared hrtimer_sleeper, or null for no timeout
2599  */
2600 static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
2601 				struct hrtimer_sleeper *timeout)
2602 {
2603 	/*
2604 	 * The task state is guaranteed to be set before another task can
2605 	 * wake it. set_current_state() is implemented using smp_store_mb() and
2606 	 * queue_me() calls spin_unlock() upon completion, both serializing
2607 	 * access to the hash list and forcing another memory barrier.
2608 	 */
2609 	set_current_state(TASK_INTERRUPTIBLE);
2610 	queue_me(q, hb);
2611 
2612 	/* Arm the timer */
2613 	if (timeout)
2614 		hrtimer_sleeper_start_expires(timeout, HRTIMER_MODE_ABS);
2615 
2616 	/*
2617 	 * If we have been removed from the hash list, then another task
2618 	 * has tried to wake us, and we can skip the call to schedule().
2619 	 */
2620 	if (likely(!plist_node_empty(&q->list))) {
2621 		/*
2622 		 * If the timer has already expired, current will already be
2623 		 * flagged for rescheduling. Only call schedule if there
2624 		 * is no timeout, or if it has yet to expire.
2625 		 */
2626 		if (!timeout || timeout->task)
2627 			freezable_schedule();
2628 	}
2629 	__set_current_state(TASK_RUNNING);
2630 }
2631 
2632 /**
2633  * futex_wait_setup() - Prepare to wait on a futex
2634  * @uaddr:	the futex userspace address
2635  * @val:	the expected value
2636  * @flags:	futex flags (FLAGS_SHARED, etc.)
2637  * @q:		the associated futex_q
2638  * @hb:		storage for hash_bucket pointer to be returned to caller
2639  *
2640  * Setup the futex_q and locate the hash_bucket.  Get the futex value and
2641  * compare it with the expected value.  Handle atomic faults internally.
2642  * Return with the hb lock held and a q.key reference on success, and unlocked
2643  * with no q.key reference on failure.
2644  *
2645  * Return:
2646  *  -  0 - uaddr contains val and hb has been locked;
2647  *  - <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
2648  */
2649 static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
2650 			   struct futex_q *q, struct futex_hash_bucket **hb)
2651 {
2652 	u32 uval;
2653 	int ret;
2654 
2655 	/*
2656 	 * Access the page AFTER the hash-bucket is locked.
2657 	 * Order is important:
2658 	 *
2659 	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
2660 	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
2661 	 *
2662 	 * The basic logical guarantee of a futex is that it blocks ONLY
2663 	 * if cond(var) is known to be true at the time of blocking, for
2664 	 * any cond.  If we locked the hash-bucket after testing *uaddr, that
2665 	 * would open a race condition where we could block indefinitely with
2666 	 * cond(var) false, which would violate the guarantee.
2667 	 *
2668 	 * On the other hand, we insert q and release the hash-bucket only
2669 	 * after testing *uaddr.  This guarantees that futex_wait() will NOT
2670 	 * absorb a wakeup if *uaddr does not match the desired values
2671 	 * while the syscall executes.
2672 	 */
2673 retry:
2674 	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, FUTEX_READ);
2675 	if (unlikely(ret != 0))
2676 		return ret;
2677 
2678 retry_private:
2679 	*hb = queue_lock(q);
2680 
2681 	ret = get_futex_value_locked(&uval, uaddr);
2682 
2683 	if (ret) {
2684 		queue_unlock(*hb);
2685 
2686 		ret = get_user(uval, uaddr);
2687 		if (ret)
2688 			goto out;
2689 
2690 		if (!(flags & FLAGS_SHARED))
2691 			goto retry_private;
2692 
2693 		put_futex_key(&q->key);
2694 		goto retry;
2695 	}
2696 
2697 	if (uval != val) {
2698 		queue_unlock(*hb);
2699 		ret = -EWOULDBLOCK;
2700 	}
2701 
2702 out:
2703 	if (ret)
2704 		put_futex_key(&q->key);
2705 	return ret;
2706 }
2707 
2708 static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
2709 		      ktime_t *abs_time, u32 bitset)
2710 {
2711 	struct hrtimer_sleeper timeout, *to;
2712 	struct restart_block *restart;
2713 	struct futex_hash_bucket *hb;
2714 	struct futex_q q = futex_q_init;
2715 	int ret;
2716 
2717 	if (!bitset)
2718 		return -EINVAL;
2719 	q.bitset = bitset;
2720 
2721 	to = futex_setup_timer(abs_time, &timeout, flags,
2722 			       current->timer_slack_ns);
2723 retry:
2724 	/*
2725 	 * Prepare to wait on uaddr. On success, holds hb lock and increments
2726 	 * q.key refs.
2727 	 */
2728 	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2729 	if (ret)
2730 		goto out;
2731 
2732 	/* queue_me and wait for wakeup, timeout, or a signal. */
2733 	futex_wait_queue_me(hb, &q, to);
2734 
2735 	/* If we were woken (and unqueued), we succeeded, whatever. */
2736 	ret = 0;
2737 	/* unqueue_me() drops q.key ref */
2738 	if (!unqueue_me(&q))
2739 		goto out;
2740 	ret = -ETIMEDOUT;
2741 	if (to && !to->task)
2742 		goto out;
2743 
2744 	/*
2745 	 * We expect signal_pending(current), but we might be the
2746 	 * victim of a spurious wakeup as well.
2747 	 */
2748 	if (!signal_pending(current))
2749 		goto retry;
2750 
2751 	ret = -ERESTARTSYS;
2752 	if (!abs_time)
2753 		goto out;
2754 
2755 	restart = &current->restart_block;
2756 	restart->fn = futex_wait_restart;
2757 	restart->futex.uaddr = uaddr;
2758 	restart->futex.val = val;
2759 	restart->futex.time = *abs_time;
2760 	restart->futex.bitset = bitset;
2761 	restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
2762 
2763 	ret = -ERESTART_RESTARTBLOCK;
2764 
2765 out:
2766 	if (to) {
2767 		hrtimer_cancel(&to->timer);
2768 		destroy_hrtimer_on_stack(&to->timer);
2769 	}
2770 	return ret;
2771 }
2772 
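/*
 * For orientation, a minimal (and deliberately naive) userspace mutex
 * built on futex_wait()/futex_wake(); a sketch under the usual uapi
 * definitions, not a production lock:
 *
 *	static _Atomic unsigned int f;	// 0 = unlocked, 1 = locked
 *
 *	static void lock(void)
 *	{
 *		unsigned int zero = 0;
 *		while (!atomic_compare_exchange_strong(&f, &zero, 1)) {
 *			// Sleeps only while the futex word still reads 1,
 *			// exactly the uval == val check in futex_wait_setup().
 *			syscall(SYS_futex, &f, FUTEX_WAIT_PRIVATE, 1, NULL, NULL, 0);
 *			zero = 0;
 *		}
 *	}
 *
 *	static void unlock(void)
 *	{
 *		atomic_store(&f, 0);
 *		syscall(SYS_futex, &f, FUTEX_WAKE_PRIVATE, 1, NULL, NULL, 0);
 *	}
 */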
2773 
2774 static long futex_wait_restart(struct restart_block *restart)
2775 {
2776 	u32 __user *uaddr = restart->futex.uaddr;
2777 	ktime_t t, *tp = NULL;
2778 
2779 	if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
2780 		t = restart->futex.time;
2781 		tp = &t;
2782 	}
2783 	restart->fn = do_no_restart_syscall;
2784 
2785 	return (long)futex_wait(uaddr, restart->futex.flags,
2786 				restart->futex.val, tp, restart->futex.bitset);
2787 }
2788 
2789 
2790 /*
2791  * Userspace tried a 0 -> TID atomic transition of the futex value
2792  * and failed. The kernel side here does the whole locking operation:
2793  * if there are waiters then it will block as a consequence of relying
2794  * on rt-mutexes, it does PI, etc. (Due to races the kernel might see
2795  * a 0 value of the futex too.)
2796  *
2797  * Also implements the futex trylock_pi() semantics.
2798  */
2799 static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
2800 			 ktime_t *time, int trylock)
2801 {
2802 	struct hrtimer_sleeper timeout, *to;
2803 	struct futex_pi_state *pi_state = NULL;
2804 	struct rt_mutex_waiter rt_waiter;
2805 	struct futex_hash_bucket *hb;
2806 	struct futex_q q = futex_q_init;
2807 	int res, ret;
2808 
2809 	if (!IS_ENABLED(CONFIG_FUTEX_PI))
2810 		return -ENOSYS;
2811 
2812 	if (refill_pi_state_cache())
2813 		return -ENOMEM;
2814 
2815 	to = futex_setup_timer(time, &timeout, FLAGS_CLOCKRT, 0);
2816 
2817 retry:
2818 	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, FUTEX_WRITE);
2819 	if (unlikely(ret != 0))
2820 		goto out;
2821 
2822 retry_private:
2823 	hb = queue_lock(&q);
2824 
2825 	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
2826 	if (unlikely(ret)) {
2827 		/*
2828 		 * Atomic work succeeded and we got the lock,
2829 		 * or failed. Either way, we do _not_ block.
2830 		 */
2831 		switch (ret) {
2832 		case 1:
2833 			/* We got the lock. */
2834 			ret = 0;
2835 			goto out_unlock_put_key;
2836 		case -EFAULT:
2837 			goto uaddr_faulted;
2838 		case -EAGAIN:
2839 			/*
2840 			 * Two reasons for this:
2841 			 * - Task is exiting and we just wait for the
2842 			 *   exit to complete.
2843 			 * - The user space value changed.
2844 			 */
2845 			queue_unlock(hb);
2846 			put_futex_key(&q.key);
2847 			cond_resched();
2848 			goto retry;
2849 		default:
2850 			goto out_unlock_put_key;
2851 		}
2852 	}
2853 
2854 	WARN_ON(!q.pi_state);
2855 
2856 	/*
2857 	 * Only actually queue now that the atomic ops are done:
2858 	 */
2859 	__queue_me(&q, hb);
2860 
2861 	if (trylock) {
2862 		ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
2863 		/* Fixup the trylock return value: */
2864 		ret = ret ? 0 : -EWOULDBLOCK;
2865 		goto no_block;
2866 	}
2867 
2868 	rt_mutex_init_waiter(&rt_waiter);
2869 
2870 	/*
2871 	 * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
2872 	 * hold it while doing rt_mutex_start_proxy_lock(), because then it will
2873 	 * include hb->lock in the blocking chain, even though we'll not in
2874 	 * fact hold it while blocking. This will lead it to report -EDEADLK
2875 	 * and BUG when futex_unlock_pi() interleaves with this.
2876 	 *
2877 	 * Therefore acquire wait_lock while holding hb->lock, but drop the
2878 	 * latter before calling __rt_mutex_start_proxy_lock(). This
2879 	 * interleaves with futex_unlock_pi() -- which does a similar lock
2880 	 * handoff -- such that the latter can observe the futex_q::pi_state
2881 	 * before __rt_mutex_start_proxy_lock() is done.
2882 	 */
2883 	raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
2884 	spin_unlock(q.lock_ptr);
2885 	/*
2886 	 * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
2887 	 * such that futex_unlock_pi() is guaranteed to observe the waiter when
2888 	 * it sees the futex_q::pi_state.
2889 	 */
2890 	ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
2891 	raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
2892 
2893 	if (ret) {
2894 		if (ret == 1)
2895 			ret = 0;
2896 		goto cleanup;
2897 	}
2898 
2899 	if (unlikely(to))
2900 		hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);
2901 
2902 	ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
2903 
2904 cleanup:
2905 	spin_lock(q.lock_ptr);
2906 	/*
2907 	 * If we failed to acquire the lock (deadlock/signal/timeout), we must
2908 	 * first acquire the hb->lock before removing the lock from the
2909 	 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
2910 	 * lists consistent.
2911 	 *
2912 	 * In particular, it is important that futex_unlock_pi() cannot
2913 	 * observe this inconsistency.
2914 	 */
2915 	if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
2916 		ret = 0;
2917 
2918 no_block:
2919 	/*
2920 	 * Fixup the pi_state owner and possibly acquire the lock if we
2921 	 * haven't already.
2922 	 */
2923 	res = fixup_owner(uaddr, &q, !ret);
2924 	/*
2925 	 * If fixup_owner() returned an error, propagate that.  If it acquired
2926 	 * the lock, clear our -ETIMEDOUT or -EINTR.
2927 	 */
2928 	if (res)
2929 		ret = (res < 0) ? res : 0;
2930 
2931 	/*
2932 	 * If fixup_owner() faulted and was unable to handle the fault, unlock
2933 	 * it and return the fault to userspace.
2934 	 */
2935 	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) {
2936 		pi_state = q.pi_state;
2937 		get_pi_state(pi_state);
2938 	}
2939 
2940 	/* Unqueue and drop the lock */
2941 	unqueue_me_pi(&q);
2942 
2943 	if (pi_state) {
2944 		rt_mutex_futex_unlock(&pi_state->pi_mutex);
2945 		put_pi_state(pi_state);
2946 	}
2947 
2948 	goto out_put_key;
2949 
2950 out_unlock_put_key:
2951 	queue_unlock(hb);
2952 
2953 out_put_key:
2954 	put_futex_key(&q.key);
2955 out:
2956 	if (to) {
2957 		hrtimer_cancel(&to->timer);
2958 		destroy_hrtimer_on_stack(&to->timer);
2959 	}
2960 	return ret != -EINTR ? ret : -ERESTARTNOINTR;
2961 
2962 uaddr_faulted:
2963 	queue_unlock(hb);
2964 
2965 	ret = fault_in_user_writeable(uaddr);
2966 	if (ret)
2967 		goto out_put_key;
2968 
2969 	if (!(flags & FLAGS_SHARED))
2970 		goto retry_private;
2971 
2972 	put_futex_key(&q.key);
2973 	goto retry;
2974 }
2975 
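/*
 * The userspace half of the PI protocol, as a hedged sketch: the futex
 * word holds 0 when free and the owner's TID when held, and the kernel
 * is entered only on contention (gettid() and C11 atomics assumed):
 *
 *	_Atomic uint32_t *futex = ...;	// the lock word, 0 when free
 *	uint32_t zero = 0, tid = gettid();
 *
 *	if (!atomic_compare_exchange_strong(futex, &zero, tid))
 *		syscall(SYS_futex, futex, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 *
 *	// ... critical section ...
 *
 *	// The cmpxchg fails if the kernel set FUTEX_WAITERS: that is the
 *	// failed TID -> 0 transition handled by futex_unlock_pi() below.
 *	uint32_t expected = tid;
 *	if (!atomic_compare_exchange_strong(futex, &expected, 0))
 *		syscall(SYS_futex, futex, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 */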
2976 /*
2977  * Userspace attempted a TID -> 0 atomic transition, and failed.
2978  * This is the in-kernel slowpath: we look up the PI state (if any),
2979  * and do the rt-mutex unlock.
2980  */
2981 static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
2982 {
2983 	u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
2984 	union futex_key key = FUTEX_KEY_INIT;
2985 	struct futex_hash_bucket *hb;
2986 	struct futex_q *top_waiter;
2987 	int ret;
2988 
2989 	if (!IS_ENABLED(CONFIG_FUTEX_PI))
2990 		return -ENOSYS;
2991 
2992 retry:
2993 	if (get_user(uval, uaddr))
2994 		return -EFAULT;
2995 	/*
2996 	 * We release only a lock we actually own:
2997 	 */
2998 	if ((uval & FUTEX_TID_MASK) != vpid)
2999 		return -EPERM;
3000 
3001 	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_WRITE);
3002 	if (ret)
3003 		return ret;
3004 
3005 	hb = hash_futex(&key);
3006 	spin_lock(&hb->lock);
3007 
3008 	/*
3009 	 * Check waiters first. We do not trust user space values at
3010 	 * all and we at least want to know if user space fiddled
3011 	 * with the futex value instead of blindly unlocking.
3012 	 */
3013 	top_waiter = futex_top_waiter(hb, &key);
3014 	if (top_waiter) {
3015 		struct futex_pi_state *pi_state = top_waiter->pi_state;
3016 
3017 		ret = -EINVAL;
3018 		if (!pi_state)
3019 			goto out_unlock;
3020 
3021 		/*
3022 		 * If current does not own the pi_state then the futex is
3023 		 * inconsistent and user space fiddled with the futex value.
3024 		 */
3025 		if (pi_state->owner != current)
3026 			goto out_unlock;
3027 
3028 		get_pi_state(pi_state);
3029 		/*
3030 		 * By taking wait_lock while still holding hb->lock, we ensure
3031 		 * there is no point where we hold neither; and therefore
3032 		 * wake_futex_pi() must observe a state consistent with what we
3033 		 * observed.
3034 		 *
3035 		 * In particular, this forces __rt_mutex_start_proxy_lock() to
3036 		 * complete such that we're guaranteed to observe the
3037 		 * rt_waiter. Also see the WARN in wake_futex_pi().
3038 		 */
3039 		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
3040 		spin_unlock(&hb->lock);
3041 
3042 		/* drops pi_state->pi_mutex.wait_lock */
3043 		ret = wake_futex_pi(uaddr, uval, pi_state);
3044 
3045 		put_pi_state(pi_state);
3046 
3047 		/*
3048 		 * Success, we're done! No tricky corner cases.
3049 		 */
3050 		if (!ret)
3051 			goto out_putkey;
3052 		/*
3053 		 * The atomic access to the futex value generated a
3054 		 * pagefault, so retry the user-access and the wakeup:
3055 		 */
3056 		if (ret == -EFAULT)
3057 			goto pi_faulted;
3058 		/*
3059 		 * An unconditional UNLOCK_PI op raced against a waiter
3060 		 * setting the FUTEX_WAITERS bit. Try again.
3061 		 */
3062 		if (ret == -EAGAIN)
3063 			goto pi_retry;
3064 		/*
3065 		 * wake_futex_pi has detected invalid state. Tell user
3066 		 * space.
3067 		 */
3068 		goto out_putkey;
3069 	}
3070 
3071 	/*
3072 	 * We have no kernel internal state, i.e. no waiters in the
3073 	 * kernel. Waiters which are about to queue themselves are stuck
3074 	 * on hb->lock. So we can safely ignore them. We preserve
3075 	 * neither the WAITERS bit nor the OWNER_DIED one. We are the
3076 	 * owner.
3077 	 */
3078 	if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) {
3079 		spin_unlock(&hb->lock);
3080 		switch (ret) {
3081 		case -EFAULT:
3082 			goto pi_faulted;
3083 
3084 		case -EAGAIN:
3085 			goto pi_retry;
3086 
3087 		default:
3088 			WARN_ON_ONCE(1);
3089 			goto out_putkey;
3090 		}
3091 	}
3092 
3093 	/*
3094 	 * If uval has changed, let user space handle it.
3095 	 */
3096 	ret = (curval == uval) ? 0 : -EAGAIN;
3097 
3098 out_unlock:
3099 	spin_unlock(&hb->lock);
3100 out_putkey:
3101 	put_futex_key(&key);
3102 	return ret;
3103 
3104 pi_retry:
3105 	put_futex_key(&key);
3106 	cond_resched();
3107 	goto retry;
3108 
3109 pi_faulted:
3110 	put_futex_key(&key);
3111 
3112 	ret = fault_in_user_writeable(uaddr);
3113 	if (!ret)
3114 		goto retry;
3115 
3116 	return ret;
3117 }
3118 
3119 /**
3120  * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
3121  * @hb:		the hash_bucket futex_q was originally enqueued on
3122  * @q:		the futex_q woken while waiting to be requeued
3123  * @key2:	the futex_key of the requeue target futex
3124  * @timeout:	the timeout associated with the wait (NULL if none)
3125  *
3126  * Detect if the task was woken on the initial futex as opposed to the requeue
3127  * target futex.  If so, determine if it was a timeout or a signal that caused
3128  * the wakeup and return the appropriate error code to the caller.  Must be
3129  * called with the hb lock held.
3130  *
3131  * Return:
3132  *  -  0 = no early wakeup detected;
3133  *  - <0 = -ETIMEDOUT or -ERESTARTNOINTR
3134  */
3135 static inline
3136 int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
3137 				   struct futex_q *q, union futex_key *key2,
3138 				   struct hrtimer_sleeper *timeout)
3139 {
3140 	int ret = 0;
3141 
3142 	/*
3143 	 * With the hb lock held, we avoid races while we process the wakeup.
3144 	 * We only need to hold hb (and not hb2) to ensure atomicity as the
3145 	 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
3146 	 * It can't be requeued from uaddr2 to something else since we don't
3147 	 * support a PI aware source futex for requeue.
3148 	 */
3149 	if (!match_futex(&q->key, key2)) {
3150 		WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
3151 		/*
3152 		 * We were woken prior to requeue by a timeout or a signal.
3153 		 * Unqueue the futex_q and determine which it was.
3154 		 */
3155 		plist_del(&q->list, &hb->chain);
3156 		hb_waiters_dec(hb);
3157 
3158 		/* Handle spurious wakeups gracefully */
3159 		ret = -EWOULDBLOCK;
3160 		if (timeout && !timeout->task)
3161 			ret = -ETIMEDOUT;
3162 		else if (signal_pending(current))
3163 			ret = -ERESTARTNOINTR;
3164 	}
3165 	return ret;
3166 }
3167 
3168 /**
3169  * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
3170  * @uaddr:	the futex we initially wait on (non-pi)
3171  * @flags:	futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
3172  *		the same type, no requeueing from private to shared, etc.
3173  * @val:	the expected value of uaddr
3174  * @abs_time:	absolute timeout
3175  * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
3176  * @uaddr2:	the pi futex we will take prior to returning to user-space
3177  *
3178  * The caller will wait on uaddr and will be requeued by futex_requeue() to
3179  * uaddr2 which must be PI aware and distinct from uaddr.  Normal wakeup will wake
3180  * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
3181  * userspace.  This ensures the rt_mutex maintains an owner when it has waiters;
3182  * without one, the pi logic would not know which task to boost/deboost, if
3183  * there was a need to.
3184  *
3185  * We call schedule in futex_wait_queue_me() when we enqueue and return there
3186  * via the following:
3187  * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
3188  * 2) wakeup on uaddr2 after a requeue
3189  * 3) signal
3190  * 4) timeout
3191  *
3192  * If 3, cleanup and return -ERESTARTNOINTR.
3193  *
3194  * If 2, we may then block on trying to take the rt_mutex and return via:
3195  * 5) successful lock
3196  * 6) signal
3197  * 7) timeout
3198  * 8) other lock acquisition failure
3199  *
3200  * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
3201  *
3202  * If 4 or 7, we cleanup and return with -ETIMEDOUT.
3203  *
3204  * Return:
3205  *  -  0 - On success;
3206  *  - <0 - On error
3207  */
3208 static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
3209 				 u32 val, ktime_t *abs_time, u32 bitset,
3210 				 u32 __user *uaddr2)
3211 {
3212 	struct hrtimer_sleeper timeout, *to;
3213 	struct futex_pi_state *pi_state = NULL;
3214 	struct rt_mutex_waiter rt_waiter;
3215 	struct futex_hash_bucket *hb;
3216 	union futex_key key2 = FUTEX_KEY_INIT;
3217 	struct futex_q q = futex_q_init;
3218 	int res, ret;
3219 
3220 	if (!IS_ENABLED(CONFIG_FUTEX_PI))
3221 		return -ENOSYS;
3222 
3223 	if (uaddr == uaddr2)
3224 		return -EINVAL;
3225 
3226 	if (!bitset)
3227 		return -EINVAL;
3228 
3229 	to = futex_setup_timer(abs_time, &timeout, flags,
3230 			       current->timer_slack_ns);
3231 
3232 	/*
3233 	 * The waiter is allocated on our stack, manipulated by the requeue
3234 	 * code while we sleep on uaddr.
3235 	 */
3236 	rt_mutex_init_waiter(&rt_waiter);
3237 
3238 	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
3239 	if (unlikely(ret != 0))
3240 		goto out;
3241 
3242 	q.bitset = bitset;
3243 	q.rt_waiter = &rt_waiter;
3244 	q.requeue_pi_key = &key2;
3245 
3246 	/*
3247 	 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
3248 	 * count.
3249 	 */
3250 	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
3251 	if (ret)
3252 		goto out_key2;
3253 
3254 	/*
3255 	 * The check above which compares uaddrs is not sufficient for
3256 	 * shared futexes. We need to compare the keys:
3257 	 */
3258 	if (match_futex(&q.key, &key2)) {
3259 		queue_unlock(hb);
3260 		ret = -EINVAL;
3261 		goto out_put_keys;
3262 	}
3263 
3264 	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
3265 	futex_wait_queue_me(hb, &q, to);
3266 
3267 	spin_lock(&hb->lock);
3268 	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
3269 	spin_unlock(&hb->lock);
3270 	if (ret)
3271 		goto out_put_keys;
3272 
3273 	/*
3274 	 * In order for us to be here, we know our q.key == key2, and since
3275 	 * we took the hb->lock above, we also know that futex_requeue() has
3276 	 * completed and we no longer have to concern ourselves with a wakeup
3277 	 * race with the atomic proxy lock acquisition by the requeue code. The
3278 	 * futex_requeue dropped our key1 reference and incremented our key2
3279 	 * reference count.
3280 	 */
3281 
3282 	/* Check if the requeue code acquired the second futex for us. */
3283 	if (!q.rt_waiter) {
3284 		/*
3285 		 * Got the lock. We might not be the anticipated owner if we
3286 		 * did a lock-steal - fix up the PI-state in that case.
3287 		 */
3288 		if (q.pi_state && (q.pi_state->owner != current)) {
3289 			spin_lock(q.lock_ptr);
3290 			ret = fixup_pi_state_owner(uaddr2, &q, current);
3291 			if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
3292 				pi_state = q.pi_state;
3293 				get_pi_state(pi_state);
3294 			}
3295 			/*
3296 			 * Drop the reference to the pi state which
3297 			 * the requeue_pi() code acquired for us.
3298 			 */
3299 			put_pi_state(q.pi_state);
3300 			spin_unlock(q.lock_ptr);
3301 		}
3302 	} else {
3303 		struct rt_mutex *pi_mutex;
3304 
3305 		/*
3306 		 * We have been woken up by futex_unlock_pi(), a timeout, or a
3307 		 * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
3308 		 * the pi_state.
3309 		 */
3310 		WARN_ON(!q.pi_state);
3311 		pi_mutex = &q.pi_state->pi_mutex;
3312 		ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
3313 
3314 		spin_lock(q.lock_ptr);
3315 		if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
3316 			ret = 0;
3317 
3318 		debug_rt_mutex_free_waiter(&rt_waiter);
3319 		/*
3320 		 * Fixup the pi_state owner and possibly acquire the lock if we
3321 		 * haven't already.
3322 		 */
3323 		res = fixup_owner(uaddr2, &q, !ret);
3324 		/*
3325 		 * If fixup_owner() returned an error, propagate that.  If it
3326 		 * acquired the lock, clear -ETIMEDOUT or -EINTR.
3327 		 */
3328 		if (res)
3329 			ret = (res < 0) ? res : 0;
3330 
3331 		/*
3332 		 * If fixup_pi_state_owner() faulted and was unable to handle
3333 		 * the fault, unlock the rt_mutex and return the fault to
3334 		 * userspace.
3335 		 */
3336 		if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
3337 			pi_state = q.pi_state;
3338 			get_pi_state(pi_state);
3339 		}
3340 
3341 		/* Unqueue and drop the lock. */
3342 		unqueue_me_pi(&q);
3343 	}
3344 
3345 	if (pi_state) {
3346 		rt_mutex_futex_unlock(&pi_state->pi_mutex);
3347 		put_pi_state(pi_state);
3348 	}
3349 
3350 	if (ret == -EINTR) {
3351 		/*
3352 		 * We've already been requeued, but cannot restart by calling
3353 		 * futex_lock_pi() directly. We could restart this syscall, but
3354 		 * it would detect that the user space "val" changed and return
3355 		 * -EWOULDBLOCK.  Save the overhead of the restart and return
3356 		 * -EWOULDBLOCK directly.
3357 		 */
3358 		ret = -EWOULDBLOCK;
3359 	}
3360 
3361 out_put_keys:
3362 	put_futex_key(&q.key);
3363 out_key2:
3364 	put_futex_key(&key2);
3365 
3366 out:
3367 	if (to) {
3368 		hrtimer_cancel(&to->timer);
3369 		destroy_hrtimer_on_stack(&to->timer);
3370 	}
3371 	return ret;
3372 }
3373 
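/*
 * How a PI-aware condition variable pairs the two requeue-PI ops, as a
 * hedged sketch (this is the shape of glibc's pthread_cond_wait() on a
 * PI mutex; identifiers are illustrative):
 *
 *	// Waiter: sleep on cond; the kernel requeues us to, and finishes
 *	// acquiring, the PI mutex before we return to userspace.
 *	syscall(SYS_futex, cond, FUTEX_WAIT_REQUEUE_PI, cond_val,
 *		timeout, mutex, 0);
 *
 *	// Signaller: wake one waiter (nr_wake must be 1, see the check in
 *	// futex_requeue()) and requeue the rest onto the PI mutex.
 *	syscall(SYS_futex, cond, FUTEX_CMP_REQUEUE_PI, 1,
 *		(void *)(long)INT_MAX, mutex, cond_val);
 */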
3374 /*
3375  * Support for robust futexes: the kernel cleans up held futexes at
3376  * thread exit time.
3377  *
3378  * Implementation: user-space maintains a per-thread list of locks it
3379  * is holding. Upon do_exit(), the kernel carefully walks this list,
3380  * and marks all locks that are owned by this thread with the
3381  * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
3382  * always manipulated with the lock held, so the list is private and
3383  * per-thread. Userspace also maintains a per-thread 'list_op_pending'
3384  * field, to allow the kernel to clean up if the thread dies after
3385  * acquiring the lock, but just before it could have added itself to
3386  * the list. There can only be one such pending lock.
3387  */
3388 
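/*
 * The userspace ABI this cleans up after, as a hedged sketch. The two
 * structs are the uapi ones from <linux/futex.h>; "struct my_lock" and
 * its fields are hypothetical:
 *
 *	struct robust_list { struct robust_list *next; };
 *
 *	struct robust_list_head {
 *		struct robust_list list;		// head of the held-locks list
 *		long futex_offset;			// futex word offset from each entry
 *		struct robust_list *list_op_pending;	// lock being (un)acquired
 *	};
 *
 *	static __thread struct robust_list_head head;
 *
 *	head.list.next = &head.list;	// empty circular list
 *	head.futex_offset = offsetof(struct my_lock, futex_word) -
 *			    offsetof(struct my_lock, list_entry);
 *	head.list_op_pending = NULL;
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 */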
3389 /**
3390  * sys_set_robust_list() - Set the robust-futex list head of a task
3391  * @head:	pointer to the list-head
3392  * @len:	length of the list-head, as userspace expects
3393  */
3394 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
3395 		size_t, len)
3396 {
3397 	if (!futex_cmpxchg_enabled)
3398 		return -ENOSYS;
3399 	/*
3400 	 * The kernel knows only one size for now:
3401 	 */
3402 	if (unlikely(len != sizeof(*head)))
3403 		return -EINVAL;
3404 
3405 	current->robust_list = head;
3406 
3407 	return 0;
3408 }
3409 
3410 /**
3411  * sys_get_robust_list() - Get the robust-futex list head of a task
3412  * @pid:	pid of the process [zero for current task]
3413  * @head_ptr:	pointer to a list-head pointer, the kernel fills it in
3414  * @len_ptr:	pointer to a length field, the kernel fills in the header size
3415  */
3416 SYSCALL_DEFINE3(get_robust_list, int, pid,
3417 		struct robust_list_head __user * __user *, head_ptr,
3418 		size_t __user *, len_ptr)
3419 {
3420 	struct robust_list_head __user *head;
3421 	unsigned long ret;
3422 	struct task_struct *p;
3423 
3424 	if (!futex_cmpxchg_enabled)
3425 		return -ENOSYS;
3426 
3427 	rcu_read_lock();
3428 
3429 	ret = -ESRCH;
3430 	if (!pid)
3431 		p = current;
3432 	else {
3433 		p = find_task_by_vpid(pid);
3434 		if (!p)
3435 			goto err_unlock;
3436 	}
3437 
3438 	ret = -EPERM;
3439 	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
3440 		goto err_unlock;
3441 
3442 	head = p->robust_list;
3443 	rcu_read_unlock();
3444 
3445 	if (put_user(sizeof(*head), len_ptr))
3446 		return -EFAULT;
3447 	return put_user(head, head_ptr);
3448 
3449 err_unlock:
3450 	rcu_read_unlock();
3451 
3452 	return ret;
3453 }
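
/*
 * A minimal sketch of the inspection side, as a checkpoint or debugger
 * tool might use it; 'tid' is a hypothetical thread id the caller is
 * allowed to ptrace:
 *
 *	struct robust_list_head *rhead;
 *	size_t rlen;
 *
 *	if (syscall(SYS_get_robust_list, tid, &rhead, &rlen) == 0)
 *		printf("head=%p len=%zu\n", (void *)rhead, rlen);
 */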
3454 
3455 /*
3456  * Process a futex-list entry, check whether it's owned by the
3457  * dying task, and do notification if so:
3458  */
3459 static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
3460 {
3461 	u32 uval, uninitialized_var(nval), mval;
3462 	int err;
3463 
3464 	/* Futex address must be 32-bit aligned */
3465 	if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
3466 		return -1;
3467 
3468 retry:
3469 	if (get_user(uval, uaddr))
3470 		return -1;
3471 
3472 	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
3473 		return 0;
3474 
3475 	/*
3476 	 * Ok, this dying thread is truly holding a futex
3477 	 * of interest. Set the OWNER_DIED bit atomically
3478 	 * via cmpxchg, and if the value had FUTEX_WAITERS
3479 	 * set, wake up a waiter (if any). (We have to do a
3480 	 * futex_wake() even if OWNER_DIED is already set -
3481 	 * to handle the rare but possible case of recursive
3482 	 * thread-death.) The rest of the cleanup is done in
3483 	 * userspace.
3484 	 */
3485 	mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
3486 
3487 	/*
3488 	 * We are not holding a lock here, but we want to have
3489 	 * the pagefault_disable/enable() protection because
3490 	 * we want to handle the fault gracefully. If the
3491 	 * access fails we try to fault in the futex with R/W
3492 	 * verification via get_user_pages. get_user() above
3493 	 * does not guarantee R/W access. If that fails we
3494 	 * give up and leave the futex locked.
3495 	 */
3496 	if ((err = cmpxchg_futex_value_locked(&nval, uaddr, uval, mval))) {
3497 		switch (err) {
3498 		case -EFAULT:
3499 			if (fault_in_user_writeable(uaddr))
3500 				return -1;
3501 			goto retry;
3502 
3503 		case -EAGAIN:
3504 			cond_resched();
3505 			goto retry;
3506 
3507 		default:
3508 			WARN_ON_ONCE(1);
3509 			return err;
3510 		}
3511 	}
3512 
3513 	if (nval != uval)
3514 		goto retry;
3515 
3516 	/*
3517 	 * Wake robust non-PI futexes here. The wakeup of
3518 	 * PI futexes happens in exit_pi_state_list():
3519 	 */
3520 	if (!pi && (uval & FUTEX_WAITERS))
3521 		futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
3522 
3523 	return 0;
3524 }
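
/*
 * Worked example of the mval computation above, with the uapi bit
 * values (FUTEX_WAITERS 0x80000000, FUTEX_OWNER_DIED 0x40000000,
 * FUTEX_TID_MASK 0x3fffffff): if the dying owner's TID is 0x1234 and a
 * waiter is queued,
 *
 *	uval = FUTEX_WAITERS | 0x1234				// 0x80001234
 *	mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED	// 0xc0000000
 *
 * i.e. the TID field is cleared, FUTEX_WAITERS is preserved so the wake
 * below still fires, and FUTEX_OWNER_DIED lets the next acquirer report
 * an "owner died" condition (EOWNERDEAD in the pthread robust-mutex
 * API) to the application.
 */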
3525 
3526 /*
3527  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
3528  */
3529 static inline int fetch_robust_entry(struct robust_list __user **entry,
3530 				     struct robust_list __user * __user *head,
3531 				     unsigned int *pi)
3532 {
3533 	unsigned long uentry;
3534 
3535 	if (get_user(uentry, (unsigned long __user *)head))
3536 		return -EFAULT;
3537 
3538 	*entry = (void __user *)(uentry & ~1UL);
3539 	*pi = uentry & 1;
3540 
3541 	return 0;
3542 }
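
/*
 * The encoding this decodes, seen from the userspace side: list entries
 * are at least pointer aligned, so bit 0 is free to carry the PI flag.
 * A hypothetical runtime would publish an entry roughly as:
 *
 *	struct robust_list *e = &mutex->list;	// 'mutex' is hypothetical
 *	head->list.next = (struct robust_list *)
 *				((uintptr_t)e | (is_pi ? 1UL : 0UL));
 */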
3543 
3544 /*
3545  * Walk curr->robust_list (very carefully, it's a userspace list!)
3546  * and mark any locks found there dead, and notify any waiters.
3547  *
3548  * We silently return on any sign of a list-walking problem.
3549  */
3550 void exit_robust_list(struct task_struct *curr)
3551 {
3552 	struct robust_list_head __user *head = curr->robust_list;
3553 	struct robust_list __user *entry, *next_entry, *pending;
3554 	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
3555 	unsigned int uninitialized_var(next_pi);
3556 	unsigned long futex_offset;
3557 	int rc;
3558 
3559 	if (!futex_cmpxchg_enabled)
3560 		return;
3561 
3562 	/*
3563 	 * Fetch the list head (which was registered earlier, via
3564 	 * sys_set_robust_list()):
3565 	 */
3566 	if (fetch_robust_entry(&entry, &head->list.next, &pi))
3567 		return;
3568 	/*
3569 	 * Fetch the relative futex offset:
3570 	 */
3571 	if (get_user(futex_offset, &head->futex_offset))
3572 		return;
3573 	/*
3574 	 * Fetch any possibly pending lock-add first, and handle it
3575 	 * if it exists:
3576 	 */
3577 	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
3578 		return;
3579 
3580 	next_entry = NULL;	/* avoid warning with gcc */
3581 	while (entry != &head->list) {
3582 		/*
3583 		 * Fetch the next entry in the list before calling
3584 		 * handle_futex_death:
3585 		 */
3586 		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
3587 		/*
3588 		 * A pending lock might already be on the list, so
3589 		 * don't process it twice:
3590 		 */
3591 		if (entry != pending)
3592 			if (handle_futex_death((void __user *)entry + futex_offset,
3593 						curr, pi))
3594 				return;
3595 		if (rc)
3596 			return;
3597 		entry = next_entry;
3598 		pi = next_pi;
3599 		/*
3600 		 * Avoid excessively long or circular lists:
3601 		 */
3602 		if (!--limit)
3603 			break;
3604 
3605 		cond_resched();
3606 	}
3607 
3608 	if (pending)
3609 		handle_futex_death((void __user *)pending + futex_offset,
3610 				   curr, pip);
3611 }
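
/*
 * The list_op_pending handshake this walk depends on, sketched from the
 * userspace side ('mutex', 'cmpxchg', 'enqueue' and 'my_tid' are
 * hypothetical stand-ins): the entry is published *before* the atomic
 * acquire, so a death between the two steps is still visible to the
 * walk above:
 *
 *	head->list_op_pending = &mutex->list;
 *	// full barrier, then try to take the lock:
 *	if (cmpxchg(&mutex->lock_word, 0, my_tid) == 0)
 *		enqueue(head, &mutex->list);	// now reachable via ->next
 *	head->list_op_pending = NULL;
 */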
3612 
3613 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
3614 		u32 __user *uaddr2, u32 val2, u32 val3)
3615 {
3616 	int cmd = op & FUTEX_CMD_MASK;
3617 	unsigned int flags = 0;
3618 
3619 	if (!(op & FUTEX_PRIVATE_FLAG))
3620 		flags |= FLAGS_SHARED;
3621 
3622 	if (op & FUTEX_CLOCK_REALTIME) {
3623 		flags |= FLAGS_CLOCKRT;
3624 		if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET &&
3625 		    cmd != FUTEX_WAIT_REQUEUE_PI)
3626 			return -ENOSYS;
3627 	}
3628 
3629 	switch (cmd) {
3630 	case FUTEX_LOCK_PI:
3631 	case FUTEX_UNLOCK_PI:
3632 	case FUTEX_TRYLOCK_PI:
3633 	case FUTEX_WAIT_REQUEUE_PI:
3634 	case FUTEX_CMP_REQUEUE_PI:
3635 		if (!futex_cmpxchg_enabled)
3636 			return -ENOSYS;
3637 	}
3638 
3639 	switch (cmd) {
3640 	case FUTEX_WAIT:
3641 		val3 = FUTEX_BITSET_MATCH_ANY;
3642 		/* fall through */
3643 	case FUTEX_WAIT_BITSET:
3644 		return futex_wait(uaddr, flags, val, timeout, val3);
3645 	case FUTEX_WAKE:
3646 		val3 = FUTEX_BITSET_MATCH_ANY;
3647 		/* fall through */
3648 	case FUTEX_WAKE_BITSET:
3649 		return futex_wake(uaddr, flags, val, val3);
3650 	case FUTEX_REQUEUE:
3651 		return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
3652 	case FUTEX_CMP_REQUEUE:
3653 		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
3654 	case FUTEX_WAKE_OP:
3655 		return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
3656 	case FUTEX_LOCK_PI:
3657 		return futex_lock_pi(uaddr, flags, timeout, 0);
3658 	case FUTEX_UNLOCK_PI:
3659 		return futex_unlock_pi(uaddr, flags);
3660 	case FUTEX_TRYLOCK_PI:
3661 		return futex_lock_pi(uaddr, flags, NULL, 1);
3662 	case FUTEX_WAIT_REQUEUE_PI:
3663 		val3 = FUTEX_BITSET_MATCH_ANY;
3664 		return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
3665 					     uaddr2);
3666 	case FUTEX_CMP_REQUEUE_PI:
3667 		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
3668 	}
3669 	return -ENOSYS;
3670 }
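
/*
 * A minimal wait/wake sketch against this dispatch (glibc provides no
 * futex() wrapper, so raw syscall() is the usual route; 'expected' is a
 * hypothetical value the waiter last read):
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static unsigned int futex_word;
 *
 *	// Waiter: sleeps only while futex_word still equals 'expected'.
 *	syscall(SYS_futex, &futex_word, FUTEX_WAIT, expected, NULL, NULL, 0);
 *
 *	// Waker: change the word, then wake at most one waiter.
 *	__atomic_store_n(&futex_word, expected + 1, __ATOMIC_SEQ_CST);
 *	syscall(SYS_futex, &futex_word, FUTEX_WAKE, 1, NULL, NULL, 0);
 */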
3671 
3672 
3673 SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
3674 		struct __kernel_timespec __user *, utime, u32 __user *, uaddr2,
3675 		u32, val3)
3676 {
3677 	struct timespec64 ts;
3678 	ktime_t t, *tp = NULL;
3679 	u32 val2 = 0;
3680 	int cmd = op & FUTEX_CMD_MASK;
3681 
3682 	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
3683 		      cmd == FUTEX_WAIT_BITSET ||
3684 		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
3685 		if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
3686 			return -EFAULT;
3687 		if (get_timespec64(&ts, utime))
3688 			return -EFAULT;
3689 		if (!timespec64_valid(&ts))
3690 			return -EINVAL;
3691 
3692 		t = timespec64_to_ktime(ts);
3693 		if (cmd == FUTEX_WAIT)
3694 			t = ktime_add_safe(ktime_get(), t);
3695 		tp = &t;
3696 	}
3697 	/*
3698 	 * The requeue parameter is passed in 'utime' if cmd == FUTEX_*_REQUEUE_*.
3699 	 * The number of waiters to wake is passed in 'utime' if cmd == FUTEX_WAKE_OP.
3700 	 */
3701 	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
3702 	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
3703 		val2 = (u32) (unsigned long) utime;
3704 
3705 	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
3706 }
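
/*
 * Timeout semantics, worked through: FUTEX_WAIT treats 'utime' as a
 * relative interval (hence the ktime_add_safe() conversion above); the
 * other waiting ops take an absolute expiry. A 100ms bounded wait is
 * therefore simply:
 *
 *	struct timespec rel = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };
 *	syscall(SYS_futex, &futex_word, FUTEX_WAIT, expected, &rel, NULL, 0);
 *
 * while FUTEX_WAIT_BITSET would first need clock_gettime() plus the
 * interval to build an absolute deadline.
 */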
3707 
3708 #ifdef CONFIG_COMPAT
3709 /*
3710  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
3711  */
3712 static inline int
3713 compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
3714 		   compat_uptr_t __user *head, unsigned int *pi)
3715 {
3716 	if (get_user(*uentry, head))
3717 		return -EFAULT;
3718 
3719 	*entry = compat_ptr((*uentry) & ~1);
3720 	*pi = (unsigned int)(*uentry) & 1;
3721 
3722 	return 0;
3723 }
3724 
3725 static void __user *futex_uaddr(struct robust_list __user *entry,
3726 				compat_long_t futex_offset)
3727 {
3728 	compat_uptr_t base = ptr_to_compat(entry);
3729 	void __user *uaddr = compat_ptr(base + futex_offset);
3730 
3731 	return uaddr;
3732 }
3733 
3734 /*
3735  * Walk curr->robust_list (very carefully, it's a userspace list!)
3736  * and mark any locks found there dead, and notify any waiters.
3737  *
3738  * We silently return on any sign of a list-walking problem.
3739  */
3740 void compat_exit_robust_list(struct task_struct *curr)
3741 {
3742 	struct compat_robust_list_head __user *head = curr->compat_robust_list;
3743 	struct robust_list __user *entry, *next_entry, *pending;
3744 	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
3745 	unsigned int uninitialized_var(next_pi);
3746 	compat_uptr_t uentry, next_uentry, upending;
3747 	compat_long_t futex_offset;
3748 	int rc;
3749 
3750 	if (!futex_cmpxchg_enabled)
3751 		return;
3752 
3753 	/*
3754 	 * Fetch the list head (which was registered earlier, via
3755 	 * sys_set_robust_list()):
3756 	 */
3757 	if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
3758 		return;
3759 	/*
3760 	 * Fetch the relative futex offset:
3761 	 */
3762 	if (get_user(futex_offset, &head->futex_offset))
3763 		return;
3764 	/*
3765 	 * Fetch any possibly pending lock-add first, and handle it
3766 	 * if it exists:
3767 	 */
3768 	if (compat_fetch_robust_entry(&upending, &pending,
3769 			       &head->list_op_pending, &pip))
3770 		return;
3771 
3772 	next_entry = NULL;	/* avoid warning with gcc */
3773 	while (entry != (struct robust_list __user *) &head->list) {
3774 		/*
3775 		 * Fetch the next entry in the list before calling
3776 		 * handle_futex_death:
3777 		 */
3778 		rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
3779 			(compat_uptr_t __user *)&entry->next, &next_pi);
3780 		/*
3781 		 * A pending lock might already be on the list, so
3782 		 * don't process it twice:
3783 		 */
3784 		if (entry != pending) {
3785 			void __user *uaddr = futex_uaddr(entry, futex_offset);
3786 
3787 			if (handle_futex_death(uaddr, curr, pi))
3788 				return;
3789 		}
3790 		if (rc)
3791 			return;
3792 		uentry = next_uentry;
3793 		entry = next_entry;
3794 		pi = next_pi;
3795 		/*
3796 		 * Avoid excessively long or circular lists:
3797 		 */
3798 		if (!--limit)
3799 			break;
3800 
3801 		cond_resched();
3802 	}
3803 	if (pending) {
3804 		void __user *uaddr = futex_uaddr(pending, futex_offset);
3805 
3806 		handle_futex_death(uaddr, curr, pip);
3807 	}
3808 }
3809 
3810 COMPAT_SYSCALL_DEFINE2(set_robust_list,
3811 		struct compat_robust_list_head __user *, head,
3812 		compat_size_t, len)
3813 {
3814 	if (!futex_cmpxchg_enabled)
3815 		return -ENOSYS;
3816 
3817 	if (unlikely(len != sizeof(*head)))
3818 		return -EINVAL;
3819 
3820 	current->compat_robust_list = head;
3821 
3822 	return 0;
3823 }
3824 
3825 COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
3826 			compat_uptr_t __user *, head_ptr,
3827 			compat_size_t __user *, len_ptr)
3828 {
3829 	struct compat_robust_list_head __user *head;
3830 	unsigned long ret;
3831 	struct task_struct *p;
3832 
3833 	if (!futex_cmpxchg_enabled)
3834 		return -ENOSYS;
3835 
3836 	rcu_read_lock();
3837 
3838 	ret = -ESRCH;
3839 	if (!pid)
3840 		p = current;
3841 	else {
3842 		p = find_task_by_vpid(pid);
3843 		if (!p)
3844 			goto err_unlock;
3845 	}
3846 
3847 	ret = -EPERM;
3848 	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
3849 		goto err_unlock;
3850 
3851 	head = p->compat_robust_list;
3852 	rcu_read_unlock();
3853 
3854 	if (put_user(sizeof(*head), len_ptr))
3855 		return -EFAULT;
3856 	return put_user(ptr_to_compat(head), head_ptr);
3857 
3858 err_unlock:
3859 	rcu_read_unlock();
3860 
3861 	return ret;
3862 }
3863 #endif /* CONFIG_COMPAT */
3864 
3865 #ifdef CONFIG_COMPAT_32BIT_TIME
3866 SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
3867 		struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
3868 		u32, val3)
3869 {
3870 	struct timespec64 ts;
3871 	ktime_t t, *tp = NULL;
3872 	int val2 = 0;
3873 	int cmd = op & FUTEX_CMD_MASK;
3874 
3875 	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
3876 		      cmd == FUTEX_WAIT_BITSET ||
3877 		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
3878 		if (get_old_timespec32(&ts, utime))
3879 			return -EFAULT;
3880 		if (!timespec64_valid(&ts))
3881 			return -EINVAL;
3882 
3883 		t = timespec64_to_ktime(ts);
3884 		if (cmd == FUTEX_WAIT)
3885 			t = ktime_add_safe(ktime_get(), t);
3886 		tp = &t;
3887 	}
3888 	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
3889 	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
3890 		val2 = (int) (unsigned long) utime;
3891 
3892 	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
3893 }
3894 #endif /* CONFIG_COMPAT_32BIT_TIME */
3895 
3896 static void __init futex_detect_cmpxchg(void)
3897 {
3898 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
3899 	u32 curval;
3900 
3901 	/*
3902 	 * This will fail and we want it to. Some arch implementations do
3903 	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
3904 	 * functionality. We want to know that before we call in any
3905 	 * of the complex code paths. Also we want to prevent
3906 	 * registration of robust lists in that case. NULL is
3907 	 * guaranteed to fault and we get -EFAULT on a functional
3908 	 * implementation; the non-functional ones will return
3909 	 * -ENOSYS.
3910 	 */
3911 	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
3912 		futex_cmpxchg_enabled = 1;
3913 #endif
3914 }
3915 
3916 static int __init futex_init(void)
3917 {
3918 	unsigned int futex_shift;
3919 	unsigned long i;
3920 
3921 #if CONFIG_BASE_SMALL
3922 	futex_hashsize = 16;
3923 #else
3924 	futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
3925 #endif
3926 
3927 	futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
3928 					       futex_hashsize, 0,
3929 					       futex_hashsize < 256 ? HASH_SMALL : 0,
3930 					       &futex_shift, NULL,
3931 					       futex_hashsize, futex_hashsize);
3932 	futex_hashsize = 1UL << futex_shift;
3933 
3934 	futex_detect_cmpxchg();
3935 
3936 	for (i = 0; i < futex_hashsize; i++) {
3937 		atomic_set(&futex_queues[i].waiters, 0);
3938 		plist_head_init(&futex_queues[i].chain);
3939 		spin_lock_init(&futex_queues[i].lock);
3940 	}
3941 
3942 	return 0;
3943 }
3944 core_initcall(futex_init);
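
/*
 * Sizing example for the hash above: on a non-CONFIG_BASE_SMALL system
 * with 8 possible CPUs, futex_hashsize starts as
 * roundup_pow_of_two(256 * 8) = 2048 buckets; alloc_large_system_hash()
 * reports the shift actually used, and futex_hashsize is recomputed as
 * 1UL << futex_shift so the two always agree.
 */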
3945