1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2008 Intel Corporation
4 * Author: Matthew Wilcox <willy@linux.intel.com>
5 *
6 * This file implements counting semaphores.
7 * A counting semaphore may be acquired 'n' times before sleeping.
8 * See mutex.c for single-acquisition sleeping locks which enforce
9 * rules which allow code to be debugged more easily.
10 */
11
12 /*
13 * Some notes on the implementation:
14 *
15 * The spinlock controls access to the other members of the semaphore.
16 * down_trylock() and up() can be called from interrupt context, so we
17 * have to disable interrupts when taking the lock. It turns out various
18 * parts of the kernel expect to be able to use down() on a semaphore in
19 * interrupt context when they know it will succeed, so we have to use
20 * irqsave variants for down(), down_interruptible() and down_killable()
21 * too.
22 *
23 * The ->count variable represents how many more tasks can acquire this
24 * semaphore. If it's zero, there may be tasks waiting on the wait_list.
25 */
26
27 #include <linux/compiler.h>
28 #include <linux/kernel.h>
29 #include <linux/export.h>
30 #include <linux/sched.h>
31 #include <linux/sched/debug.h>
32 #include <linux/semaphore.h>
33 #include <linux/spinlock.h>
34 #include <linux/ftrace.h>
35
36 static noinline void __down(struct semaphore *sem);
37 static noinline int __down_interruptible(struct semaphore *sem);
38 static noinline int __down_killable(struct semaphore *sem);
39 static noinline int __down_timeout(struct semaphore *sem, long timeout);
40 static noinline void __up(struct semaphore *sem);
41
42 /**
43 * down - acquire the semaphore
44 * @sem: the semaphore to be acquired
45 *
46 * Acquires the semaphore. If no more tasks are allowed to acquire the
47 * semaphore, calling this function will put the task to sleep until the
48 * semaphore is released.
49 *
50 * Use of this function is deprecated, please use down_interruptible() or
51 * down_killable() instead.
52 */
down(struct semaphore * sem)53 void down(struct semaphore *sem)
54 {
55 unsigned long flags;
56
57 might_sleep();
58 raw_spin_lock_irqsave(&sem->lock, flags);
59 if (likely(sem->count > 0))
60 sem->count--;
61 else
62 __down(sem);
63 raw_spin_unlock_irqrestore(&sem->lock, flags);
64 }
65 EXPORT_SYMBOL(down);
66
67 /**
68 * down_interruptible - acquire the semaphore unless interrupted
69 * @sem: the semaphore to be acquired
70 *
71 * Attempts to acquire the semaphore. If no more tasks are allowed to
72 * acquire the semaphore, calling this function will put the task to sleep.
73 * If the sleep is interrupted by a signal, this function will return -EINTR.
74 * If the semaphore is successfully acquired, this function returns 0.
75 */
down_interruptible(struct semaphore * sem)76 int down_interruptible(struct semaphore *sem)
77 {
78 unsigned long flags;
79 int result = 0;
80
81 might_sleep();
82 raw_spin_lock_irqsave(&sem->lock, flags);
83 if (likely(sem->count > 0))
84 sem->count--;
85 else
86 result = __down_interruptible(sem);
87 raw_spin_unlock_irqrestore(&sem->lock, flags);
88
89 return result;
90 }
91 EXPORT_SYMBOL(down_interruptible);
92
93 /**
94 * down_killable - acquire the semaphore unless killed
95 * @sem: the semaphore to be acquired
96 *
97 * Attempts to acquire the semaphore. If no more tasks are allowed to
98 * acquire the semaphore, calling this function will put the task to sleep.
99 * If the sleep is interrupted by a fatal signal, this function will return
100 * -EINTR. If the semaphore is successfully acquired, this function returns
101 * 0.
102 */
down_killable(struct semaphore * sem)103 int down_killable(struct semaphore *sem)
104 {
105 unsigned long flags;
106 int result = 0;
107
108 might_sleep();
109 raw_spin_lock_irqsave(&sem->lock, flags);
110 if (likely(sem->count > 0))
111 sem->count--;
112 else
113 result = __down_killable(sem);
114 raw_spin_unlock_irqrestore(&sem->lock, flags);
115
116 return result;
117 }
118 EXPORT_SYMBOL(down_killable);
119
120 /**
121 * down_trylock - try to acquire the semaphore, without waiting
122 * @sem: the semaphore to be acquired
123 *
124 * Try to acquire the semaphore atomically. Returns 0 if the semaphore has
125 * been acquired successfully or 1 if it cannot be acquired.
126 *
127 * NOTE: This return value is inverted from both spin_trylock and
128 * mutex_trylock! Be careful about this when converting code.
129 *
130 * Unlike mutex_trylock, this function can be used from interrupt context,
131 * and the semaphore can be released by any task or interrupt.
132 */
down_trylock(struct semaphore * sem)133 int down_trylock(struct semaphore *sem)
134 {
135 unsigned long flags;
136 int count;
137
138 raw_spin_lock_irqsave(&sem->lock, flags);
139 count = sem->count - 1;
140 if (likely(count >= 0))
141 sem->count = count;
142 raw_spin_unlock_irqrestore(&sem->lock, flags);
143
144 return (count < 0);
145 }
146 EXPORT_SYMBOL(down_trylock);
147
148 /**
149 * down_timeout - acquire the semaphore within a specified time
150 * @sem: the semaphore to be acquired
151 * @timeout: how long to wait before failing
152 *
153 * Attempts to acquire the semaphore. If no more tasks are allowed to
154 * acquire the semaphore, calling this function will put the task to sleep.
155 * If the semaphore is not released within the specified number of jiffies,
156 * this function returns -ETIME. It returns 0 if the semaphore was acquired.
157 */
down_timeout(struct semaphore * sem,long timeout)158 int down_timeout(struct semaphore *sem, long timeout)
159 {
160 unsigned long flags;
161 int result = 0;
162
163 might_sleep();
164 raw_spin_lock_irqsave(&sem->lock, flags);
165 if (likely(sem->count > 0))
166 sem->count--;
167 else
168 result = __down_timeout(sem, timeout);
169 raw_spin_unlock_irqrestore(&sem->lock, flags);
170
171 return result;
172 }
173 EXPORT_SYMBOL(down_timeout);
174
175 /**
176 * up - release the semaphore
177 * @sem: the semaphore to release
178 *
179 * Release the semaphore. Unlike mutexes, up() may be called from any
180 * context and even by tasks which have never called down().
181 */
up(struct semaphore * sem)182 void up(struct semaphore *sem)
183 {
184 unsigned long flags;
185
186 raw_spin_lock_irqsave(&sem->lock, flags);
187 if (likely(list_empty(&sem->wait_list)))
188 sem->count++;
189 else
190 __up(sem);
191 raw_spin_unlock_irqrestore(&sem->lock, flags);
192 }
193 EXPORT_SYMBOL(up);
194
195 /* Functions for the contended case */
196
/*
 * Per-task wait record, allocated on the sleeper's stack in
 * __down_common() and linked into sem->wait_list under sem->lock.
 */
struct semaphore_waiter {
	struct list_head list;		/* entry in sem->wait_list */
	struct task_struct *task;	/* the sleeping task, woken by __up() */
	bool up;			/* set true by __up() when the unit is granted */
};
202
203 /*
204 * Because this function is inlined, the 'state' parameter will be
205 * constant, and thus optimised away by the compiler. Likewise the
206 * 'timeout' parameter for the cases without timeouts.
207 */
/*
 * Common contended-path slow case for the down() family.
 *
 * Called with sem->lock held and interrupts disabled; returns with the
 * lock held again.  Queues an on-stack waiter, then sleeps in @state
 * until __up() grants us the semaphore (waiter.up), a signal arrives
 * (per signal_pending_state()), or @timeout jiffies elapse.
 *
 * Returns 0 on acquisition, -EINTR if interrupted, -ETIME on timeout.
 */
static inline int __sched __down_common(struct semaphore *sem, long state,
								long timeout)
{
	struct semaphore_waiter waiter;

	/* FIFO queue: oldest waiter is released first by __up(). */
	list_add_tail(&waiter.list, &sem->wait_list);
	waiter.task = current;
	waiter.up = false;

	for (;;) {
		if (signal_pending_state(state, current))
			goto interrupted;
		if (unlikely(timeout <= 0))
			goto timed_out;
		__set_current_state(state);
		/*
		 * Drop the lock while sleeping so up() can run; the waiter
		 * stays on the list, protected by re-taking the lock below.
		 */
		raw_spin_unlock_irq(&sem->lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->lock);
		/* __up() already unlinked us and handed over the unit. */
		if (waiter.up)
			return 0;
	}

 timed_out:
	list_del(&waiter.list);
	return -ETIME;

 interrupted:
	list_del(&waiter.list);
	return -EINTR;
}
238
/* Slow path for down(): uninterruptible sleep, no timeout. */
static noinline void __sched __down(struct semaphore *sem)
{
	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
243
/* Slow path for down_interruptible(): any signal aborts the wait. */
static noinline int __sched __down_interruptible(struct semaphore *sem)
{
	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
248
/* Slow path for down_killable(): only fatal signals abort the wait. */
static noinline int __sched __down_killable(struct semaphore *sem)
{
	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}
253
/* Slow path for down_timeout(): uninterruptible, bounded by @timeout jiffies. */
static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
{
	return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
}
258
__up(struct semaphore * sem)259 static noinline void __sched __up(struct semaphore *sem)
260 {
261 struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
262 struct semaphore_waiter, list);
263 list_del(&waiter->list);
264 waiter->up = true;
265 wake_up_process(waiter->task);
266 }
267