/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/rcupdate.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(struct timer_list *t);
/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is reserved as "no color", used for work items
	 * which don't participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/*
	 * Reserve 8 bits off of pwq pointer w/ debugobjects turned off.
	 * This makes pwqs aligned to 256 bytes and allows 15 workqueue
	 * flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,

	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,
	WORK_OFFQ_CANCELING	= (1 << __WORK_OFFQ_CANCELING),

	/*
	 * When a work item is off queue, its high bits point to the last
	 * pool it was on.  Cap at 31 bits and use the highest number to
	 * indicate that no pool is associated.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,

	/* convenience constants */
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	/* target workqueue ->rcu uses to queue ->work */
	struct workqueue_struct *wq;
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 */
	cpumask_var_t cpumask;

	/**
	 * @no_numa: disable NUMA affinity
	 *
	 * Unlike other fields, ``no_numa`` isn't a property of a worker_pool.
	 * It only modifies how :c:func:`apply_workqueue_attrs` selects pools
	 * and thus doesn't participate in pool hash calculations or equality
	 * comparisons.
	 */
	bool no_numa;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}
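
/*
 * A common pattern, shown as a minimal sketch: a delayed work handler
 * receives a struct work_struct pointer and uses to_delayed_work() to
 * recover the containing struct delayed_work.  The handler, structure
 * and field names below are hypothetical.
 *
 *	static void my_timeout_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		struct my_dev *dev = container_of(dwork, struct my_dev,
 *						  timeout_work);
 *
 *		handle_timeout(dev);
 *	}
 */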

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry = { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
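
/*
 * Minimal usage sketch for the static initializers above.  These
 * declare fully initialized work items at file scope; the handler
 * name is hypothetical.
 *
 *	static void my_handler(struct work_struct *work);
 *	static DECLARE_WORK(my_work, my_handler);
 *	static DECLARE_DELAYED_WORK(my_dwork, my_handler);
 */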

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__init_timer(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__init_timer_on_stack(&(_work)->timer,			\
				      delayed_work_timer_fn,		\
				      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

#define INIT_RCU_WORK(_work, _func)					\
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
	INIT_WORK_ONSTACK(&(_work)->work, (_func))
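
/*
 * Runtime initialization sketch: work items embedded in dynamically
 * allocated objects are set up with INIT_WORK()/INIT_DELAYED_WORK()
 * before first use, typically at probe/open time.  The structure and
 * handler names are hypothetical.
 *
 *	struct my_dev {
 *		struct work_struct irq_work;
 *		struct delayed_work poll_work;
 *	};
 *
 *	static void my_dev_setup(struct my_dev *dev)
 *	{
 *		INIT_WORK(&dev->irq_work, my_irq_handler);
 *		INIT_DELAYED_WORK(&dev->poll_work, my_poll_handler);
 *	}
 */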

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum {
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see wq_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU, breaking the idleness, which
	 * in turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel param
	 * is specified.  Per-cpu workqueues which contribute significantly
	 * to power consumption are identified and marked with this flag;
	 * enabling the power_efficient mode leads to noticeable power
	 * saving at the cost of a small performance disadvantage.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */
	__WQ_ORDERED_EXPLICIT	= 1 << 19, /* internal: alloc_ordered_workqueue() */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue work items which can run for
 * too long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and are
 * converted into WQ_UNBOUND variants if 'wq_power_efficient' is
 * enabled; otherwise, they are the same as their non-power-efficient
 * counterparts - e.g. system_power_efficient_wq is identical to
 * system_wq if 'wq_power_efficient' is disabled.  See
 * WQ_POWER_EFFICIENT for more info.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * remaining args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
struct workqueue_struct *alloc_workqueue(const char *fmt,
					 unsigned int flags,
					 int max_active, ...);
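
/*
 * Allocation sketch: a driver creating its own workqueue.  The flag
 * choice and names are illustrative, not prescriptive; 0 selects the
 * default max_active.
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("mydrv_wq", WQ_UNBOUND | WQ_FREEZABLE, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(wq);
 */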

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args...: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |		\
			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)
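
/*
 * Ordered workqueue sketch: work items queued here execute one at a
 * time, in queueing order.  The name is illustrative.
 *
 *	struct workqueue_struct *ordered_wq;
 *
 *	ordered_wq = alloc_ordered_workqueue("mydrv_ordered", WQ_MEM_RECLAIM);
 *	if (!ordered_wq)
 *		return -ENOMEM;
 */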

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			  struct work_struct *work);
extern bool queue_work_node(int node, struct workqueue_struct *wq,
			    struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				  struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
				struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern bool flush_rcu_work(struct rcu_work *rwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_workqueue_state(void);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}
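
/*
 * Queueing sketch: an interrupt handler deferring processing to the
 * workqueue allocated earlier.  The names are hypothetical.
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		queue_work(dev->wq, &dev->irq_work);
 *		return IRQ_HANDLED;
 *	}
 */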

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
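
/*
 * Debounce sketch: unlike queue_delayed_work(), which is a no-op if
 * @dwork is already pending, mod_delayed_work() either queues @dwork
 * or pushes its timeout back, so repeated events coalesce into one
 * execution after the last event.  The HZ-based delay and names are
 * illustrative.
 *
 *	static void my_event(struct my_dev *dev)
 *	{
 *		mod_delayed_work(dev->wq, &dev->poll_work, HZ / 10);
 *	}
 */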

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}
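
/*
 * For one-off items that don't justify a dedicated workqueue, the
 * kernel-global queue suffices; a minimal sketch with a hypothetical
 * handler:
 *
 *	static DECLARE_WORK(my_work, my_handler);
 *
 *	schedule_work(&my_work);
 */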

/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
static inline void flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}
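
/*
 * As the note above suggests, waiting on one specific item is usually
 * what's wanted.  A sketch of the preferred teardown pattern, reusing
 * the hypothetical fields from the earlier examples:
 *
 *	cancel_delayed_work_sync(&dev->poll_work);
 *	cancel_work_sync(&dev->irq_work);
 */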

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
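
/*
 * work_on_cpu() runs @fn synchronously via a work item bound to @cpu
 * and returns its result; on !SMP builds it simply calls @fn.  A
 * sketch, with a hypothetical function that must read CPU-local state:
 *
 *	long ret = work_on_cpu(2, read_local_state, &arg);
 */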

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else /* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else /* CONFIG_WQ_WATCHDOG */
static inline void wq_watchdog_touch(int cpu) { }
#endif /* CONFIG_WQ_WATCHDOG */

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

int __init workqueue_init_early(void);
int __init workqueue_init(void);

#endif /* _LINUX_WORKQUEUE_H */