/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/rcupdate.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(struct timer_list *t);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one.
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_INACTIVE_BIT = 1,	/* work item is inactive */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_INACTIVE	= 1 << WORK_STRUCT_INACTIVE_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS),

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/*
	 * Reserve 8 bits off of pwq pointer w/ debugobjects turned off.
	 * This makes pwqs aligned to 256 bytes and allows 16 workqueue
	 * flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,

	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,
	WORK_OFFQ_CANCELING	= (1 << __WORK_OFFQ_CANCELING),

	/*
	 * When a work item is off queue, its high bits point to the last
	 * pool it was on.  Cap at 31 bits and use the highest number to
	 * indicate that no pool is associated.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,

	/* convenience constants */
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};
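
/*
 * Illustrative sketch of the ->data word (informational only; mirrors the
 * constants above and assumes CONFIG_DEBUG_OBJECTS_WORK=n, i.e.
 * WORK_STRUCT_COLOR_SHIFT == 4):
 *
 *	while queued (WORK_STRUCT_PWQ set):
 *	  bits 0-3	PENDING / INACTIVE / PWQ / LINKED
 *	  bits 4-7	flush color
 *	  bits 8+	pwq pointer (hence the 256-byte pwq alignment)
 *
 *	while off queue (!WORK_STRUCT_PWQ):
 *	  bit  4	WORK_OFFQ_CANCELING
 *	  bits 5+	ID of the last pool the item ran on, or
 *			WORK_OFFQ_POOL_NONE
 */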

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	/* target workqueue ->rcu uses to queue ->work */
	struct workqueue_struct *wq;
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 */
	cpumask_var_t cpumask;

	/**
	 * @no_numa: disable NUMA affinity
	 *
	 * Unlike other fields, ``no_numa`` isn't a property of a worker_pool. It
	 * only modifies how :c:func:`apply_workqueue_attrs` selects pools and
	 * thus doesn't participate in pool hash calculations or equality
	 * comparisons.
	 */
	bool no_numa;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
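
/*
 * Minimal usage sketch (hypothetical names): declare a compile-time work
 * item together with its handler and queue it later, e.g. with
 * schedule_work().
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("my_work ran\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	schedule_work(&my_work);
 */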

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__init_timer(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__init_timer_on_stack(&(_work)->timer,			\
				      delayed_work_timer_fn,		\
				      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

#define INIT_RCU_WORK(_work, _func)					\
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
	INIT_WORK_ONSTACK(&(_work)->work, (_func))
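
/*
 * Runtime-initialization sketch (struct and names are hypothetical): embed
 * a work item in your own object and recover the object with container_of()
 * from the handler.
 *
 *	struct my_dev {
 *		struct work_struct reset_work;
 *	};
 *
 *	static void my_dev_reset_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev,
 *						  reset_work);
 *		... operate on dev ...
 *	}
 *
 *	INIT_WORK(&dev->reset_work, my_dev_reset_fn);
 */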

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)
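
/*
 * E.g. (sketch, hypothetical names): skip re-arming when already queued.
 * Note that the check is inherently racy and only a hint unless the caller
 * excludes concurrent queueing by other means.
 *
 *	if (!delayed_work_pending(&dev->poll_work))
 *		schedule_delayed_work(&dev->poll_work, HZ);
 */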

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum {
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU, breaking the idleness, which
	 * in turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel param
	 * is specified.  Per-cpu workqueues which are identified as
	 * contributing significantly to power consumption are marked with
	 * this flag, and enabling the power_efficient mode leads to
	 * noticeable power saving at the cost of a small performance
	 * disadvantage.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */
	__WQ_ORDERED_EXPLICIT	= 1 << 19, /* internal: alloc_ordered_workqueue() */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are the same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * remaining args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
__printf(1, 4) struct workqueue_struct *
alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
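
/*
 * Allocation sketch (hypothetical names): allocate at probe time, check for
 * failure, and pair with destroy_workqueue() on teardown.
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("mydrv_wq", WQ_UNBOUND | WQ_FREEZABLE, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(wq);
 */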

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args...: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  It is
 * implemented as an unbound workqueue with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |		\
			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)
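
/*
 * E.g. (sketch, hypothetical names): an ordered workqueue serializes its
 * items, so two items queued back to back never run concurrently and
 * complete in queueing order.
 *
 *	wq = alloc_ordered_workqueue("mydrv_ordered", WQ_MEM_RECLAIM);
 *	queue_work(wq, &first);
 *	queue_work(wq, &second);
 *
 * where second runs only after first has finished.
 */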

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern bool queue_work_node(int node, struct workqueue_struct *wq,
			    struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern bool flush_rcu_work(struct rcu_work *rwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_workqueue_state(void);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 *
 * Memory-ordering properties:  If it returns %true, guarantees that all stores
 * preceding the call to queue_work() in the program order will be visible from
 * the CPU which will execute @work by the time such work executes, e.g.,
 *
 * { x is initially 0 }
 *
 *   CPU0				CPU1
 *
 *   WRITE_ONCE(x, 1);			[ @work is being executed ]
 *   r0 = queue_work(wq, work);		  r1 = READ_ONCE(x);
 *
 * Forbids: r0 == true && r1 == 0
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}
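
/*
 * E.g. (sketch, hypothetical names):
 *
 *	if (!queue_work(dev->wq, &dev->tx_work))
 *		pr_debug("tx_work was already pending\n");
 */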

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
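
/*
 * E.g. (sketch, hypothetical names): a common debounce pattern - every call
 * pushes the deadline out again, so the handler runs once, HZ jiffies after
 * the last event.
 *
 *	mod_delayed_work(system_wq, &dev->flush_work, HZ);
 */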

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 *
 * Shares the same memory-ordering properties of queue_work(), cf. the
 * DocBook header of queue_work().
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
static inline void flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}
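
/*
 * E.g. (sketch, hypothetical names): per the warning above, prefer
 * targeting the specific work item over flushing the whole global
 * workqueue:
 *
 *	cancel_work_sync(&dev->reset_work);
 *
 * instead of calling flush_scheduled_work().
 */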

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}
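
/*
 * E.g. (sketch, hypothetical names): poll again in roughly one second.
 *
 *	schedule_delayed_work(&dev->poll_work, msecs_to_jiffies(1000));
 */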

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
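
/*
 * E.g. (sketch; probe_one() is a hypothetical CPU-local helper): run a
 * function synchronously on CPU 2 and collect its return value.  On !SMP
 * builds the function simply runs inline on the current CPU.
 *
 *	static long probe_fn(void *arg)
 *	{
 *		return probe_one(arg);
 *	}
 *
 *	long ret = work_on_cpu(2, probe_fn, dev);
 */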

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else	/* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif	/* CONFIG_SYSFS */

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else	/* CONFIG_WQ_WATCHDOG */
static inline void wq_watchdog_touch(int cpu) { }
#endif	/* CONFIG_WQ_WATCHDOG */

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

void __init workqueue_init_early(void);
void __init workqueue_init(void);

#endif	/* _LINUX_WORKQUEUE_H */