/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/rcupdate.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(struct timer_list *t);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is no color, used for works which don't
	 * participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/*
	 * Reserve 8 bits off of the pwq pointer w/ debugobjects turned off.
	 * This makes pwqs aligned to 256 bytes and allows 15 workqueue
	 * flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,

	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,
	WORK_OFFQ_CANCELING	= (1 << __WORK_OFFQ_CANCELING),

	/*
	 * When a work item is off queue, its high bits point to the last
	 * pool it was on.  Cap at 31 bits and use the highest number to
	 * indicate that no pool is associated.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,

	/* convenience constants */
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};
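/*
 * Illustrative sketch (not part of the original header): with
 * CONFIG_DEBUG_OBJECTS_WORK disabled, the constants above lay out the
 * work->data word as
 *
 *	bits 0-3	WORK_STRUCT_{PENDING,DELAYED,PWQ,LINKED}
 *	on queue:	bits 4-7 hold the flush color and bits 8+ the pwq
 *			pointer (WORK_STRUCT_PWQ is set)
 *	off queue:	bit 4 is WORK_OFFQ_CANCELING and bits 5+ hold the
 *			ID of the last pool the item ran on
 *
 * so, for example,
 *
 *	if (*work_data_bits(work) & WORK_STRUCT_PWQ)
 *		...	data points to a pool_workqueue
 *
 * distinguishes the two encodings.
 */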

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	/* target workqueue ->rcu uses to queue ->work */
	struct workqueue_struct *wq;
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 */
	cpumask_var_t cpumask;

	/**
	 * @no_numa: disable NUMA affinity
	 *
	 * Unlike other fields, ``no_numa`` isn't a property of a worker_pool. It
	 * only modifies how :c:func:`apply_workqueue_attrs` selects pools and thus
	 * doesn't participate in pool hash calculations or equality comparisons.
	 */
	bool no_numa;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
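/*
 * Illustrative usage sketch (not part of the original header; all names
 * are hypothetical).  A statically defined work item needs no runtime
 * initialization:
 *
 *	static void frob_fn(struct work_struct *work)
 *	{
 *		...
 *	}
 *	static DECLARE_WORK(frob_work, frob_fn);
 *	static DECLARE_DELAYED_WORK(frob_dwork, frob_fn);
 *
 *	schedule_work(&frob_work);
 *	schedule_delayed_work(&frob_dwork, msecs_to_jiffies(100));
 */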

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__init_timer(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__init_timer_on_stack(&(_work)->timer,			\
				      delayed_work_timer_fn,		\
				      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

#define INIT_RCU_WORK(_work, _func)					\
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
	INIT_WORK_ONSTACK(&(_work)->work, (_func))
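/*
 * Illustrative usage sketch (hypothetical names): embedding work items in
 * a driver structure and initializing them at probe time:
 *
 *	struct my_dev {
 *		struct work_struct reset_work;
 *		struct delayed_work poll_work;
 *	};
 *
 *	static void my_reset_fn(struct work_struct *work)
 *	{
 *		struct my_dev *md = container_of(work, struct my_dev,
 *						 reset_work);
 *		...
 *	}
 *
 *	INIT_WORK(&md->reset_work, my_reset_fn);
 *	INIT_DELAYED_WORK(&md->poll_work, my_poll_fn);
 */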

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)
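/*
 * Illustrative note (not part of the original header): these tests are
 * inherently racy; the state may change as soon as the test returns, so
 * treat the result as a hint, e.g.
 *
 *	if (!delayed_work_pending(&md->poll_work))
 *		schedule_delayed_work(&md->poll_work, HZ);
 *
 * queue_delayed_work() itself returns %false for an already-pending item,
 * so the guard above is merely an optimization.
 */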

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum {
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see wq_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU, breaking the idleness, which
	 * in turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel param
	 * is specified.  Per-cpu workqueues which contribute significantly
	 * to power consumption are identified and marked with this flag,
	 * and enabling the power_efficient mode leads to noticeable power
	 * saving at the cost of a small performance disadvantage.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */
	__WQ_ORDERED_EXPLICIT	= 1 << 19, /* internal: alloc_ordered_workqueue() */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are the same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;

extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
	struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @args...: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * The __lock_name macro dance is to guarantee that a single lock_class_key
 * doesn't end up with different names, which isn't allowed by lockdep.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(fmt, flags, max_active, args...)		\
({									\
	static struct lock_class_key __key;				\
	const char *__lock_name;					\
									\
	__lock_name = "(wq_completion)"#fmt#args;			\
									\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      &__key, __lock_name, ##args);		\
})
#else
#define alloc_workqueue(fmt, flags, max_active, args...)		\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      NULL, NULL, ##args)
#endif

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args...: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |		\
			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)
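/*
 * Illustrative usage sketch (hypothetical names):
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("mydrv_wq", WQ_UNBOUND | WQ_FREEZABLE, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *	queue_work(wq, &md->reset_work);
 *	...
 *	destroy_workqueue(wq);
 *
 * or, for strictly ordered one-at-a-time execution:
 *
 *	wq = alloc_ordered_workqueue("mydrv_ordered", WQ_MEM_RECLAIM);
 */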

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
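/*
 * Illustrative sketch (hypothetical names): queue_rcu_work() queues
 * @rwork->work after an RCU grace period, which suits "free after grace
 * period" patterns:
 *
 *	static void free_obj_fn(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(to_rcu_work(work),
 *						  struct my_obj, free_rwork);
 *		kfree(obj);
 *	}
 *
 *	INIT_RCU_WORK(&obj->free_rwork, free_obj_fn);
 *	queue_rcu_work(system_wq, &obj->free_rwork);
 */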

extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern bool flush_rcu_work(struct rcu_work *rwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_workqueue_state(void);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
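/*
 * Illustrative sketch (hypothetical names): mod_delayed_work() is handy for
 * debouncing; every call pushes execution out by @delay whether or not the
 * item was already pending:
 *
 *	mod_delayed_work(system_wq, &md->idle_work, msecs_to_jiffies(500));
 */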

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
static inline void flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}
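/*
 * Illustrative sketch (hypothetical names) of the targeted alternative
 * recommended above, e.g. on driver teardown:
 *
 *	cancel_delayed_work_sync(&md->poll_work);
 *	cancel_work_sync(&md->reset_work);
 */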

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
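/*
 * Illustrative sketch (hypothetical names): work_on_cpu() runs @fn
 * synchronously on @cpu in a workqueue worker and returns its result:
 *
 *	static long probe_cpu_fn(void *arg)
 *	{
 *		...
 *		return 0;
 *	}
 *
 *	ret = work_on_cpu(cpu, probe_cpu_fn, NULL);
 */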

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else	/* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif	/* CONFIG_SYSFS */

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else	/* CONFIG_WQ_WATCHDOG */
static inline void wq_watchdog_touch(int cpu) { }
#endif	/* CONFIG_WQ_WATCHDOG */

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

int __init workqueue_init_early(void);
int __init workqueue_init(void);

#endif