/* Excerpts from include/linux/workqueue.h: lines matching the tokens "power", "on", and "delay" */
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#define work_data_bits(work) ((unsigned long *)(&(work)->data))

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	 * pool it was on.  Cap at 31 bits and use the highest number to
	 * indicate that no pool is associated.
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	/* target workqueue and CPU ->timer uses to queue ->work */

	/* target workqueue ->rcu uses to queue ->work */

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 */
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\

		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\

		INIT_WORK(&(_work)->work, (_func));			\
		__init_timer(&(_work)->timer,				\

		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__init_timer_on_stack(&(_work)->timer,			\

	INIT_WORK(&(_work)->work, (_func))

	INIT_WORK_ONSTACK(&(_work)->work, (_func))
/**
 * work_pending - Find out whether a work item is currently pending
 */

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)
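/*
 * Illustrative sketch, not part of workqueue.h: how a driver might use
 * the INIT_WORK()/INIT_DELAYED_WORK() macros and the pending tests above.
 * struct my_dev, my_io_fn() and my_poll_fn() are hypothetical names.
 */
#include <linux/workqueue.h>

struct my_dev {
	struct work_struct io_work;
	struct delayed_work poll_work;
};

static void my_io_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, io_work);

	pr_info("servicing I/O for dev %p\n", dev);
}

static void my_poll_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(to_delayed_work(work),
					  struct my_dev, poll_work);

	pr_info("polling dev %p\n", dev);
}

static void my_dev_init_work(struct my_dev *dev)
{
	INIT_WORK(&dev->io_work, my_io_fn);
	INIT_DELAYED_WORK(&dev->poll_work, my_poll_fn);

	/* both items are freshly initialized, so neither is pending yet */
	WARN_ON(work_pending(&dev->io_work));
	WARN_ON(delayed_work_pending(&dev->poll_work));
}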
/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
 * Per-cpu workqueues are generally preferred because they tend to
 * show better performance thanks to cache locality.  Per-cpu
 * workqueues exclude the scheduler from choosing the CPU to
 * execute the worker threads, which has an unfortunate side effect
 * of increasing power consumption.
 *
 * The scheduler considers a CPU idle if it doesn't have any task
 * to execute and tries to keep idle cores idle to conserve power;
 * however, for example, a per-cpu work item scheduled from an
 * interrupt handler on an idle CPU will force the scheduler to
 * execute the work item on that CPU, breaking the idleness, which in
 * turn may lead to more scheduling choices which are sub-optimal
 * in terms of power consumption.
 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
 * but become unbound if the workqueue.power_efficient kernel param is
 * specified.  Per-cpu workqueues which contribute significantly to
 * power consumption are marked with this flag, and enabling the
 * power_efficient mode leads to noticeable power saving at the cost
 * of a small performance penalty.
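/*
 * Illustrative sketch, not part of workqueue.h: allocating a workqueue
 * that is per-cpu by default but turns unbound when the
 * workqueue.power_efficient boot parameter is set.  The name "mydrv"
 * is hypothetical.
 */
static struct workqueue_struct *mydrv_wq;

static int mydrv_create_wq(void)
{
	mydrv_wq = alloc_workqueue("mydrv", WQ_POWER_EFFICIENT, 0);
	return mydrv_wq ? 0 : -ENOMEM;
}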
	/* unbound wq's aren't per-cpu, scale max_active according to #cpus */

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queueing delay.  Don't queue works which can run for too long.
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are the same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.
/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
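/*
 * Illustrative sketch, not part of workqueue.h: an ordered workqueue
 * serializes its work items (at most one runs at a time, in queueing
 * order), which makes it a simple serialization mechanism.
 * "mydrv_cmd" is a hypothetical name; WQ_MEM_RECLAIM is one of the
 * flags meaningful for ordered workqueues.
 */
static struct workqueue_struct *mydrv_cmd_wq;

static int mydrv_cmd_wq_create(void)
{
	mydrv_cmd_wq = alloc_ordered_workqueue("mydrv_cmd", WQ_MEM_RECLAIM);
	return mydrv_cmd_wq ? 0 : -ENOMEM;
}

static void mydrv_cmd_wq_destroy(void)
{
	/* safe only once no new work can be queued; drains what is left */
	destroy_workqueue(mydrv_cmd_wq);
}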
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 *
 * Memory-ordering properties: If it returns %true, guarantees that all stores
 * preceding the call to queue_work() in the program order will be visible from
 * the CPU which will execute @work by the time such work executes.
 */
/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
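/*
 * Illustrative sketch, not part of workqueue.h: queueing onto a
 * driver-owned workqueue.  Reuses the hypothetical my_dev/mydrv_wq
 * names from the sketches above.
 */
static void mydrv_kick(struct my_dev *dev)
{
	/* returns false if io_work was already queued; often ignorable */
	queue_work(mydrv_wq, &dev->io_work);

	/* first poll roughly 100ms from now */
	queue_delayed_work(mydrv_wq, &dev->poll_work, msecs_to_jiffies(100));
}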
/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to mod_delayed_work_on() on the local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
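/*
 * Illustrative sketch, not part of workqueue.h: because
 * mod_delayed_work() queues @dwork if it is idle and otherwise just
 * moves its timer, calling it on every event debounces a burst of
 * events into a single execution.  Hypothetical names as above.
 */
static void mydrv_on_event(struct my_dev *dev)
{
	/* poll_work runs once, ~50ms after the last event of a burst */
	mod_delayed_work(system_wq, &dev->poll_work, msecs_to_jiffies(50));
}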
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 *
 * Shares the same memory-ordering properties of queue_work(), cf. the
 * DocBook header of queue_work().
 */
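/*
 * Illustrative sketch, not part of workqueue.h: deferring work from an
 * interrupt handler to process context via the kernel-global workqueue.
 * mydrv_irq() is hypothetical; schedule_work() is safe to call from
 * hard-irq context.
 */
#include <linux/interrupt.h>

static irqreturn_t mydrv_irq(int irq, void *data)
{
	struct my_dev *dev = data;

	schedule_work(&dev->io_work);	/* heavy lifting runs in my_io_fn() */
	return IRQ_HANDLED;
}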
/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.  Either of the following situations will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 */
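/*
 * Illustrative sketch, not part of workqueue.h: a narrower and safer
 * alternative to flush_scheduled_work() is to wait on just your own
 * items, and never while holding a lock that the work functions take.
 * Hypothetical names as above.
 */
static void mydrv_quiesce(struct my_dev *dev)
{
	flush_work(&dev->io_work);		   /* wait for our item only */
	cancel_delayed_work_sync(&dev->poll_work); /* cancel and wait */
}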
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}
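/*
 * Illustrative sketch, not part of workqueue.h: a self-rearming poller
 * built on schedule_delayed_work().  to_delayed_work() recovers the
 * containing delayed_work from the work_struct handed to the callback.
 * my_heartbeat_fn() is a hypothetical name.
 */
static void my_heartbeat_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	/* ... sample hardware, update stats ... */

	schedule_delayed_work(dwork, HZ);	/* run again in one second */
}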