
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))
WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,

/* data contains off-queue information when !WORK_STRUCT_PWQ */
WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,

WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
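Each of these constants uses the same bit-mask idiom: (1UL << N) - 1 yields a value with the low N bits set, which is how flag bits and a pool/pwq pointer can share the single data word that work_data_bits() exposes. A standalone sketch of just the arithmetic; the width of 8 is illustrative, the kernel derives the real widths from the preceding enum entries:

#include <stdio.h>

int main(void)
{
	unsigned long flag_bits = 8;			/* illustrative width */
	unsigned long mask = (1UL << flag_bits) - 1;	/* low 8 bits: 0xff */
	unsigned long data = 0xdeadbe00UL | 0x3;	/* "pointer" bits | flags */

	printf("flags   = %#lx\n", data & mask);	/* prints 0x3 */
	printf("pointer = %#lx\n", data & ~mask);	/* prints 0xdeadbe00 */
	return 0;
}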
/* in struct delayed_work: target workqueue and CPU ->timer uses to queue ->work */
/* in struct rcu_work: target workqueue ->rcu uses to queue ->work */
/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 */
/* work item initialization, lockdep-enabled variant */
(_work)->data = (atomic_long_t) WORK_DATA_INIT();			\
lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
INIT_LIST_HEAD(&(_work)->entry);					\
(_work)->func = (_func);						\

/* work item initialization without lockdep */
(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
INIT_LIST_HEAD(&(_work)->entry);			\
(_work)->func = (_func);				\

/* INIT_DELAYED_WORK(): initialize the embedded work item and its timer */
INIT_WORK(&(_work)->work, (_func));			\
__init_timer(&(_work)->timer,				\

/* INIT_DELAYED_WORK_ONSTACK(): variant for items with automatic storage */
INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
__init_timer_on_stack(&(_work)->timer,			\

/* INIT_RCU_WORK() / INIT_RCU_WORK_ONSTACK(): RCU work wraps a plain work item */
INIT_WORK(&(_work)->work, (_func))

INIT_WORK_ONSTACK(&(_work)->work, (_func))
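Together these initializers set up every field a work item needs: the data word, the list entry, and the callback. A minimal sketch of driver usage; struct my_dev, the function names, and the wiring are all illustrative, not from this header:

#include <linux/printk.h>
#include <linux/workqueue.h>

/* Hypothetical driver state. */
struct my_dev {
	struct work_struct irq_work;	/* deferred interrupt handling */
	struct delayed_work poll_work;	/* periodic polling */
	int result;			/* produced before queueing */
};

static void my_irq_work_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, irq_work);

	pr_debug("deferred work for %p, running in process context\n", dev);
}

static void my_poll_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(to_delayed_work(work),
					  struct my_dev, poll_work);

	pr_debug("polling %p\n", dev);
}

static void my_dev_setup(struct my_dev *dev)
{
	INIT_WORK(&dev->irq_work, my_irq_work_fn);
	INIT_DELAYED_WORK(&dev->poll_work, my_poll_fn);
}

The _ONSTACK variants pair with destroy_work_on_stack() and destroy_delayed_work_on_stack() once an on-stack item has finished.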
/**
 * work_pending - Find out whether a work item is currently pending
 */

/**
 * delayed_work_pending - Find out whether a delayable work item is
 * currently pending
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)
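Both helpers only test the PENDING bit, so they are advisory: the state can change immediately after the check. A sketch of a common guard, reusing the hypothetical my_dev from the earlier sketch:

/* Avoid redundant timer restarts; the queueing functions would
 * reject a pending item anyway, this just skips the setup cost. */
if (!delayed_work_pending(&dev->poll_work))
	schedule_delayed_work(&dev->poll_work, msecs_to_jiffies(100));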
/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * Per-cpu workqueues are generally preferred because they tend to
 * show better performance thanks to cache locality.  Per-cpu
 * workqueues, however, exclude the scheduler from choosing the CPU:
 * for example, a per-cpu work item scheduled from an interrupt
 * handler on an idle CPU forces that CPU out of idleness, which in
 * turn may lead to scheduling choices that are sub-optimal for
 * power consumption.
 *
 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
 * but become unbound if the workqueue.power_efficient kernel
 * parameter is specified.  Per-cpu workqueues that contribute
 * significantly to power consumption are identified and marked with
 * this flag, so enabling the power-efficient mode trades a small
 * performance cost for a noticeable power saving.
 */
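A sketch of opting a queue into this mode; the queue name is illustrative:

static struct workqueue_struct *pe_wq;

static int my_driver_init(void)
{
	/* Per-cpu by default; becomes unbound when booted with
	 * workqueue.power_efficient=1. */
	pe_wq = alloc_workqueue("my_pe_wq", WQ_POWER_EFFICIENT, 0);
	if (!pe_wq)
		return -ENOMEM;
	return 0;
}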
/* unbound wq's aren't per-cpu, scale max_active according to #cpus */

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush times, so don't queue works which can run for
 * too long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
 *
 * The *_power_efficient_wq variants are converted into WQ_UNBOUND
 * variants if 'wq_power_efficient' is enabled; otherwise they are the
 * same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.
 */
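A sketch contrasting the two placements, reusing the hypothetical my_dev item; the two calls are alternatives, not a sequence:

/* Cache-local: runs on the submitting CPU's worker pool. */
queue_work(system_wq, &dev->irq_work);

/* Unbound: any CPU, chosen by the scheduler; no concurrency management. */
queue_work(system_unbound_wq, &dev->irq_work);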
/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 *
 * For detailed information on the WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 */
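A sketch of an allocation for I/O completion work; WQ_MEM_RECLAIM and the max_active value of 4 are illustrative choices, not defaults:

static struct workqueue_struct *io_wq;

static int my_io_init(void)
{
	/* WQ_MEM_RECLAIM guarantees forward progress under memory
	 * pressure; max_active = 4 caps concurrent in-flight items. */
	io_wq = alloc_workqueue("my_io_wq", WQ_MEM_RECLAIM | WQ_UNBOUND, 4);
	if (!io_wq)
		return -ENOMEM;
	return 0;
}

static void my_io_exit(void)
{
	destroy_workqueue(io_wq);	/* completes pending work first */
}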
/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 *
 * An ordered workqueue executes at most one work item at any given
 * time in the queued order.
 */
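Ordered workqueues give cheap FIFO serialization without a dedicated thread; a sketch, with the name again illustrative:

/* Executes at most one item at a time, in queueing order. */
struct workqueue_struct *seq_wq = alloc_ordered_workqueue("my_seq_wq", 0);

In current kernels this expands to alloc_workqueue() with WQ_UNBOUND | __WQ_ORDERED and a max_active of 1.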
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * Memory-ordering properties: if it returns %true, guarantees that all
 * stores preceding the call to queue_work() in program order will be
 * visible to the CPU that executes @work by the time @work runs.
 */
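The ordering guarantee is what makes the common publish-then-queue pattern safe. A sketch continuing the earlier hypotheticals, with compute_result() standing in for any producer:

/* All stores before a successful queue_work() are visible to the
 * worker; no extra barrier is needed for this handoff. */
dev->result = compute_result();		/* hypothetical producer */
if (!queue_work(io_wq, &dev->irq_work))
	pr_debug("work already pending, not requeued\n");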
/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
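A sketch of a delayed submission onto the hypothetical io_wq from the earlier sketch:

/* Queue poll_work onto io_wq after roughly 50 ms. */
queue_delayed_work(io_wq, &dev->poll_work, msecs_to_jiffies(50));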
/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to mod_delayed_work_on() on the local CPU.
 */
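Because it reschedules a pending item instead of rejecting it, mod_delayed_work() is the natural primitive for restartable timeouts. A watchdog-style sketch with the same hypothetical names:

/* Push the deadline out to one second from now, whether or not
 * the item was already pending. */
mod_delayed_work(io_wq, &dev->poll_work, HZ);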
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
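A sketch; pinning to CPU 0 here is purely illustrative, e.g. for touching that CPU's per-CPU data:

/* Run on CPU 0's pool of the kernel-global workqueue. */
schedule_work_on(0, &dev->irq_work);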
/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue
 * and %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 *
 * Shares the same memory-ordering properties as queue_work(); cf. the
 * DocBook header of queue_work().
 */
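A fire-and-forget sketch using the hypothetical my_dev item:

if (!schedule_work(&dev->irq_work))
	pr_debug("already on the kernel-global workqueue\n");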
/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It is easy to deadlock:
 * either the caller holds a lock that a queued work item needs, or the
 * caller itself runs from a work item on that workqueue.
 */
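Given the deadlock hazards, waiting on specific items is usually the safer pattern; a teardown sketch with the earlier hypothetical names:

/* Prefer flushing or cancelling your own items over flushing the
 * whole kernel-global workqueue. */
cancel_delayed_work_sync(&dev->poll_work);
flush_work(&dev->irq_work);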
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
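A sketch, again with illustrative CPU and delay values:

/* After ~2 s, run poll_work on CPU 1's global-workqueue pool. */
schedule_delayed_work_on(1, &dev->poll_work, 2 * HZ);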
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
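A final sketch with the same hypothetical item:

/* One tick from now; a delay of 0 queues for immediate execution. */
schedule_delayed_work(&dev->poll_work, 1);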