/Linux-v5.10/kernel/ |
D | kthread.c |
    643  void __kthread_init_worker(struct kthread_worker *worker,  in __kthread_init_worker() argument
    647  memset(worker, 0, sizeof(struct kthread_worker));  in __kthread_init_worker()
    648  raw_spin_lock_init(&worker->lock);  in __kthread_init_worker()
    649  lockdep_set_class_and_name(&worker->lock, key, name);  in __kthread_init_worker()
    650  INIT_LIST_HEAD(&worker->work_list);  in __kthread_init_worker()
    651  INIT_LIST_HEAD(&worker->delayed_work_list);  in __kthread_init_worker()
    672  struct kthread_worker *worker = worker_ptr;  in kthread_worker_fn() local
    679  WARN_ON(worker->task && worker->task != current);  in kthread_worker_fn()
    680  worker->task = current;  in kthread_worker_fn()
    682  if (worker->flags & KTW_FREEZABLE)  in kthread_worker_fn()
    [all …]
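The matches above are the initialisation and main loop of the kthread_worker machinery. A minimal sketch of the dynamic API built on top of it — not taken from the indexed sources; my_drv, my_work_fn and the start/stop helpers are hypothetical:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/kthread.h>

struct my_drv {
        struct kthread_worker *worker;
        struct kthread_work work;
        int runs;
};

static void my_work_fn(struct kthread_work *work)
{
        struct my_drv *drv = container_of(work, struct my_drv, work);

        drv->runs++;    /* executed by the dedicated thread created below */
}

static int my_drv_start(struct my_drv *drv)
{
        /* spawns a "my_drv" thread that runs kthread_worker_fn() internally */
        drv->worker = kthread_create_worker(0, "my_drv");
        if (IS_ERR(drv->worker))
                return PTR_ERR(drv->worker);

        kthread_init_work(&drv->work, my_work_fn);
        kthread_queue_work(drv->worker, &drv->work);
        return 0;
}

static void my_drv_stop(struct my_drv *drv)
{
        kthread_flush_worker(drv->worker);
        kthread_destroy_worker(drv->worker);
}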
|
D | workqueue.c |
    169  struct worker *manager; /* L: purely informational */
    251  struct worker *rescuer; /* MD: rescue worker */
    407  #define for_each_pool_worker(worker, pool) \  argument
    408  list_for_each_entry((worker), &(pool)->workers, node) \
    815  static struct worker *first_idle_worker(struct worker_pool *pool)  in first_idle_worker()
    820  return list_first_entry(&pool->idle_list, struct worker, entry);  in first_idle_worker()
    834  struct worker *worker = first_idle_worker(pool);  in wake_up_worker() local
    836  if (likely(worker))  in wake_up_worker()
    837  wake_up_process(worker->task);  in wake_up_worker()
    848  struct worker *worker = kthread_data(task);  in wq_worker_running() local
    [all …]
|
D | async.c |
    330  struct worker *worker = current_wq_worker();  in current_is_async() local
    332  return worker && worker->current_func == async_run_entry_fn;  in current_is_async()
|
D | workqueue_internal.h |
    24  struct worker {  struct
    65  static inline struct worker *current_wq_worker(void)  in current_wq_worker()
|
/Linux-v5.10/fs/ |
D | io-wq.c |
    135  static bool io_worker_get(struct io_worker *worker)  in io_worker_get() argument
    137  return refcount_inc_not_zero(&worker->ref);  in io_worker_get()
    140  static void io_worker_release(struct io_worker *worker)  in io_worker_release() argument
    142  if (refcount_dec_and_test(&worker->ref))  in io_worker_release()
    143  wake_up_process(worker->task);  in io_worker_release()
    151  static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)  in __io_worker_unuse() argument
    155  if (worker->saved_creds) {  in __io_worker_unuse()
    156  revert_creds(worker->saved_creds);  in __io_worker_unuse()
    157  worker->cur_creds = worker->saved_creds = NULL;  in __io_worker_unuse()
    160  if (current->files != worker->restore_files) {  in __io_worker_unuse()
    [all …]
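io_worker_get()/io_worker_release() above are a standard refcount get/put pair (io-wq wakes the worker task on the final put instead of freeing). A generic sketch of that pattern, using a hypothetical struct foo rather than the io_uring types:

#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
        refcount_t ref;
        /* ... payload ... */
};

static struct foo *foo_alloc(void)
{
        struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

        if (f)
                refcount_set(&f->ref, 1);       /* caller holds the first reference */
        return f;
}

static bool foo_get(struct foo *f)
{
        /* fails once the count has hit zero, so a dying object is never revived */
        return refcount_inc_not_zero(&f->ref);
}

static void foo_put(struct foo *f)
{
        if (refcount_dec_and_test(&f->ref))
                kfree(f);
}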
|
/Linux-v5.10/include/linux/ |
D | kthread.h |
    101  struct kthread_worker *worker;  member
    111  #define KTHREAD_WORKER_INIT(worker) { \  argument
    112  .lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock), \
    113  .work_list = LIST_HEAD_INIT((worker).work_list), \
    114  .delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
    128  #define DEFINE_KTHREAD_WORKER(worker) \  argument
    129  struct kthread_worker worker = KTHREAD_WORKER_INIT(worker)
    143  # define KTHREAD_WORKER_INIT_ONSTACK(worker) \  argument
    144  ({ kthread_init_worker(&worker); worker; })
    145  # define DEFINE_KTHREAD_WORKER_ONSTACK(worker) \  argument
    [all …]
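DEFINE_KTHREAD_WORKER() above only initialises the structure; a thread running kthread_worker_fn() still has to be attached by hand. A sketch of that static variant, with hypothetical my_worker/my_work/my_start names:

#include <linux/err.h>
#include <linux/kthread.h>

static DEFINE_KTHREAD_WORKER(my_worker);
static struct kthread_work my_work;

static void my_work_fn(struct kthread_work *work)
{
        /* executed by the thread started in my_start() */
}

static int my_start(void)
{
        struct task_struct *task;

        task = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
        if (IS_ERR(task))
                return PTR_ERR(task);

        kthread_init_work(&my_work, my_work_fn);
        kthread_queue_work(&my_worker, &my_work);
        /* teardown would kthread_flush_worker() and then kthread_stop(task) */
        return 0;
}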
|
/Linux-v5.10/tools/perf/bench/ |
D | futex-hash.c |
    46  struct worker {  struct
    70  struct worker *w = (struct worker *) arg;  in workerfn() argument
    127  struct worker *worker = NULL;  in bench_futex_hash() local
    148  worker = calloc(nthreads, sizeof(*worker));  in bench_futex_hash()
    149  if (!worker)  in bench_futex_hash()
    167  worker[i].tid = i;  in bench_futex_hash()
    168  worker[i].futex = calloc(nfutexes, sizeof(*worker[i].futex));  in bench_futex_hash()
    169  if (!worker[i].futex)  in bench_futex_hash()
    179  ret = pthread_create(&worker[i].thread, &thread_attr, workerfn,  in bench_futex_hash()
    180  (void *)(struct worker *) &worker[i]);  in bench_futex_hash()
    [all …]
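The same thread-array pattern recurs across the futex and epoll benchmarks listed here: one calloc()'d slot per thread, handed to pthread_create() and joined at the end. A stripped-down userspace sketch of that pattern (hypothetical; no CPU affinity or error paths, build with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct worker {
        pthread_t thread;
        int tid;
        unsigned long ops;
};

static void *workerfn(void *arg)
{
        struct worker *w = arg;

        w->ops++;               /* the benchmarked operation would loop here */
        return NULL;
}

int main(void)
{
        int i, nthreads = 4;
        struct worker *worker = calloc(nthreads, sizeof(*worker));

        if (!worker)
                return 1;

        for (i = 0; i < nthreads; i++) {
                worker[i].tid = i;
                if (pthread_create(&worker[i].thread, NULL, workerfn, &worker[i]))
                        return 1;
        }

        for (i = 0; i < nthreads; i++)
                pthread_join(worker[i].thread, NULL);

        for (i = 0; i < nthreads; i++)
                printf("worker %d: %lu ops\n", worker[i].tid, worker[i].ops);

        free(worker);
        return 0;
}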
|
D | futex-lock-pi.c |
    26  struct worker {  struct
    34  static struct worker *worker;  argument
    81  struct worker *w = (struct worker *) arg;  in workerfn()
    118  static void create_threads(struct worker *w, pthread_attr_t thread_attr,  in create_threads()
    127  worker[i].tid = i;  in create_threads()
    130  worker[i].futex = calloc(1, sizeof(u_int32_t));  in create_threads()
    131  if (!worker[i].futex)  in create_threads()
    134  worker[i].futex = &global_futex;  in create_threads()
    142  if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i]))  in create_threads()
    171  worker = calloc(nthreads, sizeof(*worker));  in bench_futex_lock_pi()
    [all …]
|
D | epoll-wait.c |
    118  struct worker {  struct
    186  struct worker *w = (struct worker *) arg;  in workerfn()
    240  static void nest_epollfd(struct worker *w)  in nest_epollfd()
    292  static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)  in do_threads() argument
    311  struct worker *w = &worker[i];  in do_threads()
    356  (void *)(struct worker *) w);  in do_threads()
    369  struct worker *worker = p;  in writerfn() local
    381  shuffle((void *)worker, nthreads, sizeof(*worker));  in writerfn()
    385  struct worker *w = &worker[i];  in writerfn()
    408  struct worker *w1 = (struct worker *) p1;  in cmpworker()
    [all …]
|
D | epoll-ctl.c |
    67  struct worker {  struct
    132  static inline void do_epoll_op(struct worker *w, int op, int fd)  in do_epoll_op()
    160  static inline void do_random_epoll_op(struct worker *w)  in do_random_epoll_op()
    174  struct worker *w = (struct worker *) arg;  in workerfn()
    204  static void init_fdmaps(struct worker *w, int pct)  in init_fdmaps()
    223  static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)  in do_threads() argument
    234  struct worker *w = &worker[i];  in do_threads()
    267  (void *)(struct worker *) w);  in do_threads()
    304  struct worker *worker = NULL;  in bench_epoll_ctl() local
    339  worker = calloc(nthreads, sizeof(*worker));  in bench_epoll_ctl()
    [all …]
|
D | futex-wake.c |
    41  pthread_t *worker;  variable
    147  worker = calloc(nthreads, sizeof(*worker));  in bench_futex_wake()
    148  if (!worker)  in bench_futex_wake()
    170  block_threads(worker, thread_attr, cpu);  in bench_futex_wake()
    197  ret = pthread_join(worker[i], NULL);  in bench_futex_wake()
    212  free(worker);  in bench_futex_wake()
|
D | futex-requeue.c |
    40  static pthread_t *worker;  variable
    139  worker = calloc(nthreads, sizeof(*worker));  in bench_futex_requeue()
    140  if (!worker)  in bench_futex_requeue()
    165  block_threads(worker, thread_attr, cpu);  in bench_futex_requeue()
    204  ret = pthread_join(worker[i], NULL);  in bench_futex_requeue()
    218  free(worker);  in bench_futex_requeue()
|
/Linux-v5.10/drivers/net/wireguard/ |
D | queueing.c |
    12  struct multicore_worker __percpu *worker =  in wg_packet_percpu_multicore_worker_alloc() local
    15  if (!worker)  in wg_packet_percpu_multicore_worker_alloc()
    19  per_cpu_ptr(worker, cpu)->ptr = ptr;  in wg_packet_percpu_multicore_worker_alloc()
    20  INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function);  in wg_packet_percpu_multicore_worker_alloc()
    22  return worker;  in wg_packet_percpu_multicore_worker_alloc()
    36  queue->worker = wg_packet_percpu_multicore_worker_alloc(  in wg_packet_queue_init()
    38  if (!queue->worker) {  in wg_packet_queue_init()
    52  free_percpu(queue->worker);  in wg_packet_queue_free()
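wg_packet_percpu_multicore_worker_alloc() above hands one work_struct to every possible CPU. A generic sketch of that allocation pattern, with hypothetical names rather than the WireGuard types:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>

struct multicore_worker_example {
        void *ptr;
        struct work_struct work;
};

static struct multicore_worker_example __percpu *
example_percpu_worker_alloc(work_func_t function, void *ptr)
{
        int cpu;
        struct multicore_worker_example __percpu *worker;

        worker = alloc_percpu(struct multicore_worker_example);
        if (!worker)
                return NULL;

        /* every CPU gets its own work item, all running the same handler */
        for_each_possible_cpu(cpu) {
                per_cpu_ptr(worker, cpu)->ptr = ptr;
                INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function);
        }
        return worker;
}

static void example_percpu_worker_free(struct multicore_worker_example __percpu *worker)
{
        free_percpu(worker);
}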
|
/Linux-v5.10/drivers/gpu/drm/ |
D | drm_vblank_work.c |
    60  kthread_queue_work(vblank->worker, &work->base);  in drm_handle_vblank_works()
    147  ret = kthread_queue_work(vblank->worker, &work->base);  in drm_vblank_work_schedule()
    251  struct kthread_worker *worker;  in drm_vblank_worker_init() local
    255  worker = kthread_create_worker(0, "card%d-crtc%d",  in drm_vblank_worker_init()
    258  if (IS_ERR(worker))  in drm_vblank_worker_init()
    259  return PTR_ERR(worker);  in drm_vblank_worker_init()
    261  vblank->worker = worker;  in drm_vblank_worker_init()
    263  sched_set_fifo(worker->task);  in drm_vblank_worker_init()
|
D | drm_flip_work.c |
    110  queue_work(wq, &work->worker);  in drm_flip_work_commit()
    116  struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);  in flip_worker()
    156  INIT_WORK(&work->worker, flip_worker);  in drm_flip_work_init()
|
/Linux-v5.10/Documentation/core-api/ |
D | workqueue.rst |
    20  queue is called workqueue and the thread is called worker.
    22  While there are work items on the workqueue the worker executes the
    24  there is no work item left on the workqueue the worker becomes idle.
    25  When a new work item gets queued, the worker begins executing again.
    32  worker thread per CPU and a single threaded (ST) wq had one worker
    42  worker pool. An MT wq could provide only one execution context per CPU
    60  * Use per-CPU unified worker pools shared by all wq to provide
    64  * Automatically regulate worker pool and level of concurrency so that
    80  Special purpose threads, called worker threads, execute the functions
    82  worker threads become idle. These worker threads are managed in so
    [all …]
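The excerpt above describes the concurrency-managed workqueue (cmwq) design: work items are queued to a wq and executed by threads drawn from shared worker pools. A minimal sketch of queueing onto a dedicated workqueue; names such as my_wq, my_item and my_handler are hypothetical:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

struct my_item {
        struct work_struct work;
        int value;
};

static void my_handler(struct work_struct *work)
{
        struct my_item *item = container_of(work, struct my_item, work);

        pr_info("value %d handled by a pool worker\n", item->value);
        kfree(item);
}

static int my_setup(void)
{
        my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
        return my_wq ? 0 : -ENOMEM;
}

static int my_queue(int value)
{
        struct my_item *item = kzalloc(sizeof(*item), GFP_KERNEL);

        if (!item)
                return -ENOMEM;

        item->value = value;
        INIT_WORK(&item->work, my_handler);
        queue_work(my_wq, &item->work);
        return 0;
}

static void my_teardown(void)
{
        destroy_workqueue(my_wq);       /* drains pending work before freeing */
}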
|
/Linux-v5.10/samples/seccomp/ |
D | user-trap.c |
    204  pid_t worker = 0, tracer = 0;  in main() local
    211  worker = fork();  in main()
    212  if (worker < 0) {  in main()
    217  if (worker == 0) {  in main()
    343  if (waitpid(worker, &status, 0) != worker) {  in main()
    368  if (worker > 0)  in main()
    369  kill(worker, SIGKILL);  in main()
|
/Linux-v5.10/drivers/thermal/intel/ |
D | intel_powerclamp.c |
    74  struct kthread_worker *worker;  member
    402  kthread_queue_delayed_work(w_data->worker,  in clamp_balancing_func()
    434  kthread_queue_work(w_data->worker, &w_data->balancing_work);  in clamp_idle_injection_func()
    477  struct kthread_worker *worker;  in start_power_clamp_worker() local
    479  worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inj/%ld", cpu);  in start_power_clamp_worker()
    480  if (IS_ERR(worker))  in start_power_clamp_worker()
    483  w_data->worker = worker;  in start_power_clamp_worker()
    488  sched_set_fifo(worker->task);  in start_power_clamp_worker()
    492  kthread_queue_work(w_data->worker, &w_data->balancing_work);  in start_power_clamp_worker()
    499  if (!w_data->worker)  in stop_power_clamp_worker()
    [all …]
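The powerclamp lines above combine a per-CPU kthread worker, FIFO priority and self-re-arming delayed work. A sketch of that combination under hypothetical inj_* names, not the powerclamp code itself:

#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>

struct inj_data {
        struct kthread_worker *worker;
        struct kthread_delayed_work dwork;
};

static void inj_func(struct kthread_work *work)
{
        struct inj_data *d = container_of(work, struct inj_data, dwork.work);

        /* do the periodic job, then re-arm for one second later */
        kthread_queue_delayed_work(d->worker, &d->dwork, HZ);
}

static int inj_start(struct inj_data *d, int cpu)
{
        d->worker = kthread_create_worker_on_cpu(cpu, 0, "my_inj/%d", cpu);
        if (IS_ERR(d->worker))
                return PTR_ERR(d->worker);

        sched_set_fifo(d->worker->task);        /* realtime priority, as powerclamp does */
        kthread_init_delayed_work(&d->dwork, inj_func);
        kthread_queue_delayed_work(d->worker, &d->dwork, 0);
        return 0;
}

static void inj_stop(struct inj_data *d)
{
        kthread_cancel_delayed_work_sync(&d->dwork);
        kthread_destroy_worker(d->worker);
}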
|
/Linux-v5.10/drivers/i2c/ |
D | i2c-slave-testunit.c |
    42  struct delayed_work worker;  member
    47  struct testunit_data *tu = container_of(work, struct testunit_data, worker.work);  in i2c_slave_testunit_work()
    115  queue_delayed_work(system_long_wq, &tu->worker,  in i2c_slave_testunit_slave_cb()
    143  INIT_DELAYED_WORK(&tu->worker, i2c_slave_testunit_work);  in i2c_slave_testunit_probe()
    152  cancel_delayed_work_sync(&tu->worker);  in i2c_slave_testunit_remove()
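The testunit shows the delayed-work lifecycle: initialise in probe, queue with a delay from the callback path, cancel synchronously on remove. A sketch of that lifecycle with hypothetical tu_* names:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct tu_data {
        struct delayed_work worker;
        int processed;
};

static void tu_work(struct work_struct *work)
{
        struct tu_data *tu = container_of(work, struct tu_data, worker.work);

        tu->processed++;        /* deferred processing runs here */
}

static void tu_init(struct tu_data *tu)
{
        INIT_DELAYED_WORK(&tu->worker, tu_work);
}

static void tu_arm(struct tu_data *tu, unsigned int delay_ms)
{
        /* system_long_wq, as in the testunit, since the delay may be long */
        queue_delayed_work(system_long_wq, &tu->worker, msecs_to_jiffies(delay_ms));
}

static void tu_remove(struct tu_data *tu)
{
        cancel_delayed_work_sync(&tu->worker);
}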
|
/Linux-v5.10/drivers/hid/ |
D | hid-bigbenff.c |
    184  struct work_struct worker;  member
    191  struct bigben_device, worker);  in bigben_worker()
    248  schedule_work(&bigben->worker);  in hid_bigben_play_effect()
    280  schedule_work(&bigben->worker);  in bigben_set_led()
    312  cancel_work_sync(&bigben->worker);  in bigben_remove()
    353  INIT_WORK(&bigben->worker, bigben_worker);  in bigben_probe()
    394  schedule_work(&bigben->worker);  in bigben_probe()
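This driver, like the caamrng.c, olpc-ec.c and ams-core.c entries below, embeds a plain work_struct: schedule from a fast path, do the work later in process context, cancel or flush on removal. A generic sketch of that shape, with a hypothetical dev_ctx:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct dev_ctx {
        struct work_struct worker;
        int pending;
};

static void dev_worker(struct work_struct *work)
{
        struct dev_ctx *ctx = container_of(work, struct dev_ctx, worker);

        ctx->pending = 0;       /* the heavy lifting runs here, in process context */
}

static void dev_init(struct dev_ctx *ctx)
{
        INIT_WORK(&ctx->worker, dev_worker);
}

static void dev_kick(struct dev_ctx *ctx)
{
        ctx->pending = 1;
        schedule_work(&ctx->worker);    /* safe to call from atomic context */
}

static void dev_remove(struct dev_ctx *ctx)
{
        cancel_work_sync(&ctx->worker);
}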
|
/Linux-v5.10/drivers/crypto/caam/ |
D | caamrng.c |
    41  struct work_struct worker;  member
    141  worker);  in caam_rng_worker()
    159  schedule_work(&ctx->worker);  in caam_read()
    168  flush_work(&ctx->worker);  in caam_cleanup()
    192  INIT_WORK(&ctx->worker, caam_rng_worker);  in caam_init()
|
/Linux-v5.10/tools/testing/selftests/bpf/prog_tests/ |
D | send_signal_sched_switch.c |
    18  static void *worker(void *p)  in worker() function
    49  err = pthread_create(threads + i, NULL, worker, NULL);  in test_send_signal_sched_switch()
|
/Linux-v5.10/tools/testing/selftests/powerpc/tm/ |
D | tm-vmx-unavail.c |
    27  void *worker(void *unused)  in worker() function
    104  pthread_create(&thread[i], NULL, &worker, NULL);  in tm_vmx_unavail_test()
|
/Linux-v5.10/drivers/platform/olpc/ |
D | olpc-ec.c |
    36  struct work_struct worker;  member
    80  struct olpc_ec_priv *ec = container_of(w, struct olpc_ec_priv, worker);  in olpc_ec_worker()
    106  schedule_work(&ec->worker);  in olpc_ec_worker()
    124  schedule_work(&ec->worker);  in queue_ec_descriptor()
    418  INIT_WORK(&ec->worker, olpc_ec_worker);  in olpc_ec_probe()
|
/Linux-v5.10/drivers/macintosh/ams/ |
D | ams-core.c |
    65  schedule_work(&ams_info.worker);  in ams_handle_irq()
    185  INIT_WORK(&ams_info.worker, ams_worker);  in ams_init()
    216  flush_work(&ams_info.worker);  in ams_sensor_detach()
|