/Linux-v4.19/kernel/ |
D | kthread.c |
    597  void __kthread_init_worker(struct kthread_worker *worker,  in __kthread_init_worker() argument
    601  memset(worker, 0, sizeof(struct kthread_worker));  in __kthread_init_worker()
    602  spin_lock_init(&worker->lock);  in __kthread_init_worker()
    603  lockdep_set_class_and_name(&worker->lock, key, name);  in __kthread_init_worker()
    604  INIT_LIST_HEAD(&worker->work_list);  in __kthread_init_worker()
    605  INIT_LIST_HEAD(&worker->delayed_work_list);  in __kthread_init_worker()
    626  struct kthread_worker *worker = worker_ptr;  in kthread_worker_fn() local
    633  WARN_ON(worker->task && worker->task != current);  in kthread_worker_fn()
    634  worker->task = current;  in kthread_worker_fn()
    636  if (worker->flags & KTW_FREEZABLE)  in kthread_worker_fn()
    [all …]
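The kthread.c hits above are the core of the kthread_worker machinery: __kthread_init_worker() zeroes the structure and sets up its lock and work lists, and kthread_worker_fn() is the thread function that records itself as the worker's task and then drains queued work. A minimal usage sketch against this v4.19 API (the function and work names are illustrative, not from the listing):

    #include <linux/kthread.h>

    static void my_work_fn(struct kthread_work *work)
    {
            pr_info("running on a dedicated kthread\n");
    }

    static int demo_kthread_worker(void)
    {
            struct kthread_worker *worker;
            struct kthread_work work;

            /* spawns a thread running kthread_worker_fn() internally */
            worker = kthread_create_worker(0, "demo_worker");
            if (IS_ERR(worker))
                    return PTR_ERR(worker);

            kthread_init_work(&work, my_work_fn);
            kthread_queue_work(worker, &work);
            kthread_flush_work(&work);      /* wait for my_work_fn() */

            kthread_destroy_worker(worker);
            return 0;
    }

The same create/queue/destroy sequence shows up in the fmr_pool.c and i8254.c entries below.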
|
D | workqueue.c |
    168  struct worker *manager; /* L: purely informational */
    250  struct worker *rescuer; /* I: rescue worker */
    407  #define for_each_pool_worker(worker, pool) \  argument
    408  list_for_each_entry((worker), &(pool)->workers, node) \
    816  static struct worker *first_idle_worker(struct worker_pool *pool)  in first_idle_worker()
    821  return list_first_entry(&pool->idle_list, struct worker, entry);  in first_idle_worker()
    835  struct worker *worker = first_idle_worker(pool);  in wake_up_worker() local
    837  if (likely(worker))  in wake_up_worker()
    838  wake_up_process(worker->task);  in wake_up_worker()
    854  struct worker *worker = kthread_data(task);  in wq_worker_waking_up() local
    [all …]
|
D | workqueue_internal.h |
    24  struct worker {  struct
    61  static inline struct worker *current_wq_worker(void)  in current_wq_worker()
|
D | async.c |
    329  struct worker *worker = current_wq_worker();  in current_is_async() local
    331  return worker && worker->current_func == async_run_entry_fn;  in current_is_async()
|
/Linux-v4.19/include/linux/ |
D | kthread.h |
    98   struct kthread_worker *worker;  member
    108  #define KTHREAD_WORKER_INIT(worker) { \  argument
    109  .lock = __SPIN_LOCK_UNLOCKED((worker).lock), \
    110  .work_list = LIST_HEAD_INIT((worker).work_list), \
    111  .delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
    125  #define DEFINE_KTHREAD_WORKER(worker) \  argument
    126  struct kthread_worker worker = KTHREAD_WORKER_INIT(worker)
    140  # define KTHREAD_WORKER_INIT_ONSTACK(worker) \  argument
    141  ({ kthread_init_worker(&worker); worker; })
    142  # define DEFINE_KTHREAD_WORKER_ONSTACK(worker) \  argument
    [all …]
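The kthread.h entry shows the static initializers behind these hits: DEFINE_KTHREAD_WORKER() builds a ready-to-use worker at compile time, but it stays inert until some thread runs kthread_worker_fn() on it. A hedged sketch of that hookup (identifiers illustrative); the cpufreq_schedutil.c entry further down does the same thing dynamically with kthread_init_worker() plus kthread_create():

    #include <linux/kthread.h>

    static DEFINE_KTHREAD_WORKER(demo_worker);  /* statically initialized */
    static struct kthread_work demo_work;

    static void demo_work_fn(struct kthread_work *work)
    {
            pr_info("static worker ran a work item\n");
    }

    static int demo_static_worker(void)
    {
            struct task_struct *task;

            /* give the worker a thread; it records itself as worker->task */
            task = kthread_run(kthread_worker_fn, &demo_worker, "demo_static");
            if (IS_ERR(task))
                    return PTR_ERR(task);

            kthread_init_work(&demo_work, demo_work_fn);
            kthread_queue_work(&demo_worker, &demo_work);
            kthread_flush_work(&demo_work);
            return 0;
    }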
|
/Linux-v4.19/tools/perf/bench/ |
D | futex-hash.c |
    44   struct worker {  struct
    68   struct worker *w = (struct worker *) arg;  in workerfn() argument
    125  struct worker *worker = NULL;  in bench_futex_hash() local
    145  worker = calloc(nthreads, sizeof(*worker));  in bench_futex_hash()
    146  if (!worker)  in bench_futex_hash()
    164  worker[i].tid = i;  in bench_futex_hash()
    165  worker[i].futex = calloc(nfutexes, sizeof(*worker[i].futex));  in bench_futex_hash()
    166  if (!worker[i].futex)  in bench_futex_hash()
    176  ret = pthread_create(&worker[i].thread, &thread_attr, workerfn,  in bench_futex_hash()
    177  (void *)(struct worker *) &worker[i]);  in bench_futex_hash()
    [all …]
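The futex benches in this directory all share one shape: calloc() an array of per-thread worker structs, pthread_create() each thread with its own slot as the argument, then join and free. A standalone userspace sketch of just that pattern (thread count and payload are illustrative, not the benchmark itself):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct worker {
            int tid;
            pthread_t thread;
    };

    static void *workerfn(void *arg)
    {
            struct worker *w = arg;
            printf("worker %d running\n", w->tid);
            return NULL;
    }

    int main(void)
    {
            struct worker *worker;
            int i, nthreads = 4;

            worker = calloc(nthreads, sizeof(*worker));
            if (!worker)
                    return 1;

            for (i = 0; i < nthreads; i++) {
                    worker[i].tid = i;
                    if (pthread_create(&worker[i].thread, NULL, workerfn,
                                       &worker[i]))
                            return 1;
            }
            for (i = 0; i < nthreads; i++)
                    pthread_join(worker[i].thread, NULL);

            free(worker);
            return 0;
    }

Build with -pthread; the same create/join/free sequence recurs in futex-lock-pi.c, futex-wake.c, and futex-requeue.c below.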
|
D | futex-lock-pi.c |
    24   struct worker {  struct
    32   static struct worker *worker;  variable
    80   struct worker *w = (struct worker *) arg;  in workerfn()
    117  static void create_threads(struct worker *w, pthread_attr_t thread_attr,  in create_threads()
    126  worker[i].tid = i;  in create_threads()
    129  worker[i].futex = calloc(1, sizeof(u_int32_t));  in create_threads()
    130  if (!worker[i].futex)  in create_threads()
    133  worker[i].futex = &global_futex;  in create_threads()
    141  if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i]))  in create_threads()
    169  worker = calloc(nthreads, sizeof(*worker));  in bench_futex_lock_pi()
    [all …]
|
D | futex-wake.c |
    40   pthread_t *worker;  variable
    145  worker = calloc(nthreads, sizeof(*worker));  in bench_futex_wake()
    146  if (!worker)  in bench_futex_wake()
    168  block_threads(worker, thread_attr, cpu);  in bench_futex_wake()
    195  ret = pthread_join(worker[i], NULL);  in bench_futex_wake()
    210  free(worker);  in bench_futex_wake()
|
D | futex-requeue.c |
    39   static pthread_t *worker;  variable
    137  worker = calloc(nthreads, sizeof(*worker));  in bench_futex_requeue()
    138  if (!worker)  in bench_futex_requeue()
    163  block_threads(worker, thread_attr, cpu);  in bench_futex_requeue()
    202  ret = pthread_join(worker[i], NULL);  in bench_futex_requeue()
    216  free(worker);  in bench_futex_requeue()
|
D | futex-wake-parallel.c |
    39   pthread_t worker;  member
    108  if (pthread_create(&td[i].worker, &thread_attr,  in wakeup_threads()
    116  if (pthread_join(td[i].worker, NULL))  in wakeup_threads()
|
/Linux-v4.19/Documentation/core-api/ |
D | workqueue.rst |
    20  queue is called workqueue and the thread is called worker.
    22  While there are work items on the workqueue the worker executes the
    24  there is no work item left on the workqueue the worker becomes idle.
    25  When a new work item gets queued, the worker begins executing again.
    32  worker thread per CPU and a single threaded (ST) wq had one worker
    42  worker pool. An MT wq could provide only one execution context per CPU
    60  * Use per-CPU unified worker pools shared by all wq to provide
    64  * Automatically regulate worker pool and level of concurrency so that
    80  Special purpose threads, called worker threads, execute the functions
    82  worker threads become idle. These worker threads are managed in so
    [all …]
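The excerpt describes the concurrency-managed workqueue model that the workqueue.c hits above implement: work items are queued, and shared per-CPU worker pools supply the threads that run them. From the caller's side the machinery reduces to a few calls; a minimal sketch using the system workqueue (names illustrative):

    #include <linux/printk.h>
    #include <linux/workqueue.h>

    static void my_work_fn(struct work_struct *work)
    {
            pr_info("executed by a worker from the shared pool\n");
    }

    static DECLARE_WORK(my_work, my_work_fn);

    static void demo_queue(void)
    {
            schedule_work(&my_work);        /* queue on system_wq */
            flush_work(&my_work);           /* wait until it has run */
    }

The olpc-ec.c and ams-core.c entries below use exactly this INIT_WORK()/schedule_work()/flush_work() triple on embedded work_structs.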
|
D | errseq.rst |
    49   Let me tell you a story about a worker drone. Now, he's a good worker
    114  Occasionally the big boss comes in for a spot check and asks the worker
    115  to do a one-off job for him. He's not really watching the worker
    119  He can just sample the current errseq_t in the worker, and then use that
|
/Linux-v4.19/drivers/thermal/ |
D | intel_powerclamp.c |
    92   struct kthread_worker *worker;  member
    420  kthread_queue_delayed_work(w_data->worker,  in clamp_balancing_func()
    452  kthread_queue_work(w_data->worker, &w_data->balancing_work);  in clamp_idle_injection_func()
    495  struct kthread_worker *worker;  in start_power_clamp_worker() local
    497  worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inject/%ld", cpu);  in start_power_clamp_worker()
    498  if (IS_ERR(worker))  in start_power_clamp_worker()
    501  w_data->worker = worker;  in start_power_clamp_worker()
    506  sched_setscheduler(worker->task, SCHED_FIFO, &sparam);  in start_power_clamp_worker()
    510  kthread_queue_work(w_data->worker, &w_data->balancing_work);  in start_power_clamp_worker()
    517  if (!w_data->worker)  in stop_power_clamp_worker()
    [all …]
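intel_powerclamp pins one kthread_worker per CPU and drives it with both immediate and delayed work items. A hedged sketch of that per-CPU pattern, assuming the same v4.19 calls shown above (the names and the one-jiffy delay are illustrative):

    #include <linux/kthread.h>

    static struct kthread_worker *worker;
    static struct kthread_delayed_work dwork;

    static void tick_fn(struct kthread_work *work)
    {
            /* re-arm: run again one jiffy later on the same worker */
            kthread_queue_delayed_work(worker, &dwork, 1);
    }

    static int demo_percpu_worker(int cpu)
    {
            worker = kthread_create_worker_on_cpu(cpu, 0, "demo_inject/%d", cpu);
            if (IS_ERR(worker))
                    return PTR_ERR(worker);

            kthread_init_delayed_work(&dwork, tick_fn);
            kthread_queue_delayed_work(worker, &dwork, 0);
            return 0;
    }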
|
/Linux-v4.19/drivers/infiniband/core/ |
D | fmr_pool.c |
    99   struct kthread_worker *worker;  member
    190  kthread_queue_work(pool->worker, &pool->work);  in ib_fmr_cleanup_func()
    260  pool->worker = kthread_create_worker(0, "ib_fmr(%s)", device->name);  in ib_create_fmr_pool()
    261  if (IS_ERR(pool->worker)) {  in ib_create_fmr_pool()
    263  ret = PTR_ERR(pool->worker);  in ib_create_fmr_pool()
    331  kthread_destroy_worker(pool->worker);  in ib_destroy_fmr_pool()
    381  kthread_queue_work(pool->worker, &pool->work);  in ib_flush_fmr_pool()
    493  kthread_queue_work(pool->worker, &pool->work);  in ib_fmr_pool_unmap()
|
/Linux-v4.19/drivers/gpu/drm/ |
D | drm_flip_work.c |
    107  queue_work(wq, &work->worker);  in drm_flip_work_commit()
    113  struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);  in flip_worker()
    153  INIT_WORK(&work->worker, flip_worker);  in drm_flip_work_init()
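Here the worker member is an ordinary work_struct: drm_flip_work batches queued values and, on commit, hands them to a callback running on a regular workqueue. A hedged sketch of the API as these hits suggest (the callback body and names are illustrative):

    #include <drm/drm_flip_work.h>

    static struct drm_flip_work unref_work;

    static void unref_fn(struct drm_flip_work *work, void *val)
    {
            /* called once per queued value, on the workqueue worker */
    }

    static void demo_flip_work(struct workqueue_struct *wq, void *fb)
    {
            drm_flip_work_init(&unref_work, "unref", unref_fn);
            drm_flip_work_queue(&unref_work, fb);   /* stash a value */
            drm_flip_work_commit(&unref_work, wq);  /* schedule processing */
    }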
|
/Linux-v4.19/drivers/platform/olpc/ |
D | olpc-ec.c |
    36   struct work_struct worker;  member
    70   struct olpc_ec_priv *ec = container_of(w, struct olpc_ec_priv, worker);  in olpc_ec_worker()
    96   schedule_work(&ec->worker);  in olpc_ec_worker()
    114  schedule_work(&ec->worker);  in queue_ec_descriptor()
    271  INIT_WORK(&ec->worker, olpc_ec_worker);  in olpc_ec_probe()
|
/Linux-v4.19/tools/testing/selftests/powerpc/tm/ |
D | tm-vmx-unavail.c |
    27   void *worker(void *unused)  in worker() function
    104  pthread_create(&thread[i], NULL, &worker, NULL);  in tm_vmx_unavail_test()
|
/Linux-v4.19/drivers/macintosh/ams/ |
D | ams-core.c |
    78   schedule_work(&ams_info.worker);  in ams_handle_irq()
    198  INIT_WORK(&ams_info.worker, ams_worker);  in ams_init()
    229  flush_work(&ams_info.worker);  in ams_sensor_detach()
|
D | ams.h |
    34   struct work_struct worker;  member
|
/Linux-v4.19/arch/x86/kvm/ |
D | i8254.c |
    215  kthread_queue_work(pit->worker, &pit->expired);  in kvm_pit_ack_irq()
    275  kthread_queue_work(pt->worker, &pt->expired);  in pit_timer_fn()
    670  pit->worker = kthread_create_worker(0, "kvm-pit/%d", pid_nr);  in kvm_create_pit()
    671  if (IS_ERR(pit->worker))  in kvm_create_pit()
    714  kthread_destroy_worker(pit->worker);  in kvm_create_pit()
    733  kthread_destroy_worker(pit->worker);  in kvm_free_pit()
|
D | i8254.h |
    48   struct kthread_worker *worker;  member
|
/Linux-v4.19/drivers/gpu/drm/i915/ |
D | intel_guc_ct.h |
    89   struct work_struct worker;  member
|
/Linux-v4.19/kernel/sched/ |
D | cpufreq_schedutil.c |
    39   struct kthread_worker worker;  member
    552  kthread_queue_work(&sg_policy->worker, &sg_policy->work);  in sugov_irq_work()
    649  kthread_init_worker(&sg_policy->worker);  in sugov_kthread_create()
    650  thread = kthread_create(kthread_worker_fn, &sg_policy->worker,  in sugov_kthread_create()
    681  kthread_flush_worker(&sg_policy->worker);  in sugov_kthread_stop()
|
/Linux-v4.19/include/drm/ |
D | drm_flip_work.h |
    76   struct work_struct worker;  member
|
/Linux-v4.19/drivers/vhost/ |
D | vhost.c |
    238  if (dev->worker) {  in vhost_work_flush()
    258  if (!dev->worker)  in vhost_work_queue()
    267  wake_up_process(dev->worker);  in vhost_work_queue()
    428  dev->worker = NULL;  in vhost_dev_init()
    494  struct task_struct *worker;  in vhost_dev_set_owner() local
    505  worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);  in vhost_dev_set_owner()
    506  if (IS_ERR(worker)) {  in vhost_dev_set_owner()
    507  err = PTR_ERR(worker);  in vhost_dev_set_owner()
    511  dev->worker = worker;  in vhost_dev_set_owner()
    512  wake_up_process(worker); /* avoid contributing to loadavg */  in vhost_dev_set_owner()
    [all …]
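vhost manages a bare task_struct rather than a kthread_worker: the thread is made with kthread_create() and vhost_work_queue() wakes it with wake_up_process(). A freshly created, never-woken kthread parks in an uninterruptible sleep, which counts toward loadavg, so it is woken immediately even before any work is queued (the comment at line 512). A hedged sketch of that raw pattern (the loop body is a placeholder for draining a real work list):

    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int my_worker_fn(void *data)
    {
            for (;;) {
                    set_current_state(TASK_INTERRUPTIBLE);
                    if (kthread_should_stop()) {
                            __set_current_state(TASK_RUNNING);
                            break;
                    }
                    /* ... pop and run queued work items here ... */
                    schedule();     /* sleep until wake_up_process() */
            }
            return 0;
    }

    static struct task_struct *demo_start_worker(void *dev)
    {
            struct task_struct *worker;

            worker = kthread_create(my_worker_fn, dev, "demo-%d", current->pid);
            if (IS_ERR(worker))
                    return worker;

            wake_up_process(worker);        /* the queueing side wakes it later too */
            return worker;
    }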
|