Lines matching refs: worker (identifier cross-references from kernel/kthread.c)

606 void __kthread_init_worker(struct kthread_worker *worker,  in __kthread_init_worker()  argument
610 memset(worker, 0, sizeof(struct kthread_worker)); in __kthread_init_worker()
611 raw_spin_lock_init(&worker->lock); in __kthread_init_worker()
612 lockdep_set_class_and_name(&worker->lock, key, name); in __kthread_init_worker()
613 INIT_LIST_HEAD(&worker->work_list); in __kthread_init_worker()
614 INIT_LIST_HEAD(&worker->delayed_work_list); in __kthread_init_worker()
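
These matches cover __kthread_init_worker(): the worker is zeroed, its raw spinlock is initialized and given a lockdep class, and both the immediate and the delayed work lists are set up. Callers normally reach it through the kthread_init_worker() wrapper. A minimal sketch of preparing a static worker in module code (the names here are hypothetical):

    #include <linux/kthread.h>

    static struct kthread_worker my_worker;    /* hypothetical example */

    static void prepare_worker(void)
    {
            /* zeroes the struct and sets up worker->lock, work_list and
             * delayed_work_list, as in lines 610-614 above */
            kthread_init_worker(&my_worker);
    }
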
635 struct kthread_worker *worker = worker_ptr; in kthread_worker_fn() local
642 WARN_ON(worker->task && worker->task != current); in kthread_worker_fn()
643 worker->task = current; in kthread_worker_fn()
645 if (worker->flags & KTW_FREEZABLE) in kthread_worker_fn()
653 raw_spin_lock_irq(&worker->lock); in kthread_worker_fn()
654 worker->task = NULL; in kthread_worker_fn()
655 raw_spin_unlock_irq(&worker->lock); in kthread_worker_fn()
660 raw_spin_lock_irq(&worker->lock); in kthread_worker_fn()
661 if (!list_empty(&worker->work_list)) { in kthread_worker_fn()
662 work = list_first_entry(&worker->work_list, in kthread_worker_fn()
666 worker->current_work = work; in kthread_worker_fn()
667 raw_spin_unlock_irq(&worker->lock); in kthread_worker_fn()
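
kthread_worker_fn() is the worker thread's main loop: it claims the worker (warning if another task already owns it, line 642), optionally marks itself freezable when KTW_FREEZABLE is set (line 645), and then repeatedly, under worker->lock, pops the first entry off work_list, records it as current_work, and drops the lock before running the callback (lines 660-667); on stop it clears worker->task under the lock (lines 653-655). A hand-initialized worker can be driven by spawning this function directly; a hedged sketch, reusing the hypothetical my_worker from above:

    static struct task_struct *my_task;

    static int start_worker(void)
    {
            my_task = kthread_run(kthread_worker_fn, &my_worker,
                                  "my_worker");
            return PTR_ERR_OR_ZERO(my_task);
    }
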
685 struct kthread_worker *worker; in __kthread_create_worker() local
689 worker = kzalloc(sizeof(*worker), GFP_KERNEL); in __kthread_create_worker()
690 if (!worker) in __kthread_create_worker()
693 kthread_init_worker(worker); in __kthread_create_worker()
698 task = __kthread_create_on_node(kthread_worker_fn, worker, in __kthread_create_worker()
706 worker->flags = flags; in __kthread_create_worker()
707 worker->task = task; in __kthread_create_worker()
709 return worker; in __kthread_create_worker()
712 kfree(worker); in __kthread_create_worker()
728 struct kthread_worker *worker; in kthread_create_worker() local
732 worker = __kthread_create_worker(-1, flags, namefmt, args); in kthread_create_worker()
735 return worker; in kthread_create_worker()
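
kthread_create_worker() is the packaged version of the above: it forwards to __kthread_create_worker() with cpu == -1, which kzalloc()s the worker (line 689), initializes it (line 693), spawns kthread_worker_fn() through __kthread_create_on_node() (line 698), and records the flags and task (lines 706-707); on failure the worker is freed (line 712) and an error pointer, not NULL, is returned. A usage sketch:

    struct kthread_worker *worker;

    worker = kthread_create_worker(0, "my_worker");
    if (IS_ERR(worker))
            return PTR_ERR(worker);
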
760 struct kthread_worker *worker; in kthread_create_worker_on_cpu() local
764 worker = __kthread_create_worker(cpu, flags, namefmt, args); in kthread_create_worker_on_cpu()
767 return worker; in kthread_create_worker_on_cpu()
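
kthread_create_worker_on_cpu() takes the same path with a real CPU number, so the resulting thread is bound to that CPU. A sketch, also exercising the KTW_FREEZABLE flag tested at line 645:

    /* worker thread pinned to CPU 1 and allowed to freeze on suspend */
    worker = kthread_create_worker_on_cpu(1, KTW_FREEZABLE,
                                          "my_worker/%d", 1);
    if (IS_ERR(worker))
            return PTR_ERR(worker);
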
776 static inline bool queuing_blocked(struct kthread_worker *worker, in queuing_blocked() argument
779 lockdep_assert_held(&worker->lock); in queuing_blocked()
784 static void kthread_insert_work_sanity_check(struct kthread_worker *worker, in kthread_insert_work_sanity_check() argument
787 lockdep_assert_held(&worker->lock); in kthread_insert_work_sanity_check()
790 WARN_ON_ONCE(work->worker && work->worker != worker); in kthread_insert_work_sanity_check()
794 static void kthread_insert_work(struct kthread_worker *worker, in kthread_insert_work() argument
798 kthread_insert_work_sanity_check(worker, work); in kthread_insert_work()
801 work->worker = worker; in kthread_insert_work()
802 if (!worker->current_work && likely(worker->task)) in kthread_insert_work()
803 wake_up_process(worker->task); in kthread_insert_work()
818 bool kthread_queue_work(struct kthread_worker *worker, in kthread_queue_work() argument
824 raw_spin_lock_irqsave(&worker->lock, flags); in kthread_queue_work()
825 if (!queuing_blocked(worker, work)) { in kthread_queue_work()
826 kthread_insert_work(worker, work, &worker->work_list); in kthread_queue_work()
829 raw_spin_unlock_irqrestore(&worker->lock, flags); in kthread_queue_work()
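
The queueing path: kthread_queue_work() takes worker->lock with interrupts disabled and consults queuing_blocked(), which (under the asserted lock, line 779) rejects work that is already queued or in the middle of being canceled; otherwise kthread_insert_work() runs its sanity checks (a work item must never be bound to two workers at once, line 790), stamps work->worker (line 801), and wakes the thread only when it is idle (lines 802-803). A sketch of defining and queueing a work item, with hypothetical names:

    static void my_work_fn(struct kthread_work *work)
    {
            pr_info("kthread work ran\n");
    }

    static struct kthread_work my_work;

    kthread_init_work(&my_work, my_work_fn);
    if (!kthread_queue_work(worker, &my_work))
            pr_debug("my_work was already pending\n");
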
846 struct kthread_worker *worker = work->worker; in kthread_delayed_work_timer_fn() local
853 if (WARN_ON_ONCE(!worker)) in kthread_delayed_work_timer_fn()
856 raw_spin_lock_irqsave(&worker->lock, flags); in kthread_delayed_work_timer_fn()
858 WARN_ON_ONCE(work->worker != worker); in kthread_delayed_work_timer_fn()
863 kthread_insert_work(worker, work, &worker->work_list); in kthread_delayed_work_timer_fn()
865 raw_spin_unlock_irqrestore(&worker->lock, flags); in kthread_delayed_work_timer_fn()
869 static void __kthread_queue_delayed_work(struct kthread_worker *worker, in __kthread_queue_delayed_work() argument
885 kthread_insert_work(worker, work, &worker->work_list); in __kthread_queue_delayed_work()
890 kthread_insert_work_sanity_check(worker, work); in __kthread_queue_delayed_work()
892 list_add(&work->node, &worker->delayed_work_list); in __kthread_queue_delayed_work()
893 work->worker = worker; in __kthread_queue_delayed_work()
913 bool kthread_queue_delayed_work(struct kthread_worker *worker, in kthread_queue_delayed_work() argument
921 raw_spin_lock_irqsave(&worker->lock, flags); in kthread_queue_delayed_work()
923 if (!queuing_blocked(worker, work)) { in kthread_queue_delayed_work()
924 __kthread_queue_delayed_work(worker, dwork, delay); in kthread_queue_delayed_work()
928 raw_spin_unlock_irqrestore(&worker->lock, flags); in kthread_queue_delayed_work()
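
Delayed queueing is split in two: __kthread_queue_delayed_work() either inserts the work immediately (line 885, the zero-delay case) or links it on worker->delayed_work_list, binds it to the worker (lines 892-893), and arms its timer; when the timer fires, kthread_delayed_work_timer_fn() (lines 846-865) revalidates the binding under worker->lock and moves the work onto the regular work_list. kthread_queue_delayed_work() wraps this with the same queuing_blocked() guard as the immediate path. Sketch:

    static struct kthread_delayed_work my_dwork;

    kthread_init_delayed_work(&my_dwork, my_work_fn);
    /* run my_work_fn() on the worker roughly 100 ms from now */
    kthread_queue_delayed_work(worker, &my_dwork,
                               msecs_to_jiffies(100));
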
957 struct kthread_worker *worker; in kthread_flush_work() local
960 worker = work->worker; in kthread_flush_work()
961 if (!worker) in kthread_flush_work()
964 raw_spin_lock_irq(&worker->lock); in kthread_flush_work()
966 WARN_ON_ONCE(work->worker != worker); in kthread_flush_work()
969 kthread_insert_work(worker, &fwork.work, work->node.next); in kthread_flush_work()
970 else if (worker->current_work == work) in kthread_flush_work()
971 kthread_insert_work(worker, &fwork.work, in kthread_flush_work()
972 worker->work_list.next); in kthread_flush_work()
976 raw_spin_unlock_irq(&worker->lock); in kthread_flush_work()
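
kthread_flush_work() waits for one specific item: under worker->lock it inserts a dedicated flush work either directly behind the target when it is still queued (line 969) or at the head of the list when the target is the worker's current_work (lines 970-972), then drops the lock and waits for the flush work's completion. Sketch:

    /* returns only after any pending or running my_work_fn() has finished */
    kthread_flush_work(&my_work);
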
1000 struct kthread_worker *worker = work->worker; in __kthread_cancel_work() local
1009 raw_spin_unlock_irqrestore(&worker->lock, *flags); in __kthread_cancel_work()
1011 raw_spin_lock_irqsave(&worker->lock, *flags); in __kthread_cancel_work()
1050 bool kthread_mod_delayed_work(struct kthread_worker *worker, in kthread_mod_delayed_work() argument
1058 raw_spin_lock_irqsave(&worker->lock, flags); in kthread_mod_delayed_work()
1061 if (!work->worker) in kthread_mod_delayed_work()
1065 WARN_ON_ONCE(work->worker != worker); in kthread_mod_delayed_work()
1073 __kthread_queue_delayed_work(worker, dwork, delay); in kthread_mod_delayed_work()
1075 raw_spin_unlock_irqrestore(&worker->lock, flags); in kthread_mod_delayed_work()
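
kthread_mod_delayed_work() re-arms a delayed work: if work->worker is still NULL the item was never queued and is simply queued fresh (line 1061); otherwise, after the usual ownership check (line 1065), __kthread_cancel_work() shuts down the pending timer, briefly releasing and retaking worker->lock (lines 1009-1011) so a concurrently firing timer callback can take the lock and finish, and the work is requeued with the new delay (line 1073). Sketch:

    /* push the pending expiry out to ~1 s, or queue it if not pending */
    kthread_mod_delayed_work(worker, &my_dwork, msecs_to_jiffies(1000));
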
1082 struct kthread_worker *worker = work->worker; in __kthread_cancel_work_sync() local
1086 if (!worker) in __kthread_cancel_work_sync()
1089 raw_spin_lock_irqsave(&worker->lock, flags); in __kthread_cancel_work_sync()
1091 WARN_ON_ONCE(work->worker != worker); in __kthread_cancel_work_sync()
1095 if (worker->current_work != work) in __kthread_cancel_work_sync()
1103 raw_spin_unlock_irqrestore(&worker->lock, flags); in __kthread_cancel_work_sync()
1105 raw_spin_lock_irqsave(&worker->lock, flags); in __kthread_cancel_work_sync()
1109 raw_spin_unlock_irqrestore(&worker->lock, flags); in __kthread_cancel_work_sync()
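
__kthread_cancel_work_sync() backs the kthread_cancel_work_sync() and kthread_cancel_delayed_work_sync() helpers: it bails out early when the work was never queued (line 1086), cancels any queued or timer state under worker->lock, and, when the target is the worker's current_work (line 1095), drops the lock, flushes the work, and retakes the lock before finishing (lines 1103-1105). Sketch, continuing the hypothetical names:

    /* cancel and wait; the work items may safely be freed afterwards */
    kthread_cancel_work_sync(&my_work);
    kthread_cancel_delayed_work_sync(&my_dwork);
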
1158 void kthread_flush_worker(struct kthread_worker *worker) in kthread_flush_worker() argument
1165 kthread_queue_work(worker, &fwork.work); in kthread_flush_worker()
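
kthread_flush_worker() drains the whole queue by queueing a flush work at the tail (line 1165) and waiting for it, so everything queued before the call has completed by the time it returns. Sketch:

    /* barrier: all work queued so far on this worker has finished */
    kthread_flush_worker(worker);
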
1178 void kthread_destroy_worker(struct kthread_worker *worker) in kthread_destroy_worker() argument
1182 task = worker->task; in kthread_destroy_worker()
1186 kthread_flush_worker(worker); in kthread_destroy_worker()
1188 WARN_ON(!list_empty(&worker->work_list)); in kthread_destroy_worker()
1189 kfree(worker); in kthread_destroy_worker()
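
kthread_destroy_worker() is the teardown: it flushes the worker (line 1186), stops the underlying task with kthread_stop() (that line does not match refs:worker, so it is absent above), warns if work is somehow still queued (line 1188), and frees the allocation. It should only be used on workers that came from kthread_create_worker() or kthread_create_worker_on_cpu(). Sketch:

    /* flushes remaining work, stops the thread, frees the worker */
    kthread_destroy_worker(worker);
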