/Linux-v5.10/virt/kvm/

async_pf.c
     45  static void async_pf_execute(struct work_struct *work)  [in async_pf_execute(), argument]
     48  container_of(work, struct kvm_async_pf, work);  [in async_pf_execute()]
    100  struct kvm_async_pf *work =  [in kvm_clear_async_pf_completion_queue(), local]
    102  typeof(*work), queue);  [in kvm_clear_async_pf_completion_queue()]
    103  list_del(&work->queue);  [in kvm_clear_async_pf_completion_queue()]
    109  if (!work->vcpu)  [in kvm_clear_async_pf_completion_queue()]
    114  flush_work(&work->work);  [in kvm_clear_async_pf_completion_queue()]
    116  if (cancel_work_sync(&work->work)) {  [in kvm_clear_async_pf_completion_queue()]
    117  mmput(work->mm);  [in kvm_clear_async_pf_completion_queue()]
    119  kmem_cache_free(async_pf_cache, work);  [in kvm_clear_async_pf_completion_queue()]
    [all …]

/Linux-v5.10/drivers/gpu/drm/

drm_flip_work.c
     57  void drm_flip_work_queue_task(struct drm_flip_work *work,  [in drm_flip_work_queue_task(), argument]
     62  spin_lock_irqsave(&work->lock, flags);  [in drm_flip_work_queue_task()]
     63  list_add_tail(&task->node, &work->queued);  [in drm_flip_work_queue_task()]
     64  spin_unlock_irqrestore(&work->lock, flags);  [in drm_flip_work_queue_task()]
     76  void drm_flip_work_queue(struct drm_flip_work *work, void *val)  [in drm_flip_work_queue(), argument]
     83  drm_flip_work_queue_task(work, task);  [in drm_flip_work_queue()]
     85  DRM_ERROR("%s could not allocate task!\n", work->name);  [in drm_flip_work_queue()]
     86  work->func(work, val);  [in drm_flip_work_queue()]
    101  void drm_flip_work_commit(struct drm_flip_work *work,  [in drm_flip_work_commit(), argument]
    106  spin_lock_irqsave(&work->lock, flags);  [in drm_flip_work_commit()]
    [all …]

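The matches above (lines 57-106) cover the whole drm_flip_work pattern: queue items from atomic context, then commit them to a workqueue. A minimal sketch of a driver using it; the crtc struct, callback name, and the choice of system_unbound_wq are illustrative assumptions, not taken from the listing:

#include <linux/workqueue.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_framebuffer.h>

/* Hypothetical driver state embedding a flip-work queue. */
struct my_crtc {
    struct drm_flip_work unref_work;
};

/* Runs later on the workqueue, where sleeping is allowed. */
static void unref_worker(struct drm_flip_work *work, void *val)
{
    drm_framebuffer_put(val);
}

static void my_crtc_init(struct my_crtc *c)
{
    drm_flip_work_init(&c->unref_work, "fb unref", unref_worker);
}

/* From the flip-done interrupt (atomic context): */
static void my_crtc_flip_done(struct my_crtc *c, struct drm_framebuffer *old_fb)
{
    drm_flip_work_queue(&c->unref_work, old_fb);              /* stash */
    drm_flip_work_commit(&c->unref_work, system_unbound_wq);  /* kick */
}

/* teardown: drm_flip_work_cleanup(&c->unref_work); */
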
drm_vblank_work.c
     48  struct drm_vblank_work *work, *next;  [in drm_handle_vblank_works(), local]
     54  list_for_each_entry_safe(work, next, &vblank->pending_work, node) {  [in drm_handle_vblank_works()]
     55  if (!drm_vblank_passed(count, work->count))  [in drm_handle_vblank_works()]
     58  list_del_init(&work->node);  [in drm_handle_vblank_works()]
     60  kthread_queue_work(vblank->worker, &work->base);  [in drm_handle_vblank_works()]
     72  struct drm_vblank_work *work, *next;  [in drm_vblank_cancel_pending_works(), local]
     76  list_for_each_entry_safe(work, next, &vblank->pending_work, node) {  [in drm_vblank_cancel_pending_works()]
     77  list_del_init(&work->node);  [in drm_vblank_cancel_pending_works()]
    106  int drm_vblank_work_schedule(struct drm_vblank_work *work,  [in drm_vblank_work_schedule(), argument]
    109  struct drm_vblank_crtc *vblank = work->vblank;  [in drm_vblank_work_schedule()]
    [all …]

/Linux-v5.10/include/trace/events/

workqueue.h
     26  struct work_struct *work),
     28  TP_ARGS(req_cpu, pwq, work),
     31  __field( void *, work )
     39  __entry->work = work;
     40  __entry->function = work->func;
     47  __entry->work, __entry->function, __entry->workqueue,
     61  TP_PROTO(struct work_struct *work),
     63  TP_ARGS(work),
     66  __field( void *, work )
     70  __entry->work = work;
    [all …]

/Linux-v5.10/kernel/

irq_work.c
     30  static bool irq_work_claim(struct irq_work *work)  [in irq_work_claim(), argument]
     34  oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->flags);  [in irq_work_claim()]
     53  static void __irq_work_queue_local(struct irq_work *work)  [in __irq_work_queue_local(), argument]
     56  if (atomic_read(&work->flags) & IRQ_WORK_LAZY) {  [in __irq_work_queue_local()]
     57  if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&  [in __irq_work_queue_local()]
     61  if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))  [in __irq_work_queue_local()]
     67  bool irq_work_queue(struct irq_work *work)  [in irq_work_queue(), argument]
     70  if (!irq_work_claim(work))  [in irq_work_queue()]
     75  __irq_work_queue_local(work);  [in irq_work_queue()]
     88  bool irq_work_queue_on(struct irq_work *work, int cpu)  [in irq_work_queue_on(), argument]
    [all …]

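For context, irq_work lets NMI-safe code defer a callback into hard-IRQ context on the queueing (or a chosen) CPU. A minimal usage sketch; the function and variable names are illustrative:

#include <linux/irq_work.h>
#include <linux/printk.h>

static void my_irq_work_fn(struct irq_work *work)
{
    /* Runs in hard-IRQ context shortly after being raised. */
    pr_info("deferred work ran\n");
}

static struct irq_work my_work;

static void my_init(void)
{
    init_irq_work(&my_work, my_irq_work_fn);
}

/* Callable from NMI or any context where sleeping or locking is
 * off-limits; returns false if the work was already claimed. */
static bool my_raise(void)
{
    return irq_work_queue(&my_work);
}
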
task_work.c
     32  int task_work_add(struct task_struct *task, struct callback_head *work,  [in task_work_add(), argument]
     42  work->next = head;  [in task_work_add()]
     43  } while (cmpxchg(&task->task_works, head, work) != head);  [in task_work_add()]
     87  struct callback_head *work;  [in task_work_cancel(), local]
     99  while ((work = READ_ONCE(*pprev))) {  [in task_work_cancel()]
    100  if (work->func != func)  [in task_work_cancel()]
    101  pprev = &work->next;  [in task_work_cancel()]
    102  else if (cmpxchg(pprev, work, work->next) == work)  [in task_work_cancel()]
    107  return work;  [in task_work_cancel()]
    121  struct callback_head *work, *head, *next;  [in task_work_run()], local]
    [all …]

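task_work_add() hangs a callback off a task so that it runs in that task's own context, typically on return to user mode. A sketch against the v5.10 API; TWA_RESUME is the usual notify mode here, and the callback name is illustrative:

#include <linux/task_work.h>
#include <linux/sched.h>

static void my_twork_fn(struct callback_head *cb)
{
    /* Runs in the target task's context, e.g. on its way back to
     * userspace, so current is the task it was queued on. */
}

static struct callback_head my_cb;

static int queue_on_current(void)
{
    init_task_work(&my_cb, my_twork_fn);
    /* Fails with -ESRCH if the task is already exiting. */
    return task_work_add(current, &my_cb, TWA_RESUME);
}
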
kthread.c
    673  struct kthread_work *work;  [in kthread_worker_fn(), local]
    696  work = NULL;  [in kthread_worker_fn()]
    699  work = list_first_entry(&worker->work_list,  [in kthread_worker_fn()]
    701  list_del_init(&work->node);  [in kthread_worker_fn()]
    703  worker->current_work = work;  [in kthread_worker_fn()]
    706  if (work) {  [in kthread_worker_fn()]
    708  work->func(work);  [in kthread_worker_fn()]
    814  struct kthread_work *work)  [in queuing_blocked(), argument]
    818  return !list_empty(&work->node) || work->canceling;  [in queuing_blocked()]
    822  struct kthread_work *work)  [in kthread_insert_work_sanity_check(), argument]
    [all …]

workqueue.c
    439  struct work_struct *work = addr;  [in work_is_static_object(), local]
    441  return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));  [in work_is_static_object()]
    450  struct work_struct *work = addr;  [in work_fixup_init(), local]
    454  cancel_work_sync(work);  [in work_fixup_init()]
    455  debug_object_init(work, &work_debug_descr);  [in work_fixup_init()]
    468  struct work_struct *work = addr;  [in work_fixup_free(), local]
    472  cancel_work_sync(work);  [in work_fixup_free()]
    473  debug_object_free(work, &work_debug_descr);  [in work_fixup_free()]
    488  static inline void debug_work_activate(struct work_struct *work)  [in debug_work_activate(), argument]
    490  debug_object_activate(work, &work_debug_descr);  [in debug_work_activate()]
    [all …]

/Linux-v5.10/include/linux/

completion.h
     36  #define COMPLETION_INITIALIZER(work) \  [argument]
     37  { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
     39  #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \  [argument]
     40  (*({ init_completion_map(&(work), &(map)); &(work); }))
     42  #define COMPLETION_INITIALIZER_ONSTACK(work) \  [argument]
     43  (*({ init_completion(&work); &work; }))
     53  #define DECLARE_COMPLETION(work) \  [argument]
     54  struct completion work = COMPLETION_INITIALIZER(work)
     69  # define DECLARE_COMPLETION_ONSTACK(work) \  [argument]
     70  struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
    [all …]

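completion.h pairs naturally with the work items elsewhere in this listing: one side sleeps in wait_for_completion(), the other signals with complete(). A minimal sketch with illustrative names:

#include <linux/completion.h>
#include <linux/workqueue.h>

static struct completion setup_done;

static void setup_fn(struct work_struct *work)
{
    /* ... slow initialization ... */
    complete(&setup_done);              /* wake the waiter */
}
static DECLARE_WORK(setup_work, setup_fn);

static void wait_for_setup(void)
{
    init_completion(&setup_done);
    schedule_work(&setup_work);
    wait_for_completion(&setup_done);   /* sleeps until complete() */
}
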
workqueue.h
     21  typedef void (*work_func_t)(struct work_struct *work);
     28  #define work_data_bits(work) ((unsigned long *)(&(work)->data))  [argument]
    116  struct work_struct work;  [member]
    125  struct work_struct work;  [member]
    158  static inline struct delayed_work *to_delayed_work(struct work_struct *work)  [in to_delayed_work(), argument]
    160  return container_of(work, struct delayed_work, work);  [in to_delayed_work()]
    163  static inline struct rcu_work *to_rcu_work(struct work_struct *work)  [in to_rcu_work(), argument]
    165  return container_of(work, struct rcu_work, work);  [in to_rcu_work()]
    169  struct work_struct work;  [member]
    192  .work = __WORK_INITIALIZER((n).work, (f)), \
    [all …]

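These are the core declarations behind most of the matches in this listing. The canonical usage embeds a work_struct in a driver structure and recovers it with container_of(); the struct and function names below are illustrative:

#include <linux/workqueue.h>

struct my_dev {
    int needs_reset;
    struct work_struct reset_work;
};

static void my_reset_fn(struct work_struct *work)
{
    struct my_dev *dev = container_of(work, struct my_dev, reset_work);

    /* Process context: taking mutexes and sleeping are fine here. */
    dev->needs_reset = 0;
}

/* probe:  INIT_WORK(&dev->reset_work, my_reset_fn);               */
/* event:  schedule_work(&dev->reset_work);                        */
/* remove: cancel_work_sync(&dev->reset_work);  waits if running   */
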
jump_label_ratelimit.h
     12  struct delayed_work work;  [member]
     18  struct delayed_work work;  [member]
     24  struct delayed_work work;  [member]
     28  __static_key_slow_dec_deferred(&(x)->key, &(x)->work, (x)->timeout)
     30  __static_key_slow_dec_deferred(&(x)->key.key, &(x)->work, (x)->timeout)
     33  __static_key_deferred_flush((x), &(x)->work)
     37  struct delayed_work *work,
     39  extern void __static_key_deferred_flush(void *key, struct delayed_work *work);
     43  extern void jump_label_update_timeout(struct work_struct *work);
     49  .work = __DELAYED_WORK_INITIALIZER((name).work, \
    [all …]

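The deferred static-key variants above exist to rate-limit expensive code patching when a key is toggled often; the delayed_work member is what batches the decrements. A sketch of the usual pattern, modeled loosely on in-tree users such as KVM's lapic code; all names here are illustrative assumptions:

#include <linux/jump_label.h>
#include <linux/jump_label_ratelimit.h>

static struct static_key_deferred my_feature_key;

static void my_feature_init(void)
{
    /* Batch decrements: patch out at most once per second. */
    jump_label_rate_limit(&my_feature_key, HZ);
}

static void my_feature_inc(void)
{
    static_key_slow_inc(&my_feature_key.key);       /* immediate */
}

static void my_feature_dec(void)
{
    static_key_slow_dec_deferred(&my_feature_key);  /* batched via the work */
}

/* hot path: if (static_key_false(&my_feature_key.key)) ... */
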
kthread.h
     82  typedef void (*kthread_work_func_t)(struct kthread_work *work);
    107  struct kthread_work work;  [member]
    117  #define KTHREAD_WORK_INIT(work, fn) { \  [argument]
    118  .node = LIST_HEAD_INIT((work).node), \
    123  .work = KTHREAD_WORK_INIT((dwork).work, (fn)), \
    131  #define DEFINE_KTHREAD_WORK(work, fn) \  [argument]
    132  struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
    160  #define kthread_init_work(work, fn) \  [argument]
    162  memset((work), 0, sizeof(struct kthread_work)); \
    163  INIT_LIST_HEAD(&(work)->node); \
    [all …]

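kthread_work is the dedicated-thread counterpart to the shared workqueues: everything queued to one kthread_worker runs sequentially on its own kernel thread (the loop in kernel/kthread.c's kthread_worker_fn() above). A minimal sketch with illustrative names:

#include <linux/kthread.h>
#include <linux/err.h>

struct my_ctx {
    struct kthread_worker *worker;
    struct kthread_work io_work;
    int pending;
};

static void my_io_fn(struct kthread_work *work)
{
    struct my_ctx *ctx = container_of(work, struct my_ctx, io_work);

    ctx->pending = 0;   /* runs in the dedicated kthread, may sleep */
}

static int my_ctx_start(struct my_ctx *ctx)
{
    ctx->worker = kthread_create_worker(0, "my-io-worker");
    if (IS_ERR(ctx->worker))
        return PTR_ERR(ctx->worker);
    kthread_init_work(&ctx->io_work, my_io_fn);
    kthread_queue_work(ctx->worker, &ctx->io_work);
    return 0;
}

/* teardown: kthread_flush_worker(ctx->worker);
 *           kthread_destroy_worker(ctx->worker); */
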
/Linux-v5.10/fs/btrfs/

async-thread.c
     61  struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)  [in btrfs_work_owner(), argument]
     63  return work->wq->fs_info;  [in btrfs_work_owner()]
    224  struct btrfs_work *work;  [in run_ordered_work(), local]
    233  work = list_entry(list->next, struct btrfs_work,  [in run_ordered_work()]
    235  if (!test_bit(WORK_DONE_BIT, &work->flags))  [in run_ordered_work()]
    244  if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))  [in run_ordered_work()]
    246  trace_btrfs_ordered_sched(work);  [in run_ordered_work()]
    248  work->ordered_func(work);  [in run_ordered_work()]
    252  list_del(&work->ordered_list);  [in run_ordered_work()]
    255  if (work == self) {  [in run_ordered_work()]
    [all …]

/Linux-v5.10/drivers/staging/octeon/

ethernet-rx.c
     63  static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)  [in cvm_oct_check_rcv_error(), argument]
     68  port = work->word0.pip.cn68xx.pknd;  [in cvm_oct_check_rcv_error()]
     70  port = work->word1.cn38xx.ipprt;  [in cvm_oct_check_rcv_error()]
     72  if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64))  [in cvm_oct_check_rcv_error()]
     81  if (work->word2.snoip.err_code == 5 ||  [in cvm_oct_check_rcv_error()]
     82  work->word2.snoip.err_code == 7) {  [in cvm_oct_check_rcv_error()]
     99  cvmx_phys_to_ptr(work->packet_ptr.s.addr);  [in cvm_oct_check_rcv_error()]
    102  while (i < work->word1.len - 1) {  [in cvm_oct_check_rcv_error()]
    111  work->packet_ptr.s.addr += i + 1;  [in cvm_oct_check_rcv_error()]
    112  work->word1.len -= i + 5;  [in cvm_oct_check_rcv_error()]
    [all …]

ethernet-tx.c
    517  struct cvmx_wqe *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);  [in cvm_oct_xmit_pow(), local]
    519  if (unlikely(!work)) {  [in cvm_oct_xmit_pow()]
    532  cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);  [in cvm_oct_xmit_pow()]
    562  work->word0.pip.cn38xx.hw_chksum = skb->csum;  [in cvm_oct_xmit_pow()]
    563  work->word1.len = skb->len;  [in cvm_oct_xmit_pow()]
    564  cvmx_wqe_set_port(work, priv->port);  [in cvm_oct_xmit_pow()]
    565  cvmx_wqe_set_qos(work, priv->port & 0x7);  [in cvm_oct_xmit_pow()]
    566  cvmx_wqe_set_grp(work, pow_send_group);  [in cvm_oct_xmit_pow()]
    567  work->word1.tag_type = CVMX_HELPER_INPUT_TAG_TYPE;  [in cvm_oct_xmit_pow()]
    568  work->word1.tag = pow_send_group; /* FIXME */  [in cvm_oct_xmit_pow()]
    [all …]

/Linux-v5.10/fs/

io-wq.c
    202  struct io_wq_work *work)  [in io_work_get_acct(), argument]
    204  if (work->flags & IO_WQ_WORK_UNBOUND)  [in io_work_get_acct()]
    346  struct io_wq_work *work)  [in __io_worker_busy(), argument]
    361  work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;  [in __io_worker_busy()]
    397  static inline unsigned int io_get_work_hash(struct io_wq_work *work)  [in io_get_work_hash(), argument]
    399  return work->flags >> IO_WQ_HASH_SHIFT;  [in io_get_work_hash()]
    406  struct io_wq_work *work, *tail;  [in io_get_next_work(), local]
    410  work = container_of(node, struct io_wq_work, list);  [in io_get_next_work()]
    413  if (!io_wq_is_hashed(work)) {  [in io_get_next_work()]
    415  return work;  [in io_get_next_work()]
    [all …]

fs-writeback.c
    162  struct wb_writeback_work *work)  [in finish_writeback_work(), argument]
    164  struct wb_completion *done = work->done;  [in finish_writeback_work()]
    166  if (work->auto_free)  [in finish_writeback_work()]
    167  kfree(work);  [in finish_writeback_work()]
    178  struct wb_writeback_work *work)  [in wb_queue_work(), argument]
    180  trace_writeback_queue(wb, work);  [in wb_queue_work()]
    182  if (work->done)  [in wb_queue_work()]
    183  atomic_inc(&work->done->cnt);  [in wb_queue_work()]
    188  list_add_tail(&work->list, &wb->work_list);  [in wb_queue_work()]
    191  finish_writeback_work(wb, work);  [in wb_queue_work()]
    [all …]

/Linux-v5.10/drivers/accessibility/speakup/

selection.c
     20  struct work_struct work;  [member]
     25  static void __speakup_set_selection(struct work_struct *work)  [in __speakup_set_selection(), argument]
     28  container_of(work, struct speakup_selection_work, work);  [in __speakup_set_selection()]
     58  .work = __WORK_INITIALIZER(speakup_sel_work.work,
     87  schedule_work_on(WORK_CPU_UNBOUND, &speakup_sel_work.work);  [in speakup_set_selection()]
     96  cancel_work_sync(&speakup_sel_work.work);  [in speakup_cancel_selection()]
    106  static void __speakup_paste_selection(struct work_struct *work)  [in __speakup_paste_selection(), argument]
    109  container_of(work, struct speakup_selection_work, work);  [in __speakup_paste_selection()]
    117  .work = __WORK_INITIALIZER(speakup_paste_work.work,
    129  schedule_work_on(WORK_CPU_UNBOUND, &speakup_paste_work.work);  [in speakup_paste_selection()]
    [all …]

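selection.c shows the statically initialized flavor of the same embed-and-container_of pattern, using __WORK_INITIALIZER directly. A stripped-down sketch; the payload field and names are illustrative:

#include <linux/workqueue.h>

struct sel_work {
    struct work_struct work;
    void *payload;      /* guarded by a lock in the real driver */
};

static void do_selection(struct work_struct *work)
{
    struct sel_work *sw = container_of(work, struct sel_work, work);

    /* ... consume ... */
    sw->payload = NULL;
}

static struct sel_work my_sel_work = {
    .work = __WORK_INITIALIZER(my_sel_work.work, do_selection),
};

/* trigger:  schedule_work_on(WORK_CPU_UNBOUND, &my_sel_work.work); */
/* teardown: cancel_work_sync(&my_sel_work.work);                   */
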
/Linux-v5.10/drivers/gpu/drm/amd/amdgpu/

amdgpu_display.c
     46  struct amdgpu_flip_work *work =  [in amdgpu_display_flip_callback(), local]
     50  schedule_work(&work->flip_work.work);  [in amdgpu_display_flip_callback()]
     53  static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,  [in amdgpu_display_flip_handle_fence(), argument]
     63  if (!dma_fence_add_callback(fence, &work->cb,  [in amdgpu_display_flip_handle_fence()]
     74  container_of(__work, struct delayed_work, work);  [in amdgpu_display_flip_work_func()]
     75  struct amdgpu_flip_work *work =  [in amdgpu_display_flip_work_func(), local]
     77  struct amdgpu_device *adev = work->adev;  [in amdgpu_display_flip_work_func()]
     78  struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];  [in amdgpu_display_flip_work_func()]
     85  if (amdgpu_display_flip_handle_fence(work, &work->excl))  [in amdgpu_display_flip_work_func()]
     88  for (i = 0; i < work->shared_count; ++i)  [in amdgpu_display_flip_work_func()]
    [all …]

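amdgpu_display_flip_work_func() (line 74 above) illustrates a subtlety: a delayed_work handler still receives the inner work_struct, so reaching the containing object takes two container_of() steps. A generic sketch of that unwrapping, with illustrative names:

#include <linux/workqueue.h>
#include <linux/printk.h>

struct flip_ctx {
    struct delayed_work flip_work;
    int crtc_id;
};

static void flip_work_func(struct work_struct *__work)
{
    /* Step 1: work_struct -> delayed_work (what to_delayed_work() does). */
    struct delayed_work *dwork = to_delayed_work(__work);
    /* Step 2: delayed_work -> containing object. */
    struct flip_ctx *ctx = container_of(dwork, struct flip_ctx, flip_work);

    pr_debug("flip on crtc %d\n", ctx->crtc_id);
}

/* init: INIT_DELAYED_WORK(&ctx->flip_work, flip_work_func);               */
/* run now, as amdgpu does: schedule_work(&ctx->flip_work.work);           */
/* or later: schedule_delayed_work(&ctx->flip_work, msecs_to_jiffies(5));  */
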
/Linux-v5.10/drivers/net/wireless/st/cw1200/

sta.h
     58  void cw1200_event_handler(struct work_struct *work);
     59  void cw1200_bss_loss_work(struct work_struct *work);
     60  void cw1200_bss_params_work(struct work_struct *work);
     61  void cw1200_keep_alive_work(struct work_struct *work);
     62  void cw1200_tx_failure_work(struct work_struct *work);
     78  void cw1200_join_timeout(struct work_struct *work);
     79  void cw1200_unjoin_work(struct work_struct *work);
     80  void cw1200_join_complete_work(struct work_struct *work);
     81  void cw1200_wep_key_work(struct work_struct *work);
     84  void cw1200_update_filtering_work(struct work_struct *work);
    [all …]

/Linux-v5.10/drivers/infiniband/core/

cm.c
     91  struct cm_work *work);
    228  struct delayed_work work;  [member]
    239  struct cm_work work;  [member]
    299  static void cm_work_handler(struct work_struct *work);
    718  __be32 remote_id = timewait_info->work.remote_id;  [in cm_insert_remote_id()]
    724  if (be32_lt(remote_id, cur_timewait_info->work.remote_id))  [in cm_insert_remote_id()]
    726  else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))  [in cm_insert_remote_id()]
    752  if (be32_lt(remote_id, timewait_info->work.remote_id))  [in cm_find_remote_id()]
    754  else if (be32_gt(remote_id, timewait_info->work.remote_id))  [in cm_find_remote_id()]
    761  res = cm_acquire_id(timewait_info->work.local_id,  [in cm_find_remote_id()]
    [all …]

/Linux-v5.10/lib/

once.c
      8  struct work_struct work;  [member]
     14  struct once_work *work;  [in once_deferred(), local]
     16  work = container_of(w, struct once_work, work);  [in once_deferred()]
     17  BUG_ON(!static_key_enabled(work->key));  [in once_deferred()]
     18  static_branch_disable(work->key);  [in once_deferred()]
     19  kfree(work);  [in once_deferred()]
     30  INIT_WORK(&w->work, once_deferred);  [in once_disable_jump()]
     32  schedule_work(&w->work);  [in once_disable_jump()]

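once.c is the engine behind DO_ONCE() and get_random_once(): the first caller runs the function, then the deferred work item above disables the static branch so later calls skip it entirely. Typical use, with a hypothetical hash seed:

#include <linux/once.h>
#include <linux/random.h>

static u32 hash_seed;

static u32 my_hash(u32 x)
{
    /* First call seeds hash_seed; afterwards the static key patches
     * this line out to a no-op (via once.c's deferred work). */
    get_random_once(&hash_seed, sizeof(hash_seed));
    return x ^ hash_seed;
}
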
/Linux-v5.10/drivers/gpu/drm/i915/gem/

i915_gem_client_blt.c
     98  struct work_struct work;  [member]
    133  static void clear_pages_signal_irq_worker(struct irq_work *work)  [in clear_pages_signal_irq_worker(), argument]
    135  struct clear_pages_work *w = container_of(work, typeof(*w), irq_work);  [in clear_pages_signal_irq_worker()]
    156  static void clear_pages_worker(struct work_struct *work)  [in clear_pages_worker(), argument]
    158  struct clear_pages_work *w = container_of(work, typeof(*w), work);  [in clear_pages_worker()]
    298  schedule_work(&w->work);  [in clear_pages_work_notify()]
    318  struct clear_pages_work *work;  [in i915_gem_schedule_fill_pages_blt(), local]
    326  work = kmalloc(sizeof(*work), GFP_KERNEL);  [in i915_gem_schedule_fill_pages_blt()]
    327  if (!work) {  [in i915_gem_schedule_fill_pages_blt()]
    332  work->value = value;  [in i915_gem_schedule_fill_pages_blt()]
    [all …]

i915_gem_userptr.c
     24  struct rcu_work work;  [member]
    361  __i915_mm_struct_free__worker(struct work_struct *work)  [in __i915_mm_struct_free__worker(), argument]
    363  struct i915_mm_struct *mm = container_of(work, typeof(*mm), work.work);  [in __i915_mm_struct_free__worker()]
    379  INIT_RCU_WORK(&mm->work, __i915_mm_struct_free__worker);  [in __i915_mm_struct_free()]
    380  queue_rcu_work(system_wq, &mm->work);  [in __i915_mm_struct_free()]
    394  struct work_struct work;  [member]
    445  struct get_pages_work *work = container_of(_work, typeof(*work), work);  [in __i915_gem_userptr_get_pages_worker(), local]
    446  struct drm_i915_gem_object *obj = work->obj;  [in __i915_gem_userptr_get_pages_worker()]
    489  if (obj->userptr.work == &work->work) {  [in __i915_gem_userptr_get_pages_worker()]
    501  obj->userptr.work = ERR_CAST(pages);  [in __i915_gem_userptr_get_pages_worker()]
    [all …]

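i915_gem_userptr.c (lines 361-380 above) shows rcu_work: queue_rcu_work() defers the handler until after an RCU grace period has elapsed. A generic sketch of the same free-after-grace-period idiom, with illustrative names:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct holder {
    struct rcu_work work;
    /* ... RCU-visible state ... */
};

static void holder_free_worker(struct work_struct *work)
{
    /* to_rcu_work() climbs from the embedded work_struct. */
    struct holder *h = container_of(to_rcu_work(work), struct holder, work);

    kfree(h);   /* safe: a grace period has already elapsed */
}

static void holder_free(struct holder *h)
{
    INIT_RCU_WORK(&h->work, holder_free_worker);
    queue_rcu_work(system_wq, &h->work);
}
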
/Linux-v5.10/kernel/locking/

test-ww_mutex.c
     20  struct work_struct work;  [member]
     31  static void test_mutex_work(struct work_struct *work)  [in test_mutex_work(), argument]
     33  struct test_mutex *mtx = container_of(work, typeof(*mtx), work);  [in test_mutex_work()]
     58  INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);  [in __test_mutex()]
     64  schedule_work(&mtx.work);  [in __test_mutex()]
     92  flush_work(&mtx.work);  [in __test_mutex()]
     93  destroy_work_on_stack(&mtx.work);  [in __test_mutex()]
    148  struct work_struct work;  [member]
    157  static void test_abba_work(struct work_struct *work)  [in test_abba_work(), argument]
    159  struct test_abba *abba = container_of(work, typeof(*abba), work);  [in test_abba_work()]
    [all …]

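__test_mutex() (lines 58-93 above) demonstrates on-stack work items, which need the _ONSTACK initializer and an explicit destroy for the debug-objects machinery. The bare pattern, with illustrative names:

#include <linux/workqueue.h>

struct stack_task {
    struct work_struct work;
    int result;
};

static void stack_task_fn(struct work_struct *work)
{
    struct stack_task *t = container_of(work, struct stack_task, work);

    t->result = 42;     /* stand-in for the real job */
}

static int run_and_wait(void)
{
    struct stack_task t = { .result = 0 };

    INIT_WORK_ONSTACK(&t.work, stack_task_fn);
    schedule_work(&t.work);
    flush_work(&t.work);            /* must finish before t leaves scope */
    destroy_work_on_stack(&t.work); /* pairs with the ONSTACK init */
    return t.result;
}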