/Linux-v6.1/virt/kvm/ |
D | async_pf.c |
     45  static void async_pf_execute(struct work_struct *work)  in async_pf_execute() argument
     48  container_of(work, struct kvm_async_pf, work);  in async_pf_execute()
    100  struct kvm_async_pf *work =  in kvm_clear_async_pf_completion_queue() local
    102  typeof(*work), queue);  in kvm_clear_async_pf_completion_queue()
    103  list_del(&work->queue);  in kvm_clear_async_pf_completion_queue()
    109  if (!work->vcpu)  in kvm_clear_async_pf_completion_queue()
    114  flush_work(&work->work);  in kvm_clear_async_pf_completion_queue()
    116  if (cancel_work_sync(&work->work)) {  in kvm_clear_async_pf_completion_queue()
    117  mmput(work->mm);  in kvm_clear_async_pf_completion_queue()
    119  kmem_cache_free(async_pf_cache, work);  in kvm_clear_async_pf_completion_queue()
    [all …]
|
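The kvm_clear_async_pf_completion_queue() hits show the flush-vs-cancel decision: cancel_work_sync() returns true only if it dequeued the item before its handler ran, so the canceling path must then perform the cleanup (mmput(), freeing) the handler would have done. A minimal sketch of that ownership hand-off, with hypothetical names (my_ctx, my_handler, my_teardown):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_ctx {
        struct work_struct work;
        void *resource;
};

static void my_handler(struct work_struct *work)
{
        struct my_ctx *ctx = container_of(work, struct my_ctx, work);

        /* consume ctx->resource, then release ownership */
        kfree(ctx);
}

static void my_teardown(struct my_ctx *ctx)
{
        if (cancel_work_sync(&ctx->work))
                kfree(ctx);     /* handler never ran: cleanup is ours */
        /* else the handler already ran (and freed ctx) */
}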
/Linux-v6.1/drivers/gpu/drm/ |
D | drm_flip_work.c |
     57  void drm_flip_work_queue_task(struct drm_flip_work *work,  in drm_flip_work_queue_task() argument
     62  spin_lock_irqsave(&work->lock, flags);  in drm_flip_work_queue_task()
     63  list_add_tail(&task->node, &work->queued);  in drm_flip_work_queue_task()
     64  spin_unlock_irqrestore(&work->lock, flags);  in drm_flip_work_queue_task()
     76  void drm_flip_work_queue(struct drm_flip_work *work, void *val)  in drm_flip_work_queue() argument
     83  drm_flip_work_queue_task(work, task);  in drm_flip_work_queue()
     85  DRM_ERROR("%s could not allocate task!\n", work->name);  in drm_flip_work_queue()
     86  work->func(work, val);  in drm_flip_work_queue()
    101  void drm_flip_work_commit(struct drm_flip_work *work,  in drm_flip_work_commit() argument
    106  spin_lock_irqsave(&work->lock, flags);  in drm_flip_work_commit()
    [all …]
|
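drm_flip_work lets irq-context code queue deferred cleanups and batch-run them later on a workqueue; note the fallback at line 86, where drm_flip_work_queue() calls work->func() directly if the task allocation fails. A usage sketch for the common "unref the old framebuffer after a flip" case; struct my_crtc and the function names are hypothetical:

#include <drm/drm_flip_work.h>
#include <drm/drm_framebuffer.h>

struct my_crtc {
        struct drm_flip_work unref_work;
};

static void unref_fb(struct drm_flip_work *work, void *val)
{
        drm_framebuffer_put(val);       /* deferred to workqueue context */
}

static void my_crtc_init(struct my_crtc *c)
{
        drm_flip_work_init(&c->unref_work, "fb_unref", unref_fb);
}

/* flip submission: stash the outgoing fb (may be called under locks) */
static void my_flip(struct my_crtc *c, struct drm_framebuffer *old_fb)
{
        drm_flip_work_queue(&c->unref_work, old_fb);
}

/* flip-done irq: kick everything queued so far onto a workqueue */
static void my_flip_done(struct my_crtc *c)
{
        drm_flip_work_commit(&c->unref_work, system_unbound_wq);
}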
D | drm_vblank_work.c |
     48  struct drm_vblank_work *work, *next;  in drm_handle_vblank_works() local
     54  list_for_each_entry_safe(work, next, &vblank->pending_work, node) {  in drm_handle_vblank_works()
     55  if (!drm_vblank_passed(count, work->count))  in drm_handle_vblank_works()
     58  list_del_init(&work->node);  in drm_handle_vblank_works()
     60  kthread_queue_work(vblank->worker, &work->base);  in drm_handle_vblank_works()
     72  struct drm_vblank_work *work, *next;  in drm_vblank_cancel_pending_works() local
     76  list_for_each_entry_safe(work, next, &vblank->pending_work, node) {  in drm_vblank_cancel_pending_works()
     77  list_del_init(&work->node);  in drm_vblank_cancel_pending_works()
    106  int drm_vblank_work_schedule(struct drm_vblank_work *work,  in drm_vblank_work_schedule() argument
    109  struct drm_vblank_crtc *vblank = work->vblank;  in drm_vblank_work_schedule()
    [all …]
|
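drm_handle_vblank_works() compares each pending item's target count against the current vblank count and hands ripe ones to a per-CRTC kthread worker. Consumers arm a work for a specific sequence number; a hedged sketch (my_vblank_fn and my_arm are invented names):

#include <drm/drm_vblank_work.h>

static void my_vblank_fn(struct kthread_work *base)
{
        struct drm_vblank_work *work = to_drm_vblank_work(base);

        /* runs on the CRTC's vblank worker once the target has passed */
        pr_debug("vblank work for count %llu fired\n", work->count);
}

static void my_arm(struct drm_vblank_work *work, struct drm_crtc *crtc,
                   u64 target)
{
        drm_vblank_work_init(work, crtc, my_vblank_fn);
        /* nextonmiss=true: a missed target fires on the next vblank */
        drm_vblank_work_schedule(work, target, true);
}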
/Linux-v6.1/include/trace/events/ |
D | workqueue.h |
    26  struct work_struct *work),
    28  TP_ARGS(req_cpu, pwq, work),
    31  __field( void *, work )
    39  __entry->work = work;
    40  __entry->function = work->func;
    47  __entry->work, __entry->function, __get_str(workqueue),
    61  TP_PROTO(struct work_struct *work),
    63  TP_ARGS(work),
    66  __field( void *, work )
    70  __entry->work = work;
    [all …]
|
/Linux-v6.1/fs/ksmbd/ |
D | ksmbd_work.c |
    21  struct ksmbd_work *work = kmem_cache_zalloc(work_cache, GFP_KERNEL);  in ksmbd_alloc_work_struct() local
    23  if (work) {  in ksmbd_alloc_work_struct()
    24  work->compound_fid = KSMBD_NO_FID;  in ksmbd_alloc_work_struct()
    25  work->compound_pfid = KSMBD_NO_FID;  in ksmbd_alloc_work_struct()
    26  INIT_LIST_HEAD(&work->request_entry);  in ksmbd_alloc_work_struct()
    27  INIT_LIST_HEAD(&work->async_request_entry);  in ksmbd_alloc_work_struct()
    28  INIT_LIST_HEAD(&work->fp_entry);  in ksmbd_alloc_work_struct()
    29  INIT_LIST_HEAD(&work->interim_entry);  in ksmbd_alloc_work_struct()
    31  return work;  in ksmbd_alloc_work_struct()
    34  void ksmbd_free_work_struct(struct ksmbd_work *work)  in ksmbd_free_work_struct() argument
    [all …]
|
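Together with server.c below, this gives the request lifecycle: the receive path allocates a ksmbd_work, attaches the connection and raw request buffer, and queues it on the shared workqueue; the handler frees it with ksmbd_free_work_struct() once the response is written. A condensed, hedged paraphrase of that flow as it appears in server.c (queue_request() is a stand-in name; handle_ksmbd_work() and ksmbd_queue_work() are the real symbols):

static int queue_request(struct ksmbd_conn *conn)
{
        struct ksmbd_work *work = ksmbd_alloc_work_struct();

        if (!work)
                return -ENOMEM;

        work->conn = conn;
        work->request_buf = conn->request_buf;  /* take over the rx buffer */
        conn->request_buf = NULL;

        ksmbd_conn_enqueue_request(work);
        INIT_WORK(&work->work, handle_ksmbd_work);      /* server.c handler */
        ksmbd_queue_work(work);                         /* runs on ksmbd_wq */
        return 0;
}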
D | server.c |
     92  static inline int check_conn_state(struct ksmbd_work *work)  in check_conn_state() argument
     96  if (ksmbd_conn_exiting(work) || ksmbd_conn_need_reconnect(work)) {  in check_conn_state()
     97  rsp_hdr = work->response_buf;  in check_conn_state()
    107  static int __process_request(struct ksmbd_work *work, struct ksmbd_conn *conn,  in __process_request() argument
    114  if (check_conn_state(work))  in __process_request()
    117  if (ksmbd_verify_smb_message(work))  in __process_request()
    120  command = conn->ops->get_cmd_val(work);  in __process_request()
    125  conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER);  in __process_request()
    132  conn->ops->set_rsp_status(work, STATUS_NOT_IMPLEMENTED);  in __process_request()
    136  if (work->sess && conn->ops->is_sign_req(work, command)) {  in __process_request()
    [all …]
|
D | smb2pdu.h |
    466  bool is_smb2_neg_cmd(struct ksmbd_work *work);
    467  bool is_smb2_rsp(struct ksmbd_work *work);
    469  u16 get_smb2_cmd_val(struct ksmbd_work *work);
    470  void set_smb2_rsp_status(struct ksmbd_work *work, __le32 err);
    471  int init_smb2_rsp_hdr(struct ksmbd_work *work);
    472  int smb2_allocate_rsp_buf(struct ksmbd_work *work);
    473  bool is_chained_smb2_message(struct ksmbd_work *work);
    474  int init_smb2_neg_rsp(struct ksmbd_work *work);
    475  void smb2_set_err_rsp(struct ksmbd_work *work);
    476  int smb2_check_user_session(struct ksmbd_work *work);
    [all …]
|
D | smb2pdu.c |
     41  static void __wbuf(struct ksmbd_work *work, void **req, void **rsp)  in __wbuf() argument
     43  if (work->next_smb2_rcv_hdr_off) {  in __wbuf()
     44  *req = ksmbd_req_buf_next(work);  in __wbuf()
     45  *rsp = ksmbd_resp_buf_next(work);  in __wbuf()
     47  *req = smb2_get_msg(work->request_buf);  in __wbuf()
     48  *rsp = smb2_get_msg(work->response_buf);  in __wbuf()
     94  int smb2_get_ksmbd_tcon(struct ksmbd_work *work)  in smb2_get_ksmbd_tcon() argument
     96  struct smb2_hdr *req_hdr = smb2_get_msg(work->request_buf);  in smb2_get_ksmbd_tcon()
    100  work->tcon = NULL;  in smb2_get_ksmbd_tcon()
    108  if (xa_empty(&work->sess->tree_conns)) {  in smb2_get_ksmbd_tcon()
    [all …]
|
D | connection.h |
    149  int ksmbd_conn_write(struct ksmbd_work *work);
    158  void ksmbd_conn_enqueue_request(struct ksmbd_work *work);
    159  int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work);
    171  static inline bool ksmbd_conn_good(struct ksmbd_work *work)  in ksmbd_conn_good() argument
    173  return work->conn->status == KSMBD_SESS_GOOD;  in ksmbd_conn_good()
    176  static inline bool ksmbd_conn_need_negotiate(struct ksmbd_work *work)  in ksmbd_conn_need_negotiate() argument
    178  return work->conn->status == KSMBD_SESS_NEED_NEGOTIATE;  in ksmbd_conn_need_negotiate()
    181  static inline bool ksmbd_conn_need_reconnect(struct ksmbd_work *work)  in ksmbd_conn_need_reconnect() argument
    183  return work->conn->status == KSMBD_SESS_NEED_RECONNECT;  in ksmbd_conn_need_reconnect()
    186  static inline bool ksmbd_conn_exiting(struct ksmbd_work *work)  in ksmbd_conn_exiting() argument
    [all …]
|
D | connection.c |
    110  void ksmbd_conn_enqueue_request(struct ksmbd_work *work)  in ksmbd_conn_enqueue_request() argument
    112  struct ksmbd_conn *conn = work->conn;  in ksmbd_conn_enqueue_request()
    115  if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE) {  in ksmbd_conn_enqueue_request()
    117  work->syncronous = true;  in ksmbd_conn_enqueue_request()
    123  list_add_tail(&work->request_entry, requests_queue);  in ksmbd_conn_enqueue_request()
    128  int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)  in ksmbd_conn_try_dequeue_request() argument
    130  struct ksmbd_conn *conn = work->conn;  in ksmbd_conn_try_dequeue_request()
    133  if (list_empty(&work->request_entry) &&  in ksmbd_conn_try_dequeue_request()
    134  list_empty(&work->async_request_entry))  in ksmbd_conn_try_dequeue_request()
    137  if (!work->multiRsp)  in ksmbd_conn_try_dequeue_request()
    [all …]
|
/Linux-v6.1/kernel/ |
D | irq_work.c |
     55  static bool irq_work_claim(struct irq_work *work)  in irq_work_claim() argument
     59  oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);  in irq_work_claim()
     78  static void __irq_work_queue_local(struct irq_work *work)  in __irq_work_queue_local() argument
     85  work_flags = atomic_read(&work->node.a_flags);  in __irq_work_queue_local()
     97  if (!llist_add(&work->node.llist, list))  in __irq_work_queue_local()
    106  bool irq_work_queue(struct irq_work *work)  in irq_work_queue() argument
    109  if (!irq_work_claim(work))  in irq_work_queue()
    114  __irq_work_queue_local(work);  in irq_work_queue()
    127  bool irq_work_queue_on(struct irq_work *work, int cpu)  in irq_work_queue_on() argument
    130  return irq_work_queue(work);  in irq_work_queue_on()
    [all …]
|
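irq_work is the mechanism for queueing a callback from NMI or other deep-atomic contexts: irq_work_claim()'s atomic fetch-or collapses concurrent queue attempts into a single pending instance, and the callback later runs from the irq_work interrupt path. Minimal usage (names hypothetical):

#include <linux/irq_work.h>

static void my_irq_work_fn(struct irq_work *work)
{
        /* hard-irq context: no sleeping, keep it short */
}

static struct irq_work my_work = IRQ_WORK_INIT(my_irq_work_fn);

static void from_nmi(void)
{
        /* returns false if already claimed/pending - that is fine */
        irq_work_queue(&my_work);
}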
D | task_work.c |
     42  int task_work_add(struct task_struct *task, struct callback_head *work,  in task_work_add() argument
     48  kasan_record_aux_stack(work);  in task_work_add()
     54  work->next = head;  in task_work_add()
     55  } while (!try_cmpxchg(&task->task_works, &head, work));  in task_work_add()
     91  struct callback_head *work;  in task_work_cancel_match() local
    103  work = READ_ONCE(*pprev);  in task_work_cancel_match()
    104  while (work) {  in task_work_cancel_match()
    105  if (!match(work, data)) {  in task_work_cancel_match()
    106  pprev = &work->next;  in task_work_cancel_match()
    107  work = READ_ONCE(*pprev);  in task_work_cancel_match()
    [all …]
|
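task_work_add() pushes a callback_head onto another task with a cmpxchg loop; the callback then runs in that task's own context (with TWA_RESUME, on its next return to user mode), which is why it suits "finish this in the task that owns the resources" cleanups. A sketch, with struct my_deferred and defer_to_task() invented for illustration:

#include <linux/slab.h>
#include <linux/task_work.h>

struct my_deferred {
        struct callback_head cb;
        int payload;
};

static void my_cb(struct callback_head *head)
{
        struct my_deferred *d = container_of(head, struct my_deferred, cb);

        /* runs in the target task's context */
        kfree(d);
}

static int defer_to_task(struct task_struct *task, struct my_deferred *d)
{
        init_task_work(&d->cb, my_cb);
        /* -ESRCH if the task is already exiting; caller keeps ownership */
        return task_work_add(task, &d->cb, TWA_RESUME);
}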
D | kthread.c |
    786  struct kthread_work *work;  in kthread_worker_fn() local
    809  work = NULL;  in kthread_worker_fn()
    812  work = list_first_entry(&worker->work_list,  in kthread_worker_fn()
    814  list_del_init(&work->node);  in kthread_worker_fn()
    816  worker->current_work = work;  in kthread_worker_fn()
    819  if (work) {  in kthread_worker_fn()
    820  kthread_work_func_t func = work->func;  in kthread_worker_fn()
    822  trace_sched_kthread_work_execute_start(work);  in kthread_worker_fn()
    823  work->func(work);  in kthread_worker_fn()
    828  trace_sched_kthread_work_execute_end(work, func);  in kthread_worker_fn()
    [all …]
|
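kthread_worker_fn() is the consumer loop: it pops and runs one kthread_work at a time, so everything queued to a given worker executes strictly sequentially on one dedicated thread (unlike the shared workqueue pool). Producer-side usage looks like this sketch (demo() is illustrative):

#include <linux/kthread.h>

static void my_work_fn(struct kthread_work *work)
{
        /* runs sequentially on the dedicated kthread */
}

static struct kthread_work my_kwork;

static int demo(void)
{
        struct kthread_worker *worker;

        worker = kthread_create_worker(0, "my-worker");
        if (IS_ERR(worker))
                return PTR_ERR(worker);

        kthread_init_work(&my_kwork, my_work_fn);
        kthread_queue_work(worker, &my_kwork);
        kthread_flush_work(&my_kwork);          /* wait for completion */
        kthread_destroy_worker(worker);
        return 0;
}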
/Linux-v6.1/include/linux/ |
D | completion.h |
    35  #define COMPLETION_INITIALIZER(work) \  argument
    36  { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
    38  #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \  argument
    39  (*({ init_completion_map(&(work), &(map)); &(work); }))
    41  #define COMPLETION_INITIALIZER_ONSTACK(work) \  argument
    42  (*({ init_completion(&work); &work; }))
    52  #define DECLARE_COMPLETION(work) \  argument
    53  struct completion work = COMPLETION_INITIALIZER(work)
    68  # define DECLARE_COMPLETION_ONSTACK(work) \  argument
    69  struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
    [all …]
|
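The ONSTACK variants exist because an on-stack completion must be initialized at runtime (and, under lockdep, given its own mapping) instead of using the static initializer. The canonical wait-with-timeout pattern, with struct my_dev and start_async_op() as placeholders:

#include <linux/completion.h>
#include <linux/jiffies.h>

struct my_dev {
        struct completion *done;        /* set before starting the op */
};

static void start_async_op(struct my_dev *dev); /* completes dev->done */

static int wait_for_device(struct my_dev *dev)
{
        DECLARE_COMPLETION_ONSTACK(done);       /* runtime init, lockdep-aware */

        dev->done = &done;
        start_async_op(dev);    /* irq path calls complete(dev->done) */

        if (!wait_for_completion_timeout(&done, msecs_to_jiffies(100)))
                return -ETIMEDOUT;
        return 0;
}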
D | workqueue.h |
     21  typedef void (*work_func_t)(struct work_struct *work);
     28  #define work_data_bits(work) ((unsigned long *)(&(work)->data))  argument
    111  struct work_struct work;  member
    120  struct work_struct work;  member
    153  static inline struct delayed_work *to_delayed_work(struct work_struct *work)  in to_delayed_work() argument
    155  return container_of(work, struct delayed_work, work);  in to_delayed_work()
    158  static inline struct rcu_work *to_rcu_work(struct work_struct *work)  in to_rcu_work() argument
    160  return container_of(work, struct rcu_work, work);  in to_rcu_work()
    164  struct work_struct work;  member
    187  .work = __WORK_INITIALIZER((n).work, (f)), \
    [all …]
|
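These hits show the embedding convention: a work_struct (or a delayed_work, whose .work member holds the inner work_struct) lives inside the user's own structure, handlers recover the container with container_of(), and to_delayed_work() performs the inner hop for delayed handlers. A compact sketch (struct my_state and the handlers are invented):

#include <linux/workqueue.h>

struct my_state {
        struct work_struct work;
        struct delayed_work dwork;
        int hits;
};

static void my_fn(struct work_struct *work)
{
        struct my_state *s = container_of(work, struct my_state, work);

        s->hits++;
}

static void my_delayed_fn(struct work_struct *work)
{
        /* delayed handlers get the inner work_struct; hop back out */
        struct delayed_work *dw = to_delayed_work(work);
        struct my_state *s = container_of(dw, struct my_state, dwork);

        s->hits++;
}

static void arm(struct my_state *s)
{
        INIT_WORK(&s->work, my_fn);
        INIT_DELAYED_WORK(&s->dwork, my_delayed_fn);
        schedule_work(&s->work);
        schedule_delayed_work(&s->dwork, HZ / 2);       /* ~500 ms */
}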
D | jump_label_ratelimit.h |
    12  struct delayed_work work;  member
    18  struct delayed_work work;  member
    24  struct delayed_work work;  member
    28  __static_key_slow_dec_deferred(&(x)->key, &(x)->work, (x)->timeout)
    30  __static_key_slow_dec_deferred(&(x)->key.key, &(x)->work, (x)->timeout)
    33  __static_key_deferred_flush((x), &(x)->work)
    37  struct delayed_work *work,
    39  extern void __static_key_deferred_flush(void *key, struct delayed_work *work);
    43  extern void jump_label_update_timeout(struct work_struct *work);
    49  .work = __DELAYED_WORK_INITIALIZER((name).work, \
    [all …]
|
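static_key_deferred bundles a static key with a delayed_work (jump_label_update_timeout() is that work's handler) so the expensive disable side of the key is rate-limited instead of patching code on every toggle. A hedged consumer sketch (my_key and the feature hooks are hypothetical):

#include <linux/init.h>
#include <linux/jump_label_ratelimit.h>

static struct static_key_deferred my_key;       /* key starts false */

static int __init my_init(void)
{
        jump_label_rate_limit(&my_key, HZ);     /* batch decs for ~1s */
        return 0;
}

static void feature_on(void)
{
        static_key_slow_inc(&my_key.key);       /* patches immediately */
}

static void feature_off(void)
{
        /* the real decrement is deferred through my_key.work */
        static_key_slow_dec_deferred(&my_key);
}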
D | kthread.h |
    115  typedef void (*kthread_work_func_t)(struct kthread_work *work);
    140  struct kthread_work work;  member
    144  #define KTHREAD_WORK_INIT(work, fn) { \  argument
    145  .node = LIST_HEAD_INIT((work).node), \
    150  .work = KTHREAD_WORK_INIT((dwork).work, (fn)), \
    155  #define DEFINE_KTHREAD_WORK(work, fn) \  argument
    156  struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
    171  #define kthread_init_work(work, fn) \  argument
    173  memset((work), 0, sizeof(struct kthread_work)); \
    174  INIT_LIST_HEAD(&(work)->node); \
    [all …]
|
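KTHREAD_WORK_INIT and DEFINE_KTHREAD_WORK give compile-time initialization, and kthread_delayed_work puts a timer in front of the same queueing machinery. A delayed-work sketch to complement the kthread.c example above (kick() is illustrative; the worker comes from kthread_create_worker()):

#include <linux/kthread.h>

static void my_dwork_fn(struct kthread_work *work)
{
        /* runs on the worker thread once the delay elapses */
}

static DEFINE_KTHREAD_DELAYED_WORK(my_dwork, my_dwork_fn);

static void kick(struct kthread_worker *worker)
{
        kthread_queue_delayed_work(worker, &my_dwork, HZ);

        /* or re-arm a possibly-pending instance with a new delay: */
        kthread_mod_delayed_work(worker, &my_dwork, 2 * HZ);
}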
D | irq_work.h |
    37  void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))  in init_irq_work() argument
    39  *work = IRQ_WORK_INIT(func);  in init_irq_work()
    42  static inline bool irq_work_is_pending(struct irq_work *work)  in irq_work_is_pending() argument
    44  return atomic_read(&work->node.a_flags) & IRQ_WORK_PENDING;  in irq_work_is_pending()
    47  static inline bool irq_work_is_busy(struct irq_work *work)  in irq_work_is_busy() argument
    49  return atomic_read(&work->node.a_flags) & IRQ_WORK_BUSY;  in irq_work_is_busy()
    52  static inline bool irq_work_is_hard(struct irq_work *work)  in irq_work_is_hard() argument
    54  return atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ;  in irq_work_is_hard()
    57  bool irq_work_queue(struct irq_work *work);
    58  bool irq_work_queue_on(struct irq_work *work, int cpu);
    [all …]
|
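The a_flags accessors answer "what state is this irq_work in?": IRQ_WORK_PENDING means claimed and queued, IRQ_WORK_BUSY means the callback may still be running. Before freeing or reusing the containing object, process-context code waits with irq_work_sync(); the checks below only illustrate the accessors (an unconditional sync is also fine). Names are invented:

#include <linux/irq_work.h>

static void probe_fn(struct irq_work *work)
{
        /* atomic-context callback */
}

static struct irq_work probe_work = IRQ_WORK_INIT(probe_fn);

static void probe_teardown(void)        /* process context: may sleep */
{
        if (irq_work_is_pending(&probe_work) || irq_work_is_busy(&probe_work))
                irq_work_sync(&probe_work);     /* wait for the callback */
}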
/Linux-v6.1/fs/btrfs/ |
D | async-thread.c |
     55  struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)  in btrfs_work_owner() argument
     57  return work->wq->fs_info;  in btrfs_work_owner()
    180  struct btrfs_work *work;  in run_ordered_work() local
    189  work = list_entry(list->next, struct btrfs_work,  in run_ordered_work()
    191  if (!test_bit(WORK_DONE_BIT, &work->flags))  in run_ordered_work()
    207  if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))  in run_ordered_work()
    209  trace_btrfs_ordered_sched(work);  in run_ordered_work()
    211  work->ordered_func(work);  in run_ordered_work()
    215  list_del(&work->ordered_list);  in run_ordered_work()
    218  if (work == self) {  in run_ordered_work()
    [all …]
|
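run_ordered_work() implements btrfs's two-phase work model: the normal func may run on any worker in parallel, while ordered_func and ordered_free run strictly in queue order once the item's WORK_DONE_BIT is set. A hedged sketch of submitting such work with the internal fs/btrfs API (the handler names are invented):

#include <linux/slab.h>
#include "async-thread.h"       /* fs/btrfs internal header */

static void do_parallel(struct btrfs_work *work)
{
        /* phase 1: may run concurrently with other items */
}

static void do_ordered(struct btrfs_work *work)
{
        /* phase 2: strictly in submission order */
}

static void do_free(struct btrfs_work *work)
{
        kfree(work);    /* assumes the btrfs_work was kmalloc'ed */
}

static void submit(struct btrfs_workqueue *wq, struct btrfs_work *work)
{
        btrfs_init_work(work, do_parallel, do_ordered, do_free);
        btrfs_queue_work(wq, work);
}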
/Linux-v6.1/drivers/staging/octeon/ |
D | ethernet-rx.c |
     63  static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)  in cvm_oct_check_rcv_error() argument
     68  port = work->word0.pip.cn68xx.pknd;  in cvm_oct_check_rcv_error()
     70  port = work->word1.cn38xx.ipprt;  in cvm_oct_check_rcv_error()
     72  if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64))  in cvm_oct_check_rcv_error()
     81  if (work->word2.snoip.err_code == 5 ||  in cvm_oct_check_rcv_error()
     82  work->word2.snoip.err_code == 7) {  in cvm_oct_check_rcv_error()
     99  cvmx_phys_to_ptr(work->packet_ptr.s.addr);  in cvm_oct_check_rcv_error()
    102  while (i < work->word1.len - 1) {  in cvm_oct_check_rcv_error()
    111  work->packet_ptr.s.addr += i + 1;  in cvm_oct_check_rcv_error()
    112  work->word1.len -= i + 5;  in cvm_oct_check_rcv_error()
    [all …]
|
D | ethernet-tx.c |
    516  struct cvmx_wqe *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);  in cvm_oct_xmit_pow() local
    518  if (unlikely(!work)) {  in cvm_oct_xmit_pow()
    531  cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);  in cvm_oct_xmit_pow()
    561  work->word0.pip.cn38xx.hw_chksum = skb->csum;  in cvm_oct_xmit_pow()
    562  work->word1.len = skb->len;  in cvm_oct_xmit_pow()
    563  cvmx_wqe_set_port(work, priv->port);  in cvm_oct_xmit_pow()
    564  cvmx_wqe_set_qos(work, priv->port & 0x7);  in cvm_oct_xmit_pow()
    565  cvmx_wqe_set_grp(work, pow_send_group);  in cvm_oct_xmit_pow()
    566  work->word1.tag_type = CVMX_HELPER_INPUT_TAG_TYPE;  in cvm_oct_xmit_pow()
    567  work->word1.tag = pow_send_group; /* FIXME */  in cvm_oct_xmit_pow()
    [all …]
|
/Linux-v6.1/tools/perf/ |
D | builtin-kwork.c |
    264  struct kwork_work *work;  in work_search() local
    268  work = container_of(node, struct kwork_work, node);  in work_search()
    269  cmp = work_cmp(sort_list, key, work);  in work_search()
    275  if (work->name == NULL)  in work_search()
    276  work->name = key->name;  in work_search()
    277  return work;  in work_search()
    311  struct kwork_work *work = zalloc(sizeof(*work));  in work_new() local
    313  if (work == NULL) {  in work_new()
    319  INIT_LIST_HEAD(&work->atom_list[i]);  in work_new()
    321  work->id = key->id;  in work_new()
    [all …]
|
/Linux-v6.1/drivers/net/ethernet/mellanox/mlx5/core/lag/ |
D | mpesw.c |
    43  static void mlx5_mpesw_work(struct work_struct *work)  in mlx5_mpesw_work() argument
    45  struct mlx5_mpesw_work_st *mpesww = container_of(work, struct mlx5_mpesw_work_st, work);  in mlx5_mpesw_work()
    62  struct mlx5_mpesw_work_st *work;  in mlx5_lag_mpesw_queue_work() local
    68  work = kzalloc(sizeof(*work), GFP_KERNEL);  in mlx5_lag_mpesw_queue_work()
    69  if (!work)  in mlx5_lag_mpesw_queue_work()
    72  INIT_WORK(&work->work, mlx5_mpesw_work);  in mlx5_lag_mpesw_queue_work()
    73  init_completion(&work->comp);  in mlx5_lag_mpesw_queue_work()
    74  work->op = op;  in mlx5_lag_mpesw_queue_work()
    75  work->lag = ldev;  in mlx5_lag_mpesw_queue_work()
    77  if (!queue_work(ldev->wq, &work->work)) {  in mlx5_lag_mpesw_queue_work()
    [all …]
|
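mlx5_lag_mpesw_queue_work() is a textbook embed-and-wait: a heap-allocated context carries both the work_struct and a completion, and the handler recovers it with container_of() and signals the submitter. The same pattern in generic form (struct my_job and run_job() are invented):

#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_job {
        struct work_struct work;
        struct completion comp;
        int result;
};

static void my_job_fn(struct work_struct *work)
{
        struct my_job *job = container_of(work, struct my_job, work);

        job->result = 0;        /* do the real operation here */
        complete(&job->comp);
}

static int run_job(struct workqueue_struct *wq)
{
        struct my_job *job = kzalloc(sizeof(*job), GFP_KERNEL);
        int err;

        if (!job)
                return -ENOMEM;

        INIT_WORK(&job->work, my_job_fn);
        init_completion(&job->comp);

        if (!queue_work(wq, &job->work)) {
                kfree(job);
                return -EINVAL;
        }

        wait_for_completion(&job->comp);
        err = job->result;
        kfree(job);
        return err;
}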
/Linux-v6.1/drivers/accessibility/speakup/ |
D | selection.c |
     20  struct work_struct work;  member
     25  static void __speakup_set_selection(struct work_struct *work)  in __speakup_set_selection() argument
     28  container_of(work, struct speakup_selection_work, work);  in __speakup_set_selection()
     58  .work = __WORK_INITIALIZER(speakup_sel_work.work,
     87  schedule_work_on(WORK_CPU_UNBOUND, &speakup_sel_work.work);  in speakup_set_selection()
     96  cancel_work_sync(&speakup_sel_work.work);  in speakup_cancel_selection()
    106  static void __speakup_paste_selection(struct work_struct *work)  in __speakup_paste_selection() argument
    109  container_of(work, struct speakup_selection_work, work);  in __speakup_paste_selection()
    117  .work = __WORK_INITIALIZER(speakup_paste_work.work,
    129  schedule_work_on(WORK_CPU_UNBOUND, &speakup_paste_work.work);  in speakup_paste_selection()
    [all …]
|
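selection.c builds its work items statically with __WORK_INITIALIZER, so no runtime INIT_WORK is needed, and kicks them via schedule_work_on(WORK_CPU_UNBOUND, ...), i.e. the system workqueue with no CPU preference. A reduced sketch (struct sel_work and the payload handling are placeholders):

#include <linux/workqueue.h>

struct sel_work {
        struct work_struct work;
        void *payload;                  /* guarded by the caller's locking */
};

static void do_sel(struct work_struct *work);

static struct sel_work sel = {
        .work = __WORK_INITIALIZER(sel.work, do_sel),   /* static init */
};

static void do_sel(struct work_struct *work)
{
        struct sel_work *s = container_of(work, struct sel_work, work);

        /* consume s->payload ... */
}

static void kick_sel(void)
{
        schedule_work_on(WORK_CPU_UNBOUND, &sel.work);
}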
/Linux-v6.1/drivers/net/wireless/st/cw1200/ |
D | sta.h |
    59  void cw1200_event_handler(struct work_struct *work);
    60  void cw1200_bss_loss_work(struct work_struct *work);
    61  void cw1200_bss_params_work(struct work_struct *work);
    62  void cw1200_keep_alive_work(struct work_struct *work);
    63  void cw1200_tx_failure_work(struct work_struct *work);
    79  void cw1200_join_timeout(struct work_struct *work);
    80  void cw1200_unjoin_work(struct work_struct *work);
    81  void cw1200_join_complete_work(struct work_struct *work);
    82  void cw1200_wep_key_work(struct work_struct *work);
    85  void cw1200_update_filtering_work(struct work_struct *work);
    [all …]
|