
Searched full:work (Results 1 – 25 of 4692) sorted by relevance


/Linux-v6.1/drivers/gpu/drm/
drm_flip_work.c
31 * drm_flip_work_allocate_task - allocate a flip-work task
51 * @work: the flip-work
55 * func) on a work queue after drm_flip_work_commit() is called.
57 void drm_flip_work_queue_task(struct drm_flip_work *work, in drm_flip_work_queue_task() argument
62 spin_lock_irqsave(&work->lock, flags); in drm_flip_work_queue_task()
63 list_add_tail(&task->node, &work->queued); in drm_flip_work_queue_task()
64 spin_unlock_irqrestore(&work->lock, flags); in drm_flip_work_queue_task()
69 * drm_flip_work_queue - queue work
70 * @work: the flip-work
73 * Queues work, that will later be run (passed back to drm_flip_func_t
[all …]
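
A rough usage sketch of this helper (not part of these results; the names unref_fb and the framebuffer use case are illustrative, signatures as in the v6.1 drm_flip_work.h). The point of the API is that queueing is cheap and atomic-safe, while the per-item function runs later on an ordinary workqueue:

    #include <drm/drm_flip_work.h>
    #include <drm/drm_framebuffer.h>
    #include <linux/workqueue.h>

    static struct drm_flip_work unref_work;

    /* Runs once per queued value, on the workqueue passed to commit(). */
    static void unref_fb(struct drm_flip_work *work, void *val)
    {
            drm_framebuffer_put(val);   /* drop the ref held across the flip */
    }

    static void crtc_setup(void)
    {
            drm_flip_work_init(&unref_work, "fb-unref", unref_fb);
    }

    static void flip_pending(struct drm_framebuffer *old_fb)
    {
            /* allocates a task and links it onto work->queued under the
             * spinlock seen above; usable from atomic context */
            drm_flip_work_queue(&unref_work, old_fb);
    }

    static void flip_done_irq(void)
    {
            /* from the vblank/flip-done handler: hand everything queued
             * so far to a workqueue for deferred execution */
            drm_flip_work_commit(&unref_work, system_unbound_wq);
    }
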
drm_vblank_work.c
38 * generic delayed work implementation which delays work execution until a
39 * particular vblank has passed, and then executes the work at realtime
43 * re-arming work items can be easily implemented.
48 struct drm_vblank_work *work, *next; in drm_handle_vblank_works() local
54 list_for_each_entry_safe(work, next, &vblank->pending_work, node) { in drm_handle_vblank_works()
55 if (!drm_vblank_passed(count, work->count)) in drm_handle_vblank_works()
58 list_del_init(&work->node); in drm_handle_vblank_works()
60 kthread_queue_work(vblank->worker, &work->base); in drm_handle_vblank_works()
67 /* Handle cancelling any pending vblank work items and drop respective vblank
72 struct drm_vblank_work *work, *next; in drm_vblank_cancel_pending_works() local
[all …]
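
A hedged sketch of the caller-facing side of this mechanism, assuming the v6.1 drm_vblank_work.h API; struct my_crtc_state and late_update are invented names, and the CRTC is assumed to already hold a vblank reference:

    #include <drm/drm_vblank_work.h>
    #include <drm/drm_vblank.h>

    struct my_crtc_state {
            struct drm_vblank_work update_work;
    };

    /* Runs on the per-CRTC vblank worker (a realtime kthread) once the
     * target vblank count has passed, per drm_handle_vblank_works() above. */
    static void late_update(struct kthread_work *base)
    {
            struct drm_vblank_work *work = to_drm_vblank_work(base);
            /* ... program registers that must not be touched earlier ... */
    }

    static void arm_update(struct my_crtc_state *s, struct drm_crtc *crtc)
    {
            u64 target = drm_crtc_vblank_count(crtc) + 1;

            drm_vblank_work_init(&s->update_work, crtc, late_update);
            /* nextonmiss = true: if "target" was already missed, defer to
             * the next vblank instead of running immediately */
            drm_vblank_work_schedule(&s->update_work, target, true);
    }
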
/Linux-v6.1/include/trace/events/
workqueue.h
14 * workqueue_queue_work - called when a work gets queued
17 * @work: pointer to struct work_struct
19 * This event occurs when a work is queued immediately or once a
20 * delayed work is actually queued on a workqueue (ie: once the delay
26 struct work_struct *work),
28 TP_ARGS(req_cpu, pwq, work),
31 __field( void *, work )
39 __entry->work = work;
40 __entry->function = work->func;
46 TP_printk("work struct=%p function=%ps workqueue=%s req_cpu=%d cpu=%d",
[all …]
/Linux-v6.1/virt/kvm/
async_pf.c
45 static void async_pf_execute(struct work_struct *work) in async_pf_execute() argument
48 container_of(work, struct kvm_async_pf, work); in async_pf_execute()
59 * This work is run asynchronously to the task which owns in async_pf_execute()
98 /* cancel outstanding work queue item */ in kvm_clear_async_pf_completion_queue()
100 struct kvm_async_pf *work = in kvm_clear_async_pf_completion_queue() local
102 typeof(*work), queue); in kvm_clear_async_pf_completion_queue()
103 list_del(&work->queue); in kvm_clear_async_pf_completion_queue()
109 if (!work->vcpu) in kvm_clear_async_pf_completion_queue()
114 flush_work(&work->work); in kvm_clear_async_pf_completion_queue()
116 if (cancel_work_sync(&work->work)) { in kvm_clear_async_pf_completion_queue()
[all …]
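
The flush_work()/cancel_work_sync() pair seen in this excerpt is a general teardown pattern, not KVM-specific. A minimal sketch, with struct my_ctx invented for illustration: cancel_work_sync() returns true only if the item was still pending, which tells the caller the callback never ran and its cleanup duties fall back to the canceller:

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct my_ctx {
            struct work_struct work;
            void *payload;          /* normally freed by the work callback */
    };

    static void teardown(struct my_ctx *ctx)
    {
            if (cancel_work_sync(&ctx->work)) {
                    /* The callback never ran and never will, so the
                     * cleanup it would have done is now ours. */
                    kfree(ctx->payload);
            }
            /* false: the callback already finished, or was running and
             * has been waited for; it owned the cleanup. */
    }
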
/Linux-v6.1/fs/btrfs/
async-thread.c
29 /* List head pointing to ordered work list */
55 struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work) in btrfs_work_owner() argument
57 return work->wq->fs_info; in btrfs_work_owner()
129 * Hook for threshold which will be called before executing the work,
180 struct btrfs_work *work; in run_ordered_work() local
189 work = list_entry(list->next, struct btrfs_work, in run_ordered_work()
191 if (!test_bit(WORK_DONE_BIT, &work->flags)) in run_ordered_work()
197 * updates from ordinary work function. in run_ordered_work()
203 * we leave the work item on the list as a barrier so in run_ordered_work()
204 * that later work items that are done don't have their in run_ordered_work()
[all …]
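
For context, a sketch of how an ordered item is set up against the v6.1 in-tree helpers in fs/btrfs/async-thread.h (this only builds inside fs/btrfs; in this release btrfs_init_work() took separate func/ordered_func/ordered_free callbacks, and the csum_* names here are illustrative):

    #include <linux/slab.h>
    #include "async-thread.h"       /* fs/btrfs internal */
    #include "ctree.h"

    struct csum_ctx {
            struct btrfs_work work;
    };

    static void csum_fn(struct btrfs_work *work)
    {
            /* unordered part: may run in parallel with other items */
    }

    static void csum_ordered_fn(struct btrfs_work *work)
    {
            /* ordered part: run strictly in queueing order once the
             * item's WORK_DONE_BIT is set, per run_ordered_work() above */
    }

    static void csum_free_fn(struct btrfs_work *work)
    {
            kfree(container_of(work, struct csum_ctx, work));
    }

    static void submit_csum(struct btrfs_fs_info *fs_info, struct csum_ctx *ctx)
    {
            btrfs_init_work(&ctx->work, csum_fn, csum_ordered_fn, csum_free_fn);
            btrfs_queue_work(fs_info->workers, &ctx->work);
    }
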
/Linux-v6.1/kernel/
task_work.c
9 * task_work_add - ask the @task to execute @work->func()
11 * @work: the callback to run
14 * Queue @work for task_work_run() below and notify the @task if @notify
25 * @TWA_RESUME work is run only when the task exits the kernel and returns to
28 * Fails if the @task is exiting/exited and thus it can't process this @work.
29 * Otherwise @work->func() will be called when the @task goes through one of
32 * If the targeted task is exiting, then an error is returned and the work item
42 int task_work_add(struct task_struct *task, struct callback_head *work, in task_work_add() argument
47 /* record the work call stack in order to print it in KASAN reports */ in task_work_add()
48 kasan_record_aux_stack(work); in task_work_add()
[all …]
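
A small sketch of the API these comments document, assuming v6.1 <linux/task_work.h>; struct defer and its field names are invented. With TWA_RESUME the callback runs in the target task's own context on its next return to user space:

    #include <linux/task_work.h>
    #include <linux/sched.h>
    #include <linux/slab.h>

    struct defer {
            struct callback_head cb;
            /* ... payload ... */
    };

    /* Executes as the target task, so current->mm etc. are its own. */
    static void defer_fn(struct callback_head *cb)
    {
            struct defer *d = container_of(cb, struct defer, cb);
            /* ... do the deferred operation ... */
            kfree(d);
    }

    static int queue_on(struct task_struct *task, struct defer *d)
    {
            init_task_work(&d->cb, defer_fn);
            /* returns -ESRCH if @task is already exiting */
            return task_work_add(task, &d->cb, TWA_RESUME);
    }
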
irq_work.c
55 static bool irq_work_claim(struct irq_work *work) in irq_work_claim() argument
59 oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags); in irq_work_claim()
61 * If the work is already pending, no need to raise the IPI. in irq_work_claim()
77 /* Enqueue on current CPU, work must already be claimed and preempt disabled */
78 static void __irq_work_queue_local(struct irq_work *work) in __irq_work_queue_local() argument
85 work_flags = atomic_read(&work->node.a_flags); in __irq_work_queue_local()
97 if (!llist_add(&work->node.llist, list)) in __irq_work_queue_local()
100 /* If the work is "lazy", handle it from next tick if any */ in __irq_work_queue_local()
105 /* Enqueue the irq work @work on the current CPU */
106 bool irq_work_queue(struct irq_work *work) in irq_work_queue() argument
[all …]
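
irq_work is the deferral mechanism for contexts (NMI, deep IRQ paths) too restricted even to raise a softirq: queueing just claims the item and chains it on a lockless list, as the excerpt shows. A minimal sketch against v6.1 <linux/irq_work.h>, with illustrative names:

    #include <linux/irq_work.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(report_wq);
    static struct irq_work report_work;

    /* Runs in hard-IRQ context once the self-IPI fires: may wake tasks,
     * still may not sleep. */
    static void wake_reporter(struct irq_work *work)
    {
            wake_up_interruptible(&report_wq);
    }

    static void setup(void)
    {
            init_irq_work(&report_work, wake_reporter);
    }

    /* Callable from NMI context. */
    static void note_event(void)
    {
            irq_work_queue(&report_work);   /* false if already pending */
    }
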
kthread.c
297 * functions which do some additional work in non-modular code such as
777 * when they finish. A safe point for freezing is defined when one work
786 struct kthread_work *work; in kthread_worker_fn() local
809 work = NULL; in kthread_worker_fn()
812 work = list_first_entry(&worker->work_list, in kthread_worker_fn()
814 list_del_init(&work->node); in kthread_worker_fn()
816 worker->current_work = work; in kthread_worker_fn()
819 if (work) { in kthread_worker_fn()
820 kthread_work_func_t func = work->func; in kthread_worker_fn()
822 trace_sched_kthread_work_execute_start(work); in kthread_worker_fn()
[all …]
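
The loop excerpted above is what executes behind the kthread_worker API, which gives a dedicated thread that runs queued items strictly in order. A usage sketch with invented names, assuming v6.1 <linux/kthread.h>:

    #include <linux/kthread.h>
    #include <linux/err.h>

    static struct kthread_worker *worker;
    static struct kthread_work refill;

    static void refill_fn(struct kthread_work *work)
    {
            /* runs on the dedicated kthread, in queueing order */
    }

    static int setup(void)
    {
            worker = kthread_create_worker(0, "my-refill");
            if (IS_ERR(worker))
                    return PTR_ERR(worker);

            kthread_init_work(&refill, refill_fn);
            kthread_queue_work(worker, &refill); /* false if already queued */
            return 0;
    }

    static void teardown(void)
    {
            /* flushes remaining work, then stops the thread */
            kthread_destroy_worker(worker);
    }
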
workqueue.c
18 * This is the generic async execution mechanism. Work items are
21 * normal work items and the other for high priority ones) and some extra
213 * When pwq->nr_active >= max_active, a new work item is queued to
217 * All work items marked with WORK_STRUCT_INACTIVE do not participate
218 * in pwq->nr_active and all work items in pwq->inactive_works are
220 * work items are in pwq->inactive_works. Some of them are ready to
221 * run in pool->worklist or worker->scheduled. Those work items are
223 * not participate in pwq->nr_active. For non-barrier work item, it
254 * The externally visible workqueue. It relays the issued work items to
262 int work_color; /* WQ: current work color */
[all …]
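
A sketch of the corresponding public API from <linux/workqueue.h> as of v6.1; the queue name, flags, and callback are illustrative. max_active bounds how many items from this workqueue execute concurrently, which is what the nr_active/inactive_works bookkeeping above implements:

    #include <linux/workqueue.h>

    static struct workqueue_struct *poll_wq;
    static struct work_struct poll_work;

    static void poll_fn(struct work_struct *work)
    {
            /* process context; may sleep */
    }

    static int setup(void)
    {
            /* unbound to any CPU, at most one item in flight */
            poll_wq = alloc_workqueue("my-poll", WQ_UNBOUND, 1);
            if (!poll_wq)
                    return -ENOMEM;

            INIT_WORK(&poll_work, poll_fn);
            queue_work(poll_wq, &poll_work); /* false if already pending */
            return 0;
    }

    static void teardown(void)
    {
            destroy_workqueue(poll_wq);     /* drains queued work first */
    }
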
/Linux-v6.1/fs/ksmbd/
server.c
88 * @work: smb work containing server thread information
92 static inline int check_conn_state(struct ksmbd_work *work) in check_conn_state() argument
96 if (ksmbd_conn_exiting(work) || ksmbd_conn_need_reconnect(work)) { in check_conn_state()
97 rsp_hdr = work->response_buf; in check_conn_state()
107 static int __process_request(struct ksmbd_work *work, struct ksmbd_conn *conn, in __process_request() argument
114 if (check_conn_state(work)) in __process_request()
117 if (ksmbd_verify_smb_message(work)) in __process_request()
120 command = conn->ops->get_cmd_val(work); in __process_request()
125 conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER); in __process_request()
132 conn->ops->set_rsp_status(work, STATUS_NOT_IMPLEMENTED); in __process_request()
[all …]
ksmbd_work.c
21 struct ksmbd_work *work = kmem_cache_zalloc(work_cache, GFP_KERNEL); in ksmbd_alloc_work_struct() local
23 if (work) { in ksmbd_alloc_work_struct()
24 work->compound_fid = KSMBD_NO_FID; in ksmbd_alloc_work_struct()
25 work->compound_pfid = KSMBD_NO_FID; in ksmbd_alloc_work_struct()
26 INIT_LIST_HEAD(&work->request_entry); in ksmbd_alloc_work_struct()
27 INIT_LIST_HEAD(&work->async_request_entry); in ksmbd_alloc_work_struct()
28 INIT_LIST_HEAD(&work->fp_entry); in ksmbd_alloc_work_struct()
29 INIT_LIST_HEAD(&work->interim_entry); in ksmbd_alloc_work_struct()
31 return work; in ksmbd_alloc_work_struct()
34 void ksmbd_free_work_struct(struct ksmbd_work *work) in ksmbd_free_work_struct() argument
[all …]
smb2pdu.c
41 static void __wbuf(struct ksmbd_work *work, void **req, void **rsp) in __wbuf() argument
43 if (work->next_smb2_rcv_hdr_off) { in __wbuf()
44 *req = ksmbd_req_buf_next(work); in __wbuf()
45 *rsp = ksmbd_resp_buf_next(work); in __wbuf()
47 *req = smb2_get_msg(work->request_buf); in __wbuf()
48 *rsp = smb2_get_msg(work->response_buf); in __wbuf()
89 * @work: smb work
94 int smb2_get_ksmbd_tcon(struct ksmbd_work *work) in smb2_get_ksmbd_tcon() argument
96 struct smb2_hdr *req_hdr = smb2_get_msg(work->request_buf); in smb2_get_ksmbd_tcon()
100 work->tcon = NULL; in smb2_get_ksmbd_tcon()
[all …]
smb2pdu.h
466 bool is_smb2_neg_cmd(struct ksmbd_work *work);
467 bool is_smb2_rsp(struct ksmbd_work *work);
469 u16 get_smb2_cmd_val(struct ksmbd_work *work);
470 void set_smb2_rsp_status(struct ksmbd_work *work, __le32 err);
471 int init_smb2_rsp_hdr(struct ksmbd_work *work);
472 int smb2_allocate_rsp_buf(struct ksmbd_work *work);
473 bool is_chained_smb2_message(struct ksmbd_work *work);
474 int init_smb2_neg_rsp(struct ksmbd_work *work);
475 void smb2_set_err_rsp(struct ksmbd_work *work);
476 int smb2_check_user_session(struct ksmbd_work *work);
[all …]
/Linux-v6.1/Documentation/core-api/
workqueue.rst
17 When such an asynchronous execution context is needed, a work item
22 While there are work items on the workqueue the worker executes the
23 functions associated with the work items one after the other. When
24 there is no work item left on the workqueue the worker becomes idle.
25 When a new work item gets queued, the worker begins executing again.
43 while an ST wq one for the whole system. Work items had to compete for
72 abstraction, the work item, is introduced.
74 A work item is a simple struct that holds a pointer to the function
76 wants a function to be executed asynchronously it has to set up a work
77 item pointing to that function and queue that work item on a
[all …]
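
The pattern this document describes, a bare struct work_struct embedded in the caller's own struct and recovered in the handler with container_of(), looks roughly like the sketch below (struct frame and its field are invented):

    #include <linux/workqueue.h>
    #include <linux/slab.h>
    #include <linux/printk.h>

    struct frame {
            struct work_struct work;    /* the handle that gets queued */
            int id;                     /* the data that travels with it */
    };

    static void process_frame(struct work_struct *work)
    {
            struct frame *f = container_of(work, struct frame, work);

            pr_info("frame %d\n", f->id);
            kfree(f);
    }

    static int submit_frame(int id)
    {
            struct frame *f = kzalloc(sizeof(*f), GFP_KERNEL);

            if (!f)
                    return -ENOMEM;
            f->id = id;
            INIT_WORK(&f->work, process_frame);
            schedule_work(&f->work);    /* queues on the system workqueue */
            return 0;
    }
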
/Linux-v6.1/include/linux/
workqueue.h
3 * workqueue.h --- work queue handling for Linux.
21 typedef void (*work_func_t)(struct work_struct *work);
25 * The first word is the work queue pointer and the flags rolled into
28 #define work_data_bits(work) ((unsigned long *)(&(work)->data)) argument
31 WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */
32 WORK_STRUCT_INACTIVE_BIT= 1, /* work item is inactive */
34 WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */
74 * When a work item is off queue, its high bits point to the last
111 struct work_struct work; member
114 /* target workqueue and CPU ->timer uses to queue ->work */
[all …]
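
struct delayed_work, whose layout is excerpted above (an embedded work plus the timer that queues it), is used roughly as in this sketch (v6.1 API; retry_fn and the 2-second period are illustrative):

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static void retry_fn(struct work_struct *work)
    {
            struct delayed_work *dwork = to_delayed_work(work);
            /* ... retry the operation; may re-arm dwork ... */
    }

    static DECLARE_DELAYED_WORK(retry_work, retry_fn);

    static void kick_retry(void)
    {
            /* ->timer fires after ~2s, then ->work is queued as usual */
            schedule_delayed_work(&retry_work, 2 * HZ);
    }

    static void stop_retry(void)
    {
            cancel_delayed_work_sync(&retry_work);
    }
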
completion.h
35 #define COMPLETION_INITIALIZER(work) \ argument
36 { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
38 #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ argument
39 (*({ init_completion_map(&(work), &(map)); &(work); }))
41 #define COMPLETION_INITIALIZER_ONSTACK(work) \ argument
42 (*({ init_completion(&work); &work; }))
46 * @work: identifier for the completion structure
52 #define DECLARE_COMPLETION(work) \ argument
53 struct completion work = COMPLETION_INITIALIZER(work)
62 * @work: identifier for the completion structure
[all …]
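
These initializers back DECLARE_COMPLETION() and friends. A small sketch pairing a completion with a work item so a caller can block until deferred work finishes (illustrative names, v6.1 API; on-stack users would reach for DECLARE_COMPLETION_ONSTACK instead):

    #include <linux/completion.h>
    #include <linux/workqueue.h>

    static DECLARE_COMPLETION(setup_done);

    static void setup_fn(struct work_struct *work)
    {
            /* ... bring the device up ... */
            complete(&setup_done);
    }

    static DECLARE_WORK(setup_work, setup_fn);

    static int wait_for_setup(void)
    {
            schedule_work(&setup_work);
            /* sleeps until complete(); interruptible and timeout
             * variants of the wait exist as well */
            wait_for_completion(&setup_done);
            return 0;
    }
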
/Linux-v6.1/drivers/staging/octeon/
ethernet-rx.c
59 * @work: Work queue entry pointing to the packet.
63 static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work) in cvm_oct_check_rcv_error() argument
68 port = work->word0.pip.cn68xx.pknd; in cvm_oct_check_rcv_error()
70 port = work->word1.cn38xx.ipprt; in cvm_oct_check_rcv_error()
72 if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) in cvm_oct_check_rcv_error()
81 if (work->word2.snoip.err_code == 5 || in cvm_oct_check_rcv_error()
82 work->word2.snoip.err_code == 7) { in cvm_oct_check_rcv_error()
99 cvmx_phys_to_ptr(work->packet_ptr.s.addr); in cvm_oct_check_rcv_error()
102 while (i < work->word1.len - 1) { in cvm_oct_check_rcv_error()
111 work->packet_ptr.s.addr += i + 1; in cvm_oct_check_rcv_error()
[all …]
ethernet-tx.c
515 /* Get a work queue entry */ in cvm_oct_xmit_pow()
516 struct cvmx_wqe *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL); in cvm_oct_xmit_pow() local
518 if (unlikely(!work)) { in cvm_oct_xmit_pow()
519 printk_ratelimited("%s: Failed to allocate a work queue entry\n", in cvm_oct_xmit_pow()
531 cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1); in cvm_oct_xmit_pow()
557 * Fill in some of the work queue fields. We may need to add in cvm_oct_xmit_pow()
561 work->word0.pip.cn38xx.hw_chksum = skb->csum; in cvm_oct_xmit_pow()
562 work->word1.len = skb->len; in cvm_oct_xmit_pow()
563 cvmx_wqe_set_port(work, priv->port); in cvm_oct_xmit_pow()
564 cvmx_wqe_set_qos(work, priv->port & 0x7); in cvm_oct_xmit_pow()
[all …]
/Linux-v6.1/LICENSES/dual/
Apache-2.0
49 "Work" shall mean the work of authorship, whether in Source or Object form,
51 is included in or attached to the work (an example is provided in the
54 "Derivative Works" shall mean any work, whether in Source or Object form,
55 that is based on (or derived from) the Work and for which the editorial
57 a whole, an original work of authorship. For the purposes of this License,
59 merely link (or bind by name) to the interfaces of, the Work and Derivative
62 "Contribution" shall mean any work of authorship, including the original
63 version of the Work and any modifications or additions to that Work or
65 inclusion in the Work by the copyright owner or by an individual or Legal
72 and improving the Work, but excluding communication that is conspicuously
[all …]
/Linux-v6.1/tools/perf/
builtin-kwork.c
264 struct kwork_work *work; in work_search() local
268 work = container_of(node, struct kwork_work, node); in work_search()
269 cmp = work_cmp(sort_list, key, work); in work_search()
275 if (work->name == NULL) in work_search()
276 work->name = key->name; in work_search()
277 return work; in work_search()
311 struct kwork_work *work = zalloc(sizeof(*work)); in work_new() local
313 if (work == NULL) { in work_new()
314 pr_err("Failed to zalloc kwork work\n"); in work_new()
319 INIT_LIST_HEAD(&work->atom_list[i]); in work_new()
[all …]
/Linux-v6.1/io_uring/
io-wq.c
30 IO_WORKER_F_BOUND = 8, /* is doing bounded work */
64 struct work_struct work; member
168 struct io_wq_work *work) in io_work_get_acct() argument
170 return io_get_acct(wqe, !(work->flags & IO_WQ_WORK_UNBOUND)); in io_work_get_acct()
295 * Most likely an attempt to queue unbounded work on an io_wq that in io_wqe_create_worker()
378 * work item after we canceled in io_wq_exit_workers(). in io_queue_worker_create()
414 * Worker will start processing some work. Move it to the busy list, if
428 * No work, worker going to sleep. Move to freelist, and unuse mm if we
443 static inline unsigned int io_get_work_hash(struct io_wq_work *work) in io_get_work_hash() argument
445 return work->flags >> IO_WQ_HASH_SHIFT; in io_get_work_hash()
[all …]
/Linux-v6.1/drivers/net/ethernet/mellanox/mlx5/core/lag/
mpesw.c
43 static void mlx5_mpesw_work(struct work_struct *work) in mlx5_mpesw_work() argument
45 struct mlx5_mpesw_work_st *mpesww = container_of(work, struct mlx5_mpesw_work_st, work); in mlx5_mpesw_work()
62 struct mlx5_mpesw_work_st *work; in mlx5_lag_mpesw_queue_work() local
68 work = kzalloc(sizeof(*work), GFP_KERNEL); in mlx5_lag_mpesw_queue_work()
69 if (!work) in mlx5_lag_mpesw_queue_work()
72 INIT_WORK(&work->work, mlx5_mpesw_work); in mlx5_lag_mpesw_queue_work()
73 init_completion(&work->comp); in mlx5_lag_mpesw_queue_work()
74 work->op = op; in mlx5_lag_mpesw_queue_work()
75 work->lag = ldev; in mlx5_lag_mpesw_queue_work()
77 if (!queue_work(ldev->wq, &work->work)) { in mlx5_lag_mpesw_queue_work()
[all …]
/Linux-v6.1/LICENSES/preferred/
LGPL-2.1
90 work, a derivative of the original library. The ordinary General Public
123 follow. Pay close attention to the difference between a "work based on the
124 library" and a "work that uses the library". The former contains code
140 The "Library", below, refers to any such software library or work which
141 has been distributed under these terms. A "work based on the Library"
142 means either the Library or any derivative work under copyright law:
143 that is to say, a work containing the Library or a portion of it, either
148 "Source code" for a work means the preferred form of the work for making
157 program is covered only if its contents constitute a work based on the
173 thus forming a work based on the Library, and copy and distribute such
[all …]
LGPL-2.0
88 a textual and legal sense, the linked executable is a combined work, a
108 follow. Pay close attention to the difference between a "work based on the
109 library" and a "work that uses the library". The former contains code
128 The "Library", below, refers to any such software library or work which
129 has been distributed under these terms. A "work based on the Library"
130 means either the Library or any derivative work under copyright law:
131 that is to say, a work containing the Library or a portion of it, either
136 "Source code" for a work means the preferred form of the work for making
145 program is covered only if its contents constitute a work based on the
161 thus forming a work based on the Library, and copy and distribute such
[all …]
/Linux-v6.1/drivers/infiniband/core/
cm.c
92 struct cm_work *work);
184 struct delayed_work work; member
195 struct cm_work work; member
267 static void cm_work_handler(struct work_struct *work);
693 __be32 remote_id = timewait_info->work.remote_id; in cm_insert_remote_id()
699 if (be32_lt(remote_id, cur_timewait_info->work.remote_id)) in cm_insert_remote_id()
701 else if (be32_gt(remote_id, cur_timewait_info->work.remote_id)) in cm_insert_remote_id()
727 if (be32_lt(remote_id, timewait_info->work.remote_id)) in cm_find_remote_id()
729 else if (be32_gt(remote_id, timewait_info->work.remote_id)) in cm_find_remote_id()
736 res = cm_acquire_id(timewait_info->work.local_id, in cm_find_remote_id()
[all …]
