Lines Matching full:work

27 	IO_WORKER_F_BOUND	= 8,	/* is doing bounded work */
60 struct work_struct work; member
161 struct io_wq_work *work) in io_work_get_acct() argument
163 return io_get_acct(wqe, !(work->flags & IO_WQ_WORK_UNBOUND)); in io_work_get_acct()
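These hits appear to come from io_uring's io-wq worker pool (io-wq.c). The io_work_get_acct() lines, together with the IO_WORKER_F_BOUND flag above, show that io-wq keeps two accounting buckets, bounded and unbounded, and picks one purely from the IO_WQ_WORK_UNBOUND bit on the work item. A minimal user-space sketch of that selection, using illustrative flag and enum values (the real definitions live in io_uring/io-wq.h and io-wq.c):

    #include <stdbool.h>
    #include <stdio.h>

    /* illustrative values; the real definitions are in io_uring/io-wq.h */
    enum { IO_WQ_WORK_UNBOUND = 4 };
    enum { IO_WQ_ACCT_BOUND = 0, IO_WQ_ACCT_UNBOUND = 1, IO_WQ_ACCT_NR = 2 };

    struct io_wq_work { unsigned int flags; };
    struct acct { const char *name; };
    struct wqe { struct acct acct[IO_WQ_ACCT_NR]; };

    /* models io_work_get_acct(): bounded work accounts against acct[0] */
    static struct acct *work_get_acct(struct wqe *wqe, struct io_wq_work *work)
    {
        bool bound = !(work->flags & IO_WQ_WORK_UNBOUND);

        return &wqe->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
    }

    int main(void)
    {
        struct wqe wqe = { .acct = { { "bound" }, { "unbound" } } };
        struct io_wq_work read_op = { .flags = 0 };
        struct io_wq_work recv_op = { .flags = IO_WQ_WORK_UNBOUND };

        printf("read -> %s\n", work_get_acct(&wqe, &read_op)->name);
        printf("recv -> %s\n", work_get_acct(&wqe, &recv_op)->name);
        return 0;
    }

Roughly, bounded work is I/O with a bounded execution time (e.g. regular file reads/writes) and is capped by the bounded worker limit, while unbounded work (e.g. network receives that may block indefinitely) accounts against the unbounded limit.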
249 * Most likely an attempt to queue unbounded work on an io_wq that in io_wqe_create_worker()
354 * Worker will start processing some work. Move it to the busy list, if
358 struct io_wq_work *work) in __io_worker_busy() argument
368 * No work, worker going to sleep. Move to freelist, and unuse mm if we
383 static inline unsigned int io_get_work_hash(struct io_wq_work *work) in io_get_work_hash() argument
385 return work->flags >> IO_WQ_HASH_SHIFT; in io_get_work_hash()
408 struct io_wq_work *work, *tail; in io_get_next_work() local
415 work = container_of(node, struct io_wq_work, list); in io_get_next_work()
418 if (!io_wq_is_hashed(work)) { in io_get_next_work()
420 return work; in io_get_next_work()
423 hash = io_get_work_hash(work); in io_get_next_work()
424 /* all items with this hash lie in [work, tail] */ in io_get_next_work()
431 return work; in io_get_next_work()
442 * work being added and clearing the stalled bit. in io_get_next_work()
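The io_get_next_work() hits sketch the dequeue policy: plain items are taken FIFO, while a hashed item is only taken if no other worker is currently running that hash; otherwise the whole same-hash run, which is kept contiguous up to hash_tail[hash] by io_wqe_insert_work() further down, is skipped. If nothing is runnable, the acct is marked stalled until a completion clears a hash bit. A rough user-space model of that skip logic, with a plain singly linked list and a bitmap standing in for the kernel's wq_list and hash bookkeeping:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* illustrative values for IO_WQ_WORK_HASHED / IO_WQ_HASH_SHIFT */
    enum { WORK_HASHED = 2, HASH_SHIFT = 24 };

    struct work {
        unsigned int flags;
        struct work *next;
    };

    static bool is_hashed(const struct work *w) { return w->flags & WORK_HASHED; }
    static unsigned int hash_of(const struct work *w) { return w->flags >> HASH_SHIFT; }

    /*
     * Model of io_get_next_work(): return the first runnable item, skipping a
     * contiguous run of same-hash items when that hash is already in flight.
     * A NULL return corresponds to the "stalled" case in the real code.
     */
    static struct work *get_next_work(struct work *head, struct work *hash_tail[],
                                      unsigned long busy_hashes)
    {
        for (struct work *w = head; w; w = w->next) {
            if (!is_hashed(w))
                return w;
            if (!(busy_hashes & (1UL << hash_of(w))))
                return w;
            /* all items with this hash lie in [w, hash_tail[hash]] */
            w = hash_tail[hash_of(w)];
        }
        return NULL;
    }

    int main(void)
    {
        /* two items hashed to key 1, then one plain item */
        struct work plain = { 0, NULL };
        struct work h1b = { WORK_HASHED | (1u << HASH_SHIFT), &plain };
        struct work h1a = { WORK_HASHED | (1u << HASH_SHIFT), &h1b };
        struct work *tails[2] = { NULL, &h1b };

        struct work *next = get_next_work(&h1a, tails, 1UL << 1 /* hash 1 busy */);
        printf("picked the %s item\n", next == &plain ? "plain" : "hashed");
        return 0;
    }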
464 struct io_wq_work *work) in io_assign_current_work() argument
466 if (work) { in io_assign_current_work()
472 worker->cur_work = work; in io_assign_current_work()
476 static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
487 struct io_wq_work *work; in io_worker_handle_work() local
490 * If we got some work, mark us as busy. If we didn't, but in io_worker_handle_work()
491 * the list isn't empty, it means we stalled on hashed work. in io_worker_handle_work()
492 * Mark us stalled so we don't keep looking for work when we in io_worker_handle_work()
493 * can't make progress, any work completion or insertion will in io_worker_handle_work()
496 work = io_get_next_work(acct, worker); in io_worker_handle_work()
497 if (work) in io_worker_handle_work()
498 __io_worker_busy(wqe, worker, work); in io_worker_handle_work()
501 if (!work) in io_worker_handle_work()
503 io_assign_current_work(worker, work); in io_worker_handle_work()
509 unsigned int hash = io_get_work_hash(work); in io_worker_handle_work()
511 next_hashed = wq_next_work(work); in io_worker_handle_work()
513 if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND)) in io_worker_handle_work()
514 work->flags |= IO_WQ_WORK_CANCEL; in io_worker_handle_work()
515 wq->do_work(work); in io_worker_handle_work()
518 linked = wq->free_work(work); in io_worker_handle_work()
519 work = next_hashed; in io_worker_handle_work()
520 if (!work && linked && !io_wq_is_hashed(linked)) { in io_worker_handle_work()
521 work = linked; in io_worker_handle_work()
524 io_assign_current_work(worker, work); in io_worker_handle_work()
535 if (!work) in io_worker_handle_work()
539 } while (work); in io_worker_handle_work()
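The io_worker_handle_work() hits are the core processing loop: grab the next item, mark the worker busy, run it via wq->do_work(), then let wq->free_work() hand back any linked work, preferring the next same-hash item so a hashed chain stays on one worker. A condensed user-space sketch of that loop shape, with plain functions standing in for the do_work/free_work hooks that the io_wq creator installs (io_uring proper supplies its own submit and free callbacks):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* illustrative flag values */
    enum { WORK_CANCEL = 1, WORK_UNBOUND = 4 };

    struct work {
        unsigned int flags;
        struct work *linked;    /* stand-in for the real linked-request chain */
    };

    /* stand-ins for the wq->do_work()/wq->free_work() hooks */
    static void do_work(struct work *w)
    {
        printf("ran work %p%s\n", (void *)w,
               (w->flags & WORK_CANCEL) ? " (cancelled)" : "");
    }

    static struct work *free_work(struct work *w)
    {
        return w->linked;   /* completing one item may hand back a linked one */
    }

    /*
     * Condensed model of io_worker_handle_work(): run the current item, then
     * keep going with whatever free_work() links next, until the chain ends.
     * The real loop also prefers the next same-hash item (wq_next_work()) and
     * re-enqueues non-hashed linked work instead of running it inline.
     */
    static void handle_work(struct work *w, bool do_kill)
    {
        do {
            if (do_kill && (w->flags & WORK_UNBOUND))
                w->flags |= WORK_CANCEL;
            do_work(w);
            w = free_work(w);
        } while (w);
    }

    int main(void)
    {
        struct work second = { 0, NULL };
        struct work first = { 0, &second };

        handle_work(&first, false);
        return 0;
    }

Keeping the next same-hash item on the same worker is, plausibly, what makes the serialization promise of io_wq_hash_work() (further down) cheap to honor: the chain never has to bounce between workers.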
620 * running and we have work pending, wake up a free one or create a new one.
656 static bool io_wq_work_match_all(struct io_wq_work *work, void *data) in io_wq_work_match_all() argument
711 schedule_work(&worker->work); in create_worker_cont()
714 static void io_workqueue_create(struct work_struct *work) in io_workqueue_create() argument
716 struct io_worker *worker = container_of(work, struct io_worker, work); in io_workqueue_create()
760 INIT_WORK(&worker->work, io_workqueue_create); in create_io_worker()
761 schedule_work(&worker->work); in create_io_worker()
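The io_workqueue_create()/create_io_worker() hits show worker creation being deferred to the system workqueue: the io_worker embeds a work_struct, INIT_WORK()/schedule_work() queue it, and the handler recovers the enclosing io_worker with container_of(). The kernel's workqueue API itself is not reproduced here; the following tiny user-space illustration only shows the container_of() embedding trick these lines rely on:

    #include <stddef.h>
    #include <stdio.h>

    /* user-space stand-in for the kernel's container_of() */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_struct { void (*func)(struct work_struct *); };

    struct io_worker {
        int id;
        struct work_struct work;    /* embedded, as in the real io_worker */
    };

    /* models io_workqueue_create(): recover the outer io_worker from the work */
    static void workqueue_create(struct work_struct *work)
    {
        struct io_worker *worker = container_of(work, struct io_worker, work);

        printf("creating worker %d from workqueue context\n", worker->id);
    }

    int main(void)
    {
        struct io_worker worker = { .id = 1 };

        /* models INIT_WORK() + schedule_work(): here we just call it inline */
        worker.work.func = workqueue_create;
        worker.work.func(&worker.work);
        return 0;
    }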
799 static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe) in io_run_cancel() argument
804 work->flags |= IO_WQ_WORK_CANCEL; in io_run_cancel()
805 wq->do_work(work); in io_run_cancel()
806 work = wq->free_work(work); in io_run_cancel()
807 } while (work); in io_run_cancel()
810 static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work) in io_wqe_insert_work() argument
812 struct io_wqe_acct *acct = io_work_get_acct(wqe, work); in io_wqe_insert_work()
816 if (!io_wq_is_hashed(work)) { in io_wqe_insert_work()
818 wq_list_add_tail(&work->list, &acct->work_list); in io_wqe_insert_work()
822 hash = io_get_work_hash(work); in io_wqe_insert_work()
824 wqe->hash_tail[hash] = work; in io_wqe_insert_work()
828 wq_list_add_after(&work->list, &tail->list, &acct->work_list); in io_wqe_insert_work()
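The io_wqe_insert_work() hits show the enqueue side of the hashing scheme: non-hashed work is appended to the tail of the acct list, while hashed work is inserted right after the previous item with the same hash, with hash_tail[hash] tracking the last one. That is what keeps a same-hash run contiguous for the dequeue-side skip shown earlier. A small user-space model of that insertion, using a plain singly linked list in place of the kernel's io_wq_work_list and wq_list_add_tail()/wq_list_add_after():

    #include <stddef.h>
    #include <stdio.h>

    enum { WORK_HASHED = 2, HASH_SHIFT = 24 };  /* illustrative values */
    #define NR_HASH 4

    struct work {
        unsigned int flags;
        struct work *next;
    };

    struct list {
        struct work *head, *tail;
        struct work *hash_tail[NR_HASH];    /* last queued item per hash key */
    };

    static void add_after(struct list *l, struct work *w, struct work *pos)
    {
        w->next = pos->next;
        pos->next = w;
        if (l->tail == pos)
            l->tail = w;
    }

    static void add_tail(struct list *l, struct work *w)
    {
        w->next = NULL;
        if (l->tail)
            l->tail->next = w;
        else
            l->head = w;
        l->tail = w;
    }

    /* models io_wqe_insert_work(): keep same-hash items contiguous */
    static void insert_work(struct list *l, struct work *w)
    {
        if (!(w->flags & WORK_HASHED)) {
            add_tail(l, w);
            return;
        }

        unsigned int hash = w->flags >> HASH_SHIFT;
        struct work *tail = l->hash_tail[hash];

        l->hash_tail[hash] = w;
        if (!tail)
            add_tail(l, w);
        else
            add_after(l, w, tail);  /* splice in right behind its siblings */
    }

    int main(void)
    {
        struct list l = { 0 };
        struct work a = { WORK_HASHED | (1u << HASH_SHIFT) };
        struct work b = { 0 };
        struct work c = { WORK_HASHED | (1u << HASH_SHIFT) };

        insert_work(&l, &a);
        insert_work(&l, &b);
        insert_work(&l, &c);    /* lands right after a, before b */

        for (struct work *w = l.head; w; w = w->next)
            printf("%c", w == &a ? 'a' : w == &b ? 'b' : 'c');
        printf("\n");   /* prints: acb */
        return 0;
    }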
831 static bool io_wq_work_match_item(struct io_wq_work *work, void *data) in io_wq_work_match_item() argument
833 return work == data; in io_wq_work_match_item()
836 static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work) in io_wqe_enqueue() argument
838 struct io_wqe_acct *acct = io_work_get_acct(wqe, work); in io_wqe_enqueue()
839 unsigned work_flags = work->flags; in io_wqe_enqueue()
847 (work->flags & IO_WQ_WORK_CANCEL)) { in io_wqe_enqueue()
848 io_run_cancel(work, wqe); in io_wqe_enqueue()
853 io_wqe_insert_work(wqe, work); in io_wqe_enqueue()
875 .data = work, in io_wqe_enqueue()
886 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work) in io_wq_enqueue() argument
890 io_wqe_enqueue(wqe, work); in io_wq_enqueue()
894 * Work items that hash to the same value will not be done in parallel.
897 void io_wq_hash_work(struct io_wq_work *work, void *val) in io_wq_hash_work() argument
902 work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT)); in io_wq_hash_work()
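io_wq_hash_work() and the comment above it are where the serialization contract is set up: a caller tags a work item with a small key derived from a pointer (io_uring, for example, hashes buffered writes to a regular file by inode) so that items sharing the key run serially rather than in parallel. The key lives in the high bits of work->flags, which is exactly what io_get_work_hash() reads back earlier in the listing. A minimal sketch of that pack/unpack, assuming the upper-bit layout implied by IO_WQ_HASH_SHIFT and using a simple multiplicative hash in place of the kernel's hash_ptr():

    #include <stdint.h>
    #include <stdio.h>

    /* illustrative values; io-wq keeps the hash key in the top bits of flags */
    enum { WORK_HASHED = 2, HASH_SHIFT = 24, HASH_BITS = 6 };

    struct work { unsigned int flags; };

    /* stand-in for the kernel's hash_ptr(): fold a pointer into HASH_BITS bits */
    static unsigned int hash_ptr_bits(const void *p)
    {
        uintptr_t v = (uintptr_t)p;

        return (unsigned int)((v * 0x9e3779b97f4a7c15ULL) >> (64 - HASH_BITS));
    }

    /* models io_wq_hash_work(): tag the item with a key derived from @val */
    static void hash_work(struct work *w, const void *val)
    {
        unsigned int bit = hash_ptr_bits(val);

        w->flags |= WORK_HASHED | (bit << HASH_SHIFT);
    }

    /* models io_get_work_hash(): read the key back out of the flags */
    static unsigned int get_work_hash(const struct work *w)
    {
        return w->flags >> HASH_SHIFT;
    }

    int main(void)
    {
        struct work w1 = { 0 }, w2 = { 0 };
        int same_file;  /* two writes against the same "inode" share a key */

        hash_work(&w1, &same_file);
        hash_work(&w2, &same_file);
        printf("keys: %u %u (equal keys serialize)\n",
               get_work_hash(&w1), get_work_hash(&w2));
        return 0;
    }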
911 * may dereference the passed in work. in io_wq_worker_cancel()
925 struct io_wq_work *work, in io_wqe_remove_pending() argument
928 struct io_wqe_acct *acct = io_work_get_acct(wqe, work); in io_wqe_remove_pending()
929 unsigned int hash = io_get_work_hash(work); in io_wqe_remove_pending()
932 if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) { in io_wqe_remove_pending()
940 wq_list_del(&acct->work_list, &work->list, prev); in io_wqe_remove_pending()
949 struct io_wq_work *work; in io_acct_cancel_pending_work() local
952 work = container_of(node, struct io_wq_work, list); in io_acct_cancel_pending_work()
953 if (!match->fn(work, match->data)) in io_acct_cancel_pending_work()
955 io_wqe_remove_pending(wqe, work, prev); in io_acct_cancel_pending_work()
957 io_run_cancel(work, wqe); in io_acct_cancel_pending_work()
1004 * from there. CANCEL_OK means that the work is returned as-new, in io_wq_cancel_cb()
1016 * Now check if a free (going busy) or busy worker has the work in io_wq_cancel_cb()
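The cancellation hits at the bottom (io_wq_work_match_all(), io_wq_work_match_item(), io_wqe_remove_pending(), io_acct_cancel_pending_work(), io_wq_cancel_cb()) all revolve around one callback shape: a predicate taking (work, data) that is first run against pending, not-yet-started items, which can simply be unlinked and completed as cancelled, and then against items already running on workers, which can only be flagged for cancellation. A sketch of the pending-side walk under those assumed semantics; only the list part is modelled, not the running-worker part:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct work {
        int id;
        struct work *next;
    };

    /* same predicate shape as the io-wq match callbacks: (work, data) -> bool */
    typedef bool (*match_fn)(struct work *w, void *data);

    static bool match_item(struct work *w, void *data)
    {
        return w == data;   /* like io_wq_work_match_item() */
    }

    static bool match_all(struct work *w, void *data)
    {
        (void)w; (void)data;
        return true;        /* like io_wq_work_match_all() */
    }

    /*
     * Models the pending side of io_wq_cancel_cb(): unlink every matching item
     * that has not started yet and "complete" it as cancelled.  Running items
     * would instead be flagged and signalled, which is not modelled here.
     */
    static int cancel_pending(struct work **head, match_fn fn, void *data,
                              bool cancel_all)
    {
        int cancelled = 0;

        for (struct work **pp = head; *pp; ) {
            struct work *w = *pp;

            if (!fn(w, data)) {
                pp = &w->next;
                continue;
            }
            *pp = w->next;          /* remove from the pending list */
            printf("cancelled pending work %d\n", w->id);
            cancelled++;
            if (!cancel_all)
                break;
        }
        return cancelled;
    }

    int main(void)
    {
        struct work c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct work *pending = &a;

        cancel_pending(&pending, match_item, &b, false);  /* cancel just b */
        cancel_pending(&pending, match_all, NULL, true);  /* cancel the rest */
        return 0;
    }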