Lines Matching refs:acct

104 struct io_wqe_acct acct[2]; member
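
These references are the io_wq accounting touch points: line 104 declares one io_wqe_acct bucket per worker class on each io_wqe node. A minimal user-space model of that layout, with the two index names taken from the lines below and the field set inferred from the counters this listing touches (the real structs in fs/io-wq.c carry more state than this):

    #include <stdatomic.h>
    #include <stdbool.h>

    enum {
        IO_WQ_ACCT_BOUND,    /* workers bound to this node's pool */
        IO_WQ_ACCT_UNBOUND,  /* workers for unbounded (may-block) work */
    };

    /* Field set inferred from the counters used below. */
    struct io_wqe_acct {
        unsigned   nr_workers;   /* workers created for this class */
        unsigned   max_workers;  /* spawn cap for this class */
        atomic_int nr_running;   /* workers currently executing work */
    };

    /* Per-node state: one accounting bucket per class (line 104). */
    struct io_wqe {
        struct io_wqe_acct acct[2];
    };
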
205 return &wqe->acct[IO_WQ_ACCT_UNBOUND]; in io_work_get_acct()
207 return &wqe->acct[IO_WQ_ACCT_BOUND]; in io_work_get_acct()
214 return &wqe->acct[IO_WQ_ACCT_BOUND]; in io_wqe_get_acct()
216 return &wqe->acct[IO_WQ_ACCT_UNBOUND]; in io_wqe_get_acct()
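
Lines 205-216 are the two selectors that map a work item or a worker to its bucket. Only the return statements appear above; the guarding conditions, and the minimal io_wq_work/io_worker shapes and flag bit values, are reconstructed for the model (IO_WORKER_F_BOUND itself shows up verbatim at line 711):

    /* Assumed minimal shapes; bit values are placeholders. */
    struct io_wq_work { unsigned flags; };
    struct io_worker  { unsigned flags; };
    #define IO_WQ_WORK_UNBOUND (1u << 0)
    #define IO_WORKER_F_BOUND  (1u << 1)

    /* Work items are classified by a flag on the work itself. */
    static struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
                                                struct io_wq_work *work)
    {
        if (work->flags & IO_WQ_WORK_UNBOUND)
            return &wqe->acct[IO_WQ_ACCT_UNBOUND];
        return &wqe->acct[IO_WQ_ACCT_BOUND];
    }

    /* Workers are classified by their own bound flag. */
    static struct io_wqe_acct *io_wqe_get_acct(struct io_wqe *wqe,
                                               struct io_worker *worker)
    {
        if (worker->flags & IO_WORKER_F_BOUND)
            return &wqe->acct[IO_WQ_ACCT_BOUND];
        return &wqe->acct[IO_WQ_ACCT_UNBOUND];
    }
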
222 struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker); in io_worker_exit() local
236 atomic_dec(&acct->nr_running); in io_worker_exit()
249 acct->nr_workers--; in io_worker_exit()
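
io_worker_exit (lines 222-249) rolls back both contributions the worker made to its bucket. A sketch of just that accounting, under the model above; the function name here is hypothetical, and the kernel does the nr_workers update under wqe->lock, which the model omits:

    static void io_worker_exit_accounting(struct io_wqe *wqe,
                                          struct io_worker *worker)
    {
        struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

        atomic_fetch_sub(&acct->nr_running, 1);  /* line 236 */
        acct->nr_workers--;                      /* line 249, under wqe->lock */
    }
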
294 static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct) in io_wqe_wake_worker() argument
302 WARN_ON_ONCE(!acct->max_workers); in io_wqe_wake_worker()
308 if (!ret && acct->nr_workers < acct->max_workers) in io_wqe_wake_worker()
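
io_wqe_wake_worker first complains (once) if the target class was configured with no workers at all, then only escalates to creating a worker while the headcount is still under the cap. What happens between line 302 and line 308 is not in the listing; in this sketch it is an assumed attempt to reuse an idle worker first, with both helpers stubbed:

    #include <assert.h>

    /* Hypothetical stand-ins for the steps the listing elides. */
    static bool activate_free_worker(struct io_wqe *wqe) { (void)wqe; return false; }
    static void kick_worker_creation(struct io_wqe *wqe) { (void)wqe; }

    static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
    {
        /* Queuing work against a class with max_workers == 0 is a setup
         * bug; the kernel uses WARN_ON_ONCE here (line 302). */
        assert(acct->max_workers);

        /* Prefer waking an idle worker; only create a new one while
         * still under the per-class cap (line 308). */
        if (!activate_free_worker(wqe) &&
            acct->nr_workers < acct->max_workers)
            kick_worker_creation(wqe);
    }
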
314 struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker); in io_wqe_inc_running() local
316 atomic_inc(&acct->nr_running); in io_wqe_inc_running()
322 struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker); in io_wqe_dec_running() local
324 if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe)) in io_wqe_dec_running()
325 io_wqe_wake_worker(wqe, acct); in io_wqe_dec_running()
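
The running counter is symmetric around each piece of work: incremented on entry, decremented on exit. The interesting part is the decrement side, where the last runner going idle while the queue is still non-empty triggers a wake, so pending work can never sit with nobody running. io_wqe_run_queue() is stubbed here; its real body is not in the listing:

    /* Assumed meaning: "does this node still have queued work?" */
    static bool io_wqe_run_queue(struct io_wqe *wqe) { (void)wqe; return false; }

    static void io_wqe_inc_running(struct io_wqe *wqe, struct io_worker *worker)
    {
        struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

        atomic_fetch_add(&acct->nr_running, 1);  /* line 316 */
    }

    static void io_wqe_dec_running(struct io_wqe *wqe, struct io_worker *worker)
    {
        struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

        /* fetch_sub returning 1 means the count just hit zero, matching
         * the kernel's atomic_dec_and_test() at line 324. */
        if (atomic_fetch_sub(&acct->nr_running, 1) == 1 &&
            io_wqe_run_queue(wqe))
            io_wqe_wake_worker(wqe, acct);
    }
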
366 wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--; in __io_worker_busy()
367 wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++; in __io_worker_busy()
371 wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++; in __io_worker_busy()
372 wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--; in __io_worker_busy()
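
__io_worker_busy moves a worker's headcount between buckets when it switches class, so both caps keep counting real workers. Only the four counter moves appear above (lines 366-372); the trigger condition and the flag flip are reconstructed, and the helper name is hypothetical:

    static void rebalance_worker(struct io_wqe *wqe, struct io_worker *worker,
                                 bool work_is_bound)
    {
        if (work_is_bound && !(worker->flags & IO_WORKER_F_BOUND)) {
            /* Unbound worker picked up bound work: move it over. */
            worker->flags |= IO_WORKER_F_BOUND;
            wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--;
            wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++;
        } else if (!work_is_bound && (worker->flags & IO_WORKER_F_BOUND)) {
            /* And the mirror image for bound -> unbound. */
            worker->flags &= ~IO_WORKER_F_BOUND;
            wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++;
            wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--;
        }
    }
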
685 struct io_wqe_acct *acct = &wqe->acct[index]; in create_io_worker() local
711 if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND)) in create_io_worker()
713 acct->nr_workers++; in create_io_worker()
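
create_io_worker charges the new worker to its bucket on success. Line 711's special case fires only for the very first bound worker on a node; in the kernel that worker is pinned so the bound pool never drains to zero, modeled here with an assumed IO_WORKER_F_FIXED flag:

    #define IO_WORKER_F_FIXED (1u << 2)  /* assumed flag name and bit value */

    static void account_new_worker(struct io_wqe *wqe,
                                   struct io_worker *worker, int index)
    {
        struct io_wqe_acct *acct = &wqe->acct[index];

        /* First bound worker on the node becomes the fixed worker. */
        if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
            worker->flags |= IO_WORKER_F_FIXED;
        acct->nr_workers++;  /* line 713 */
    }
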
727 struct io_wqe_acct *acct = &wqe->acct[index]; in io_wqe_need_worker() local
732 return acct->nr_workers < acct->max_workers; in io_wqe_need_worker()
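
io_wqe_need_worker is the whole spawn policy: a plain headcount-versus-cap comparison per class. Under the model above (the kernel evaluates this under its own locking, which the sketch omits):

    static bool io_wqe_need_worker(struct io_wqe *wqe, int index)
    {
        struct io_wqe_acct *acct = &wqe->acct[index];

        return acct->nr_workers < acct->max_workers;  /* line 732 */
    }
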
838 static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct, in io_wq_can_queue() argument
845 if (atomic_read(&acct->nr_running)) in io_wq_can_queue()
854 if (atomic_read(&wqe->wq->user->processes) >= acct->max_workers && in io_wq_can_queue()
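
io_wq_can_queue is admission control for new work. Two of its tests show up above: any running worker means the item can queue (line 845), and a user already holding max_workers worth of processes is refused (line 854). This sketch simplifies the signature by passing the user's process counter directly instead of reaching it through wqe->wq->user, and it omits the kernel's privilege exemptions on the refusal path:

    struct io_wq_user { atomic_int processes; };  /* assumed shape */

    static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
                                struct io_wq_user *user)
    {
        (void)wqe;
        if (atomic_load(&acct->nr_running))       /* someone can take it */
            return true;
        if (atomic_load(&user->processes) >= acct->max_workers)
            return false;                         /* user at the cap */
        return true;
    }
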
896 struct io_wqe_acct *acct = io_work_get_acct(wqe, work); in io_wqe_enqueue() local
906 if (unlikely(!io_wq_can_queue(wqe, acct, work))) { in io_wqe_enqueue()
918 !atomic_read(&acct->nr_running)) in io_wqe_enqueue()
919 io_wqe_wake_worker(wqe, acct); in io_wqe_enqueue()
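
io_wqe_enqueue ties the pieces together: classify the work, refuse it if admission control says no, append it, and wake a worker when nothing is currently running for that class. Reduced here to those accounting steps; the list append and the cancellation of refused work are stubbed, and the extra user parameter follows the simplified io_wq_can_queue above:

    /* Hypothetical stand-ins for the non-accounting steps. */
    static void append_work(struct io_wqe *wqe, struct io_wq_work *work)
    { (void)wqe; (void)work; }
    static void cancel_work(struct io_wq_work *work) { (void)work; }

    static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work,
                               struct io_wq_user *user)
    {
        struct io_wqe_acct *acct = io_work_get_acct(wqe, work);  /* line 896 */

        if (!io_wq_can_queue(wqe, acct, user)) {  /* line 906 */
            cancel_work(work);
            return;
        }

        append_work(wqe, work);
        if (!atomic_load(&acct->nr_running))      /* lines 918-919 */
            io_wqe_wake_worker(wqe, acct);
    }
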
1129 wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded; in io_wq_create()
1130 atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0); in io_wq_create()
1132 wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers = in io_wq_create()
1135 atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0); in io_wq_create()
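
Finally, io_wq_create seeds both buckets per node: caps set, running counters zeroed, nr_workers left at zero until workers are actually created. The bound cap is the caller's `bounded` argument (line 1129); the unbound cap's right-hand side is truncated at line 1132, so it is taken as a parameter in this sketch:

    static void io_wqe_init_acct(struct io_wqe *wqe, unsigned bounded,
                                 unsigned unbounded)
    {
        wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
        atomic_init(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);

        wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers = unbounded;
        atomic_init(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
    }
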