Lines Matching full:work
29 /* List head pointing to ordered work list */
55 struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work) in btrfs_work_owner() argument
57 return work->wq->fs_info; in btrfs_work_owner()
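For context, here is a sketch of the two structures these hits dereference. The field set is inferred from the matched lines only; the real definitions in fs/btrfs/async-thread.c and async-thread.h carry additional state (threshold throttling counters, a queue name), so treat this as an illustration rather than the kernel layout:

#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct btrfs_work;
typedef void (*btrfs_func_t)(struct btrfs_work *arg);

/* Sketch: only the fields the hits above touch. */
struct btrfs_workqueue {
        struct workqueue_struct *normal_wq;  /* backing kernel workqueue */
        struct btrfs_fs_info *fs_info;       /* owning filesystem */
        struct list_head ordered_list;       /* head of the ordered work list */
        spinlock_t list_lock;                /* protects ordered_list */
};

struct btrfs_work {
        btrfs_func_t func;                   /* main work function */
        btrfs_func_t ordered_func;           /* runs in submission order */
        btrfs_func_t ordered_free;           /* final release callback */
        struct work_struct normal_work;      /* embedded kernel work item */
        struct list_head ordered_list;       /* link in wq->ordered_list */
        struct btrfs_workqueue *wq;
        unsigned long flags;
};

/* Bit indices assumed here; the kernel defines these in an enum. */
#define WORK_DONE_BIT        0
#define WORK_ORDER_DONE_BIT  1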
129 * Hook for the threshold, which will be called before executing the work,
180 struct btrfs_work *work; in run_ordered_work() local
189 work = list_entry(list->next, struct btrfs_work, in run_ordered_work()
191 if (!test_bit(WORK_DONE_BIT, &work->flags)) in run_ordered_work()
197 * updates from the ordinary work function. in run_ordered_work()
203 * we leave the work item on the list as a barrier so in run_ordered_work()
204 * that later work items that are done don't have their in run_ordered_work()
207 if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags)) in run_ordered_work()
209 trace_btrfs_ordered_sched(work); in run_ordered_work()
211 work->ordered_func(work); in run_ordered_work()
215 list_del(&work->ordered_list); in run_ordered_work()
218 if (work == self) { in run_ordered_work()
220 * This is the work item that the worker is currently in run_ordered_work()
224 * of work items. I.e., if a work item with the same in run_ordered_work()
225 * address and work function is queued twice, the second in run_ordered_work()
227 * work item may be freed and recycled with the same in run_ordered_work()
228 * work function; the workqueue code assumes that the in run_ordered_work()
229 * original work item cannot depend on the recycled work in run_ordered_work()
232 * Note that different types of Btrfs work can depend on in run_ordered_work()
233 * each other, and one type of work on one Btrfs in run_ordered_work()
234 * filesystem may even depend on the same type of work in run_ordered_work()
236 * Therefore, we must not allow the current work item to in run_ordered_work()
246 work->ordered_free(work); in run_ordered_work()
247 /* NB: work must not be dereferenced past this point. */ in run_ordered_work()
248 trace_btrfs_all_work_done(wq->fs_info, work); in run_ordered_work()
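Pieced together, the run_ordered_work() hits describe a drain loop over wq->ordered_list: only the oldest item may run its ordered function, finished items stay on the list as a barrier for younger ones, and the currently executing item must not free itself mid-loop. A simplified reconstruction under the struct sketch above (irqsave locking and tracepoints elided, free_self handling condensed):

/* Sketch: a simplified reconstruction, not the verbatim kernel code. */
static void run_ordered_work(struct btrfs_workqueue *wq,
                             struct btrfs_work *self)
{
        struct list_head *list = &wq->ordered_list;
        struct btrfs_work *work;
        bool free_self = false;

        while (1) {
                spin_lock(&wq->list_lock);
                if (list_empty(list))
                        break;
                work = list_entry(list->next, struct btrfs_work, ordered_list);
                /* Stop at the oldest item whose main function hasn't finished. */
                if (!test_bit(WORK_DONE_BIT, &work->flags))
                        break;
                /* Pairs with the barrier before set_bit() in btrfs_work_helper(). */
                smp_rmb();
                /* Leave the item on the list as a barrier so later items that
                 * are done don't run their ordered functions before this one. */
                if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
                        break;
                spin_unlock(&wq->list_lock);

                work->ordered_func(work);

                spin_lock(&wq->list_lock);
                list_del(&work->ordered_list);
                spin_unlock(&wq->list_lock);

                if (work == self) {
                        /* The currently executing item must not be freed and
                         * recycled yet; see the discussion in the hits above. */
                        free_self = true;
                } else {
                        work->ordered_free(work);
                        /* work must not be dereferenced past this point. */
                }
        }
        spin_unlock(&wq->list_lock);

        if (free_self)
                self->ordered_free(self);
}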
262 struct btrfs_work *work = container_of(normal_work, struct btrfs_work, in btrfs_work_helper() local
264 struct btrfs_workqueue *wq = work->wq; in btrfs_work_helper()
268 * We should not touch things inside work in the following cases: in btrfs_work_helper()
269 * 1) after work->func() if it has no ordered_free in btrfs_work_helper()
270 * Since the struct is freed in work->func(). in btrfs_work_helper()
272 * The work may be freed in other threads almost instantly. in btrfs_work_helper()
275 if (work->ordered_func) in btrfs_work_helper()
278 trace_btrfs_work_sched(work); in btrfs_work_helper()
280 work->func(work); in btrfs_work_helper()
283 * Ensures all memory accesses done in the work function are in btrfs_work_helper()
285 * which is going to execute the ordered work sees them. in btrfs_work_helper()
289 set_bit(WORK_DONE_BIT, &work->flags); in btrfs_work_helper()
290 run_ordered_work(wq, work); in btrfs_work_helper()
292 /* NB: work must not be dereferenced past this point. */ in btrfs_work_helper()
293 trace_btrfs_all_work_done(wq->fs_info, work); in btrfs_work_helper()
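The btrfs_work_helper() hits show the producer side of that handshake: remember whether ordering is needed before work->func() can free the item, then publish WORK_DONE_BIT behind a write barrier so the thread draining the ordered list sees the work function's stores. A simplified reconstruction (tracepoints and the threshold hook elided):

/* Sketch: a simplified reconstruction, not the verbatim kernel code. */
static void btrfs_work_helper(struct work_struct *normal_work)
{
        struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
                                               normal_work);
        struct btrfs_workqueue *wq = work->wq;
        bool need_order = false;

        /* Saved up front: work->func() may free the item if there is no
         * ordered_free, and once WORK_DONE_BIT is set another thread may
         * free it almost instantly. */
        if (work->ordered_func)
                need_order = true;

        work->func(work);
        if (need_order) {
                /* Order the work function's stores before WORK_DONE_BIT;
                 * pairs with the smp_rmb() in run_ordered_work(). */
                smp_mb__before_atomic();
                set_bit(WORK_DONE_BIT, &work->flags);
                run_ordered_work(wq, work);
        }
        /* work must not be dereferenced past this point. */
}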
297 void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func, in btrfs_init_work() argument
300 work->func = func; in btrfs_init_work()
301 work->ordered_func = ordered_func; in btrfs_init_work()
302 work->ordered_free = ordered_free; in btrfs_init_work()
303 INIT_WORK(&work->normal_work, btrfs_work_helper); in btrfs_init_work()
304 INIT_LIST_HEAD(&work->ordered_list); in btrfs_init_work()
305 work->flags = 0; in btrfs_init_work()
308 void btrfs_queue_work(struct btrfs_workqueue *wq, struct btrfs_work *work) in btrfs_queue_work() argument
312 work->wq = wq; in btrfs_queue_work()
314 if (work->ordered_func) { in btrfs_queue_work()
316 list_add_tail(&work->ordered_list, &wq->ordered_list); in btrfs_queue_work()
319 trace_btrfs_work_queued(work); in btrfs_queue_work()
320 queue_work(wq->normal_wq, &work->normal_work); in btrfs_queue_work()
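Taken together, btrfs_init_work() and btrfs_queue_work() give callers the pattern below. This is a hypothetical caller for illustration only: the my_*() names and the kmalloc()/kfree() lifetime are assumptions, not taken from these hits; only the two API signatures come from the matched lines.

#include <linux/slab.h>

static void my_main_func(struct btrfs_work *work)
{
        /* Heavy lifting; may run concurrently with other items. */
}

static void my_ordered_func(struct btrfs_work *work)
{
        /* Runs strictly in submission order relative to other items. */
}

static void my_ordered_free(struct btrfs_work *work)
{
        kfree(work);  /* Last touch of the item. */
}

static int submit_one(struct btrfs_workqueue *wq)
{
        struct btrfs_work *work = kmalloc(sizeof(*work), GFP_NOFS);

        if (!work)
                return -ENOMEM;
        btrfs_init_work(work, my_main_func, my_ordered_func, my_ordered_free);
        btrfs_queue_work(wq, work);
        return 0;
}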