Lines matching full:wb (identifier cross-reference results over the Linux writeback code in fs/fs-writeback.c). Each entry gives the source line number, the matching line, and the enclosing function; the trailing "argument" or "local" notes how the matched identifier is declared at that line.

85 static bool wb_io_lists_populated(struct bdi_writeback *wb) in wb_io_lists_populated() argument
87 if (wb_has_dirty_io(wb)) { in wb_io_lists_populated()
90 set_bit(WB_has_dirty_io, &wb->state); in wb_io_lists_populated()
91 WARN_ON_ONCE(!wb->avg_write_bandwidth); in wb_io_lists_populated()
92 atomic_long_add(wb->avg_write_bandwidth, in wb_io_lists_populated()
93 &wb->bdi->tot_write_bandwidth); in wb_io_lists_populated()
98 static void wb_io_lists_depopulated(struct bdi_writeback *wb) in wb_io_lists_depopulated() argument
100 if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) && in wb_io_lists_depopulated()
101 list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) { in wb_io_lists_depopulated()
102 clear_bit(WB_has_dirty_io, &wb->state); in wb_io_lists_depopulated()
103 WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth, in wb_io_lists_depopulated()
104 &wb->bdi->tot_write_bandwidth) < 0); in wb_io_lists_depopulated()
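
The wb_io_lists_populated()/wb_io_lists_depopulated() fragments above keep the WB_has_dirty_io bit and bdi->tot_write_bandwidth in step with whether the wb has any dirty inodes queued. Below is a minimal userspace model of that bookkeeping, with plain fields standing in for the kernel's bit operations and atomics; the struct and field names are illustrative assumptions, not kernel types.

	/* Minimal userspace model of the bookkeeping above: a has_dirty_io flag and a
	 * per-bdi bandwidth total kept in sync as dirty lists fill and drain.  Plain
	 * fields stand in for the kernel's set_bit()/clear_bit() and atomic_long ops. */
	#include <stdbool.h>
	#include <stdio.h>

	struct bdi_model { long tot_write_bandwidth; };

	struct wb_model {
		struct bdi_model *bdi;
		long avg_write_bandwidth;
		bool has_dirty_io;             /* models the WB_has_dirty_io state bit */
		int b_dirty, b_io, b_more_io;  /* list lengths stand in for the lists */
	};

	/* First dirty inode queued: flag the wb and add its bandwidth to the bdi total. */
	static bool model_io_lists_populated(struct wb_model *wb)
	{
		if (wb->has_dirty_io)
			return false;
		wb->has_dirty_io = true;
		wb->bdi->tot_write_bandwidth += wb->avg_write_bandwidth;
		return true;
	}

	/* All three lists empty again: clear the flag and subtract the bandwidth. */
	static void model_io_lists_depopulated(struct wb_model *wb)
	{
		if (wb->has_dirty_io && !wb->b_dirty && !wb->b_io && !wb->b_more_io) {
			wb->has_dirty_io = false;
			wb->bdi->tot_write_bandwidth -= wb->avg_write_bandwidth;
		}
	}

	int main(void)
	{
		struct bdi_model bdi = { 0 };
		struct wb_model wb = { .bdi = &bdi, .avg_write_bandwidth = 100 };

		wb.b_dirty = 1;
		model_io_lists_populated(&wb);     /* tot_write_bandwidth -> 100 */
		wb.b_dirty = 0;
		model_io_lists_depopulated(&wb);   /* tot_write_bandwidth -> 0 */
		printf("%ld\n", bdi.tot_write_bandwidth);
		return 0;
	}
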
111 * @wb: target bdi_writeback
112 * @head: one of @wb->b_{dirty|io|more_io|dirty_time}
114 * Move @inode->i_io_list to @list of @wb and set %WB_has_dirty_io.
119 struct bdi_writeback *wb, in inode_io_list_move_locked() argument
122 assert_spin_locked(&wb->list_lock); in inode_io_list_move_locked()
127 if (head != &wb->b_dirty_time) in inode_io_list_move_locked()
128 return wb_io_lists_populated(wb); in inode_io_list_move_locked()
130 wb_io_lists_depopulated(wb); in inode_io_list_move_locked()
137 * @wb: bdi_writeback @inode is being removed from
139 * Remove @inode which may be on one of @wb->b_{dirty|io|more_io} lists and
143 struct bdi_writeback *wb) in inode_io_list_del_locked() argument
145 assert_spin_locked(&wb->list_lock); in inode_io_list_del_locked()
150 wb_io_lists_depopulated(wb); in inode_io_list_del_locked()
153 static void wb_wakeup(struct bdi_writeback *wb) in wb_wakeup() argument
155 spin_lock_bh(&wb->work_lock); in wb_wakeup()
156 if (test_bit(WB_registered, &wb->state)) in wb_wakeup()
157 mod_delayed_work(bdi_wq, &wb->dwork, 0); in wb_wakeup()
158 spin_unlock_bh(&wb->work_lock); in wb_wakeup()
161 static void finish_writeback_work(struct bdi_writeback *wb, in finish_writeback_work() argument
177 static void wb_queue_work(struct bdi_writeback *wb, in wb_queue_work() argument
180 trace_writeback_queue(wb, work); in wb_queue_work()
185 spin_lock_bh(&wb->work_lock); in wb_queue_work()
187 if (test_bit(WB_registered, &wb->state)) { in wb_queue_work()
188 list_add_tail(&work->list, &wb->work_list); in wb_queue_work()
189 mod_delayed_work(bdi_wq, &wb->dwork, 0); in wb_queue_work()
191 finish_writeback_work(wb, work); in wb_queue_work()
193 spin_unlock_bh(&wb->work_lock); in wb_queue_work()
253 struct bdi_writeback *wb = NULL; in __inode_attach_wb() local
260 wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC); in __inode_attach_wb()
264 wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC); in __inode_attach_wb()
269 if (!wb) in __inode_attach_wb()
270 wb = &bdi->wb; in __inode_attach_wb()
276 if (unlikely(cmpxchg(&inode->i_wb, NULL, wb))) in __inode_attach_wb()
277 wb_put(wb); in __inode_attach_wb()
282 * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it
285 * Returns @inode's wb with its list_lock held. @inode->i_lock must be
286 * held on entry and is released on return. The returned wb is guaranteed
287 * to stay @inode's associated wb until its list_lock is released.
292 __acquires(&wb->list_lock) in locked_inode_to_wb_and_lock_list()
295 struct bdi_writeback *wb = inode_to_wb(inode); in locked_inode_to_wb_and_lock_list() local
299 * @inode->i_lock and @wb->list_lock but list_lock nests in locked_inode_to_wb_and_lock_list()
303 wb_get(wb); in locked_inode_to_wb_and_lock_list()
305 spin_lock(&wb->list_lock); in locked_inode_to_wb_and_lock_list()
308 if (likely(wb == inode->i_wb)) { in locked_inode_to_wb_and_lock_list()
309 wb_put(wb); /* @inode already has ref */ in locked_inode_to_wb_and_lock_list()
310 return wb; in locked_inode_to_wb_and_lock_list()
313 spin_unlock(&wb->list_lock); in locked_inode_to_wb_and_lock_list()
314 wb_put(wb); in locked_inode_to_wb_and_lock_list()
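
The locked_inode_to_wb_and_lock_list() fragments above show the lock-ordering dance: i_lock is held on entry, but wb->list_lock nests outside it, so the helper pins the wb, drops i_lock, takes list_lock, and then revalidates that inode->i_wb was not switched in the meantime, retrying if it was. Here is a hedged userspace sketch of that pin/drop/re-lock/revalidate pattern, with pthread mutexes and a plain atomic counter standing in for spinlocks and wb_get()/wb_put(); every name below is an illustrative assumption, not a kernel API.

	/* Userspace sketch of the pin / drop / re-lock / revalidate pattern from
	 * locked_inode_to_wb_and_lock_list(). */
	#include <pthread.h>
	#include <stdatomic.h>

	struct wb_obj {
		pthread_mutex_t list_lock;      /* nests outside inode_obj.lock */
		atomic_int refcnt;
	};

	struct inode_obj {
		pthread_mutex_t lock;           /* models inode->i_lock */
		struct wb_obj *_Atomic i_wb;    /* may be switched concurrently */
	};

	/* Returns the inode's wb with wb->list_lock held; inode->lock is held on
	 * entry and released on return, mirroring the kernel helper's contract. */
	static struct wb_obj *lock_inode_wb_list(struct inode_obj *inode)
	{
		for (;;) {
			struct wb_obj *wb = atomic_load(&inode->i_wb);

			atomic_fetch_add(&wb->refcnt, 1);    /* pin: models wb_get() */
			pthread_mutex_unlock(&inode->lock);  /* drop the inner lock */
			pthread_mutex_lock(&wb->list_lock);  /* take the outer lock */

			if (wb == atomic_load(&inode->i_wb)) {
				atomic_fetch_sub(&wb->refcnt, 1);  /* inode already holds a ref */
				return wb;
			}

			/* Switched to another wb in between: back off and retry. */
			pthread_mutex_unlock(&wb->list_lock);
			atomic_fetch_sub(&wb->refcnt, 1);
			pthread_mutex_lock(&inode->lock);
		}
	}

	int main(void)
	{
		struct wb_obj wb = { .refcnt = 1 };
		struct inode_obj inode = { .i_wb = &wb };

		pthread_mutex_init(&wb.list_lock, NULL);
		pthread_mutex_init(&inode.lock, NULL);

		pthread_mutex_lock(&inode.lock);
		pthread_mutex_unlock(&lock_inode_wb_list(&inode)->list_lock);
		return 0;
	}
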
321 * inode_to_wb_and_lock_list - determine an inode's wb and lock it
328 __acquires(&wb->list_lock) in inode_to_wb_and_lock_list()
373 * since I_WB_SWITCH assertion and all wb stat update transactions in inode_switch_wbs_work_fn()
378 * gives us exclusion against all wb related operations on @inode in inode_switch_wbs_work_fn()
449 * ensures that the new wb is visible if they see !I_WB_SWITCH. in inode_switch_wbs_work_fn()
483 * inode_switch_wbs - change the wb association of an inode
485 * @new_wb_id: ID of the new wb
487 * Switch @inode's wb association to the wb identified by @new_wb_id. The
508 /* find and pin the new wb */ in inode_switch_wbs()
566 wbc->wb = inode_to_wb(inode); in wbc_attach_and_unlock_inode()
569 wbc->wb_id = wbc->wb->memcg_css->id; in wbc_attach_and_unlock_inode()
576 wb_get(wbc->wb); in wbc_attach_and_unlock_inode()
580 * A dying wb indicates that either the blkcg associated with the in wbc_attach_and_unlock_inode()
582 * case, a replacement wb should already be available and we should in wbc_attach_and_unlock_inode()
583 * refresh the wb immediately. In the second case, trying to in wbc_attach_and_unlock_inode()
586 if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css))) in wbc_attach_and_unlock_inode()
617 * current wb and the last round's winner wb (max of last round's current
618 * wb, the winner from two rounds ago, and the last round's majority
630 struct bdi_writeback *wb = wbc->wb; in wbc_detach_inode() local
636 if (!wb) in wbc_detach_inode()
663 wb->avg_write_bandwidth); in wbc_detach_inode()
674 * The switch verdict is reached if foreign wb's consume in wbc_detach_inode()
691 * Switch if the current wb isn't the consistent winner. in wbc_detach_inode()
695 * the wrong wb for an extended period of time. in wbc_detach_inode()
709 wb_put(wbc->wb); in wbc_detach_inode()
710 wbc->wb = NULL; in wbc_detach_inode()
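
The wbc_detach_inode() comment fragments above ("the last round's winner wb", "the switch verdict", "the consistent winner") refer to the foreign-dirtier detection that decides when an inode should switch to another wb; the surrounding kernel comment, largely elided from this listing, describes it as a Boyer-Moore style majority vote weighted by bytes written. For background only, here is a generic, self-contained Boyer-Moore majority-vote sketch, not the kernel's per-inode history code.

	/* Generic Boyer-Moore majority-vote sketch, included only as background for
	 * the "majority candidate" bookkeeping referenced above.  It is not the
	 * kernel's wbc_detach_inode() logic, which votes on wb IDs weighted by bytes
	 * written and keeps per-inode history across rounds. */
	#include <stddef.h>
	#include <stdio.h>

	/* Returns the value that would hold a strict majority if one exists; a second
	 * pass would be needed to confirm that it actually does. */
	static int majority_candidate(const int *votes, size_t n)
	{
		int candidate = -1;
		size_t count = 0;

		for (size_t i = 0; i < n; i++) {
			if (count == 0) {
				candidate = votes[i];
				count = 1;
			} else if (votes[i] == candidate) {
				count++;
			} else {
				count--;
			}
		}
		return candidate;
	}

	int main(void)
	{
		/* wb IDs observed over one detection window; ID 7 dominates. */
		int votes[] = { 7, 3, 7, 7, 5, 7, 7 };

		printf("candidate wb id: %d\n",
		       majority_candidate(votes, sizeof(votes) / sizeof(votes[0])));
		return 0;
	}
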
736 if (!wbc->wb || wbc->no_cgroup_owner) in wbc_account_cgroup_owner()
774 * associated with @inode is congested; otherwise, the root wb's congestion
787 struct bdi_writeback *wb; in inode_congested() local
791 wb = unlocked_inode_to_wb_begin(inode, &lock_cookie); in inode_congested()
792 congested = wb_congested(wb, cong_bits); in inode_congested()
797 return wb_congested(&inode_to_bdi(inode)->wb, cong_bits); in inode_congested()
803 * @wb: target bdi_writeback to split @nr_pages to
806 * Split @wb's portion of @nr_pages according to @wb's write bandwidth in
807 * relation to the total write bandwidth of all wb's w/ dirty inodes on
808 * @wb->bdi.
810 static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages) in wb_split_bdi_pages() argument
812 unsigned long this_bw = wb->avg_write_bandwidth; in wb_split_bdi_pages()
813 unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth); in wb_split_bdi_pages()
819 * This may be called on clean wb's and proportional distribution in wb_split_bdi_pages()
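
The wb_split_bdi_pages() fragments above compute each wb's share of nr_pages from its avg_write_bandwidth relative to the bdi's tot_write_bandwidth, falling back to the full nr_pages for clean wbs where proportional distribution makes no sense. A rough userspace model of that split, assuming the usual round-up division (a sketch, not the kernel function):

	/* Userspace model of the proportional split above.  this_bw and tot_bw
	 * correspond to wb->avg_write_bandwidth and bdi->tot_write_bandwidth;
	 * LONG_MAX means "write everything". */
	#include <limits.h>
	#include <stdint.h>
	#include <stdio.h>

	static long split_pages(long nr_pages, unsigned long this_bw, unsigned long tot_bw)
	{
		if (nr_pages == LONG_MAX)
			return LONG_MAX;
		/* Clean wbs may have no bandwidth accounted; err on the side of writing more. */
		if (!tot_bw || this_bw >= tot_bw)
			return nr_pages;
		/* Round up so the shares of all wbs together cover at least nr_pages. */
		return (long)(((uint64_t)nr_pages * this_bw + tot_bw - 1) / tot_bw);
	}

	int main(void)
	{
		/* A wb averaging 25 MB/s on a bdi totalling 100 MB/s gets a quarter of the work. */
		printf("%ld\n", split_pages(1024, 25 << 20, 100 << 20));  /* prints 256 */
		return 0;
	}
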
830 * bdi_split_work_to_wbs - split a wb_writeback_work to all wb's of a bdi
833 * @skip_if_busy: skip wb's which already have writeback in progress
835 * Split and issue @base_work to all wb's (bdi_writeback's) of @bdi which
837 * distributed to the busy wbs according to each wb's proportion in the
845 struct bdi_writeback *wb = list_entry(&bdi->wb_list, in bdi_split_work_to_wbs() local
851 list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) { in bdi_split_work_to_wbs()
863 if (!wb_has_dirty_io(wb) && in bdi_split_work_to_wbs()
865 list_empty(&wb->b_dirty_time))) in bdi_split_work_to_wbs()
867 if (skip_if_busy && writeback_in_progress(wb)) in bdi_split_work_to_wbs()
870 nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages); in bdi_split_work_to_wbs()
877 wb_queue_work(wb, work); in bdi_split_work_to_wbs()
888 wb_queue_work(wb, work); in bdi_split_work_to_wbs()
891 * Pin @wb so that it stays on @bdi->wb_list. This allows in bdi_split_work_to_wbs()
892 * continuing iteration from @wb after dropping and in bdi_split_work_to_wbs()
895 wb_get(wb); in bdi_split_work_to_wbs()
896 last_wb = wb; in bdi_split_work_to_wbs()
924 struct bdi_writeback *wb; in cgroup_writeback_by_id() local
944 * And find the associated wb. If the wb isn't there already in cgroup_writeback_by_id()
947 wb = wb_get_lookup(bdi, memcg_css); in cgroup_writeback_by_id()
948 if (!wb) { in cgroup_writeback_by_id()
963 mem_cgroup_wb_stats(wb, &filepages, &headroom, &dirty, in cgroup_writeback_by_id()
977 wb_queue_work(wb, work); in cgroup_writeback_by_id()
983 wb_put(wb); in cgroup_writeback_by_id()
992 * cgroup_writeback_umount - flush inode wb switches for umount
995 * flushes in-flight inode wb switches. An inode wb switch goes through
997 * that all previously scheduled switches are finished. As wb switches are
999 * flushing iff wb switches are in flight.
1006 * ensure that all in-flight wb switches are in the workqueue. in cgroup_writeback_umount()
1030 __acquires(&wb->list_lock) in locked_inode_to_wb_and_lock_list()
1032 struct bdi_writeback *wb = inode_to_wb(inode); in locked_inode_to_wb_and_lock_list() local
1035 spin_lock(&wb->list_lock); in locked_inode_to_wb_and_lock_list()
1036 return wb; in locked_inode_to_wb_and_lock_list()
1040 __acquires(&wb->list_lock) in inode_to_wb_and_lock_list()
1042 struct bdi_writeback *wb = inode_to_wb(inode); in inode_to_wb_and_lock_list() local
1044 spin_lock(&wb->list_lock); in inode_to_wb_and_lock_list()
1045 return wb; in inode_to_wb_and_lock_list()
1048 static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages) in wb_split_bdi_pages() argument
1059 if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) { in bdi_split_work_to_wbs()
1061 wb_queue_work(&bdi->wb, base_work); in bdi_split_work_to_wbs()
1077 static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason) in wb_start_writeback() argument
1079 if (!wb_has_dirty_io(wb)) in wb_start_writeback()
1090 if (test_bit(WB_start_all, &wb->state) || in wb_start_writeback()
1091 test_and_set_bit(WB_start_all, &wb->state)) in wb_start_writeback()
1094 wb->start_all_reason = reason; in wb_start_writeback()
1095 wb_wakeup(wb); in wb_start_writeback()
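
The wb_start_writeback() fragments above use a plain test_bit() check ahead of test_and_set_bit() so that callers racing to start writeback mostly skip the atomic read-modify-write on WB_start_all, and only the one caller that actually sets the bit records a reason and wakes the wb. A small C11-atomics sketch of that test-before-test-and-set pattern (illustrative, not kernel code):

	/* A relaxed load filters out the common "already pending" case before the
	 * atomic exchange, mirroring the test_bit() || test_and_set_bit() idiom. */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_bool start_all;   /* stands in for the WB_start_all state bit */

	/* Returns true only for the single caller that actually armed the flag. */
	static bool try_start_all(void)
	{
		if (atomic_load_explicit(&start_all, memory_order_relaxed))
			return false;                     /* already pending, skip the RMW */
		return !atomic_exchange(&start_all, true);
	}

	int main(void)
	{
		bool first = try_start_all();
		bool second = try_start_all();

		printf("%d %d\n", first, second);   /* prints "1 0" */
		return 0;
	}
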
1100 * @wb: bdi_writback to write from
1104 * this function returns, it is only guaranteed that for given wb
1108 void wb_start_background_writeback(struct bdi_writeback *wb) in wb_start_background_writeback() argument
1114 trace_writeback_wake_background(wb); in wb_start_background_writeback()
1115 wb_wakeup(wb); in wb_start_background_writeback()
1123 struct bdi_writeback *wb; in inode_io_list_del() local
1125 wb = inode_to_wb_and_lock_list(inode); in inode_io_list_del()
1127 inode_io_list_del_locked(inode, wb); in inode_io_list_del()
1129 spin_unlock(&wb->list_lock); in inode_io_list_del()
1178 static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb) in redirty_tail_locked() argument
1182 if (!list_empty(&wb->b_dirty)) { in redirty_tail_locked()
1185 tail = wb_inode(wb->b_dirty.next); in redirty_tail_locked()
1189 inode_io_list_move_locked(inode, wb, &wb->b_dirty); in redirty_tail_locked()
1193 static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) in redirty_tail() argument
1196 redirty_tail_locked(inode, wb); in redirty_tail()
1203 static void requeue_io(struct inode *inode, struct bdi_writeback *wb) in requeue_io() argument
1205 inode_io_list_move_locked(inode, wb, &wb->b_more_io); in requeue_io()
1296 static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work, in queue_io() argument
1302 assert_spin_locked(&wb->list_lock); in queue_io()
1303 list_splice_init(&wb->b_more_io, &wb->b_io); in queue_io()
1304 moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, dirtied_before); in queue_io()
1307 moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io, in queue_io()
1310 wb_io_lists_populated(wb); in queue_io()
1311 trace_writeback_queue_io(wb, work, dirtied_before, moved); in queue_io()
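
The queue_io() fragments above splice b_more_io into b_io and then use move_expired_inodes() to pull inodes dirtied before a cutoff from b_dirty (and, when enabled, b_dirty_time) onto b_io for writeback. A rough model of that expiry step, with plain arrays standing in for the kernel's list_heads and move_expired_inodes()'s grouping by superblock omitted; names are illustrative:

	/* Entries dirtied at or before a cutoff move from the dirty list to the I/O list. */
	#include <stdio.h>

	#define NENTRIES 4

	struct entry { int id; unsigned long dirtied_when; };

	/* Appends expired entries from dirty[] to io[] and compacts dirty[];
	 * returns how many entries moved. */
	static int move_expired(struct entry *dirty, int *ndirty,
				struct entry *io, int *nio, unsigned long cutoff)
	{
		int moved = 0, kept = 0;

		for (int i = 0; i < *ndirty; i++) {
			if (dirty[i].dirtied_when <= cutoff) {
				io[(*nio)++] = dirty[i];
				moved++;
			} else {
				dirty[kept++] = dirty[i];
			}
		}
		*ndirty = kept;
		return moved;
	}

	int main(void)
	{
		struct entry dirty[NENTRIES] = { {1, 100}, {2, 200}, {3, 300}, {4, 400} };
		struct entry io[NENTRIES];
		int ndirty = NENTRIES, nio = 0;

		printf("moved %d\n", move_expired(dirty, &ndirty, io, &nio, 250));  /* moved 2 */
		return 0;
	}
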
1385 static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, in requeue_inode() argument
1405 redirty_tail_locked(inode, wb); in requeue_inode()
1416 requeue_io(inode, wb); in requeue_inode()
1425 redirty_tail_locked(inode, wb); in requeue_inode()
1433 redirty_tail_locked(inode, wb); in requeue_inode()
1436 inode_io_list_move_locked(inode, wb, &wb->b_dirty_time); in requeue_inode()
1440 inode_io_list_del_locked(inode, wb); in requeue_inode()
1535 struct bdi_writeback *wb; in writeback_single_inode() local
1574 wb = inode_to_wb_and_lock_list(inode); in writeback_single_inode()
1581 inode_io_list_del_locked(inode, wb); in writeback_single_inode()
1582 spin_unlock(&wb->list_lock); in writeback_single_inode()
1589 static long writeback_chunk_size(struct bdi_writeback *wb, in writeback_chunk_size() argument
1610 pages = min(wb->avg_write_bandwidth / 2, in writeback_chunk_size()
1625 * NOTE! This is called with wb->list_lock held, and will
1630 struct bdi_writeback *wb, in writeback_sb_inodes() argument
1647 while (!list_empty(&wb->b_io)) { in writeback_sb_inodes()
1648 struct inode *inode = wb_inode(wb->b_io.prev); in writeback_sb_inodes()
1658 redirty_tail(inode, wb); in writeback_sb_inodes()
1677 redirty_tail_locked(inode, wb); in writeback_sb_inodes()
1692 requeue_io(inode, wb); in writeback_sb_inodes()
1696 spin_unlock(&wb->list_lock); in writeback_sb_inodes()
1707 spin_lock(&wb->list_lock); in writeback_sb_inodes()
1713 write_chunk = writeback_chunk_size(wb, work); in writeback_sb_inodes()
1742 * have been switched to another wb in the meantime. in writeback_sb_inodes()
1752 if (unlikely(tmp_wb != wb)) { in writeback_sb_inodes()
1754 spin_lock(&wb->list_lock); in writeback_sb_inodes()
1771 static long __writeback_inodes_wb(struct bdi_writeback *wb, in __writeback_inodes_wb() argument
1777 while (!list_empty(&wb->b_io)) { in __writeback_inodes_wb()
1778 struct inode *inode = wb_inode(wb->b_io.prev); in __writeback_inodes_wb()
1787 redirty_tail(inode, wb); in __writeback_inodes_wb()
1790 wrote += writeback_sb_inodes(sb, wb, work); in __writeback_inodes_wb()
1805 static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, in writeback_inodes_wb() argument
1817 spin_lock(&wb->list_lock); in writeback_inodes_wb()
1818 if (list_empty(&wb->b_io)) in writeback_inodes_wb()
1819 queue_io(wb, &work, jiffies); in writeback_inodes_wb()
1820 __writeback_inodes_wb(wb, &work); in writeback_inodes_wb()
1821 spin_unlock(&wb->list_lock); in writeback_inodes_wb()
1842 static long wb_writeback(struct bdi_writeback *wb, in wb_writeback() argument
1853 spin_lock(&wb->list_lock); in wb_writeback()
1868 !list_empty(&wb->work_list)) in wb_writeback()
1875 if (work->for_background && !wb_over_bg_thresh(wb)) in wb_writeback()
1890 trace_writeback_start(wb, work); in wb_writeback()
1891 if (list_empty(&wb->b_io)) in wb_writeback()
1892 queue_io(wb, work, dirtied_before); in wb_writeback()
1894 progress = writeback_sb_inodes(work->sb, wb, work); in wb_writeback()
1896 progress = __writeback_inodes_wb(wb, work); in wb_writeback()
1897 trace_writeback_written(wb, work); in wb_writeback()
1899 wb_update_bandwidth(wb, wb_start); in wb_writeback()
1914 if (list_empty(&wb->b_more_io)) in wb_writeback()
1921 trace_writeback_wait(wb, work); in wb_writeback()
1922 inode = wb_inode(wb->b_more_io.prev); in wb_writeback()
1924 spin_unlock(&wb->list_lock); in wb_writeback()
1927 spin_lock(&wb->list_lock); in wb_writeback()
1929 spin_unlock(&wb->list_lock); in wb_writeback()
1938 static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb) in get_next_work_item() argument
1942 spin_lock_bh(&wb->work_lock); in get_next_work_item()
1943 if (!list_empty(&wb->work_list)) { in get_next_work_item()
1944 work = list_entry(wb->work_list.next, in get_next_work_item()
1948 spin_unlock_bh(&wb->work_lock); in get_next_work_item()
1952 static long wb_check_background_flush(struct bdi_writeback *wb) in wb_check_background_flush() argument
1954 if (wb_over_bg_thresh(wb)) { in wb_check_background_flush()
1964 return wb_writeback(wb, &work); in wb_check_background_flush()
1970 static long wb_check_old_data_flush(struct bdi_writeback *wb) in wb_check_old_data_flush() argument
1981 expired = wb->last_old_flush + in wb_check_old_data_flush()
1986 wb->last_old_flush = jiffies; in wb_check_old_data_flush()
1998 return wb_writeback(wb, &work); in wb_check_old_data_flush()
2004 static long wb_check_start_all(struct bdi_writeback *wb) in wb_check_start_all() argument
2008 if (!test_bit(WB_start_all, &wb->state)) in wb_check_start_all()
2014 .nr_pages = wb_split_bdi_pages(wb, nr_pages), in wb_check_start_all()
2017 .reason = wb->start_all_reason, in wb_check_start_all()
2020 nr_pages = wb_writeback(wb, &work); in wb_check_start_all()
2023 clear_bit(WB_start_all, &wb->state); in wb_check_start_all()
2031 static long wb_do_writeback(struct bdi_writeback *wb) in wb_do_writeback() argument
2036 set_bit(WB_writeback_running, &wb->state); in wb_do_writeback()
2037 while ((work = get_next_work_item(wb)) != NULL) { in wb_do_writeback()
2038 trace_writeback_exec(wb, work); in wb_do_writeback()
2039 wrote += wb_writeback(wb, work); in wb_do_writeback()
2040 finish_writeback_work(wb, work); in wb_do_writeback()
2046 wrote += wb_check_start_all(wb); in wb_do_writeback()
2051 wrote += wb_check_old_data_flush(wb); in wb_do_writeback()
2052 wrote += wb_check_background_flush(wb); in wb_do_writeback()
2053 clear_bit(WB_writeback_running, &wb->state); in wb_do_writeback()
2064 struct bdi_writeback *wb = container_of(to_delayed_work(work), in wb_workfn() local
2068 set_worker_desc("flush-%s", bdi_dev_name(wb->bdi)); in wb_workfn()
2072 !test_bit(WB_registered, &wb->state))) { in wb_workfn()
2074 * The normal path. Keep writing back @wb until its in wb_workfn()
2076 * if @wb is shutting down even when we're running off the in wb_workfn()
2080 pages_written = wb_do_writeback(wb); in wb_workfn()
2082 } while (!list_empty(&wb->work_list)); in wb_workfn()
2089 pages_written = writeback_inodes_wb(wb, 1024, in wb_workfn()
2094 if (!list_empty(&wb->work_list)) in wb_workfn()
2095 wb_wakeup(wb); in wb_workfn()
2096 else if (wb_has_dirty_io(wb) && dirty_writeback_interval) in wb_workfn()
2097 wb_wakeup_delayed(wb); in wb_workfn()
2109 struct bdi_writeback *wb; in __wakeup_flusher_threads_bdi() local
2114 list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node) in __wakeup_flusher_threads_bdi()
2115 wb_start_writeback(wb, reason); in __wakeup_flusher_threads_bdi()
2169 struct bdi_writeback *wb; in wakeup_dirtytime_writeback() local
2171 list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node) in wakeup_dirtytime_writeback()
2172 if (!list_empty(&wb->b_dirty_time)) in wakeup_dirtytime_writeback()
2173 wb_wakeup(wb); in wakeup_dirtytime_writeback()
2318 struct bdi_writeback *wb; in __mark_inode_dirty() local
2322 wb = locked_inode_to_wb_and_lock_list(inode); in __mark_inode_dirty()
2324 WARN((wb->bdi->capabilities & BDI_CAP_WRITEBACK) && in __mark_inode_dirty()
2325 !test_bit(WB_registered, &wb->state), in __mark_inode_dirty()
2326 "bdi-%s not registered\n", bdi_dev_name(wb->bdi)); in __mark_inode_dirty()
2333 dirty_list = &wb->b_dirty; in __mark_inode_dirty()
2335 dirty_list = &wb->b_dirty_time; in __mark_inode_dirty()
2337 wakeup_bdi = inode_io_list_move_locked(inode, wb, in __mark_inode_dirty()
2340 spin_unlock(&wb->list_lock); in __mark_inode_dirty()
2350 (wb->bdi->capabilities & BDI_CAP_WRITEBACK)) in __mark_inode_dirty()
2351 wb_wakeup_delayed(wb); in __mark_inode_dirty()
2407 * Move each inode back to the wb list before we drop the lock in wait_sb_inodes()
2416 * do not have the mapping lock. Skip it here, wb completion in wait_sb_inodes()
2556 /* protect against inode wb switch, see inode_switch_wbs_work_fn() */ in sync_inodes_sb()