Lines matching references to the identifier wb

102 static bool wb_io_lists_populated(struct bdi_writeback *wb)  in wb_io_lists_populated()  argument
104 if (wb_has_dirty_io(wb)) { in wb_io_lists_populated()
107 set_bit(WB_has_dirty_io, &wb->state); in wb_io_lists_populated()
108 WARN_ON_ONCE(!wb->avg_write_bandwidth); in wb_io_lists_populated()
109 atomic_long_add(wb->avg_write_bandwidth, in wb_io_lists_populated()
110 &wb->bdi->tot_write_bandwidth); in wb_io_lists_populated()
115 static void wb_io_lists_depopulated(struct bdi_writeback *wb) in wb_io_lists_depopulated() argument
117 if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) && in wb_io_lists_depopulated()
118 list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) { in wb_io_lists_depopulated()
119 clear_bit(WB_has_dirty_io, &wb->state); in wb_io_lists_depopulated()
120 WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth, in wb_io_lists_depopulated()
121 &wb->bdi->tot_write_bandwidth) < 0); in wb_io_lists_depopulated()
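Only the lines containing wb are listed, so the two helpers above read as fragments. Together they implement the WB_has_dirty_io bookkeeping: the first sets the bit and adds this wb's average bandwidth into the bdi-wide total when the wb gains its first dirty inode; the second clears the bit and subtracts that bandwidth once all three dirty lists are empty. A sketch of the full pair, with the unmatched lines filled in by assumption:

static bool wb_io_lists_populated(struct bdi_writeback *wb)
{
	if (wb_has_dirty_io(wb)) {
		return false;		/* already accounted for */
	} else {
		set_bit(WB_has_dirty_io, &wb->state);
		WARN_ON_ONCE(!wb->avg_write_bandwidth);
		atomic_long_add(wb->avg_write_bandwidth,
				&wb->bdi->tot_write_bandwidth);
		return true;
	}
}

static void wb_io_lists_depopulated(struct bdi_writeback *wb)
{
	if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) &&
	    list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) {
		clear_bit(WB_has_dirty_io, &wb->state);
		WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth,
					&wb->bdi->tot_write_bandwidth) < 0);
	}
}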
136 struct bdi_writeback *wb, in inode_io_list_move_locked() argument
139 assert_spin_locked(&wb->list_lock); in inode_io_list_move_locked()
144 if (head != &wb->b_dirty_time) in inode_io_list_move_locked()
145 return wb_io_lists_populated(wb); in inode_io_list_move_locked()
147 wb_io_lists_depopulated(wb); in inode_io_list_move_locked()
160 struct bdi_writeback *wb) in inode_io_list_del_locked() argument
162 assert_spin_locked(&wb->list_lock); in inode_io_list_del_locked()
165 wb_io_lists_depopulated(wb); in inode_io_list_del_locked()
168 static void wb_wakeup(struct bdi_writeback *wb) in wb_wakeup() argument
170 spin_lock_bh(&wb->work_lock); in wb_wakeup()
171 if (test_bit(WB_registered, &wb->state)) in wb_wakeup()
172 mod_delayed_work(bdi_wq, &wb->dwork, 0); in wb_wakeup()
173 spin_unlock_bh(&wb->work_lock); in wb_wakeup()
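The matched lines for wb_wakeup() happen to cover its whole body. Assembled, the helper simply kicks the wb's delayed work item to run immediately, but only while the wb is still registered:

static void wb_wakeup(struct bdi_writeback *wb)
{
	spin_lock_bh(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		mod_delayed_work(bdi_wq, &wb->dwork, 0);	/* run dwork now */
	spin_unlock_bh(&wb->work_lock);
}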
176 static void finish_writeback_work(struct bdi_writeback *wb, in finish_writeback_work() argument
184 wake_up_all(&wb->bdi->wb_waitq); in finish_writeback_work()
187 static void wb_queue_work(struct bdi_writeback *wb, in wb_queue_work() argument
190 trace_writeback_queue(wb, work); in wb_queue_work()
195 spin_lock_bh(&wb->work_lock); in wb_queue_work()
197 if (test_bit(WB_registered, &wb->state)) { in wb_queue_work()
198 list_add_tail(&work->list, &wb->work_list); in wb_queue_work()
199 mod_delayed_work(bdi_wq, &wb->dwork, 0); in wb_queue_work()
201 finish_writeback_work(wb, work); in wb_queue_work()
203 spin_unlock_bh(&wb->work_lock); in wb_queue_work()
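wb_queue_work() follows the same registered-or-bail pattern: queue the work item and wake the worker if the wb is registered, otherwise complete the work right away so waiters are not left hanging. A sketch, with the unmatched completion-count lines assumed:

static void wb_queue_work(struct bdi_writeback *wb,
			  struct wb_writeback_work *work)
{
	trace_writeback_queue(wb, work);

	if (work->done)				/* assumed: count in-flight works */
		atomic_inc(&work->done->cnt);

	spin_lock_bh(&wb->work_lock);

	if (test_bit(WB_registered, &wb->state)) {
		list_add_tail(&work->list, &wb->work_list);
		mod_delayed_work(bdi_wq, &wb->dwork, 0);
	} else
		finish_writeback_work(wb, work);

	spin_unlock_bh(&wb->work_lock);
}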
246 struct bdi_writeback *wb = NULL; in __inode_attach_wb() local
253 wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC); in __inode_attach_wb()
257 wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC); in __inode_attach_wb()
262 if (!wb) in __inode_attach_wb()
263 wb = &bdi->wb; in __inode_attach_wb()
269 if (unlikely(cmpxchg(&inode->i_wb, NULL, wb))) in __inode_attach_wb()
270 wb_put(wb); in __inode_attach_wb()
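__inode_attach_wb() shows a common lockless-attach idiom: look up (or create) the cgroup-specific wb, fall back to the bdi's embedded wb, and use cmpxchg() so that callers racing on the same inode install exactly one wb while the losers drop their reference. A sketch, assuming the unmatched lines do the usual memcg css pinning:

void __inode_attach_wb(struct inode *inode, struct page *page)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	struct bdi_writeback *wb = NULL;

	if (inode_cgwb_enabled(inode)) {
		struct cgroup_subsys_state *memcg_css;

		if (page) {
			memcg_css = mem_cgroup_css_from_page(page);
			wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
		} else {
			/* must pin memcg_css, see wb_get_create() */
			memcg_css = task_get_css(current, memory_cgrp_id);
			wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
			css_put(memcg_css);
		}
	}

	if (!wb)
		wb = &bdi->wb;

	/* only one racing caller may install its wb; losers drop their ref */
	if (unlikely(cmpxchg(&inode->i_wb, NULL, wb)))
		wb_put(wb);
}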
284 __acquires(&wb->list_lock) in locked_inode_to_wb_and_lock_list()
287 struct bdi_writeback *wb = inode_to_wb(inode); in locked_inode_to_wb_and_lock_list() local
295 wb_get(wb); in locked_inode_to_wb_and_lock_list()
297 spin_lock(&wb->list_lock); in locked_inode_to_wb_and_lock_list()
300 if (likely(wb == inode->i_wb)) { in locked_inode_to_wb_and_lock_list()
301 wb_put(wb); /* @inode already has ref */ in locked_inode_to_wb_and_lock_list()
302 return wb; in locked_inode_to_wb_and_lock_list()
305 spin_unlock(&wb->list_lock); in locked_inode_to_wb_and_lock_list()
306 wb_put(wb); in locked_inode_to_wb_and_lock_list()
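locked_inode_to_wb_and_lock_list() is the trickier half of the lock ordering: wb->list_lock nests outside inode->i_lock, so the function must drop i_lock, take list_lock, and then re-check that the inode was not switched to another wb in between, retrying if it was. A plausible reconstruction of the loop (the retry and relock lines are assumptions):

static struct bdi_writeback *
locked_inode_to_wb_and_lock_list(struct inode *inode)
	__releases(&inode->i_lock)
	__acquires(&wb->list_lock)
{
	while (true) {
		struct bdi_writeback *wb = inode_to_wb(inode);

		/* pin wb so it cannot go away once i_lock is dropped */
		wb_get(wb);
		spin_unlock(&inode->i_lock);
		spin_lock(&wb->list_lock);

		/* i_wb may have changed in between; recheck under list_lock */
		if (likely(wb == inode->i_wb)) {
			wb_put(wb);	/* @inode already has ref */
			return wb;
		}

		spin_unlock(&wb->list_lock);
		wb_put(wb);
		cpu_relax();
		spin_lock(&inode->i_lock);
	}
}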
320 __acquires(&wb->list_lock) in inode_to_wb_and_lock_list()
540 wbc->wb = inode_to_wb(inode); in wbc_attach_and_unlock_inode()
543 wbc->wb_id = wbc->wb->memcg_css->id; in wbc_attach_and_unlock_inode()
550 wb_get(wbc->wb); in wbc_attach_and_unlock_inode()
557 if (unlikely(wb_dying(wbc->wb))) in wbc_attach_and_unlock_inode()
600 struct bdi_writeback *wb = wbc->wb; in wbc_detach_inode() local
606 if (!wb) in wbc_detach_inode()
633 wb->avg_write_bandwidth); in wbc_detach_inode()
676 wb_put(wbc->wb); in wbc_detach_inode()
677 wbc->wb = NULL; in wbc_detach_inode()
701 if (!wbc->wb) in wbc_account_io()
747 struct bdi_writeback *wb; in inode_congested() local
751 wb = unlocked_inode_to_wb_begin(inode, &lock_cookie); in inode_congested()
752 congested = wb_congested(wb, cong_bits); in inode_congested()
757 return wb_congested(&inode_to_bdi(inode)->wb, cong_bits); in inode_congested()
770 static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages) in wb_split_bdi_pages() argument
772 unsigned long this_bw = wb->avg_write_bandwidth; in wb_split_bdi_pages()
773 unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth); in wb_split_bdi_pages()
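wb_split_bdi_pages() is only partially visible here, but the two bandwidth reads give away the idea: a bdi-wide nr_pages quota is split across its wbs in proportion to each wb's share of tot_write_bandwidth. A sketch of the proportional split (the rounding and the special cases are assumptions):

static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
{
	unsigned long this_bw = wb->avg_write_bandwidth;
	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);

	if (nr_pages == LONG_MAX)
		return LONG_MAX;

	/* with no usable bandwidth data, err on the side of writing more */
	if (!tot_bw || this_bw >= tot_bw)
		return nr_pages;

	return DIV_ROUND_UP_ULL((u64)nr_pages * this_bw, tot_bw);
}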
805 struct bdi_writeback *wb = list_entry(&bdi->wb_list, in bdi_split_work_to_wbs() local
811 list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) { in bdi_split_work_to_wbs()
823 if (!wb_has_dirty_io(wb) && in bdi_split_work_to_wbs()
825 list_empty(&wb->b_dirty_time))) in bdi_split_work_to_wbs()
827 if (skip_if_busy && writeback_in_progress(wb)) in bdi_split_work_to_wbs()
830 nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages); in bdi_split_work_to_wbs()
837 wb_queue_work(wb, work); in bdi_split_work_to_wbs()
848 wb_queue_work(wb, work); in bdi_split_work_to_wbs()
855 wb_get(wb); in bdi_split_work_to_wbs()
856 last_wb = wb; in bdi_split_work_to_wbs()
900 __acquires(&wb->list_lock) in locked_inode_to_wb_and_lock_list()
902 struct bdi_writeback *wb = inode_to_wb(inode); in locked_inode_to_wb_and_lock_list() local
905 spin_lock(&wb->list_lock); in locked_inode_to_wb_and_lock_list()
906 return wb; in locked_inode_to_wb_and_lock_list()
910 __acquires(&wb->list_lock) in inode_to_wb_and_lock_list()
912 struct bdi_writeback *wb = inode_to_wb(inode); in inode_to_wb_and_lock_list() local
914 spin_lock(&wb->list_lock); in inode_to_wb_and_lock_list()
915 return wb; in inode_to_wb_and_lock_list()
918 static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages) in wb_split_bdi_pages() argument
929 if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) { in bdi_split_work_to_wbs()
931 wb_queue_work(&bdi->wb, base_work); in bdi_split_work_to_wbs()
948 static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason) in wb_start_writeback() argument
950 if (!wb_has_dirty_io(wb)) in wb_start_writeback()
961 if (test_bit(WB_start_all, &wb->state) || in wb_start_writeback()
962 test_and_set_bit(WB_start_all, &wb->state)) in wb_start_writeback()
965 wb->start_all_reason = reason; in wb_start_writeback()
966 wb_wakeup(wb); in wb_start_writeback()
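wb_start_writeback() avoids flooding the flusher with duplicate flush-everything requests: WB_start_all acts as a single pending/in-flight slot, so callers such as reclaim can invoke this at high frequency without allocating work items. Assembled from the matched lines, with the early-return bodies assumed:

static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason)
{
	if (!wb_has_dirty_io(wb))
		return;

	/* allow only one start-all request pending or in flight at a time */
	if (test_bit(WB_start_all, &wb->state) ||
	    test_and_set_bit(WB_start_all, &wb->state))
		return;

	wb->start_all_reason = reason;
	wb_wakeup(wb);
}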
979 void wb_start_background_writeback(struct bdi_writeback *wb) in wb_start_background_writeback() argument
985 trace_writeback_wake_background(wb); in wb_start_background_writeback()
986 wb_wakeup(wb); in wb_start_background_writeback()
994 struct bdi_writeback *wb; in inode_io_list_del() local
996 wb = inode_to_wb_and_lock_list(inode); in inode_io_list_del()
997 inode_io_list_del_locked(inode, wb); in inode_io_list_del()
998 spin_unlock(&wb->list_lock); in inode_io_list_del()
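inode_io_list_del() is a thin locked wrapper; the matched lines cover essentially the whole function:

void inode_io_list_del(struct inode *inode)
{
	struct bdi_writeback *wb;

	wb = inode_to_wb_and_lock_list(inode);
	inode_io_list_del_locked(inode, wb);
	spin_unlock(&wb->list_lock);
}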
1046 static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) in redirty_tail() argument
1048 if (!list_empty(&wb->b_dirty)) { in redirty_tail()
1051 tail = wb_inode(wb->b_dirty.next); in redirty_tail()
1055 inode_io_list_move_locked(inode, wb, &wb->b_dirty); in redirty_tail()
1061 static void requeue_io(struct inode *inode, struct bdi_writeback *wb) in requeue_io() argument
1063 inode_io_list_move_locked(inode, wb, &wb->b_more_io); in requeue_io()
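redirty_tail() and requeue_io() move an inode back onto b_dirty or b_more_io. The subtlety hidden by the elided lines of redirty_tail() is the dirtied_when fixup: if the inode's timestamp is older than the current tail's, it is bumped to now so the time ordering of b_dirty is preserved. A sketch with that check assumed:

static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	inode_io_list_move_locked(inode, wb, &wb->b_dirty);
}

static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	inode_io_list_move_locked(inode, wb, &wb->b_more_io);
}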
1163 static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work) in queue_io() argument
1167 assert_spin_locked(&wb->list_lock); in queue_io()
1168 list_splice_init(&wb->b_more_io, &wb->b_io); in queue_io()
1169 moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work); in queue_io()
1170 moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io, in queue_io()
1173 wb_io_lists_populated(wb); in queue_io()
1174 trace_writeback_queue_io(wb, work, moved); in queue_io()
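queue_io() refills b_io for one writeback pass: previously requeued inodes from b_more_io go first, then expired entries from b_dirty and b_dirty_time are moved over. A reconstruction (the if (moved) guard and the EXPIRE_DIRTY_ATIME flag are assumptions based on the continuation lines):

static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
	int moved;

	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work);
	moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
				     EXPIRE_DIRTY_ATIME, work);
	if (moved)
		wb_io_lists_populated(wb);
	trace_writeback_queue_io(wb, work, moved);
}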
1248 static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, in requeue_inode() argument
1268 redirty_tail(inode, wb); in requeue_inode()
1279 requeue_io(inode, wb); in requeue_inode()
1288 redirty_tail(inode, wb); in requeue_inode()
1296 redirty_tail(inode, wb); in requeue_inode()
1299 inode_io_list_move_locked(inode, wb, &wb->b_dirty_time); in requeue_inode()
1302 inode_io_list_del_locked(inode, wb); in requeue_inode()
1401 struct bdi_writeback *wb; in writeback_single_inode() local
1440 wb = inode_to_wb_and_lock_list(inode); in writeback_single_inode()
1447 inode_io_list_del_locked(inode, wb); in writeback_single_inode()
1448 spin_unlock(&wb->list_lock); in writeback_single_inode()
1455 static long writeback_chunk_size(struct bdi_writeback *wb, in writeback_chunk_size() argument
1476 pages = min(wb->avg_write_bandwidth / 2, in writeback_chunk_size()
1496 struct bdi_writeback *wb, in writeback_sb_inodes() argument
1513 while (!list_empty(&wb->b_io)) { in writeback_sb_inodes()
1514 struct inode *inode = wb_inode(wb->b_io.prev); in writeback_sb_inodes()
1524 redirty_tail(inode, wb); in writeback_sb_inodes()
1544 redirty_tail(inode, wb); in writeback_sb_inodes()
1558 requeue_io(inode, wb); in writeback_sb_inodes()
1562 spin_unlock(&wb->list_lock); in writeback_sb_inodes()
1573 spin_lock(&wb->list_lock); in writeback_sb_inodes()
1579 write_chunk = writeback_chunk_size(wb, work); in writeback_sb_inodes()
1618 if (unlikely(tmp_wb != wb)) { in writeback_sb_inodes()
1620 spin_lock(&wb->list_lock); in writeback_sb_inodes()
1637 static long __writeback_inodes_wb(struct bdi_writeback *wb, in __writeback_inodes_wb() argument
1643 while (!list_empty(&wb->b_io)) { in __writeback_inodes_wb()
1644 struct inode *inode = wb_inode(wb->b_io.prev); in __writeback_inodes_wb()
1653 redirty_tail(inode, wb); in __writeback_inodes_wb()
1656 wrote += writeback_sb_inodes(sb, wb, work); in __writeback_inodes_wb()
1671 static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, in writeback_inodes_wb() argument
1683 spin_lock(&wb->list_lock); in writeback_inodes_wb()
1684 if (list_empty(&wb->b_io)) in writeback_inodes_wb()
1685 queue_io(wb, &work); in writeback_inodes_wb()
1686 __writeback_inodes_wb(wb, &work); in writeback_inodes_wb()
1687 spin_unlock(&wb->list_lock); in writeback_inodes_wb()
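writeback_inodes_wb() builds an on-stack WB_SYNC_NONE work item and runs one pass over b_io, refilling it first if it is empty. The work-item fields and the block plug below are assumptions filled in around the matched lines:

static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
				enum wb_reason reason)
{
	struct wb_writeback_work work = {
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
		.range_cyclic	= 1,
		.reason		= reason,
	};
	struct blk_plug plug;

	blk_start_plug(&plug);
	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, &work);
	__writeback_inodes_wb(wb, &work);
	spin_unlock(&wb->list_lock);
	blk_finish_plug(&plug);

	/* pages actually written = requested - remaining budget */
	return nr_pages - work.nr_pages;
}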
1708 static long wb_writeback(struct bdi_writeback *wb, in wb_writeback() argument
1722 spin_lock(&wb->list_lock); in wb_writeback()
1737 !list_empty(&wb->work_list)) in wb_writeback()
1744 if (work->for_background && !wb_over_bg_thresh(wb)) in wb_writeback()
1759 trace_writeback_start(wb, work); in wb_writeback()
1760 if (list_empty(&wb->b_io)) in wb_writeback()
1761 queue_io(wb, work); in wb_writeback()
1763 progress = writeback_sb_inodes(work->sb, wb, work); in wb_writeback()
1765 progress = __writeback_inodes_wb(wb, work); in wb_writeback()
1766 trace_writeback_written(wb, work); in wb_writeback()
1768 wb_update_bandwidth(wb, wb_start); in wb_writeback()
1783 if (list_empty(&wb->b_more_io)) in wb_writeback()
1790 trace_writeback_wait(wb, work); in wb_writeback()
1791 inode = wb_inode(wb->b_more_io.prev); in wb_writeback()
1793 spin_unlock(&wb->list_lock); in wb_writeback()
1796 spin_lock(&wb->list_lock); in wb_writeback()
1798 spin_unlock(&wb->list_lock); in wb_writeback()
1807 static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb) in get_next_work_item() argument
1811 spin_lock_bh(&wb->work_lock); in get_next_work_item()
1812 if (!list_empty(&wb->work_list)) { in get_next_work_item()
1813 work = list_entry(wb->work_list.next, in get_next_work_item()
1817 spin_unlock_bh(&wb->work_lock); in get_next_work_item()
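get_next_work_item() pops the oldest queued work item under work_lock; the matched lines show nearly all of it:

static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&wb->work_lock);
	if (!list_empty(&wb->work_list)) {
		work = list_entry(wb->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&wb->work_lock);
	return work;
}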
1821 static long wb_check_background_flush(struct bdi_writeback *wb) in wb_check_background_flush() argument
1823 if (wb_over_bg_thresh(wb)) { in wb_check_background_flush()
1833 return wb_writeback(wb, &work); in wb_check_background_flush()
1839 static long wb_check_old_data_flush(struct bdi_writeback *wb) in wb_check_old_data_flush() argument
1850 expired = wb->last_old_flush + in wb_check_old_data_flush()
1855 wb->last_old_flush = jiffies; in wb_check_old_data_flush()
1867 return wb_writeback(wb, &work); in wb_check_old_data_flush()
1873 static long wb_check_start_all(struct bdi_writeback *wb) in wb_check_start_all() argument
1877 if (!test_bit(WB_start_all, &wb->state)) in wb_check_start_all()
1883 .nr_pages = wb_split_bdi_pages(wb, nr_pages), in wb_check_start_all()
1886 .reason = wb->start_all_reason, in wb_check_start_all()
1889 nr_pages = wb_writeback(wb, &work); in wb_check_start_all()
1892 clear_bit(WB_start_all, &wb->state); in wb_check_start_all()
1900 static long wb_do_writeback(struct bdi_writeback *wb) in wb_do_writeback() argument
1905 set_bit(WB_writeback_running, &wb->state); in wb_do_writeback()
1906 while ((work = get_next_work_item(wb)) != NULL) { in wb_do_writeback()
1907 trace_writeback_exec(wb, work); in wb_do_writeback()
1908 wrote += wb_writeback(wb, work); in wb_do_writeback()
1909 finish_writeback_work(wb, work); in wb_do_writeback()
1915 wrote += wb_check_start_all(wb); in wb_do_writeback()
1920 wrote += wb_check_old_data_flush(wb); in wb_do_writeback()
1921 wrote += wb_check_background_flush(wb); in wb_do_writeback()
1922 clear_bit(WB_writeback_running, &wb->state); in wb_do_writeback()
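wb_do_writeback() is the worker's top-level loop: mark the wb as running, drain the explicit work list, then fall through to the implicit start-all, periodic (kupdate-style) and background checks. Assembled from the matched lines, with the local declarations assumed:

static long wb_do_writeback(struct bdi_writeback *wb)
{
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(WB_writeback_running, &wb->state);
	while ((work = get_next_work_item(wb)) != NULL) {
		trace_writeback_exec(wb, work);
		wrote += wb_writeback(wb, work);
		finish_writeback_work(wb, work);
	}

	/* implicit work: start-all, periodic kupdate flush, background flush */
	wrote += wb_check_start_all(wb);
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(WB_writeback_running, &wb->state);

	return wrote;
}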
1933 struct bdi_writeback *wb = container_of(to_delayed_work(work), in wb_workfn() local
1937 set_worker_desc("flush-%s", dev_name(wb->bdi->dev)); in wb_workfn()
1941 !test_bit(WB_registered, &wb->state))) { in wb_workfn()
1949 pages_written = wb_do_writeback(wb); in wb_workfn()
1951 } while (!list_empty(&wb->work_list)); in wb_workfn()
1958 pages_written = writeback_inodes_wb(wb, 1024, in wb_workfn()
1963 if (!list_empty(&wb->work_list)) in wb_workfn()
1964 wb_wakeup(wb); in wb_workfn()
1965 else if (wb_has_dirty_io(wb) && dirty_writeback_interval) in wb_workfn()
1966 wb_wakeup_delayed(wb); in wb_workfn()
1978 struct bdi_writeback *wb; in __wakeup_flusher_threads_bdi() local
1983 list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node) in __wakeup_flusher_threads_bdi()
1984 wb_start_writeback(wb, reason); in __wakeup_flusher_threads_bdi()
2038 struct bdi_writeback *wb; in wakeup_dirtytime_writeback() local
2040 list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node) in wakeup_dirtytime_writeback()
2041 if (!list_empty(&wb->b_dirty_time)) in wakeup_dirtytime_writeback()
2042 wb_wakeup(wb); in wakeup_dirtytime_writeback()
2186 struct bdi_writeback *wb; in __mark_inode_dirty() local
2190 wb = locked_inode_to_wb_and_lock_list(inode); in __mark_inode_dirty()
2192 WARN(bdi_cap_writeback_dirty(wb->bdi) && in __mark_inode_dirty()
2193 !test_bit(WB_registered, &wb->state), in __mark_inode_dirty()
2194 "bdi-%s not registered\n", wb->bdi->name); in __mark_inode_dirty()
2201 dirty_list = &wb->b_dirty; in __mark_inode_dirty()
2203 dirty_list = &wb->b_dirty_time; in __mark_inode_dirty()
2205 wakeup_bdi = inode_io_list_move_locked(inode, wb, in __mark_inode_dirty()
2208 spin_unlock(&wb->list_lock); in __mark_inode_dirty()
2217 if (bdi_cap_writeback_dirty(wb->bdi) && wakeup_bdi) in __mark_inode_dirty()
2218 wb_wakeup_delayed(wb); in __mark_inode_dirty()
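The __mark_inode_dirty() fragment at the end picks the destination list for a freshly dirtied inode and only pokes the flusher when the wb transitioned from having no dirty IO to having some. The surrounding function is much larger than what is matched here; a minimal sketch of just the visible list selection and wakeup, with the condition assumed:

	/* inside __mark_inode_dirty(), with wb->list_lock held (sketch) */
	if (inode->i_state & I_DIRTY)
		dirty_list = &wb->b_dirty;		/* data/metadata dirty */
	else
		dirty_list = &wb->b_dirty_time;		/* timestamp-only dirty */

	wakeup_bdi = inode_io_list_move_locked(inode, wb, dirty_list);
	spin_unlock(&wb->list_lock);

	/* only a transition onto the dirty lists needs to wake the flusher */
	if (bdi_cap_writeback_dirty(wb->bdi) && wakeup_bdi)
		wb_wakeup_delayed(wb);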