Lines Matching +full:charge +full:- +full:current +full:- +full:limit +full:- +full:mapping

1 // SPDX-License-Identifier: GPL-2.0-only
3 * mm/page-writeback.c
25 #include <linux/backing-dev.h>
54 #define DIRTY_POLL_THRESH (128 >> (PAGE_SHIFT - 10))
100 * The interval between `kupdate'-style writebacks
124 /* End of sysctl-exported parameters */
142 unsigned long wb_dirty; /* per-wb counterparts */
152 * reflect changes in current writeout rate.
160 .wb_completions = &(__wb)->completions
166 .wb_completions = &(__wb)->memcg_completions, \
171 return dtc->dom; in mdtc_valid()
176 return dtc->dom; in dtc_dom()
181 return mdtc->gdtc; in mdtc_gdtc()
186 return &wb->memcg_completions; in wb_memcg_completions()
192 unsigned long this_bw = wb->avg_write_bandwidth; in wb_min_max_ratio()
193 unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth); in wb_min_max_ratio()
194 unsigned long long min = wb->bdi->min_ratio; in wb_min_max_ratio()
195 unsigned long long max = wb->bdi->max_ratio; in wb_min_max_ratio()
219 .wb_completions = &(__wb)->completions
246 *minp = wb->bdi->min_ratio; in wb_min_max_ratio()
247 *maxp = wb->bdi->max_ratio; in wb_min_max_ratio()
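
Aside: with cgroup writeback, several wbs share one bdi, and the logic at lines 192-195 splits the bdi's min/max ratios among them in proportion to each wb's share of the total write bandwidth. A minimal userspace sketch of that arithmetic (the function name, standalone form and sample numbers are mine, not kernel code):

#include <stdio.h>

/* split a bdi-wide ratio for one wb: ratio * this_bw / tot_bw */
static void split_ratio(unsigned long this_bw, unsigned long tot_bw,
                        unsigned long long *min, unsigned long long *max)
{
        /* a clean wb may no longer be counted in tot_bw; skip scaling then */
        if (this_bw < tot_bw) {
                if (*min)
                        *min = *min * this_bw / tot_bw;
                if (*max < 100)
                        *max = *max * this_bw / tot_bw;
        }
}

int main(void)
{
        unsigned long long min = 10, max = 60;

        split_ratio(25, 100, &min, &max);        /* wb does 25% of the bdi's bw */
        printf("min=%llu max=%llu\n", min, max); /* min=2 max=15 */
        return 0;
}
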
260 * user-configurable dirty ratio is the effective number of pages that
264 * Because the user is allowed to specify the dirty limit globally as
265 * absolute number of bytes, calculating the per-zone dirty limit can
266 * require translating the configured limit into a percentage of
271 * node_dirtyable_memory - number of dirtyable pages in a node
275 * page cache. This is the base value for the per-node dirty limits.
283 struct zone *zone = pgdat->node_zones + z; in node_dirtyable_memory()
296 nr_pages -= min(nr_pages, pgdat->totalreserve_pages); in node_dirtyable_memory()
319 z = &NODE_DATA(node)->node_zones[i]; in highmem_dirtyable_memory()
325 nr_pages -= min(nr_pages, high_wmark_pages(z)); in highmem_dirtyable_memory()
357 * global_dirtyable_memory - number of globally dirtyable pages
372 x -= min(x, totalreserve_pages); in global_dirtyable_memory()
378 x -= highmem_dirtyable_memory(x); in global_dirtyable_memory()
384 * domain_dirty_limits - calculate thresh and bg_thresh for a wb_domain
387 * Calculate @dtc->thresh and ->bg_thresh considering
389 * must ensure that @dtc->avail is set before calling this function. The
390 * dirty limits will be lifted by 1/4 for real-time tasks.
394 const unsigned long available_memory = dtc->avail; in domain_dirty_limits()
398 /* convert ratios to per-PAGE_SIZE for higher precision */ in domain_dirty_limits()
407 unsigned long global_avail = gdtc->avail; in domain_dirty_limits()
413 * per-PAGE_SIZE, they can be obtained by dividing bytes by in domain_dirty_limits()
437 tsk = current; in domain_dirty_limits()
442 dtc->thresh = thresh; in domain_dirty_limits()
443 dtc->bg_thresh = bg_thresh; in domain_dirty_limits()
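
The "convert ratios to per-PAGE_SIZE" step at line 398 and the ratio-vs-bytes duality described above are easier to see concretely. Below is a standalone userspace sketch (my simplification; PAGE_SIZE, the helper name and the sample numbers are assumptions), including the 1/4 lift for real-time tasks mentioned at line 390:

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long dirty_thresh_pages(unsigned long available_pages,
                                        unsigned long ratio_percent,
                                        unsigned long bytes, int rt_task)
{
        unsigned long thresh;

        if (bytes) {
                /* absolute limit: DIV_ROUND_UP(bytes, PAGE_SIZE) */
                thresh = (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
        } else {
                /* scale the percentage to per-PAGE_SIZE units first,
                 * so small ratios keep some precision */
                unsigned long ratio = ratio_percent * PAGE_SIZE / 100;
                thresh = available_pages * ratio / PAGE_SIZE;
        }
        if (rt_task)
                thresh += thresh / 4;   /* lift the limit by 1/4 for RT tasks */
        return thresh;
}

int main(void)
{
        /* 20% of 1048576 dirtyable pages (4 GiB) */
        printf("%lu\n", dirty_thresh_pages(1048576, 20, 0, 0));          /* 209664, ~20% */
        /* or an absolute 256 MiB limit */
        printf("%lu\n", dirty_thresh_pages(1048576, 0, 256UL << 20, 0)); /* 65536 */
        return 0;
}
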
451 * global_dirty_limits - background-writeback and dirty-throttling thresholds
470 * node_dirty_limit - maximum number of dirty pages allowed in a node
479 struct task_struct *tsk = current; in node_dirty_limit()
495 * node_dirty_ok - tells whether a node is within its dirty limits
499 * dirty limit, %false if the limit is exceeded.
503 unsigned long limit = node_dirty_limit(pgdat); in node_dirty_ok()
509 return nr_pages <= limit; in node_dirty_ok()
575 __fprop_inc_percpu_max(&dom->completions, completions, in wb_domain_writeout_inc()
578 if (unlikely(!dom->period_time)) { in wb_domain_writeout_inc()
585 dom->period_time = wp_next_time(jiffies); in wb_domain_writeout_inc()
586 mod_timer(&dom->period_timer, dom->period_time); in wb_domain_writeout_inc()
599 wb_domain_writeout_inc(&global_wb_domain, &wb->completions, in __wb_writeout_inc()
600 wb->bdi->max_prop_frac); in __wb_writeout_inc()
605 wb->bdi->max_prop_frac); in __wb_writeout_inc()
625 int miss_periods = (jiffies - dom->period_time) / in writeout_period()
628 if (fprop_new_period(&dom->completions, miss_periods + 1)) { in writeout_period()
629 dom->period_time = wp_next_time(dom->period_time + in writeout_period()
631 mod_timer(&dom->period_timer, dom->period_time); in writeout_period()
637 dom->period_time = 0; in writeout_period()
645 spin_lock_init(&dom->lock); in wb_domain_init()
647 timer_setup(&dom->period_timer, writeout_period, TIMER_DEFERRABLE); in wb_domain_init()
649 dom->dirty_limit_tstamp = jiffies; in wb_domain_init()
651 return fprop_global_init(&dom->completions, gfp); in wb_domain_init()
657 del_timer_sync(&dom->period_timer); in wb_domain_exit()
658 fprop_global_destroy(&dom->completions); in wb_domain_exit()
674 if (min_ratio > bdi->max_ratio) { in bdi_set_min_ratio()
675 ret = -EINVAL; in bdi_set_min_ratio()
677 min_ratio -= bdi->min_ratio; in bdi_set_min_ratio()
680 bdi->min_ratio += min_ratio; in bdi_set_min_ratio()
682 ret = -EINVAL; in bdi_set_min_ratio()
695 return -EINVAL; in bdi_set_max_ratio()
698 if (bdi->min_ratio > max_ratio) { in bdi_set_max_ratio()
699 ret = -EINVAL; in bdi_set_max_ratio()
701 bdi->max_ratio = max_ratio; in bdi_set_max_ratio()
702 bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100; in bdi_set_max_ratio()
719 return max(thresh, dom->dirty_limit); in hard_dirty_limit()
724 * system-wide clean memory excluding the amount being used in the domain.
730 unsigned long clean = filepages - min(filepages, mdtc->dirty); in mdtc_calc_avail()
731 unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty); in mdtc_calc_avail()
732 unsigned long other_clean = global_clean - min(global_clean, clean); in mdtc_calc_avail()
734 mdtc->avail = filepages + min(headroom, other_clean); in mdtc_calc_avail()
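
A worked example helps here: mdtc_calc_avail() caps a memcg domain's "available" memory at its own file pages plus whichever is smaller of its charge headroom and the clean memory left elsewhere in the system. A userspace sketch with assumed numbers (the MIN macro and standalone form are mine):

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* avail = filepages + min(headroom, clean memory outside this memcg) */
static unsigned long mdtc_avail(unsigned long filepages, unsigned long headroom,
                                unsigned long m_dirty,
                                unsigned long g_avail, unsigned long g_dirty)
{
        unsigned long clean = filepages - MIN(filepages, m_dirty);
        unsigned long global_clean = g_avail - MIN(g_avail, g_dirty);
        unsigned long other_clean = global_clean - MIN(global_clean, clean);

        return filepages + MIN(headroom, other_clean);
}

int main(void)
{
        /* memcg: 1000 file pages (200 dirty), 4000 pages of charge headroom;
         * global: 10000 dirtyable pages, 3000 dirty */
        printf("%lu\n", mdtc_avail(1000, 4000, 200, 10000, 3000)); /* 5000 */
        return 0;
}
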
738 * __wb_calc_thresh - @wb's share of dirty throttling threshold
741 * Note that balance_dirty_pages() will only seriously take it as a hard limit
749 * - starving fast devices
750 * - piling up dirty pages (that will take long time to sync) on slow devices
752 * The wb's share of dirty limit will be adapting to its throughput and
753 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
755 * Return: @wb's dirty limit in pages. The term "dirty" in the context of
761 unsigned long thresh = dtc->thresh; in __wb_calc_thresh()
769 fprop_fraction_percpu(&dom->completions, dtc->wb_completions, in __wb_calc_thresh()
772 wb_thresh = (thresh * (100 - bdi_min_ratio)) / 100; in __wb_calc_thresh()
776 wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio); in __wb_calc_thresh()
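
Putting lines 769-776 together: a wb's slice of the domain threshold is the threshold scaled by the wb's recent writeout fraction, plus a guaranteed minimum, capped at a maximum. A simplified userspace sketch (it folds the kernel's separate bdi-wide and per-wb ratios into a single pair, and assumes den > 0):

#include <stdio.h>

static unsigned long wb_share(unsigned long thresh,
                              unsigned long num, unsigned long den, /* fprop fraction */
                              unsigned long min_ratio, unsigned long max_ratio)
{
        unsigned long wb_thresh;

        wb_thresh = thresh * (100 - min_ratio) / 100; /* portion up for grabs */
        wb_thresh = wb_thresh * num / den;            /* this wb's writeout share */
        wb_thresh += thresh * min_ratio / 100;        /* guaranteed minimum */
        if (wb_thresh > thresh * max_ratio / 100)
                wb_thresh = thresh * max_ratio / 100; /* hard cap */
        return wb_thresh;
}

int main(void)
{
        /* wb did 3/10 of recent writeout; bdi min 5%, max 60% */
        printf("%lu\n", wb_share(1000, 3, 10, 5, 60)); /* 335 */
        return 0;
}
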
793 *                           setpoint - dirty 3
794 *        f(dirty) := 1.0 + (----------------)
795 *                           limit - setpoint
801 * (3) f(limit) = 0 => the hard limit
808 unsigned long limit) in pos_ratio_polynom()
813 x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT, in pos_ratio_polynom()
814 (limit - setpoint) | 1); in pos_ratio_polynom()
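
pos_ratio_polynom() is small enough to reproduce as a standalone program. The sketch below (userspace; the sample setpoint/limit values are assumed, RATELIMIT_CALC_SHIFT is the file's own fixed-point shift) evaluates f(dirty) = 1.0 + ((setpoint - dirty) / (limit - setpoint))^3, which is 2.0 at the freerun edge, 1.0 at the setpoint and 0 at the hard limit:

#include <stdio.h>
#include <stdint.h>

#define RATELIMIT_CALC_SHIFT 10         /* 1024 represents 1.0 */

static long long pos_ratio_polynom(unsigned long setpoint, unsigned long dirty,
                                   unsigned long limit)
{
        long long x, pos_ratio;

        /* signed distance from the setpoint, in units of (limit - setpoint) */
        x = (((int64_t)setpoint - (int64_t)dirty) << RATELIMIT_CALC_SHIFT) /
            (long long)((limit - setpoint) | 1);        /* | 1 avoids /0 */
        pos_ratio = x;
        pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
        pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
        pos_ratio += 1 << RATELIMIT_CALC_SHIFT;         /* 1.0 + (...)^3 */

        if (pos_ratio < 0)
                pos_ratio = 0;                          /* (3): the hard limit */
        if (pos_ratio > 2 << RATELIMIT_CALC_SHIFT)
                pos_ratio = 2 << RATELIMIT_CALC_SHIFT;  /* 2.0 at the freerun edge */
        return pos_ratio;
}

int main(void)
{
        unsigned long setpoint = 600, limit = 1000;

        printf("%lld %lld %lld\n",
               pos_ratio_polynom(setpoint, 600, limit),   /* 1024 = 1.0 */
               pos_ratio_polynom(setpoint, 800, limit),   /* 896 ~= 0.875 */
               pos_ratio_polynom(setpoint, 1000, limit)); /* 0: throttle hard */
        return 0;
}
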
861 * 0 +------------.------------------.----------------------*------------->
862 *         freerun^          setpoint^                 limit^   dirty pages
889 * 0 +----------------------.-------------------------------.------------->
894 * - start writing to a slow SD card and a fast disk at the same time. The SD
896 * - the wb dirty thresh drops quickly due to change of JBOD workload
900 struct bdi_writeback *wb = dtc->wb; in wb_position_ratio()
901 unsigned long write_bw = wb->avg_write_bandwidth; in wb_position_ratio()
902 unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh); in wb_position_ratio()
903 unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh); in wb_position_ratio()
904 unsigned long wb_thresh = dtc->wb_thresh; in wb_position_ratio()
909 long long pos_ratio; /* for scaling up/down the rate limit */ in wb_position_ratio()
912 dtc->pos_ratio = 0; in wb_position_ratio()
914 if (unlikely(dtc->dirty >= limit)) in wb_position_ratio()
922 setpoint = (freerun + limit) / 2; in wb_position_ratio()
923 pos_ratio = pos_ratio_polynom(setpoint, dtc->dirty, limit); in wb_position_ratio()
930 * This is especially important for fuse which sets bdi->max_ratio to in wb_position_ratio()
937 * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global in wb_position_ratio()
950 if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) { in wb_position_ratio()
953 if (dtc->wb_dirty < 8) { in wb_position_ratio()
954 dtc->pos_ratio = min_t(long long, pos_ratio * 2, in wb_position_ratio()
959 if (dtc->wb_dirty >= wb_thresh) in wb_position_ratio()
963 dtc->wb_bg_thresh); in wb_position_ratio()
968 wb_pos_ratio = pos_ratio_polynom(wb_setpoint, dtc->wb_dirty, in wb_position_ratio()
978 * wb's) while given strictlimit wb is below limit. in wb_position_ratio()
981 * but it would look too non-natural for the case of all in wb_position_ratio()
983 * with bdi->max_ratio == 100%. in wb_position_ratio()
992 dtc->pos_ratio = min(pos_ratio, wb_pos_ratio); in wb_position_ratio()
1005 *   f(wb_dirty) := 1.0 + k * (wb_dirty - wb_setpoint) in wb_position_ratio()
1007 *                    x_intercept - wb_dirty in wb_position_ratio()
1008 *                 := -------------------------- in wb_position_ratio()
1009 *                    x_intercept - wb_setpoint in wb_position_ratio()
1014 * (2) k = - 1 / (8 * write_bw) (in single wb case) in wb_position_ratio()
1019 * [wb_setpoint - write_bw/2, wb_setpoint + write_bw/2] in wb_position_ratio()
1027 if (unlikely(wb_thresh > dtc->thresh)) in wb_position_ratio()
1028 wb_thresh = dtc->thresh; in wb_position_ratio()
1036 wb_thresh = max(wb_thresh, (limit - dtc->dirty) / 8); in wb_position_ratio()
1041 x = div_u64((u64)wb_thresh << 16, dtc->thresh | 1); in wb_position_ratio()
1045 * (thresh - wb_thresh ~= 0) and transit to wb_thresh in JBOD case. in wb_position_ratio()
1047 *        wb_thresh                    thresh - wb_thresh in wb_position_ratio()
1048 * span = --------- * (8 * write_bw) + ------------------ * wb_thresh in wb_position_ratio()
1051 span = (dtc->thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16; in wb_position_ratio()
1054 if (dtc->wb_dirty < x_intercept - span / 4) { in wb_position_ratio()
1055 pos_ratio = div64_u64(pos_ratio * (x_intercept - dtc->wb_dirty), in wb_position_ratio()
1056 (x_intercept - wb_setpoint) | 1); in wb_position_ratio()
1066 if (dtc->wb_dirty < x_intercept) { in wb_position_ratio()
1067 if (dtc->wb_dirty > x_intercept / 8) in wb_position_ratio()
1069 dtc->wb_dirty); in wb_position_ratio()
1074 dtc->pos_ratio = pos_ratio; in wb_position_ratio()
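
The per-wb leg of the computation (lines 1054-1056) is plain linear interpolation. A userspace sketch (the function name and standalone form are mine):

#include <stdio.h>

/* 1.0 at wb_setpoint, falling linearly to 0 at x_intercept */
static long long wb_scale(long long pos_ratio, unsigned long wb_dirty,
                          unsigned long wb_setpoint, unsigned long x_intercept)
{
        if (wb_dirty >= x_intercept)
                return 0;       /* past the intercept: stop the dirtier */
        return pos_ratio * (long long)(x_intercept - wb_dirty) /
               (long long)((x_intercept - wb_setpoint) | 1); /* | 1 avoids /0 */
}

int main(void)
{
        /* halfway from wb_setpoint (100) to x_intercept (500), pos_ratio 1.0 */
        printf("%lld\n", wb_scale(1024, 300, 100, 500)); /* 510, ~0.5 */
        return 0;
}
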
1082 unsigned long avg = wb->avg_write_bandwidth; in wb_update_write_bandwidth()
1083 unsigned long old = wb->write_bandwidth; in wb_update_write_bandwidth()
1089 *                   bw * elapsed + write_bandwidth * (period - elapsed) in wb_update_write_bandwidth()
1090 * write_bandwidth = --------------------------------------------------- in wb_update_write_bandwidth()
1096 bw = written - min(written, wb->written_stamp); in wb_update_write_bandwidth()
1103 bw += (u64)wb->write_bandwidth * (period - elapsed); in wb_update_write_bandwidth()
1110 avg -= (avg - old) >> 3; in wb_update_write_bandwidth()
1113 avg += (old - avg) >> 3; in wb_update_write_bandwidth()
1119 long delta = avg - wb->avg_write_bandwidth; in wb_update_write_bandwidth()
1121 &wb->bdi->tot_write_bandwidth) <= 0); in wb_update_write_bandwidth()
1123 wb->write_bandwidth = bw; in wb_update_write_bandwidth()
1124 wb->avg_write_bandwidth = avg; in wb_update_write_bandwidth()
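
The two-stage smoothing in wb_update_write_bandwidth() is worth a runnable illustration: first a period-weighted blend of the new sample into write_bandwidth, then a cautious 1/8 step of avg_write_bandwidth toward it that only moves when the new sample confirms the direction. Userspace sketch; HZ, the power-of-two period and the sample numbers are assumptions:

#include <stdio.h>

#define HZ 100
#define PERIOD 512      /* the kernel uses roundup_pow_of_two(3 * HZ) */

static void update_bw(unsigned long *write_bw, unsigned long *avg_bw,
                      unsigned long pages_written, unsigned long elapsed)
{
        unsigned long avg = *avg_bw, old = *write_bw;
        unsigned long long bw = (unsigned long long)pages_written * HZ;

        if (elapsed > PERIOD) {
                bw /= elapsed;          /* long quiet gap: plain average */
                avg = bw;
        } else {
                /* (rate * elapsed + old * (PERIOD - elapsed)) / PERIOD */
                bw += (unsigned long long)old * (PERIOD - elapsed);
                bw >>= 9;               /* PERIOD == 2^9 */
                /* second stage: chase old by 1/8 only when the new sample
                 * confirms the direction, filtering out lone spikes */
                if (avg > old && old >= (unsigned long)bw)
                        avg -= (avg - old) >> 3;
                if (avg < old && old <= (unsigned long)bw)
                        avg += (old - avg) >> 3;
        }
        *write_bw = (unsigned long)bw;
        *avg_bw = avg;
}

int main(void)
{
        unsigned long bw = 25600, avg = 25600;  /* ~100 MB/s in 4k pages/s */

        update_bw(&bw, &avg, 2560, HZ / 5);     /* 2560 pages in 200 ms */
        printf("bw=%lu avg=%lu\n", bw, avg);    /* bw=25100 avg=25600 */
        return 0;
}
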
1130 unsigned long thresh = dtc->thresh; in update_dirty_limit()
1131 unsigned long limit = dom->dirty_limit; in update_dirty_limit()
1136 if (limit < thresh) { in update_dirty_limit()
1137 limit = thresh; in update_dirty_limit()
1144 * dom->dirty_limit which is guaranteed to lie above the dirty pages. in update_dirty_limit()
1146 thresh = max(thresh, dtc->dirty); in update_dirty_limit()
1147 if (limit > thresh) { in update_dirty_limit()
1148 limit -= (limit - thresh) >> 5; in update_dirty_limit()
1153 dom->dirty_limit = limit; in update_dirty_limit()
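
update_dirty_limit()'s asymmetric tracking in one screen: the domain limit follows a raised threshold immediately, but decays back by only 1/32 per update interval, and never below the current dirty count. Standalone sketch (names and numbers assumed):

#include <stdio.h>

static unsigned long track_limit(unsigned long limit, unsigned long thresh,
                                 unsigned long dirty)
{
        if (limit < thresh)
                return thresh;          /* follow a raised threshold at once */
        if (thresh < dirty)
                thresh = dirty;         /* never decay below the dirty count */
        if (limit > thresh)
                limit -= (limit - thresh) >> 5; /* slow ~3% step down */
        return limit;
}

int main(void)
{
        unsigned long limit = 2048;

        /* threshold dropped to 1024: the limit eases down 1/32 per interval */
        limit = track_limit(limit, 1024, 900);
        printf("%lu\n", limit);         /* 2016 */
        return 0;
}
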
1164 if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) in domain_update_bandwidth()
1167 spin_lock(&dom->lock); in domain_update_bandwidth()
1168 if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) { in domain_update_bandwidth()
1170 dom->dirty_limit_tstamp = now; in domain_update_bandwidth()
1172 spin_unlock(&dom->lock); in domain_update_bandwidth()
1176 * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
1185 struct bdi_writeback *wb = dtc->wb; in wb_update_dirty_ratelimit()
1186 unsigned long dirty = dtc->dirty; in wb_update_dirty_ratelimit()
1187 unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh); in wb_update_dirty_ratelimit()
1188 unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh); in wb_update_dirty_ratelimit()
1189 unsigned long setpoint = (freerun + limit) / 2; in wb_update_dirty_ratelimit()
1190 unsigned long write_bw = wb->avg_write_bandwidth; in wb_update_dirty_ratelimit()
1191 unsigned long dirty_ratelimit = wb->dirty_ratelimit; in wb_update_dirty_ratelimit()
1201 * when dirty pages are truncated by userspace or re-dirtied by FS. in wb_update_dirty_ratelimit()
1203 dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed; in wb_update_dirty_ratelimit()
1209 dtc->pos_ratio >> RATELIMIT_CALC_SHIFT; in wb_update_dirty_ratelimit()
1216 * formula will yield the balanced rate limit (write_bw / N). in wb_update_dirty_ratelimit()
1253 * wb->dirty_ratelimit = balanced_dirty_ratelimit; in wb_update_dirty_ratelimit()
1257 * limit the step size. in wb_update_dirty_ratelimit()
1261 * task_ratelimit - dirty_ratelimit in wb_update_dirty_ratelimit()
1262 * = (pos_ratio - 1) * dirty_ratelimit in wb_update_dirty_ratelimit()
1271 * - dirty_ratelimit > balanced_dirty_ratelimit in wb_update_dirty_ratelimit()
1272 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint) in wb_update_dirty_ratelimit()
1278 * |task_ratelimit - dirty_ratelimit| is used to limit the step size in wb_update_dirty_ratelimit()
1297 if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) { in wb_update_dirty_ratelimit()
1298 dirty = dtc->wb_dirty; in wb_update_dirty_ratelimit()
1299 if (dtc->wb_dirty < 8) in wb_update_dirty_ratelimit()
1300 setpoint = dtc->wb_dirty + 1; in wb_update_dirty_ratelimit()
1302 setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2; in wb_update_dirty_ratelimit()
1306 x = min3(wb->balanced_dirty_ratelimit, in wb_update_dirty_ratelimit()
1309 step = x - dirty_ratelimit; in wb_update_dirty_ratelimit()
1311 x = max3(wb->balanced_dirty_ratelimit, in wb_update_dirty_ratelimit()
1314 step = dirty_ratelimit - x; in wb_update_dirty_ratelimit()
1331 dirty_ratelimit -= step; in wb_update_dirty_ratelimit()
1333 wb->dirty_ratelimit = max(dirty_ratelimit, 1UL); in wb_update_dirty_ratelimit()
1334 wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit; in wb_update_dirty_ratelimit()
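
The step logic at lines 1297-1334 is the subtle part: dirty_ratelimit only moves toward balanced_dirty_ratelimit as far as task_ratelimit agrees, so the rate feedback cannot fight the position feedback. A simplified userspace sketch (it collapses the kernel's min3/max3 over two balanced samples into one, which is an assumption of mine):

#include <stdio.h>

static unsigned long step_ratelimit(unsigned long rate, unsigned long task_rl,
                                    unsigned long balanced, int below_setpoint)
{
        unsigned long step = 0, x;
        unsigned int shift;

        if (below_setpoint) {           /* dirty < setpoint: may only raise */
                x = balanced < task_rl ? balanced : task_rl;
                if (rate < x)
                        step = x - rate;
        } else {                        /* dirty >= setpoint: may only lower */
                x = balanced > task_rl ? balanced : task_rl;
                if (rate > x)
                        step = rate - x;
        }
        /* damp tiny corrections, and don't chase the target 100% */
        shift = rate / (2 * step + 1);
        if (shift < 8 * sizeof(long))
                step = ((step >> shift) + 7) / 8;
        else
                step = 0;
        return rate < balanced ? rate + step : rate - step;
}

int main(void)
{
        /* both feedbacks agree the rate is too low: move ~1/8 of the gap */
        printf("%lu\n", step_ratelimit(1000, 2000, 1800, 1)); /* 1100 */
        return 0;
}
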
1344 struct bdi_writeback *wb = gdtc->wb; in __wb_update_bandwidth()
1346 unsigned long elapsed = now - wb->bw_time_stamp; in __wb_update_bandwidth()
1350 lockdep_assert_held(&wb->list_lock); in __wb_update_bandwidth()
1353 * rate-limit, only update once every 200ms. in __wb_update_bandwidth()
1358 dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]); in __wb_update_bandwidth()
1359 written = percpu_counter_read(&wb->stat[WB_WRITTEN]); in __wb_update_bandwidth()
1362 * Skip quiet periods when disk bandwidth is under-utilized. in __wb_update_bandwidth()
1365 if (elapsed > HZ && time_before(wb->bw_time_stamp, start_time)) in __wb_update_bandwidth()
1384 wb->dirtied_stamp = dirtied; in __wb_update_bandwidth()
1385 wb->written_stamp = written; in __wb_update_bandwidth()
1386 wb->bw_time_stamp = now; in __wb_update_bandwidth()
1401 * global_zone_page_state() too often. So scale it near-sqrt to the safety margin
1408 return 1UL << (ilog2(thresh - dirty) >> 1); in dirty_poll_interval()
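
dirty_poll_interval()'s near-sqrt scaling, runnable (assumes a 64-bit long for the __builtin_clzl-based ilog2):

#include <stdio.h>

static unsigned long dirty_poll_interval(unsigned long dirty,
                                         unsigned long thresh)
{
        if (thresh > dirty) {
                unsigned long gap = thresh - dirty;
                unsigned int log2 = 63 - __builtin_clzl(gap);   /* ilog2(gap) */

                return 1UL << (log2 >> 1);
        }
        return 1;       /* at/over the threshold: recheck after every page */
}

int main(void)
{
        printf("%lu %lu %lu\n",
               dirty_poll_interval(0, 1UL << 20),  /* 1024: huge safety gap */
               dirty_poll_interval(0, 100),        /* 8: small gap */
               dirty_poll_interval(100, 100));     /* 1 */
        return 0;
}
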
1416 unsigned long bw = wb->avg_write_bandwidth; in wb_max_pause()
1420 * Limit pause time for small memory systems. If sleeping for too long in wb_max_pause()
1438 long hi = ilog2(wb->avg_write_bandwidth); in wb_min_pause()
1439 long lo = ilog2(wb->dirty_ratelimit); in wb_min_pause()
1444 /* target for 10ms pause on 1-dd case */ in wb_min_pause()
1454 t += (hi - lo) * (10 * HZ) / 1024; in wb_min_pause()
1470 * 2) limit the target pause time to max_pause/2, so that the normal in wb_min_pause()
1479 * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}. in wb_min_pause()
1509 struct bdi_writeback *wb = dtc->wb; in wb_dirty_limits()
1515 * - in JBOD setup, wb_thresh can fluctuate a lot in wb_dirty_limits()
1516 * - in a system with HDD and USB key, the USB key may somehow in wb_dirty_limits()
1525 dtc->wb_thresh = __wb_calc_thresh(dtc); in wb_dirty_limits()
1526 dtc->wb_bg_thresh = dtc->thresh ? in wb_dirty_limits()
1527 div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0; in wb_dirty_limits()
1535 * reported dirty, even though there are thresh-m pages in wb_dirty_limits()
1539 if (dtc->wb_thresh < 2 * wb_stat_error()) { in wb_dirty_limits()
1541 dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK); in wb_dirty_limits()
1544 dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK); in wb_dirty_limits()
1573 struct backing_dev_info *bdi = wb->bdi; in balance_dirty_pages()
1574 bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT; in balance_dirty_pages()
1585 gdtc->avail = global_dirtyable_memory(); in balance_dirty_pages()
1586 gdtc->dirty = nr_reclaimable + global_node_page_state(NR_WRITEBACK); in balance_dirty_pages()
1593 dirty = gdtc->wb_dirty; in balance_dirty_pages()
1594 thresh = gdtc->wb_thresh; in balance_dirty_pages()
1595 bg_thresh = gdtc->wb_bg_thresh; in balance_dirty_pages()
1597 dirty = gdtc->dirty; in balance_dirty_pages()
1598 thresh = gdtc->thresh; in balance_dirty_pages()
1599 bg_thresh = gdtc->bg_thresh; in balance_dirty_pages()
1610 &mdtc->dirty, &writeback); in balance_dirty_pages()
1611 mdtc->dirty += writeback; in balance_dirty_pages()
1618 m_dirty = mdtc->wb_dirty; in balance_dirty_pages()
1619 m_thresh = mdtc->wb_thresh; in balance_dirty_pages()
1620 m_bg_thresh = mdtc->wb_bg_thresh; in balance_dirty_pages()
1622 m_dirty = mdtc->dirty; in balance_dirty_pages()
1623 m_thresh = mdtc->thresh; in balance_dirty_pages()
1624 m_bg_thresh = mdtc->bg_thresh; in balance_dirty_pages()
1630 * catch-up. This avoids (excessively) small writeouts in balance_dirty_pages()
1635 * up are the price we consciously pay for strictlimit-ing. in balance_dirty_pages()
1650 current->dirty_paused_when = now; in balance_dirty_pages()
1651 current->nr_dirtied = 0; in balance_dirty_pages()
1654 current->nr_dirtied_pause = min(intv, m_intv); in balance_dirty_pages()
1670 if ((current->flags & PF_LOCAL_THROTTLE) && in balance_dirty_pages()
1671 gdtc->wb_dirty < in balance_dirty_pages()
1672 dirty_freerun_ceiling(gdtc->wb_thresh, in balance_dirty_pages()
1673 gdtc->wb_bg_thresh)) in balance_dirty_pages()
1676 * when below the per-wb freerun ceiling. in balance_dirty_pages()
1681 dirty_exceeded = (gdtc->wb_dirty > gdtc->wb_thresh) && in balance_dirty_pages()
1682 ((gdtc->dirty > gdtc->thresh) || strictlimit); in balance_dirty_pages()
1697 if ((current->flags & PF_LOCAL_THROTTLE) && in balance_dirty_pages()
1698 mdtc->wb_dirty < in balance_dirty_pages()
1699 dirty_freerun_ceiling(mdtc->wb_thresh, in balance_dirty_pages()
1700 mdtc->wb_bg_thresh)) in balance_dirty_pages()
1703 * throttled when below the per-wb in balance_dirty_pages()
1708 dirty_exceeded |= (mdtc->wb_dirty > mdtc->wb_thresh) && in balance_dirty_pages()
1709 ((mdtc->dirty > mdtc->thresh) || strictlimit); in balance_dirty_pages()
1712 if (mdtc->pos_ratio < gdtc->pos_ratio) in balance_dirty_pages()
1716 if (dirty_exceeded && !wb->dirty_exceeded) in balance_dirty_pages()
1717 wb->dirty_exceeded = 1; in balance_dirty_pages()
1719 if (time_is_before_jiffies(wb->bw_time_stamp + in balance_dirty_pages()
1721 spin_lock(&wb->list_lock); in balance_dirty_pages()
1723 spin_unlock(&wb->list_lock); in balance_dirty_pages()
1727 dirty_ratelimit = wb->dirty_ratelimit; in balance_dirty_pages()
1728 task_ratelimit = ((u64)dirty_ratelimit * sdtc->pos_ratio) >> in balance_dirty_pages()
1730 max_pause = wb_max_pause(wb, sdtc->wb_dirty); in balance_dirty_pages()
1742 if (current->dirty_paused_when) in balance_dirty_pages()
1743 pause -= now - current->dirty_paused_when; in balance_dirty_pages()
1746 * for up to 800ms from time to time on 1-HDD; so does xfs, in balance_dirty_pages()
1753 sdtc->thresh, in balance_dirty_pages()
1754 sdtc->bg_thresh, in balance_dirty_pages()
1755 sdtc->dirty, in balance_dirty_pages()
1756 sdtc->wb_thresh, in balance_dirty_pages()
1757 sdtc->wb_dirty, in balance_dirty_pages()
1764 if (pause < -HZ) { in balance_dirty_pages()
1765 current->dirty_paused_when = now; in balance_dirty_pages()
1766 current->nr_dirtied = 0; in balance_dirty_pages()
1768 current->dirty_paused_when += period; in balance_dirty_pages()
1769 current->nr_dirtied = 0; in balance_dirty_pages()
1770 } else if (current->nr_dirtied_pause <= pages_dirtied) in balance_dirty_pages()
1771 current->nr_dirtied_pause += pages_dirtied; in balance_dirty_pages()
1776 now += min(pause - max_pause, max_pause); in balance_dirty_pages()
1782 sdtc->thresh, in balance_dirty_pages()
1783 sdtc->bg_thresh, in balance_dirty_pages()
1784 sdtc->dirty, in balance_dirty_pages()
1785 sdtc->wb_thresh, in balance_dirty_pages()
1786 sdtc->wb_dirty, in balance_dirty_pages()
1794 wb->dirty_sleep = now; in balance_dirty_pages()
1797 current->dirty_paused_when = now + pause; in balance_dirty_pages()
1798 current->nr_dirtied = 0; in balance_dirty_pages()
1799 current->nr_dirtied_pause = nr_dirtied_pause; in balance_dirty_pages()
1813 * In theory 1 page is enough to keep the consumer-producer in balance_dirty_pages()
1818 if (sdtc->wb_dirty <= wb_stat_error()) in balance_dirty_pages()
1821 if (fatal_signal_pending(current)) in balance_dirty_pages()
1825 if (!dirty_exceeded && wb->dirty_exceeded) in balance_dirty_pages()
1826 wb->dirty_exceeded = 0; in balance_dirty_pages()
1842 if (nr_reclaimable > gdtc->bg_thresh) in balance_dirty_pages()
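
The core of the throttling loop above reduces to a small calculation: the task dirtied pages_dirtied pages, is currently allowed task_ratelimit pages per second, so it owes HZ * pages_dirtied / task_ratelimit jiffies, minus whatever time has already passed since its last pause. Userspace sketch (the inline zero guard stands in for the kernel's separate task_ratelimit == 0 path):

#include <stdio.h>

#define HZ 100

static long compute_pause(unsigned long pages_dirtied,
                          unsigned long task_ratelimit, /* pages per second */
                          unsigned long now, unsigned long paused_when)
{
        /* jiffies this task "owes" for what it just dirtied */
        long period = HZ * pages_dirtied / (task_ratelimit ? task_ratelimit : 1);
        long pause = period;

        if (paused_when)
                pause -= (long)(now - paused_when); /* time already served */
        return pause;   /* <= 0: no sleep needed this round */
}

int main(void)
{
        /* dirtied 32 pages at an allowed 800 pages/s, 1 jiffy since last pause */
        printf("%ld\n", compute_pause(32, 800, 1001, 1000)); /* 3 jiffies */
        return 0;
}
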
1851 * dirty tsk->nr_dirtied_pause pages;
1855 * (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be
1857 * throttled page dirties in dirty_throttle_leaks on task exit and charge them
1865 * balance_dirty_pages_ratelimited - balance dirty memory state
1866 * @mapping: address_space which was dirtied
1874 * limit we decrease the ratelimiting by a lot, to prevent individual processes
1875 * from overshooting the limit by (ratelimit_pages) each.
1877 void balance_dirty_pages_ratelimited(struct address_space *mapping) in balance_dirty_pages_ratelimited()
1879 struct inode *inode = mapping->host; in balance_dirty_pages_ratelimited()
1885 if (!(bdi->capabilities & BDI_CAP_WRITEBACK)) in balance_dirty_pages_ratelimited()
1891 wb = &bdi->wb; in balance_dirty_pages_ratelimited()
1893 ratelimit = current->nr_dirtied_pause; in balance_dirty_pages_ratelimited()
1894 if (wb->dirty_exceeded) in balance_dirty_pages_ratelimited()
1895 ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10)); in balance_dirty_pages_ratelimited()
1902 * time, hence all honoured too large initial task->nr_dirtied_pause. in balance_dirty_pages_ratelimited()
1905 if (unlikely(current->nr_dirtied >= ratelimit)) in balance_dirty_pages_ratelimited()
1913 * short-lived tasks (eg. gcc invocations in a kernel build) escaping in balance_dirty_pages_ratelimited()
1914 * the dirty throttling and livelock other long-run dirtiers. in balance_dirty_pages_ratelimited()
1917 if (*p > 0 && current->nr_dirtied < ratelimit) { in balance_dirty_pages_ratelimited()
1919 nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied); in balance_dirty_pages_ratelimited()
1920 *p -= nr_pages_dirtied; in balance_dirty_pages_ratelimited()
1921 current->nr_dirtied += nr_pages_dirtied; in balance_dirty_pages_ratelimited()
1925 if (unlikely(current->nr_dirtied >= ratelimit)) in balance_dirty_pages_ratelimited()
1926 balance_dirty_pages(wb, current->nr_dirtied); in balance_dirty_pages_ratelimited()
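
The per-CPU leak handling at lines 1917-1921 deserves a concrete illustration: pages dirtied by tasks that exited before being throttled are banked in dirty_throttle_leaks and charged to the next task passing through, so short-lived dirtiers cannot escape the limit. Standalone sketch with assumed numbers:

#include <stdio.h>

static void charge_leaks(long *leaked, unsigned long *nr_dirtied,
                         unsigned long ratelimit)
{
        if (*leaked > 0 && *nr_dirtied < ratelimit) {
                unsigned long take = *leaked;

                if (take > ratelimit - *nr_dirtied)
                        take = ratelimit - *nr_dirtied;
                *leaked -= take;
                *nr_dirtied += take;    /* may push this task into throttling */
        }
}

int main(void)
{
        long leaked = 50;               /* left behind by short-lived tasks */
        unsigned long nr_dirtied = 10, ratelimit = 32;

        charge_leaks(&leaked, &nr_dirtied, ratelimit);
        printf("leaked=%ld nr_dirtied=%lu\n", leaked, nr_dirtied); /* 28, 32 */
        return 0;
}
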
1933 * wb_over_bg_thresh - does @wb need to be written back?
1953 gdtc->avail = global_dirtyable_memory(); in wb_over_bg_thresh()
1954 gdtc->dirty = global_node_page_state(NR_FILE_DIRTY); in wb_over_bg_thresh()
1957 if (gdtc->dirty > gdtc->bg_thresh) in wb_over_bg_thresh()
1961 wb_calc_thresh(gdtc->wb, gdtc->bg_thresh)) in wb_over_bg_thresh()
1967 mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty, in wb_over_bg_thresh()
1972 if (mdtc->dirty > mdtc->bg_thresh) in wb_over_bg_thresh()
1976 wb_calc_thresh(mdtc->wb, mdtc->bg_thresh)) in wb_over_bg_thresh()
1996 * and a different non-zero value will wakeup the writeback threads. in dirty_writeback_centisecs_handler()
2020 * then push it back - the user is still using the disk.
2024 mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode); in laptop_io_completion()
2039 del_timer(&bdi->laptop_mode_wb_timer); in laptop_sync_completion()
2046 * If ratelimit_pages is too high then we can get into dirty-data overload
2063 dom->dirty_limit = dirty_thresh; in writeback_set_ratelimit()
2083 * is now applied to total non-HIGHPAGE memory, and as such we can't
2086 * non-HIGHMEM memory.
2102 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
2103 * @mapping: address space structure to write
2115 void tag_pages_for_writeback(struct address_space *mapping, in tag_pages_for_writeback()
2118 XA_STATE(xas, &mapping->i_pages, start); in tag_pages_for_writeback()
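
Why a second TOWRITE tag at all: a data-integrity sync must terminate even while pages keep being redirtied, so it snapshots the dirty set before writing. A conceptual userspace sketch (arrays stand in for the xarray tags; this is not the kernel loop itself):

#include <stdio.h>
#include <stdbool.h>

#define NPAGES 8

static bool dirty[NPAGES], towrite[NPAGES];

static void tag_pages_for_writeback(int start, int end)
{
        for (int i = start; i <= end; i++)
                if (dirty[i])
                        towrite[i] = true;      /* snapshot the dirty set */
}

int main(void)
{
        dirty[1] = dirty[4] = true;
        tag_pages_for_writeback(0, NPAGES - 1);
        dirty[6] = true;        /* dirtied after the sync started ... */
        for (int i = 0; i < NPAGES; i++)
                if (towrite[i])
                        printf("write page %d\n", i); /* ... is not visited */
        return 0;
}
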
2138 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2139 * @mapping: address space structure to write
2140 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2145 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2146 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2148 * the call was made get new I/O started against them. If wbc->sync_mode is
2154 * writing them. For data-integrity sync we have to be careful so that we do
2162 * lock/page writeback access order inversion - we should only ever lock
2163 * multiple pages in ascending page->index order, and looping back to the start
2168 int write_cache_pages(struct address_space *mapping, in write_cache_pages()
2184 if (wbc->range_cyclic) { in write_cache_pages()
2185 index = mapping->writeback_index; /* prev offset */ in write_cache_pages()
2186 end = -1; in write_cache_pages()
2188 index = wbc->range_start >> PAGE_SHIFT; in write_cache_pages()
2189 end = wbc->range_end >> PAGE_SHIFT; in write_cache_pages()
2190 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) in write_cache_pages()
2193 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) { in write_cache_pages()
2194 tag_pages_for_writeback(mapping, index, end); in write_cache_pages()
2203 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end, in write_cache_pages()
2211 done_index = page->index; in write_cache_pages()
2223 if (unlikely(page->mapping != mapping)) { in write_cache_pages()
2235 if (wbc->sync_mode != WB_SYNC_NONE) in write_cache_pages()
2245 trace_wbc_writepage(wbc, inode_to_bdi(mapping->host)); in write_cache_pages()
2263 } else if (wbc->sync_mode != WB_SYNC_ALL) { in write_cache_pages()
2265 done_index = page->index + 1; in write_cache_pages()
2279 if (--wbc->nr_to_write <= 0 && in write_cache_pages()
2280 wbc->sync_mode == WB_SYNC_NONE) { in write_cache_pages()
2294 if (wbc->range_cyclic && !done) in write_cache_pages()
2296 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) in write_cache_pages()
2297 mapping->writeback_index = done_index; in write_cache_pages()
2305 * function and set the mapping flags on error
2310 struct address_space *mapping = data; in __writepage()
2311 int ret = mapping->a_ops->writepage(page, wbc); in __writepage()
2312 mapping_set_error(mapping, ret); in __writepage()
2317 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
2318 * @mapping: address space structure to write
2319 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2326 int generic_writepages(struct address_space *mapping, in generic_writepages()
2333 if (!mapping->a_ops->writepage) in generic_writepages()
2337 ret = write_cache_pages(mapping, wbc, __writepage, mapping); in generic_writepages()
2344 int do_writepages(struct address_space *mapping, struct writeback_control *wbc) in do_writepages()
2348 if (wbc->nr_to_write <= 0) in do_writepages()
2351 if (mapping->a_ops->writepages) in do_writepages()
2352 ret = mapping->a_ops->writepages(mapping, wbc); in do_writepages()
2354 ret = generic_writepages(mapping, wbc); in do_writepages()
2355 if ((ret != -ENOMEM) || (wbc->sync_mode != WB_SYNC_ALL)) in do_writepages()
2364 * write_one_page - write out a single page and wait on I/O
2369 * Note that the mapping's AS_EIO/AS_ENOSPC flags will be cleared when this
2376 struct address_space *mapping = page->mapping; in write_one_page()
2389 ret = mapping->a_ops->writepage(page, &wbc); in write_one_page()
2398 ret = filemap_check_errors(mapping); in write_one_page()
2420 void account_page_dirtied(struct page *page, struct address_space *mapping) in account_page_dirtied()
2422 struct inode *inode = mapping->host; in account_page_dirtied()
2424 trace_writeback_dirty_page(page, mapping); in account_page_dirtied()
2426 if (mapping_can_writeback(mapping)) { in account_page_dirtied()
2438 current->nr_dirtied++; in account_page_dirtied()
2450 void account_page_cleaned(struct page *page, struct address_space *mapping, in account_page_cleaned()
2453 if (mapping_can_writeback(mapping)) { in account_page_cleaned()
2466 * page dirty in that case, but not all the buffers. This is a "bottom-up"
2467 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
2477 struct address_space *mapping = page_mapping(page); in __set_page_dirty_nobuffers()
2480 if (!mapping) { in __set_page_dirty_nobuffers()
2485 xa_lock_irqsave(&mapping->i_pages, flags); in __set_page_dirty_nobuffers()
2486 BUG_ON(page_mapping(page) != mapping); in __set_page_dirty_nobuffers()
2488 account_page_dirtied(page, mapping); in __set_page_dirty_nobuffers()
2489 __xa_set_mark(&mapping->i_pages, page_index(page), in __set_page_dirty_nobuffers()
2491 xa_unlock_irqrestore(&mapping->i_pages, flags); in __set_page_dirty_nobuffers()
2494 if (mapping->host) { in __set_page_dirty_nobuffers()
2496 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); in __set_page_dirty_nobuffers()
2506 * Call this whenever redirtying a page, to de-account the dirty counters
2507 * (NR_DIRTIED, WB_DIRTIED, tsk->nr_dirtied), so that they match the written
2514 struct address_space *mapping = page->mapping; in account_page_redirty()
2516 if (mapping && mapping_can_writeback(mapping)) { in account_page_redirty()
2517 struct inode *inode = mapping->host; in account_page_redirty()
2522 current->nr_dirtied--; in account_page_redirty()
2539 wbc->pages_skipped++; in redirty_page_for_writepage()
2549 * For pages with a mapping this should be done under the page lock
2554 * If the mapping doesn't provide a set_page_dirty a_op, then
2559 struct address_space *mapping = page_mapping(page); in set_page_dirty()
2562 if (likely(mapping)) { in set_page_dirty()
2563 int (*spd)(struct page *) = mapping->a_ops->set_page_dirty; in set_page_dirty()
2592 * page->mapping->host, and if the page is unlocked. This is because another
2593 * CPU could truncate the page off the mapping and then free the mapping.
2595 * Usually, the page _is_ locked, or the caller is a user-space process which
2626 struct address_space *mapping = page_mapping(page); in __cancel_dirty_page()
2628 if (mapping_can_writeback(mapping)) { in __cancel_dirty_page()
2629 struct inode *inode = mapping->host; in __cancel_dirty_page()
2637 account_page_cleaned(page, mapping, wb); in __cancel_dirty_page()
2652 * tagged as dirty in the xarray so that a concurrent write-for-sync
2653 * can discover it via a PAGECACHE_TAG_DIRTY walk. The ->writepage
2663 struct address_space *mapping = page_mapping(page); in clear_page_dirty_for_io()
2668 if (mapping && mapping_can_writeback(mapping)) { in clear_page_dirty_for_io()
2669 struct inode *inode = mapping->host; in clear_page_dirty_for_io()
2678 * (b) we tell the low-level filesystem to in clear_page_dirty_for_io()
2689 * has no effect on the actual dirty bit - since in clear_page_dirty_for_io()
2724 struct address_space *mapping = page_mapping(page); in test_clear_page_writeback()
2731 if (mapping && mapping_use_writeback_tags(mapping)) { in test_clear_page_writeback()
2732 struct inode *inode = mapping->host; in test_clear_page_writeback()
2736 xa_lock_irqsave(&mapping->i_pages, flags); in test_clear_page_writeback()
2739 __xa_clear_mark(&mapping->i_pages, page_index(page), in test_clear_page_writeback()
2741 if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) { in test_clear_page_writeback()
2749 if (mapping->host && !mapping_tagged(mapping, in test_clear_page_writeback()
2751 sb_clear_inode_writeback(mapping->host); in test_clear_page_writeback()
2753 xa_unlock_irqrestore(&mapping->i_pages, flags); in test_clear_page_writeback()
2768 struct address_space *mapping = page_mapping(page); in __test_set_page_writeback()
2772 if (mapping && mapping_use_writeback_tags(mapping)) { in __test_set_page_writeback()
2773 XA_STATE(xas, &mapping->i_pages, page_index(page)); in __test_set_page_writeback()
2774 struct inode *inode = mapping->host; in __test_set_page_writeback()
2784 on_wblist = mapping_tagged(mapping, in __test_set_page_writeback()
2788 if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) in __test_set_page_writeback()
2796 if (mapping->host && !on_wblist) in __test_set_page_writeback()
2797 sb_mark_inode_writeback(mapping->host); in __test_set_page_writeback()
2837 * wait_for_stable_page() - wait for writeback to finish, if necessary.
2847 if (page->mapping->host->i_sb->s_iflags & SB_I_STABLE_WRITES) in wait_for_stable_page()