
// SPDX-License-Identifier: GPL-2.0+
/*
 * The basics of filesystem summary counter checking are that we iterate the
 * AGs counting the number of free blocks, free space btree blocks, per-AG
 * reservations, inodes, delayed allocation reservations, and free inodes.
 * Then we compare what we computed against the in-core counters.
 * ...
 * freezing is costly.  To get around this, we added a per-cpu counter of the
 * delayed allocation reservations ...
 *
 * So the first thing we do is warm up the buffer cache in the setup routine by
 * walking all the AGs to make sure the incore per-AG structure has been
 * initialized.  The expected value calculation then iterates the incore per-AG
 * structures as quickly as it can.  We snapshot the percpu counters before and
 * after this operation and use the difference in counter values to guess at
 * our tolerance for mismatch between expected and actual counter values.
 */
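The snapshot-and-tolerance idea described above is easiest to see in isolation. Below is a minimal userspace sketch, not kernel code: the helper name, the MIN_VARIANCE placeholder, and the incomplete flag are invented for illustration, but the comparison mirrors xchk_fscount_within_range() further down.

#include <stdbool.h>
#include <stdint.h>

#define MIN_VARIANCE	512	/* stand-in for XCHK_FSCOUNT_MIN_VARIANCE */

/*
 * old_sum is the live counter summed before the AG walk, curr_sum the same
 * counter summed afterwards, and expected the value computed from the AG
 * headers.  Drift between the two snapshots is tolerated.
 */
bool counter_within_range(int64_t old_sum, int64_t curr_sum,
			  int64_t expected, bool *incomplete)
{
	int64_t	min_value = old_sum < curr_sum ? old_sum : curr_sum;
	int64_t	max_value = old_sum < curr_sum ? curr_sum : old_sum;

	if (expected >= min_value && expected <= max_value)
		return true;		/* within the before-and-after window */
	if (max_value - min_value >= MIN_VARIANCE) {
		*incomplete = true;	/* fs too busy to judge the counter */
		return true;		/* but don't call it corrupt */
	}
	return false;			/* steady counter that is still off */
}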
/* Make sure the per-AG structure has been initialized from the on-disk header. */
In xchk_fscount_warmup():

	struct xfs_mount	*mp = sc->mp;
	...
	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		...
		/* Skip AGs whose incore headers are already initialized. */
		if (pag->pagi_init && pag->pagf_init)
			continue;
		...
		error = xfs_ialloc_read_agi(mp, sc->tp, agno, &agi_bp);
		...
		error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, &agf_bp);
		...
		/* The header reads above should have set these flags. */
		error = -EFSCORRUPTED;
		if (!pag->pagi_init || !pag->pagf_init)
			break;
	}
In xchk_setup_fscounters():

	sc->buf = kmem_zalloc(sizeof(struct xchk_fscounters), 0);
	if (!sc->buf)
		return -ENOMEM;
	fsc = sc->buf;

	xfs_icount_range(sc->mp, &fsc->icount_min, &fsc->icount_max);

	/* We must get the incore counters set up before we can proceed. */
	error = xchk_fscount_warmup(sc);
	...
/*
 * Calculate what the global in-core counters ought to be from the incore
 * per-AG structure.  Callers can compare this to the actual in-core counters
 * to estimate by how much both in-core and on-disk counters need to be
 * adjusted.
 */
In xchk_fscount_aggregate_agcounts():

	struct xfs_mount	*mp = sc->mp;
	...
	fsc->icount = 0;
	fsc->ifree = 0;
	fsc->fdblocks = 0;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		...
		if (!pag->pagi_init || !pag->pagf_init) {
			...
			return -EFSCORRUPTED;
		}

		/* Count the inodes */
		fsc->icount += pag->pagi_count;
		fsc->ifree += pag->pagi_freecount;

		/* Add up the free/freelist/bnobt/cntbt blocks */
		fsc->fdblocks += pag->pagf_freeblks;
		fsc->fdblocks += pag->pagf_flcount;
		fsc->fdblocks += pag->pagf_btreeblks;

		/*
		 * Per-AG reservations are taken out of the incore counters,
		 * so subtract them from the expected value as well.
		 */
		fsc->fdblocks -= pag->pag_meta_resv.ar_reserved;
		fsc->fdblocks -= pag->pag_rmapbt_resv.ar_orig_reserved;
	}
	...
	/* The global reserve pool is likewise held out of m_fdblocks. */
	fsc->fdblocks -= mp->m_resblks_avail;
	...
	/* Delayed allocation reservations are not yet reflected on disk. */
	delayed = percpu_counter_sum(&mp->m_delalloc_blks);
	fsc->fdblocks -= delayed;

	trace_xchk_fscounters_calc(mp, fsc->icount, fsc->ifree, fsc->fdblocks,
			delayed);
	...
	/* Bail out if these values are obviously nonsense. */
	if (fsc->icount < fsc->icount_min || fsc->icount > fsc->icount_max ||
	    fsc->fdblocks > mp->m_sb.sb_dblocks ||
	    fsc->ifree > fsc->icount_max)
		return -EFSCORRUPTED;

	/*
	 * If ifree exceeds icount, the counters were probably perturbed while
	 * we were summing, so retry the aggregation a few times
	 * to maintain ifree <= icount before giving up.
	 */
	if (fsc->ifree > fsc->icount) {
		if (tries--)
			goto retry;
		...
	}
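Condensed, the walk above amounts to the following arithmetic (a summary of the fragments shown here, using the same field names; not text quoted from the file):

/*
 * expected_icount   = sum over all AGs of pagi_count
 * expected_ifree    = sum over all AGs of pagi_freecount
 * expected_fdblocks = sum over all AGs of
 *                         (pagf_freeblks + pagf_flcount + pagf_btreeblks
 *                          - pag_meta_resv.ar_reserved
 *                          - pag_rmapbt_resv.ar_orig_reserved)
 *                     - m_resblks_avail                      (reserve pool)
 *                     - percpu_counter_sum(m_delalloc_blks)  (delalloc)
 */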
/*
 * Is the @counter reasonably close to the @expected value?
 *
 * Nothing was locked or frozen while we walked the incore
 * per-AG data to compute the @expected value, which means that the counter
 * could have changed.  We know the @old_value of the summation of the counter
 * before the aggregation, and we re-sum the counter now.  If the expected
 * value falls between the two sums, the counter is probably fine.
 */
In xchk_fscount_within_range(sc, old_value, counter, expected):

	int64_t		curr_value = percpu_counter_sum(counter);

	trace_xchk_fscounters_within_range(sc->mp, expected, curr_value,
			old_value);
	...
	/* Within the before-and-after range is ok. */
	if (expected >= min_value && expected <= max_value)
		return true;
	...
	/*
	 * ... Return true here so that we don't mark the counter corrupt.
	 * ... this check should be moved up and the return code changed to
	 * signal to userspace ...
	 */
	if (max_value - min_value >= XCHK_FSCOUNT_MIN_VARIANCE) {
		xchk_set_incomplete(sc);
		return true;
	}
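To make the acceptance window concrete, here is an invented example of how the checks above combine:

/*
 * Example (numbers invented): m_fdblocks summed to 1,000,000 before the AG
 * walk and 1,000,350 afterwards.  Any expected value in [1,000,000, 1,000,350]
 * passes.  An expected value of 999,100 fails, unless the drift itself (350
 * here) had reached XCHK_FSCOUNT_MIN_VARIANCE, in which case the scrub is
 * marked incomplete rather than the counter being marked corrupt.
 */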
In xchk_fscounters():

	struct xfs_mount	*mp = sc->mp;
	struct xchk_fscounters	*fsc = sc->buf;
	...
	/* Snapshot the live percpu counters. */
	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);
	...
	/* See if the summed values are obviously wrong. */
	if (icount < fsc->icount_min || icount > fsc->icount_max)
		xchk_set_corrupt(sc);

	if (fdblocks > mp->m_sb.sb_dblocks)
		xchk_set_corrupt(sc);

	if (ifree > icount && ifree - icount > XCHK_FSCOUNT_MIN_VARIANCE)
		xchk_set_corrupt(sc);
	...
	/* If the aggregation gave up, don't judge the counters at all. */
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
		return 0;

	/* Compare the in-core counters with whatever we counted. */
	if (!xchk_fscount_within_range(sc, icount, &mp->m_icount, fsc->icount))
		xchk_set_corrupt(sc);

	if (!xchk_fscount_within_range(sc, ifree, &mp->m_ifree, fsc->ifree))
		xchk_set_corrupt(sc);

	if (!xchk_fscount_within_range(sc, fdblocks, &mp->m_fdblocks,
			fsc->fdblocks))
		xchk_set_corrupt(sc);
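For context, the corrupt/incomplete flags set here are reported back through the metadata scrub ioctl. A rough sketch of a userspace caller follows; it assumes the xfsprogs headers are installed, and the include path is an assumption that may differ on your system.

#include <sys/ioctl.h>
#include <xfs/xfs_fs.h>	/* struct xfs_scrub_metadata et al.; path is an assumption */

/* @fd is any open file or directory on the XFS filesystem to be checked. */
int scrub_fscounters(int fd)
{
	struct xfs_scrub_metadata	sm = {
		.sm_type = XFS_SCRUB_TYPE_FSCOUNTERS,
	};

	if (ioctl(fd, XFS_IOC_SCRUB_METADATA, &sm) < 0)
		return -1;

	/* Caller checks sm_flags for XFS_SCRUB_OFLAG_CORRUPT / _INCOMPLETE. */
	return (int)sm.sm_flags;
}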