// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "scrub/scrub.h"
#include "scrub/common.h"

/* Convert a scrub type code to a DQ flag, or return 0 if error. */
static inline uint
xchk_quota_to_dqtype(
	struct xfs_scrub	*sc)
{
	switch (sc->sm->sm_type) {
	case XFS_SCRUB_TYPE_UQUOTA:
		return XFS_DQ_USER;
	case XFS_SCRUB_TYPE_GQUOTA:
		return XFS_DQ_GROUP;
	case XFS_SCRUB_TYPE_PQUOTA:
		return XFS_DQ_PROJ;
	default:
		return 0;
	}
}

/* Set us up to scrub a quota. */
int
xchk_setup_quota(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	uint			dqtype;
	int			error;

	if (!XFS_IS_QUOTA_RUNNING(sc->mp) || !XFS_IS_QUOTA_ON(sc->mp))
		return -ENOENT;

	dqtype = xchk_quota_to_dqtype(sc);
	if (dqtype == 0)
		return -EINVAL;
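	/*
	 * Hold the quotaoff lock so that nobody can switch this quota type
	 * off while we're scrubbing; XCHK_HAS_QUOTAOFFLOCK tells the scrub
	 * teardown code that it must drop the mutex for us.
	 */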
	sc->flags |= XCHK_HAS_QUOTAOFFLOCK;
	mutex_lock(&sc->mp->m_quotainfo->qi_quotaofflock);
	if (!xfs_this_quota_on(sc->mp, dqtype))
		return -ENOENT;
	error = xchk_setup_fs(sc, ip);
	if (error)
		return error;
	sc->ip = xfs_quota_inode(sc->mp, dqtype);
	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);
	sc->ilock_flags = XFS_ILOCK_EXCL;
	return 0;
}

/* Quotas. */

struct xchk_quota_info {
	struct xfs_scrub	*sc;
	xfs_dqid_t		last_id;
};

/* Scrub the fields in an individual quota item. */
STATIC int
xchk_quota_item(
	struct xfs_dquot	*dq,
	uint			dqtype,
	void			*priv)
{
	struct xchk_quota_info	*sqi = priv;
	struct xfs_scrub	*sc = sqi->sc;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_disk_dquot	*d = &dq->q_core;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	xfs_fileoff_t		offset;
	unsigned long long	bsoft;
	unsigned long long	isoft;
	unsigned long long	rsoft;
	unsigned long long	bhard;
	unsigned long long	ihard;
	unsigned long long	rhard;
	unsigned long long	bcount;
	unsigned long long	icount;
	unsigned long long	rcount;
	xfs_ino_t		fs_icount;
	xfs_dqid_t		id = be32_to_cpu(d->d_id);

	/*
	 * Except for the root dquot, each dquot we're given must have a
	 * strictly higher id than the previous one, since the quota file
	 * stores dquots in increasing id order.
	 */
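	/* Point any complaints at the file block that stores this dquot. */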
	offset = id / qi->qi_dqperchunk;
	if (id && id <= sqi->last_id)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	sqi->last_id = id;

	/* Did we get the dquot type we wanted? */
	if (dqtype != (d->d_flags & XFS_DQ_ALLTYPES))
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	if (d->d_pad0 != cpu_to_be32(0) || d->d_pad != cpu_to_be16(0))
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	/* Check the limits. */
	bhard = be64_to_cpu(d->d_blk_hardlimit);
	ihard = be64_to_cpu(d->d_ino_hardlimit);
	rhard = be64_to_cpu(d->d_rtb_hardlimit);

	bsoft = be64_to_cpu(d->d_blk_softlimit);
	isoft = be64_to_cpu(d->d_ino_softlimit);
	rsoft = be64_to_cpu(d->d_rtb_softlimit);

	/*
	 * Warn if the hard limits are larger than the fs.
	 * Administrators can do this, though in production this seems
	 * suspect, which is why we flag it for review.
	 *
	 * Complain about corruption if the soft limit is greater than
	 * the hard limit.
	 */
	if (bhard > mp->m_sb.sb_dblocks)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (bsoft > bhard)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	if (ihard > M_IGEO(mp)->maxicount)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (isoft > ihard)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	if (rhard > mp->m_sb.sb_rblocks)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (rsoft > rhard)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	/* Check the resource counts. */
	bcount = be64_to_cpu(d->d_bcount);
	icount = be64_to_cpu(d->d_icount);
	rcount = be64_to_cpu(d->d_rtbcount);
	fs_icount = percpu_counter_sum(&mp->m_icount);

	/*
	 * Check that usage doesn't exceed physical limits.  However, on
	 * a reflink filesystem we're allowed to exceed physical space
	 * if there are no quota limits.
	 */
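	/*
	 * With reflink, a shared block is charged to every file that maps
	 * it, so a dquot's block count can legitimately exceed sb_dblocks.
	 */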
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		if (mp->m_sb.sb_dblocks < bcount)
			xchk_fblock_set_warning(sc, XFS_DATA_FORK,
					offset);
	} else {
		if (mp->m_sb.sb_dblocks < bcount)
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
					offset);
	}
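	/*
	 * A dquot can't be charged more inodes than the filesystem has
	 * allocated, nor more rt blocks than the rt volume contains.
	 */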
	if (icount > fs_icount || rcount > mp->m_sb.sb_rblocks)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	/*
	 * We can violate the hard limits if the admin suddenly sets a
	 * lower limit than the actual usage.  However, we flag it for
	 * admin review.
	 */
	if (id != 0 && bhard != 0 && bcount > bhard)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (id != 0 && ihard != 0 && icount > ihard)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (id != 0 && rhard != 0 && rcount > rhard)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);

	return 0;
}

/* Check the quota's data fork. */
STATIC int
xchk_quota_data_fork(
	struct xfs_scrub	*sc)
{
	struct xfs_bmbt_irec	irec = { 0 };
	struct xfs_iext_cursor	icur;
	struct xfs_quotainfo	*qi = sc->mp->m_quotainfo;
	struct xfs_ifork	*ifp;
	xfs_fileoff_t		max_dqid_off;
	int			error = 0;

	/* Invoke the fork scrubber. */
	error = xchk_metadata_inode_forks(sc);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	/* Check for data fork problems that apply only to quota files. */
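	/*
	 * max_dqid_off is the file offset of the block holding the highest
	 * possible dquot id; no mapping should extend past it.
	 */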
	max_dqid_off = ((xfs_dqid_t)-1) / qi->qi_dqperchunk;
	ifp = XFS_IFORK_PTR(sc->ip, XFS_DATA_FORK);
	for_each_xfs_iext(ifp, &icur, &irec) {
		if (xchk_should_terminate(sc, &error))
			break;
		/*
		 * delalloc extents or blocks mapped above the highest
		 * quota id shouldn't happen.
		 */
		if (isnullstartblock(irec.br_startblock) ||
		    irec.br_startoff > max_dqid_off ||
		    irec.br_startoff + irec.br_blockcount - 1 > max_dqid_off) {
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
					irec.br_startoff);
			break;
		}
	}

	return error;
}

/* Scrub all of a quota type's items. */
int
xchk_quota(
	struct xfs_scrub	*sc)
{
	struct xchk_quota_info	sqi;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	uint			dqtype;
	int			error = 0;

	dqtype = xchk_quota_to_dqtype(sc);

	/* Look for problem extents. */
	error = xchk_quota_data_fork(sc);
	if (error)
		goto out;
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/*
	 * Check all the quota items.  Now that we've checked the quota inode
	 * data fork we have to drop ILOCK_EXCL to use the regular dquot
	 * functions.
	 */
	xfs_iunlock(sc->ip, sc->ilock_flags);
	sc->ilock_flags = 0;
	sqi.sc = sc;
	sqi.last_id = 0;
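	/* Walk every dquot of this type, checking each item as we go. */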
	error = xfs_qm_dqiterate(mp, dqtype, xchk_quota_item, &sqi);
	sc->ilock_flags = XFS_ILOCK_EXCL;
	xfs_ilock(sc->ip, sc->ilock_flags);
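	/*
	 * Convert the last dquot id we saw back into a file offset so that
	 * any iteration error is reported against the right block.
	 */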
	if (!xchk_fblock_process_error(sc, XFS_DATA_FORK,
			sqi.last_id * qi->qi_dqperchunk, &error))
		goto out;

out:
	return error;
}