// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_iwalk.h"
#include "xfs_quota.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"

/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);

STATIC void	xfs_qm_destroy_quotainos(xfs_quotainfo_t *qi);
STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
 * operations is fine as all callers run only during mount/umount or
 * quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH	32

STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount *mp,
	int type,
	int (*execute)(struct xfs_dquot *dqp, void *data),
	void *data)
{
	struct xfs_quotainfo *qi = mp->m_quotainfo;
	struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
	uint32_t next_index;
	int last_error = 0;
	int skipped;
	int nr_found;
restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int error = 0;
		int i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
				next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

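			/*
			 * Dquots are indexed in the radix tree by their id,
			 * so bumping the cursor past this id guarantees
			 * forward progress even if execute() removes the
			 * dquot from the tree.
			 */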
			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;

			error = execute(batch[i], data);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted. */
		if (last_error == -EFSCORRUPTED) {
			skipped = 0;
			break;
		}
		/* we're done if id overflows back to zero */
		if (!next_index)
			break;
	}

	if (skipped) {
		delay(1);
		goto restart;
	}

	return last_error;
}


/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot *dqp,
	void *data)
{
	struct xfs_mount *mp = dqp->q_mount;
	struct xfs_quotainfo *qi = mp->m_quotainfo;

	xfs_dqlock(dqp);
	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		return -EAGAIN;
	}

	dqp->dq_flags |= XFS_DQ_FREEING;
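	/*
	 * Taking the flush lock blocks until any flush already in progress
	 * for this dquot has completed, so the teardown below never races
	 * with in-flight dquot I/O.
	 */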
	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quota off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf *bp = NULL;
		int error;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (!error) {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	ASSERT(!list_empty(&dqp->q_lru));
	list_lru_del(&qi->qi_lru, &dqp->q_lru);
	XFS_STATS_DEC(mp, xs_qm_dquot_unused);

	xfs_qm_dqdestroy(dqp);
	return 0;
}

/*
 * Purge the dquot cache.
 */
void
xfs_qm_dqpurge_all(
	struct xfs_mount *mp,
	uint flags)
{
	if (flags & XFS_QMOPT_UQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_GQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_PQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
}

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
	struct xfs_mount *mp)
{
	if (mp->m_quotainfo) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		xfs_qm_destroy_quotainfo(mp);
	}
}

/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
	xfs_mount_t *mp)
{
	/*
	 * Release the dquots that the root inode, et al, might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Release the quota inodes.
	 */
	if (mp->m_quotainfo) {
		if (mp->m_quotainfo->qi_uquotaip) {
			xfs_irele(mp->m_quotainfo->qi_uquotaip);
			mp->m_quotainfo->qi_uquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_gquotaip) {
			xfs_irele(mp->m_quotainfo->qi_gquotaip);
			mp->m_quotainfo->qi_gquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_pquotaip) {
			xfs_irele(mp->m_quotainfo->qi_pquotaip);
			mp->m_quotainfo->qi_pquotaip = NULL;
		}
	}
}

STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t *ip,
	xfs_dqid_t id,
	uint type,
	bool doalloc,
	xfs_dquot_t **IO_idqpp)
{
	xfs_dquot_t *dqp;
	int error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
	 * or &i_gdquot. This makes the code look a little odd, but it keeps
	 * the logic simple.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * Find the dquot from somewhere. This bumps the reference count of
	 * the dquot and returns it locked. This can return ENOENT if the
	 * dquot didn't exist on disk and we didn't ask it to allocate; ESRCH
	 * if quotas got turned off suddenly.
	 */
	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}

static bool
xfs_qm_need_dqattach(
	struct xfs_inode *ip)
{
	struct xfs_mount *mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return false;
	if (!XFS_IS_QUOTA_ON(mp))
		return false;
	if (!XFS_NOT_DQATTACHED(mp, ip))
		return false;
	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return false;
	return true;
}

/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If @doalloc is true, the dquot(s) will be allocated if needed.
 * The inode may get unlocked and relocked in here, and the caller must deal
 * with the consequences.
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t *ip,
	bool doalloc)
{
	xfs_mount_t *mp = ip->i_mount;
	int error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
				doalloc, &ip->i_udquot);
		if (error)
			goto done;
		ASSERT(ip->i_udquot);
	}

	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
				doalloc, &ip->i_gdquot);
		if (error)
			goto done;
		ASSERT(ip->i_gdquot);
	}

	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
		error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
				doalloc, &ip->i_pdquot);
		if (error)
			goto done;
		ASSERT(ip->i_pdquot);
	}

done:
	/*
	 * Don't worry about the dquots that we may have attached before any
	 * error - they'll get detached later if it has not already been done.
	 */
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	return error;
}

int
xfs_qm_dqattach(
	struct xfs_inode *ip)
{
	int error;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, false);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}

/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t *ip)
{
	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if (ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
}

struct xfs_qm_isolate {
	struct list_head buffers;
	struct list_head dispose;
};

static enum lru_status
xfs_qm_dquot_isolate(
	struct list_head *item,
	struct list_lru_one *lru,
	spinlock_t *lru_lock,
	void *arg)
		__releases(lru_lock) __acquires(lru_lock)
{
	struct xfs_dquot *dqp = container_of(item,
			struct xfs_dquot, q_lru);
	struct xfs_qm_isolate *isol = arg;

	if (!xfs_dqlock_nowait(dqp))
		goto out_miss_busy;

	/*
	 * This dquot has acquired a reference in the meantime; remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);
		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);

		trace_xfs_dqreclaim_want(dqp);
		list_lru_isolate(lru, &dqp->q_lru);
		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
		return LRU_REMOVED;
	}

	/*
	 * If the dquot is dirty, flush it. If it's already being flushed, just
	 * skip it so there is time for the IO to complete before we try to
	 * reclaim it again on the next LRU pass.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		xfs_dqunlock(dqp);
		goto out_miss_busy;
	}

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf *bp = NULL;
		int error;

		trace_xfs_dqreclaim_dirty(dqp);

		/* we have to drop the LRU lock to flush the dquot */
		spin_unlock(lru_lock);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error)
			goto out_unlock_dirty;

		xfs_buf_delwri_queue(bp, &isol->buffers);
		xfs_buf_relse(bp);
		goto out_unlock_dirty;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->dq_flags |= XFS_DQ_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
	return LRU_REMOVED;

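	/*
	 * LRU_SKIP leaves the dquot on the list for a later pass, while
	 * LRU_RETRY tells the LRU walker that lru_lock was dropped and the
	 * list traversal has to be restarted.
	 */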
out_miss_busy:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	return LRU_SKIP;

out_unlock_dirty:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	xfs_dqunlock(dqp);
	spin_lock(lru_lock);
	return LRU_RETRY;
}

static unsigned long
xfs_qm_shrink_scan(
	struct shrinker *shrink,
	struct shrink_control *sc)
{
	struct xfs_quotainfo *qi = container_of(shrink,
			struct xfs_quotainfo, qi_shrinker);
	struct xfs_qm_isolate isol;
	unsigned long freed;
	int error;

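	/*
	 * Only reclaim when the allocation context allows filesystem I/O
	 * (__GFP_FS) and direct reclaim (__GFP_DIRECT_RECLAIM); flushing
	 * dirty dquots from other contexts could deadlock on fs locks.
	 */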
	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
		return 0;

	INIT_LIST_HEAD(&isol.buffers);
	INIT_LIST_HEAD(&isol.dispose);

	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
				     xfs_qm_dquot_isolate, &isol);

	error = xfs_buf_delwri_submit(&isol.buffers);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

	while (!list_empty(&isol.dispose)) {
		struct xfs_dquot *dqp;

		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}

	return freed;
}

static unsigned long
xfs_qm_shrink_count(
	struct shrinker *shrink,
	struct shrink_control *sc)
{
	struct xfs_quotainfo *qi = container_of(shrink,
			struct xfs_quotainfo, qi_shrinker);

	return list_lru_shrink_count(&qi->qi_lru, sc);
}

STATIC void
xfs_qm_set_defquota(
	xfs_mount_t *mp,
	uint type,
	xfs_quotainfo_t *qinf)
{
	xfs_dquot_t *dqp;
	struct xfs_def_quota *defq;
	struct xfs_disk_dquot *ddqp;
	int error;

	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
	if (error)
		return;

	ddqp = &dqp->q_core;
	defq = xfs_get_defquota(dqp, qinf);

	/*
	 * Timers and warnings have already been set; just set the
	 * default limits for this quota type.
	 */
	defq->bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
	defq->bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
	defq->ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
	defq->isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
	defq->rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
	defq->rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
	xfs_qm_dqdestroy(dqp);
}

/* Initialize quota time limits from the root dquot. */
static void
xfs_qm_init_timelimits(
	struct xfs_mount *mp,
	struct xfs_quotainfo *qinf)
{
	struct xfs_disk_dquot *ddqp;
	struct xfs_dquot *dqp;
	uint type;
	int error;

	qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
	qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
	qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
	qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
	qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
	qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 *
	 * Timers and warnings are globally set by the first timer found in
	 * user/group/proj quota types, otherwise a default value is used.
	 * This should be split into different fields per quota type.
	 */
	if (XFS_IS_UQUOTA_RUNNING(mp))
		type = XFS_DQ_USER;
	else if (XFS_IS_GQUOTA_RUNNING(mp))
		type = XFS_DQ_GROUP;
	else
		type = XFS_DQ_PROJ;
	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
	if (error)
		return;

	ddqp = &dqp->q_core;
	/*
	 * The warnings and timers set the grace period given to
	 * a user or group before they can no longer perform any
	 * more writes. If a value is zero, a default is used.
	 */
	if (ddqp->d_btimer)
		qinf->qi_btimelimit = be32_to_cpu(ddqp->d_btimer);
	if (ddqp->d_itimer)
		qinf->qi_itimelimit = be32_to_cpu(ddqp->d_itimer);
	if (ddqp->d_rtbtimer)
		qinf->qi_rtbtimelimit = be32_to_cpu(ddqp->d_rtbtimer);
	if (ddqp->d_bwarns)
		qinf->qi_bwarnlimit = be16_to_cpu(ddqp->d_bwarns);
	if (ddqp->d_iwarns)
		qinf->qi_iwarnlimit = be16_to_cpu(ddqp->d_iwarns);
	if (ddqp->d_rtbwarns)
		qinf->qi_rtbwarnlimit = be16_to_cpu(ddqp->d_rtbwarns);

	xfs_qm_dqdestroy(dqp);
}

/*
 * This initializes all the quota information that's kept in the
 * mount structure.
 */
STATIC int
xfs_qm_init_quotainfo(
	struct xfs_mount *mp)
{
	struct xfs_quotainfo *qinf;
	int error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), 0);

	error = list_lru_init(&qinf->qi_lru);
	if (error)
		goto out_free_qinf;

	/*
	 * See if the quota inodes are set up, and if not, allocate them
	 * and change the superblock accordingly.
	 */
	error = xfs_qm_init_quotainos(mp);
	if (error)
		goto out_free_lru;

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
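	/*
	 * qi_dqchunklen is the length of one dquot cluster in basic
	 * (512-byte) blocks, and qi_dqperchunk is the number of on-disk
	 * dquots that fit in a buffer of that size.
	 */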
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	xfs_qm_init_timelimits(mp, qinf);

	if (XFS_IS_UQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQ_USER, qinf);
	if (XFS_IS_GQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQ_GROUP, qinf);
	if (XFS_IS_PQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQ_PROJ, qinf);

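	/*
	 * Register a NUMA-aware shrinker so that memory pressure can reclaim
	 * unused dquots from this mount's LRU.
	 */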
	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;

	error = register_shrinker(&qinf->qi_shrinker);
	if (error)
		goto out_free_inos;

	return 0;

out_free_inos:
	mutex_destroy(&qinf->qi_quotaofflock);
	mutex_destroy(&qinf->qi_tree_lock);
	xfs_qm_destroy_quotainos(qinf);
out_free_lru:
	list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
	kmem_free(qinf);
	mp->m_quotainfo = NULL;
	return error;
}

/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees the quotainfo
 * structure itself.
 */
void
xfs_qm_destroy_quotainfo(
	xfs_mount_t *mp)
{
	xfs_quotainfo_t *qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);

	unregister_shrinker(&qi->qi_shrinker);
	list_lru_destroy(&qi->qi_lru);
	xfs_qm_destroy_quotainos(qi);
	mutex_destroy(&qi->qi_tree_lock);
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;
}

/*
 * Create an inode and return with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 */
STATIC int
xfs_qm_qino_alloc(
	xfs_mount_t *mp,
	xfs_inode_t **ip,
	uint flags)
{
	xfs_trans_t *tp;
	int error;
	bool need_alloc = true;

	*ip = NULL;
	/*
	 * With a superblock that doesn't have a separate pquotino, we
	 * share an inode between gquota and pquota. If the on-disk
	 * superblock has GQUOTA and the filesystem is now mounted
	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
	 * vice-versa.
	 */
	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
		xfs_ino_t ino = NULLFSINO;

		if ((flags & XFS_QMOPT_PQUOTA) &&
			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_gquotino;
			ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
		} else if ((flags & XFS_QMOPT_GQUOTA) &&
			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_pquotino;
			ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
		}
		if (ino != NULLFSINO) {
			error = xfs_iget(mp, NULL, ino, 0, 0, ip);
			if (error)
				return error;
			mp->m_sb.sb_gquotino = NULLFSINO;
			mp->m_sb.sb_pquotino = NULLFSINO;
			need_alloc = false;
		}
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
			XFS_QM_QINOCREATE_SPACE_RES(mp), 0, 0, &tp);
	if (error)
		return error;

	if (need_alloc) {
		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, ip);
		if (error) {
			xfs_trans_cancel(tp);
			return error;
		}
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * The sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM, for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));

		xfs_sb_version_addquota(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;
		mp->m_sb.sb_pquotino = NULLFSINO;

		/* qflags will get updated fully _after_ quotacheck */
		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else if (flags & XFS_QMOPT_GQUOTA)
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_pquotino = (*ip)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_log_sb(tp);

	error = xfs_trans_commit(tp);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
	}
	if (need_alloc)
		xfs_finish_inode_setup(*ip);
	return error;
}


STATIC void
xfs_qm_reset_dqcounts(
	xfs_mount_t *mp,
	xfs_buf_t *bp,
	xfs_dqid_t id,
	uint type)
{
	struct xfs_dqblk *dqb;
	int j;
	xfs_failaddr_t fa;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
		sizeof(xfs_dqblk_t);
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	dqb = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		struct xfs_disk_dquot *ddq;

		ddq = (struct xfs_disk_dquot *)&dqb[j];

		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in
		 * xfs_dquot_verify.
		 */
		fa = xfs_dqblk_verify(mp, &dqb[j], id + j, type);
		if (fa)
			xfs_dqblk_repair(mp, &dqb[j], id + j, type);

		/*
		 * Reset the type in case we are reusing the group quota file
		 * for project quotas or vice versa.
		 */
		ddq->d_flags = type;
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;
		ddq->d_btimer = 0;
		ddq->d_itimer = 0;
		ddq->d_rtbtimer = 0;
		ddq->d_bwarns = 0;
		ddq->d_iwarns = 0;
		ddq->d_rtbwarns = 0;

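		/*
		 * The dqblk contents were just rewritten, so the on-disk CRC
		 * has to be recomputed on v5 (CRC-enabled) filesystems.
		 */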
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			xfs_update_cksum((char *)&dqb[j],
					 sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}
}

STATIC int
xfs_qm_reset_dqcounts_all(
	struct xfs_mount *mp,
	xfs_dqid_t firstid,
	xfs_fsblock_t bno,
	xfs_filblks_t blkcnt,
	uint flags,
	struct list_head *buffer_list)
{
	struct xfs_buf *bp;
	int error;
	int type;

	ASSERT(blkcnt > 0);
	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
	error = 0;

	/*
	 * The blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);

		/*
		 * CRC and validation errors will return an EFSCORRUPTED here.
		 * If this occurs, re-read without CRC validation so that we
		 * can repair the damage via xfs_qm_reset_dqcounts(). This
		 * process will leave a trace in the log indicating corruption
		 * has been detected.
		 */
		if (error == -EFSCORRUPTED) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				      XFS_FSB_TO_DADDR(mp, bno),
				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				      NULL);
		}

		if (error)
			break;

		/*
		 * A corrupt buffer might not have a verifier attached, so
		 * make sure we have the correct one attached before writeback
		 * occurs.
		 */
		bp->b_ops = &xfs_dquot_buf_ops;
		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);

		/* goto the next block. */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}

/*
 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
 * counters for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_reset_dqcounts_buf(
	struct xfs_mount *mp,
	struct xfs_inode *qip,
	uint flags,
	struct list_head *buffer_list)
{
	struct xfs_bmbt_irec *map;
	int i, nmaps;	/* number of map entries */
	int error;	/* return value */
	xfs_fileoff_t lblkno;
	xfs_filblks_t maxlblkcnt;
	xfs_dqid_t firstid;
	xfs_fsblock_t rablkno;
	xfs_filblks_t rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. However, this gets called during quotacheck, and
	 * that happens only at mount time, which is single threaded.
	 */
	if (qip->i_d.di_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
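	/*
	 * Walk the quota inode's block map in chunks of up to
	 * XFS_DQITER_MAP_SIZE extents per xfs_bmapi_read() call.
	 */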
	do {
		uint lock_mode;

		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself, just some of its
		 * data. No new blocks are added here, and the inode is
		 * never added to the transaction.
		 */
		lock_mode = xfs_ilock_data_map_shared(qip);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, lock_mode);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);

			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt = map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
					       XFS_FSB_TO_DADDR(mp, rablkno),
					       mp->m_quotainfo->qi_dqchunklen,
					       &xfs_dquot_buf_ops);
					rablkno++;
				}
			}
			/*
			 * Iterate through all the blocks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_reset_dqcounts_all(mp, firstid,
						   map[i].br_startblock,
						   map[i].br_blockcount,
						   flags, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);

out:
	kmem_free(map);
	return error;
}

/*
 * Called by dqusage_adjust during a quotacheck.
 *
 * Given the inode and a dquot id, this updates both the incore dquot as well
 * as the buffer copy. This is so that once the quotacheck is done, we can
 * just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode *ip,
	uint type,
	xfs_qcnt_t nblks,
	xfs_qcnt_t rtblks)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_dquot *dqp;
	xfs_dqid_t id;
	int error;

	id = xfs_qm_id_for_quotatype(ip, type);
	error = xfs_qm_dqget(mp, id, type, true, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != -ESRCH);
		ASSERT(error != -ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	be64_add_cpu(&dqp->q_core.d_icount, 1);
	dqp->q_res_icount++;
	if (nblks) {
		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
		dqp->q_res_bcount += nblks;
	}
	if (rtblks) {
		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
		dqp->q_res_rtbcount += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_core.d_id) {
		xfs_qm_adjust_dqlimits(mp, dqp);
		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
	}

	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}

/*
 * Callback routine supplied to xfs_iwalk_threaded(). Given an inumber, find
 * its dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	struct xfs_mount *mp,
	struct xfs_trans *tp,
	xfs_ino_t ino,
	void *data)
{
	struct xfs_inode *ip;
	xfs_qcnt_t nblks;
	xfs_filblks_t rtblks = 0;	/* total rt blks */
	int error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * The root inode must have its resources accounted for; not so
	 * the quota inodes.
	 */
	if (xfs_is_quota_inode(&mp->m_sb, ino))
		return 0;

	/*
	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
	 * at mount time and therefore nobody will be racing chown/chproj.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
	if (error == -EINVAL || error == -ENOENT)
		return 0;
	if (error)
		return error;

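	/*
	 * Delayed allocations are purely in-core state, and quotacheck runs
	 * during mount before any user I/O, so no inode can have outstanding
	 * delayed allocation blocks here.
	 */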
	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);

		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
			error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
			if (error)
				goto error0;
		}

		xfs_bmap_count_leaves(ifp, &rtblks);
	}

	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_USER, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_GROUP, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_PROJ, nblks,
				rtblks);
		if (error)
			goto error0;
	}

error0:
	xfs_irele(ip);
	return error;
}

STATIC int
xfs_qm_flush_one(
	struct xfs_dquot *dqp,
	void *data)
{
	struct xfs_mount *mp = dqp->q_mount;
	struct list_head *buffer_list = data;
	struct xfs_buf *bp = NULL;
	int error = 0;

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	/*
	 * The only way the dquot is already flush locked by the time quotacheck
	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
	 * it for the final time. Quotacheck collects all dquot bufs in the
	 * local delwri queue before dquots are dirtied, so reclaim can't have
	 * possibly queued it for I/O. The only way out is to push the buffer to
	 * cycle the flush lock.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		/* buf is pinned in-core by delwri list */
		bp = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
				mp->m_quotainfo->qi_dqchunklen, 0);
		if (!bp) {
			error = -EINVAL;
			goto out_unlock;
		}
		xfs_buf_unlock(bp);

		xfs_buf_delwri_pushbuf(bp, buffer_list);
		xfs_buf_rele(bp);

		error = -EAGAIN;
		goto out_unlock;
	}

	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}

/*
 * Walk through all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
STATIC int
xfs_qm_quotacheck(
	xfs_mount_t *mp)
{
	int error, error2;
	uint flags;
	LIST_HEAD	(buffer_list);
	struct xfs_inode *uip = mp->m_quotainfo->qi_uquotaip;
	struct xfs_inode *gip = mp->m_quotainfo->qi_gquotaip;
	struct xfs_inode *pip = mp->m_quotainfo->qi_pquotaip;

	flags = 0;

	ASSERT(uip || gip || pip);
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go through all the dquots on disk, USR and GRP/PRJ, and
	 * reset their counters to zero. We need a clean slate.
	 * We don't log our changes until later.
	 */
	if (uip) {
		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_QMOPT_UQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	if (gip) {
		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_QMOPT_GQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_GQUOTA_CHKD;
	}

	if (pip) {
		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_QMOPT_PQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_PQUOTA_CHKD;
	}

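	/*
	 * Walk every inode in the filesystem, accumulating each inode's
	 * block and inode usage into the incore dquots via
	 * xfs_qm_dqusage_adjust().
	 */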
	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
			NULL);
	if (error)
		goto error_return;

	/*
	 * We've made all the changes that we need to make incore. Flush them
	 * down to disk buffers if everything was updated successfully.
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
					  &buffer_list);
	}
	if (XFS_IS_GQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}

	error2 = xfs_buf_delwri_submit(&buffer_list);
	if (!error)
		error = error2;

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via the inode walk). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotas off. The dquots won't be attached to any of the
	 * inodes at this point (because we intentionally didn't in
	 * dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		goto error_return;
	}

	/*
	 * If one type of quota is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
	mp->m_qflags |= flags;

error_return:
	xfs_buf_delwri_cancel(&buffer_list);

	if (error) {
		xfs_warn(mp,
			"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
	} else
		xfs_notice(mp, "Quotacheck: Done.");
	return error;
}

/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo. This is also responsible for
 * running a quotacheck as necessary. We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
	struct xfs_mount *mp)
{
	int error = 0;
	uint sbf;

	/*
	 * If quotas on realtime volumes are not supported, we disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, create
	 * the quota inode(s), and change/rev the superblock if necessary.
	 */
	error = xfs_qm_init_quotainfo(mp);
	if (error) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp)) {
		error = xfs_qm_quotacheck(mp);
		if (error) {
			/* Quotacheck failed and disabled quotas. */
			return;
		}
	}
	/*
	 * If one type of quota is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	if (!XFS_IS_GQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
	if (!XFS_IS_PQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_PQUOTA_CHKD;

write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_sync_sb(mp, false)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on-disk superblock doesn't know that!
			 */
			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
			xfs_alert(mp, "%s: Superblock update failed!",
				  __func__);
		}
	}

	if (error) {
		xfs_warn(mp, "Failed to initialize disk quotas.");
		return;
	}
}

/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t *mp)
{
	struct xfs_inode *uip = NULL;
	struct xfs_inode *gip = NULL;
	struct xfs_inode *pip = NULL;
	int error;
	uint flags = 0;

	ASSERT(mp->m_quotainfo);

	/*
	 * Get the uquota and gquota inodes
	 */
	if (xfs_sb_version_hasquota(&mp->m_sb)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip);
			if (error)
				return error;
		}
		if (XFS_IS_GQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip);
			if (error)
				goto error_rele;
		}
		if (XFS_IS_PQUOTA_ON(mp) &&
		    mp->m_sb.sb_pquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_pquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
					     0, 0, &pip);
			if (error)
				goto error_rele;
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
	}

	/*
	 * Create the three inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below.  If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		error = xfs_qm_qino_alloc(mp, &uip,
					      flags | XFS_QMOPT_UQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
		error = xfs_qm_qino_alloc(mp, &gip,
					  flags | XFS_QMOPT_GQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
		error = xfs_qm_qino_alloc(mp, &pip,
					  flags | XFS_QMOPT_PQUOTA);
		if (error)
			goto error_rele;
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;
	mp->m_quotainfo->qi_pquotaip = pip;

	return 0;

error_rele:
	if (uip)
		xfs_irele(uip);
	if (gip)
		xfs_irele(gip);
	if (pip)
		xfs_irele(pip);
	return error;
}

STATIC void
xfs_qm_destroy_quotainos(
	xfs_quotainfo_t *qi)
{
	if (qi->qi_uquotaip) {
		xfs_irele(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		xfs_irele(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	if (qi->qi_pquotaip) {
		xfs_irele(qi->qi_pquotaip);
		qi->qi_pquotaip = NULL;
	}
}

STATIC void
xfs_qm_dqfree_one(
	struct xfs_dquot *dqp)
{
	struct xfs_mount *mp = dqp->q_mount;
	struct xfs_quotainfo *qi = mp->m_quotainfo;

	mutex_lock(&qi->qi_tree_lock);
	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));

	qi->qi_dquots--;
	mutex_unlock(&qi->qi_tree_lock);

	xfs_qm_dqdestroy(dqp);
}

/* --------------- utility functions for vnodeops ---------------- */


/*
 * Given an inode, a uid, gid and prid, make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode *ip,
	xfs_dqid_t uid,
	xfs_dqid_t gid,
	prid_t prid,
	uint flags,
	struct xfs_dquot **O_udqpp,
	struct xfs_dquot **O_gdqpp,
	struct xfs_dquot **O_pdqpp)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_dquot *uq = NULL;
	struct xfs_dquot *gq = NULL;
	struct xfs_dquot *pq = NULL;
	int error;
	uint lockflags;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = ip->i_d.di_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, true);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		if (ip->i_d.di_uid != uid) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock the inode here before calling dqget
			 * if we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, uid, XFS_DQ_USER, true, &uq);
			if (error) {
				ASSERT(error != -ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to the caller.
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		if (ip->i_d.di_gid != gid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, gid, XFS_DQ_GROUP, true, &gq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		if (xfs_get_projid(ip) != prid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, (xfs_dqid_t)prid, XFS_DQ_PROJ,
					true, &pq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(pq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_pdquot);
			pq = xfs_qm_dqhold(ip->i_pdquot);
		}
	}
	if (uq)
		trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else
		xfs_qm_dqrele(gq);
	if (O_pdqpp)
		*O_pdqpp = pq;
	else
		xfs_qm_dqrele(pq);
	return 0;

error_rele:
	xfs_qm_dqrele(gq);
	xfs_qm_dqrele(uq);
	return error;
}

/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
xfs_dquot_t *
xfs_qm_vop_chown(
	xfs_trans_t *tp,
	xfs_inode_t *ip,
	xfs_dquot_t **IO_olddq,
	xfs_dquot_t *newdq)
{
	xfs_dquot_t *prevdq;
	uint bfield = XFS_IS_REALTIME_INODE(ip) ?
			XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	return prevdq;
}

/*
 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
 */
int
xfs_qm_vop_chown_reserve(
	struct xfs_trans *tp,
	struct xfs_inode *ip,
	struct xfs_dquot *udqp,
	struct xfs_dquot *gdqp,
	struct xfs_dquot *pdqp,
	uint flags)
{
	struct xfs_mount *mp = ip->i_mount;
	uint64_t delblks;
	unsigned int blkflags, prjflags = 0;
	struct xfs_dquot *udq_unres = NULL;
	struct xfs_dquot *gdq_unres = NULL;
	struct xfs_dquot *pdq_unres = NULL;
	struct xfs_dquot *udq_delblks = NULL;
	struct xfs_dquot *gdq_delblks = NULL;
	struct xfs_dquot *pdq_delblks = NULL;
	int error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	delblks = ip->i_delayed_blks;
	blkflags = XFS_IS_REALTIME_INODE(ip) ?
			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;

	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
	    ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
		udq_delblks = udqp;
		/*
		 * If there are delayed allocation blocks, then we have to
		 * unreserve those from the old dquot, and add them to the
		 * new dquot.
		 */
		if (delblks) {
			ASSERT(ip->i_udquot);
			udq_unres = ip->i_udquot;
		}
	}
	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
	    ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
		gdq_delblks = gdqp;
		if (delblks) {
			ASSERT(ip->i_gdquot);
			gdq_unres = ip->i_gdquot;
		}
	}

	if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
	    xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
		prjflags = XFS_QMOPT_ENOSPC;
		pdq_delblks = pdqp;
		if (delblks) {
			ASSERT(ip->i_pdquot);
			pdq_unres = ip->i_pdquot;
		}
	}

	error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
				udq_delblks, gdq_delblks, pdq_delblks,
				ip->i_d.di_nblocks, 1,
				flags | blkflags | prjflags);
	if (error)
		return error;

	/*
	 * Do the delayed blks reservations/unreservations now. Since these
	 * are done without the help of a transaction, if a reservation fails,
	 * its previous reservations won't be automatically undone by trans
	 * code. So, we have to do it manually here.
	 */
	if (delblks) {
		/*
		 * Do the reservations first. Unreservation can't fail.
		 */
		ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
		ASSERT(udq_unres || gdq_unres || pdq_unres);
		error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
			    udq_delblks, gdq_delblks, pdq_delblks,
			    (xfs_qcnt_t)delblks, 0,
			    flags | blkflags | prjflags);
		if (error)
			return error;
		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				udq_unres, gdq_unres, pdq_unres,
				-((xfs_qcnt_t)delblks), 0, blkflags);
	}

	return 0;
}

int
xfs_qm_vop_rename_dqattach(
	struct xfs_inode **i_tab)
{
	struct xfs_mount *mp = i_tab[0]->i_mount;
	int i;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

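	/*
	 * A rename involves at most four inodes: the two parent directories
	 * plus the source and target inodes, hence the bound of 4 below.
	 */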
	for (i = 0; (i < 4 && i_tab[i]); i++) {
		struct xfs_inode *ip = i_tab[i];
		int error;

		/*
		 * Watch out for duplicate entries in the table.
		 */
		if (i == 0 || ip != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(mp, ip)) {
				error = xfs_qm_dqattach(ip);
				if (error)
					return error;
			}
		}
	}
	return 0;
}

void
xfs_qm_vop_create_dqattach(
	struct xfs_trans *tp,
	struct xfs_inode *ip,
	struct xfs_dquot *udqp,
	struct xfs_dquot *gdqp,
	struct xfs_dquot *pdqp)
{
	struct xfs_mount *mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));

		ip->i_udquot = xfs_qm_dqhold(udqp);
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
		ip->i_gdquot = xfs_qm_dqhold(gdqp);
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(ip->i_pdquot == NULL);
		ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));

		ip->i_pdquot = xfs_qm_dqhold(pdqp);
		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}