Lines matching +full:ip +full:-blocks (fs/gfs2/quota.c, Linux kernel)

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
4 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
10 * per-node file) and then are periodically synced to the quota file. This
32 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
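The LVB mentioned above is a small per-ID buffer carried in the quota glock's lock value block; update_qd() below fills it from the on-disk quota record. As a reader's aid, a sketch of its layout, assuming the field order in gfs2_ondisk.h and matching the qb_* fields used throughout this listing:

struct gfs2_quota_lvb {
	__be32 qb_magic;   /* GFS2_MAGIC; do_glock() revalidates this */
	__u32  __pad;
	__be64 qb_limit;   /* hard limit, in filesystem blocks */
	__be64 qb_warn;    /* soft (warn) limit, in filesystem blocks */
	__be64 qb_value;   /* current usage, in filesystem blocks */
};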
76 #define GFS2_QD_HASH_MASK (GFS2_QD_HASH_SIZE - 1)
78 /* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
79 /* -> sd_bitmap_lock */
In gfs2_qd_dispose():
119 sdp = qd->qd_gl->gl_name.ln_sbd;
121 list_del(&qd->qd_lru);
123 /* Free from the filesystem-specific list */
125 list_del(&qd->qd_list);
128 spin_lock_bucket(qd->qd_hash);
129 hlist_bl_del_rcu(&qd->qd_hlist);
130 spin_unlock_bucket(qd->qd_hash);
132 gfs2_assert_warn(sdp, !qd->qd_change);
133 gfs2_assert_warn(sdp, !qd->qd_slot_count);
134 gfs2_assert_warn(sdp, !qd->qd_bh_count);
136 gfs2_glock_put(qd->qd_gl);
137 atomic_dec(&sdp->sd_quota_count);
140 call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);

In gfs2_qd_isolate():
151 if (!spin_trylock(&qd->qd_lockref.lock))
154 if (qd->qd_lockref.count == 0) {
155 lockref_mark_dead(&qd->qd_lockref);
156 list_lru_isolate_move(lru, &qd->qd_lru, dispose);
159 spin_unlock(&qd->qd_lockref.lock);

In gfs2_qd_shrink_scan():
169 if (!(sc->gfp_mask & __GFP_FS))

In qd2index():
196 struct kqid qid = qd->qd_id;

In qd_alloc():
220 qd->qd_sbd = sdp;
221 qd->qd_lockref.count = 1;
222 spin_lock_init(&qd->qd_lockref.lock);
223 qd->qd_id = qid;
224 qd->qd_slot = -1;
225 INIT_LIST_HEAD(&qd->qd_lru);
226 qd->qd_hash = hash;
229 &gfs2_quota_glops, CREATE, &qd->qd_gl);

In gfs2_qd_search_bucket():
248 if (!qid_eq(qd->qd_id, qid))
250 if (qd->qd_sbd != sdp)
252 if (lockref_get_not_dead(&qd->qd_lockref)) {
253 list_lru_del(&gfs2_qd_lru, &qd->qd_lru);

In qd_get():
277 return -ENOMEM;
284 list_add(&new_qd->qd_list, &sdp->sd_quota_list);
285 hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
286 atomic_inc(&sdp->sd_quota_count);
292 gfs2_glock_put(new_qd->qd_gl);

In qd_hold():
302 struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
303 gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
304 lockref_get(&qd->qd_lockref);

In qd_put():
309 if (lockref_put_or_lock(&qd->qd_lockref))
312 qd->qd_lockref.count = 0;
313 list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
314 spin_unlock(&qd->qd_lockref.lock);

In slot_get():
320 struct gfs2_sbd *sdp = qd->qd_sbd;
324 spin_lock(&sdp->sd_bitmap_lock);
325 if (qd->qd_slot_count != 0)
328 error = -ENOSPC;
329 bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
330 if (bit < sdp->sd_quota_slots) {
331 set_bit(bit, sdp->sd_quota_bitmap);
332 qd->qd_slot = bit;
335 qd->qd_slot_count++;
337 spin_unlock(&sdp->sd_bitmap_lock);

In slot_hold():
344 struct gfs2_sbd *sdp = qd->qd_sbd;
346 spin_lock(&sdp->sd_bitmap_lock);
347 gfs2_assert(sdp, qd->qd_slot_count);
348 qd->qd_slot_count++;
349 spin_unlock(&sdp->sd_bitmap_lock);

In slot_put():
354 struct gfs2_sbd *sdp = qd->qd_sbd;
356 spin_lock(&sdp->sd_bitmap_lock);
357 gfs2_assert(sdp, qd->qd_slot_count);
358 if (!--qd->qd_slot_count) {
359 BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
360 qd->qd_slot = -1;
362 spin_unlock(&sdp->sd_bitmap_lock);

In bh_get():
367 struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
368 struct inode *inode = sdp->sd_qc_inode;
369 struct gfs2_inode *ip = GFS2_I(inode);
375 mutex_lock(&sdp->sd_quota_mutex);
377 if (qd->qd_bh_count++) {
378 mutex_unlock(&sdp->sd_quota_mutex);
382 block = qd->qd_slot / sdp->sd_qc_per_block;
383 offset = qd->qd_slot % sdp->sd_qc_per_block;
386 (loff_t)block << inode->i_blkbits,
390 error = -ENOENT;
394 error = gfs2_meta_read(ip->i_gl, iomap.addr >> inode->i_blkbits,
398 error = -EIO;
402 qd->qd_bh = bh;
403 qd->qd_bh_qc = (struct gfs2_quota_change *)
404 (bh->b_data + sizeof(struct gfs2_meta_header) +
407 mutex_unlock(&sdp->sd_quota_mutex);
414 qd->qd_bh_count--;
415 mutex_unlock(&sdp->sd_quota_mutex);
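bh_get() locates a quota-change slot with plain div/mod arithmetic against sd_qc_per_block. A standalone back-of-envelope check of that mapping; the block size and the on-disk struct sizes (24-byte gfs2_meta_header, 16-byte gfs2_quota_change) are assumptions for illustration:

#include <stdio.h>

int main(void)
{
	unsigned bsize = 4096;          /* assumed filesystem block size */
	unsigned hdr = 24, qc = 16;     /* assumed on-disk struct sizes */
	unsigned qc_per_block = (bsize - hdr) / qc;   /* 254 */
	unsigned slot = 1000;           /* example quota-change slot */

	/* the same arithmetic as bh_get() above */
	printf("slot %u -> block %u, offset %u\n",
	       slot, slot / qc_per_block, slot % qc_per_block);
	return 0;                       /* slot 1000 -> block 3, offset 238 */
}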
In bh_put():
421 struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
423 mutex_lock(&sdp->sd_quota_mutex);
424 gfs2_assert(sdp, qd->qd_bh_count);
425 if (!--qd->qd_bh_count) {
426 brelse(qd->qd_bh);
427 qd->qd_bh = NULL;
428 qd->qd_bh_qc = NULL;
430 mutex_unlock(&sdp->sd_quota_mutex);

In qd_check_sync():
436 if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
437 !test_bit(QDF_CHANGE, &qd->qd_flags) ||
438 (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
441 if (!lockref_get_not_dead(&qd->qd_lockref))
444 list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
445 set_bit(QDF_LOCKED, &qd->qd_flags);
446 qd->qd_change_sync = qd->qd_change;

In qd_fish():
458 if (sb_rdonly(sdp->sd_vfs))
463 list_for_each_entry(iter, &sdp->sd_quota_list, qd_list) {
464 if (qd_check_sync(sdp, iter, &sdp->sd_quota_sync_gen)) {
473 gfs2_assert_warn(sdp, qd->qd_change_sync);
476 clear_bit(QDF_LOCKED, &qd->qd_flags);

In qd_unlock():
490 gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd,
491 test_bit(QDF_LOCKED, &qd->qd_flags));
492 clear_bit(QDF_LOCKED, &qd->qd_flags);

In gfs2_qa_get():
532 * gfs2_qa_get - make sure we have a quota allocations data structure,
534 * @ip: the inode for this reservation
536 int gfs2_qa_get(struct gfs2_inode *ip)
538 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
539 struct inode *inode = &ip->i_inode;
541 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
544 spin_lock(&inode->i_lock);
545 if (ip->i_qadata == NULL) {
548 spin_unlock(&inode->i_lock);
551 return -ENOMEM;
553 spin_lock(&inode->i_lock);
554 if (ip->i_qadata == NULL)
555 ip->i_qadata = tmp;
559 ip->i_qadata->qa_ref++;
560 spin_unlock(&inode->i_lock);

In gfs2_qa_put():
564 void gfs2_qa_put(struct gfs2_inode *ip)
566 struct inode *inode = &ip->i_inode;
568 spin_lock(&inode->i_lock);
569 if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) {
570 kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
571 ip->i_qadata = NULL;
573 spin_unlock(&inode->i_lock);
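gfs2_qa_get() is a double-checked allocation: the spinlock is dropped for the (possibly sleeping) allocation, then retaken and the pointer rechecked in case another task installed it first. A minimal userspace sketch of the same control flow, with a mutex standing in for i_lock and illustrative names (the free-on-lost-race branch sits in the elided source lines):

#include <pthread.h>
#include <stdlib.h>

struct qadata { unsigned qa_ref; };

static pthread_mutex_t i_lock = PTHREAD_MUTEX_INITIALIZER;
static struct qadata *i_qadata;

static int qa_get(void)
{
	pthread_mutex_lock(&i_lock);
	if (i_qadata == NULL) {
		struct qadata *tmp;

		pthread_mutex_unlock(&i_lock);  /* don't allocate under the lock */
		tmp = calloc(1, sizeof(*tmp));
		if (!tmp)
			return -1;              /* stands in for -ENOMEM */
		pthread_mutex_lock(&i_lock);
		if (i_qadata == NULL)
			i_qadata = tmp;
		else
			free(tmp);              /* another thread won the race */
	}
	i_qadata->qa_ref++;
	pthread_mutex_unlock(&i_lock);
	return 0;
}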
In gfs2_quota_hold():
576 int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
578 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
582 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
585 error = gfs2_qa_get(ip);
589 qd = ip->i_qadata->qa_qd;
591 if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
592 gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) {
593 error = -EIO;
597 error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
600 ip->i_qadata->qa_qd_num++;
603 error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
606 ip->i_qadata->qa_qd_num++;
610 !uid_eq(uid, ip->i_inode.i_uid)) {
614 ip->i_qadata->qa_qd_num++;
619 !gid_eq(gid, ip->i_inode.i_gid)) {
623 ip->i_qadata->qa_qd_num++;
629 gfs2_quota_unhold(ip);

In gfs2_quota_unhold():
634 void gfs2_quota_unhold(struct gfs2_inode *ip)
636 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
639 if (ip->i_qadata == NULL)
642 gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
644 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
645 qdsb_put(ip->i_qadata->qa_qd[x]);
646 ip->i_qadata->qa_qd[x] = NULL;
648 ip->i_qadata->qa_qd_num = 0;
649 gfs2_qa_put(ip);

In sort_qd():
657 if (qid_lt(qd_a->qd_id, qd_b->qd_id))
658 return -1;
659 if (qid_lt(qd_b->qd_id, qd_a->qd_id))

In do_qc():
666 struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
667 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
668 struct gfs2_quota_change *qc = qd->qd_bh_qc;
671 mutex_lock(&sdp->sd_quota_mutex);
672 gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);
674 if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
675 qc->qc_change = 0;
676 qc->qc_flags = 0;
677 if (qd->qd_id.type == USRQUOTA)
678 qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
679 qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
682 x = be64_to_cpu(qc->qc_change) + change;
683 qc->qc_change = cpu_to_be64(x);
686 qd->qd_change = x;
690 gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
691 clear_bit(QDF_CHANGE, &qd->qd_flags);
692 qc->qc_flags = 0;
693 qc->qc_id = 0;
696 } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
701 if (change < 0) /* Reset quiet flag if we freed some blocks */
702 clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
703 mutex_unlock(&sdp->sd_quota_mutex);

In gfs2_write_buf_to_page():
706 static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
709 struct inode *inode = &ip->i_inode;
711 struct address_space *mapping = inode->i_mapping;
716 unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
720 blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
725 return -ENOMEM;
733 bh = bh->b_this_page;
744 zero_user(page, bnum * bsize, bh->b_size);
750 if (gfs2_is_jdata(ip))
751 gfs2_trans_add_data(ip->i_gl, bh);
753 gfs2_ordered_add_inode(ip);
756 if (to_write > (bsize - boff)) {
757 pg_off += (bsize - boff);
758 to_write -= (bsize - boff);
778 return -EIO;

In gfs2_write_disk_quota():
781 static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
797 overflow = (pg_off + nbytes) - PAGE_SIZE;
801 error = gfs2_write_buf_to_page(ip, pg_beg, pg_off, ptr,
802 nbytes - overflow);
805 error = gfs2_write_buf_to_page(ip, pg_beg + 1, 0,
806 ptr + nbytes - overflow,

In gfs2_adjust_quota():
812 * gfs2_adjust_quota - adjust record of current block usage
813 * @ip: The quota inode
822 * Returns: 0 or -ve on error
825 static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
829 struct inode *inode = &ip->i_inode;
835 if (gfs2_is_stuffed(ip)) {
836 err = gfs2_unstuff_dinode(ip);
842 err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
846 loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
847 err = -EIO;
851 qd->qd_qb.qb_value = q.qu_value;
853 if (fdq->d_fieldmask & QC_SPC_SOFT) {
854 q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
855 qd->qd_qb.qb_warn = q.qu_warn;
857 if (fdq->d_fieldmask & QC_SPC_HARD) {
858 q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
859 qd->qd_qb.qb_limit = q.qu_limit;
861 if (fdq->d_fieldmask & QC_SPACE) {
862 q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
863 qd->qd_qb.qb_value = q.qu_value;
867 err = gfs2_write_disk_quota(ip, &q, loc);
870 if (size > inode->i_size)
872 inode->i_mtime = inode->i_atime = current_time(inode);
874 set_bit(QDF_REFRESH, &qd->qd_flags);
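gfs2_adjust_quota() converts the byte-based limits from the quotactl interface into filesystem blocks by shifting with sb_bsize_shift. A worked example of that conversion, assuming 4096-byte blocks (so sb_bsize_shift is 12):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned bsize_shift = 12;              /* assumed: 4 KiB blocks */
	uint64_t d_spc_softlimit = 1ULL << 30;  /* example: 1 GiB, in bytes */

	/* the same shift used for q.qu_warn above */
	printf("%llu blocks\n",
	       (unsigned long long)(d_spc_softlimit >> bsize_shift));
	return 0;                               /* prints: 262144 blocks */
}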
In do_sync():
882 struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd;
883 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
891 unsigned int nalloc = 0, blocks;
894 error = gfs2_qa_get(ip);
898 gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
903 error = -ENOMEM;
908 inode_lock(&ip->i_inode);
910 error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
916 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
922 if (gfs2_write_alloc_required(ip, offset,
935 * two blocks need to be updated instead of 1 */
936 blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
940 error = gfs2_inplace_reserve(ip, &ap);
945 blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;
947 error = gfs2_trans_begin(sdp, blocks, 0);
954 error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
958 do_qc(qd, -qd->qd_change_sync);
959 set_bit(QDF_REFRESH, &qd->qd_flags);
967 gfs2_inplace_release(ip);
971 while (qx--)
973 inode_unlock(&ip->i_inode);
975 gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
978 gfs2_qa_put(ip);
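The comment at line 935 explains the extra num_qd term in the reservation: a struct gfs2_quota may straddle a block boundary, so each entry can dirty two data blocks. A quick standalone check of the formula; the RES_DINODE value and the per-entry data_blocks figure are assumptions for illustration:

#include <stdio.h>

int main(void)
{
	unsigned num_qd = 2;       /* example: two quota entries to sync */
	unsigned data_blocks = 1;  /* example result of gfs2_write_calc_reserv() */
	unsigned res_dinode = 1;   /* assumed value of RES_DINODE */

	/* the same formula as line 936 above */
	unsigned blocks = num_qd * data_blocks + res_dinode + num_qd + 3;
	printf("reserve %u blocks\n", blocks);  /* prints: reserve 8 blocks */
	return 0;
}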
In update_qd():
984 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
992 error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
996 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
997 qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
998 qlvb->__pad = 0;
999 qlvb->qb_limit = q.qu_limit;
1000 qlvb->qb_warn = q.qu_warn;
1001 qlvb->qb_value = q.qu_value;
1002 qd->qd_qb = *qlvb;

In do_glock():
1010 struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
1011 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1016 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
1020 if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
1023 qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1025 if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
1027 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
1032 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);

In gfs2_quota_lock():
1055 int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
1057 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1062 if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
1065 error = gfs2_quota_hold(ip, uid, gid);
1069 sort(ip->i_qadata->qa_qd, ip->i_qadata->qa_qd_num,
1072 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1073 qd = ip->i_qadata->qa_qd[x];
1074 error = do_glock(qd, NO_FORCE, &ip->i_qadata->qa_qd_ghs[x]);
1080 set_bit(GIF_QD_LOCKED, &ip->i_flags);
1082 while (x--)
1083 gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
1084 gfs2_quota_unhold(ip);

In need_sync():
1092 struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
1093 struct gfs2_tune *gt = &sdp->sd_tune;
1098 if (!qd->qd_qb.qb_limit)
1102 value = qd->qd_change;
1105 spin_lock(&gt->gt_spin);
1106 num = gt->gt_quota_scale_num;
1107 den = gt->gt_quota_scale_den;
1108 spin_unlock(&gt->gt_spin);
1112 else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
1113 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
1118 value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
1119 if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))

In gfs2_quota_unlock():
1126 void gfs2_quota_unlock(struct gfs2_inode *ip)
1128 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1134 if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
1137 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1141 qd = ip->i_qadata->qa_qd[x];
1144 gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
1155 gfs2_assert_warn(sdp, qd->qd_change_sync);
1157 clear_bit(QDF_LOCKED, &qd->qd_flags);
1172 gfs2_quota_unhold(ip);

In print_message():
1179 struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
1183 (qd->qd_id.type == USRQUOTA) ? "user" : "group",
1184 from_kqid(&init_user_ns, qd->qd_id));

In gfs2_quota_check():
1190 * gfs2_quota_check - check if allocating new blocks will exceed quota
1191 * @ip: The inode for which this check is being performed
1194 * @ap: The allocation parameters. ap->target contains the requested
1195 * blocks. ap->min_target, if set, contains the minimum blks
1199 * min_req = ap->min_target ? ap->min_target : ap->target;
1201 * ap->allowed is set to the number of blocks allowed
1203 * -EDQUOT otherwise, quota violation. ap->allowed is set to number
1204 * of blocks available.
1206 int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
1209 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1215 ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
1216 if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
1219 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1220 qd = ip->i_qadata->qa_qd[x];
1222 if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1223 qid_eq(qd->qd_id, make_kqid_gid(gid))))
1226 warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
1227 limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
1228 value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
1230 value += qd->qd_change;
1233 if (limit > 0 && (limit - value) < ap->allowed)
1234 ap->allowed = limit - value;
1236 if (limit && limit < (value + (s64)ap->target)) {
1238 * min_target, return -EDQUOT */
1239 if (!ap->min_target || ap->min_target > ap->allowed) {
1241 &qd->qd_flags)) {
1243 quota_send_warning(qd->qd_id,
1244 sdp->sd_vfs->s_dev,
1247 error = -EDQUOT;
1251 time_after_eq(jiffies, qd->qd_last_warn +
1254 quota_send_warning(qd->qd_id,
1255 sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
1257 qd->qd_last_warn = jiffies;
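gfs2_quota_check() admits an allocation by adding the locally cached qd_change to the cluster-wide qb_value from the LVB before comparing against the hard limit. A simplified userspace sketch of that admission test (illustrative names; the real code also handles the soft limit, warning throttling, and the min_target fallback):

#include <stdbool.h>
#include <stdint.h>

struct qd_view {
	int64_t qb_limit;   /* hard limit in fs blocks; 0 means none */
	int64_t qb_value;   /* usage recorded in the quota file */
	int64_t qd_change;  /* local, not-yet-synced delta */
};

/* Returns true if "target" more blocks fit under the hard limit;
 * *allowed reports how many more blocks this ID could still take. */
static bool quota_fits(const struct qd_view *qd, uint64_t target,
		       uint64_t *allowed)
{
	int64_t value = qd->qb_value + qd->qd_change;

	*allowed = UINT64_MAX;          /* no cap unless a limit applies */
	if (!qd->qb_limit)
		return true;
	*allowed = value < qd->qb_limit ? (uint64_t)(qd->qb_limit - value) : 0;
	return value + (int64_t)target <= qd->qb_limit;
}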
In gfs2_quota_change():
1263 void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
1268 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1270 if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON ||
1273 if (ip->i_diskflags & GFS2_DIF_SYSTEM)
1276 if (gfs2_assert_withdraw(sdp, ip->i_qadata &&
1277 ip->i_qadata->qa_ref > 0))
1279 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1280 qd = ip->i_qadata->qa_qd[x];
1282 if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1283 qid_eq(qd->qd_id, make_kqid_gid(gid))) {

In gfs2_quota_sync():
1291 struct gfs2_sbd *sdp = sb->s_fs_info;
1300 return -ENOMEM;
1302 mutex_lock(&sdp->sd_quota_sync_mutex);
1303 sdp->sd_quota_sync_gen++;
1321 qda[x]->qd_sync_gen =
1322 sdp->sd_quota_sync_gen;
1329 mutex_unlock(&sdp->sd_quota_sync_mutex);

In gfs2_quota_init():
1355 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
1356 u64 size = i_size_read(sdp->sd_qc_inode);
1357 unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
1366 if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
1367 return -EIO;
1369 sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
1370 bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
1372 error = -ENOMEM;
1373 sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
1374 if (sdp->sd_quota_bitmap == NULL)
1375 sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
1377 if (!sdp->sd_quota_bitmap)
1380 for (x = 0; x < blocks; x++) {
1387 error = gfs2_get_extent(&ip->i_inode, x, &dblock, &extlen);
1391 error = -EIO;
1392 bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
1400 qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
1401 for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
1404 s64 qc_change = be64_to_cpu(qc->qc_change);
1405 u32 qc_flags = be32_to_cpu(qc->qc_flags);
1409 be32_to_cpu(qc->qc_id));
1421 set_bit(QDF_CHANGE, &qd->qd_flags);
1422 qd->qd_change = qc_change;
1423 qd->qd_slot = slot;
1424 qd->qd_slot_count = 1;
1427 BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
1428 list_add(&qd->qd_list, &sdp->sd_quota_list);
1429 atomic_inc(&sdp->sd_quota_count);
1433 hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
1441 extlen--;

In gfs2_quota_cleanup():
1456 struct list_head *head = &sdp->sd_quota_list;
1463 list_del(&qd->qd_list);
1466 list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
1467 atomic_dec(&sdp->sd_quota_count);
1470 spin_lock_bucket(qd->qd_hash);
1471 hlist_bl_del_rcu(&qd->qd_hlist);
1472 spin_unlock_bucket(qd->qd_hash);
1474 gfs2_assert_warn(sdp, !qd->qd_change);
1475 gfs2_assert_warn(sdp, !qd->qd_slot_count);
1476 gfs2_assert_warn(sdp, !qd->qd_bh_count);
1478 gfs2_glock_put(qd->qd_gl);
1479 call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
1485 gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
1487 kvfree(sdp->sd_quota_bitmap);
1488 sdp->sd_quota_bitmap = NULL;

In quotad_error():
1493 if (error == 0 || error == -EROFS)
1496 if (!cmpxchg(&sdp->sd_log_error, 0, error))
1498 wake_up(&sdp->sd_logd_waitq);

In quotad_check_timeo():
1508 int error = fxn(sdp->sd_vfs, 0);
1510 *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
1512 *timeo -= t;

In gfs2_wake_up_statfs():
1517 if (!sdp->sd_statfs_force_sync) {
1518 sdp->sd_statfs_force_sync = 1;
1519 wake_up(&sdp->sd_quota_wait);

In gfs2_quotad():
1525 * gfs2_quotad - Write cached quota changes into the quota file
1533 struct gfs2_tune *tune = &sdp->sd_tune;
1544 if (sdp->sd_statfs_force_sync) {
1545 int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
1552 &tune->gt_statfs_quantum);
1556 &quotad_timeo, &tune->gt_quota_quantum);
1563 prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
1564 if (!sdp->sd_statfs_force_sync)
1565 t -= schedule_timeout(t);
1568 finish_wait(&sdp->sd_quota_wait, &wait);

In gfs2_quota_get_state():
1576 struct gfs2_sbd *sdp = sb->s_fs_info;
1580 switch (sdp->sd_args.ar_quota) {
1582 state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
1583 state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
1586 state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
1588 state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED |
1594 if (sdp->sd_quota_inode) {
1595 state->s_state[USRQUOTA].ino =
1596 GFS2_I(sdp->sd_quota_inode)->i_no_addr;
1597 state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks;
1599 state->s_state[USRQUOTA].nextents = 1; /* unsupported */
1600 state->s_state[GRPQUOTA] = state->s_state[USRQUOTA];
1601 state->s_incoredqs = list_lru_count(&gfs2_qd_lru);

In gfs2_get_dqblk():
1608 struct gfs2_sbd *sdp = sb->s_fs_info;
1616 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1617 return -ESRCH; /* Crazy XFS error code */
1621 return -EINVAL;
1630 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1631 fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
1632 fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
1633 fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;

In gfs2_set_dqblk():
1647 struct gfs2_sbd *sdp = sb->s_fs_info;
1648 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1652 unsigned int blocks = 0;
1657 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1658 return -ESRCH; /* Crazy XFS error code */
1662 return -EINVAL;
1664 if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
1665 return -EINVAL;
1671 error = gfs2_qa_get(ip);
1675 inode_lock(&ip->i_inode);
1676 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
1679 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1683 /* Check for existing entry, if none then alloc new blocks */
1688 /* If nothing has changed, this is a no-op */
1689 if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
1690 ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
1691 fdq->d_fieldmask ^= QC_SPC_SOFT;
1693 if ((fdq->d_fieldmask & QC_SPC_HARD) &&
1694 ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
1695 fdq->d_fieldmask ^= QC_SPC_HARD;
1697 if ((fdq->d_fieldmask & QC_SPACE) &&
1698 ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
1699 fdq->d_fieldmask ^= QC_SPACE;
1701 if (fdq->d_fieldmask == 0)
1705 alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
1706 if (gfs2_is_stuffed(ip))
1710 gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
1712 blocks = 1 + data_blocks + ind_blocks;
1713 ap.target = blocks;
1714 error = gfs2_inplace_reserve(ip, &ap);
1717 blocks += gfs2_rg_blocks(ip, blocks);
1720 /* Some quotas span block boundaries and can update two blocks,
1722 error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
1727 error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
1729 clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
1734 gfs2_inplace_release(ip);
1740 gfs2_qa_put(ip);
1741 inode_unlock(&ip->i_inode);