Excerpts from fs/xfs/xfs_icache.c (Linux XFS inode cache)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
/* xfs_inode_alloc() */
	ip = alloc_inode_sb(mp->m_super, xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL);
	...
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
	...
	VFS_I(ip)->i_mode = 0;
	VFS_I(ip)->i_state = 0;
	mapping_set_large_folios(VFS_I(ip)->i_mapping);
	...
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(ip->i_ino == 0);
	...
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_cowfp = NULL;
	memset(&ip->i_af, 0, sizeof(ip->i_af));
	ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS;
	memset(&ip->i_df, 0, sizeof(ip->i_df));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
	ip->i_nblocks = 0;
	ip->i_forkoff = 0;
	ip->i_sick = 0;
	ip->i_checked = 0;
	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
	INIT_LIST_HEAD(&ip->i_ioend_list);
	spin_lock_init(&ip->i_ioend_lock);
	ip->i_next_unlinked = NULLAGINO;
	ip->i_prev_unlinked = NULLAGINO;
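/*
 * Commentary (added): the inode comes from a slab cache that recycles
 * objects, and __GFP_NOFAIL means the allocation retries until it succeeds
 * instead of returning NULL. Since a recycled object can carry state from a
 * previous life, every XFS-specific field is reset explicitly here before
 * the inode is published in the per-AG radix tree.
 */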
/* xfs_inode_free_callback() */
	switch (VFS_I(ip)->i_mode & S_IFMT) {
	...
		xfs_idestroy_fork(&ip->i_df);
	...
	if (ip->i_cowfp) {
		xfs_idestroy_fork(ip->i_cowfp);
		kmem_cache_free(xfs_ifork_cache, ip->i_cowfp);
	}
	if (ip->i_itemp) {
		ASSERT(!test_bit(XFS_LI_IN_AIL,
				 &ip->i_itemp->ili_item.li_flags));
		...
		ip->i_itemp = NULL;
	}
/* __xfs_inode_free() */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
	XFS_STATS_DEC(ip->i_mount, vn_active);
	...
	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
/* xfs_inode_free() */
	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);
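/*
 * Illustration (added; hypothetical helper name): the lookup side of the
 * race described above. Freeing zeroes ip->i_ino under ip->i_flags_lock, so
 * any RCU-walking lookup (see xfs_iget_cache_hit() further down) must take
 * the same lock and re-check i_ino before trusting the inode.
 */
static bool example_inode_still_valid(struct xfs_inode *ip, xfs_ino_t ino)
{
	bool	valid;

	spin_lock(&ip->i_flags_lock);
	/* A freed inode shows i_ino == 0, so a stale RCU pointer is rejected. */
	valid = (ip->i_ino == ino) && !(ip->i_flags & XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return valid;
}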
/* xfs_reclaim_work_queue() */
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}

/* xfs_blockgc_queue() */
	struct xfs_mount	*mp = pag->pag_mount;
	...
	if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
		queue_delayed_work(pag->pag_mount->m_blockgc_wq,
				   &pag->pag_blockgc_work,
				   msecs_to_jiffies(xfs_blockgc_secs * 1000));
/* xfs_perag_set_inode_tag() */
	struct xfs_mount	*mp = pag->pag_mount;
	...
	lockdep_assert_held(&pag->pag_ici_lock);
	...
	was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
	radix_tree_tag_set(&pag->pag_ici_root, agino, tag);
	...
		pag->pag_ici_reclaimable++;
	...
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag);
	spin_unlock(&mp->m_perag_lock);
	...
	trace_xfs_perag_set_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);

/* xfs_perag_clear_inode_tag() */
	struct xfs_mount	*mp = pag->pag_mount;
	...
	lockdep_assert_held(&pag->pag_ici_lock);
	...
	radix_tree_tag_clear(&pag->pag_ici_root, agino, tag);
	...
		pag->pag_ici_reclaimable--;
	...
	if (radix_tree_tagged(&pag->pag_ici_root, tag))
		return;

	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag);
	spin_unlock(&mp->m_perag_lock);

	trace_xfs_perag_clear_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure. This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally. Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation returns.
 */

/* xfs_reinit_inode() */
	uint32_t	nlink = inode->i_nlink;
	uint32_t	generation = inode->i_generation;
	...
	umode_t		mode = inode->i_mode;
	dev_t		dev = inode->i_rdev;
	kuid_t		uid = inode->i_uid;
	kgid_t		gid = inode->i_gid;
	...
	error = inode_init_always(mp->m_super, inode);
	...
	inode->i_generation = generation;
	...
	inode->i_mode = mode;
	inode->i_rdev = dev;
	inode->i_uid = uid;
	inode->i_gid = gid;
	mapping_set_large_folios(inode->i_mapping);
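/*
 * Commentary (added): inode_init_always() resets the VFS inode to its
 * freshly-allocated state, which is why the fields above are saved first
 * and written back afterwards. It can also fail (its internal allocations
 * can hit ENOMEM), so xfs_iget_recycle() below needs an error path that
 * puts the inode back on the reclaim list.
 */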
/* xfs_iget_recycle() */
static int
xfs_iget_recycle(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip) __releases(&ip->i_flags_lock)
{
	struct xfs_mount	*mp = ip->i_mount;
	...
	ip->i_flags |= XFS_IRECLAIM;

	spin_unlock(&ip->i_flags_lock);
	...
	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	error = xfs_reinit_inode(mp, inode);
	if (error) {
		/*
		 * Re-initializing the inode failed, and we are in deep
		 * trouble.  Try to re-add it to the reclaim list.
		 */
		...
		spin_lock(&ip->i_flags_lock);
		ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
		ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
		spin_unlock(&ip->i_flags_lock);
		...
		return error;
	}

	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	/*
	 * Clear the per-lifetime state in the inode as we are now effectively
	 * a new inode and need to return to the initial state before reuse
	 * occurs.
	 */
	ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
	ip->i_flags |= XFS_INEW;
	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_RECLAIM_TAG);
	inode->i_state = I_NEW;
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
/*
 * If we are allocating a new inode, then check what was returned is
 * actually a free, empty inode. If we are not allocating an inode,
 * then check we didn't find a free inode.
 *
 * Returns:
 *	0		if the inode free state matches the lookup context
 *	-ENOENT		if the inode is free and we are not allocating
 *	-EFSCORRUPTED	if there is any state mismatch at all
 */

/* xfs_iget_check_free_state() */
	if (flags & XFS_IGET_CREATE) {
		/* should be a free inode */
		if (VFS_I(ip)->i_mode != 0) {
			xfs_warn(ip->i_mount,
	...
				ip->i_ino, VFS_I(ip)->i_mode);
			return -EFSCORRUPTED;
		}

		if (ip->i_nblocks != 0) {
			xfs_warn(ip->i_mount,
	...
				ip->i_ino);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	/* should be an allocated inode */
	if (VFS_I(ip)->i_mode == 0)
		return -ENOENT;

	return 0;
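/*
 * Illustration (added; hypothetical caller): how the contract above is
 * consumed during a plain lookup. A free inode surfaces as -ENOENT ("no
 * such inode") while any free-state inconsistency fails the lookup hard.
 */
static int example_validate_lookup(struct xfs_inode *ip)
{
	int	error;

	error = xfs_iget_check_free_state(ip, 0);	/* no XFS_IGET_CREATE */
	if (error == -ENOENT)
		return error;	/* inode is free: treat as "not found" */
	if (error)
		return error;	/* -EFSCORRUPTED: propagate the failure */
	return 0;		/* allocated inode, safe to hand out */
}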
/* xfs_inodegc_queue_all() */
	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list))
			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
	}

/* xfs_iget_cache_hit() */
	struct xfs_mount	*mp = ip->i_mount;
	...
	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree node reallocation.
	 */
	...
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino)
		goto out_skip;
	...
	if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING))
		goto out_skip;

	if (ip->i_flags & XFS_NEED_INACTIVE) {
		/* Unlinked inodes cannot be re-grabbed. */
		if (VFS_I(ip)->i_nlink == 0) {
			error = -ENOENT;
			goto out_error;
		}
		...
	}
	...
	if ((flags & XFS_IGET_INCORE) &&
	    (ip->i_flags & XFS_IRECLAIMABLE))
		goto out_skip;

	if (ip->i_flags & XFS_IRECLAIMABLE) {
	...
	}
	...
	spin_unlock(&ip->i_flags_lock);
	...
out_skip:
	...
	error = -EAGAIN;
out_error:
	spin_unlock(&ip->i_flags_lock);
	...
	return -EAGAIN;
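/*
 * Commentary (added): every transient state above funnels into -EAGAIN
 * rather than a hard failure. xfs_iget(), excerpted below, responds by
 * sleeping briefly and retrying the whole lookup, so callers never see
 * inodes that are mid-construction, mid-reclaim, or mid-inactivation.
 */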
/* xfs_iget_cache_miss() */
	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags);
	...
	/*
	 * For version 4 (and older) superblocks, log recovery is dependent on
	 * the i_flushiter field being initialised from the current on-disk
	 * value and hence we must also read the inode off disk even when
	 * initializing new inodes.
	 */
	...
		VFS_I(ip)->i_generation = get_random_u32();
	...
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
	...
		error = xfs_inode_from_disk(ip,
				xfs_buf_offset(bp, ip->i_imap.im_boffset));
	...
		error = -EAGAIN;
	...
	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	...
	/*
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly.
	 */
	...
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	...
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		...
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	...
out_preload_end:
	spin_unlock(&pag->pag_ici_lock);

/* xfs_iget() */
	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return -EINVAL;
	...
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
	...
		if (flags & XFS_IGET_INCORE) {
			error = -ENODATA;
			goto out_error_or_again;
		}
	...
	/*
	 * If we have a real type for an on-disk inode, we can setup the inode
	 * here. If it's a new inode being created, xfs_init_new_inode will
	 * handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
		xfs_setup_existing_inode(ip);
	...
out_error_or_again:
	if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
		delay(1);
		goto again;
	}
/*
 * "Is this a cached inode that's also allocated?" If the inode is in an
 * intermediate state (new, reclaimable, or being reclaimed), -EAGAIN will be
 * returned; if the inode is not in the cache, -ENOENT will be returned. The
 * caller must deal with these scenarios appropriately.
 */

/* xfs_icache_inode_is_allocated() */
	*inuse = !!(VFS_I(ip)->i_mode);
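/*
 * Illustration (added; hypothetical caller): consuming the contract above.
 * A nonzero i_mode is what marks a cached inode as allocated.
 */
static int example_query_allocated(struct xfs_mount *mp, xfs_ino_t ino)
{
	bool	inuse;
	int	error;

	error = xfs_icache_inode_is_allocated(mp, NULL, ino, &inuse);
	if (error == -EAGAIN)
		return error;	/* transient state: caller retries later */
	if (error == -ENOENT)
		return error;	/* not cached: caller reads it from disk */
	if (error)
		return error;
	return inuse ? 1 : 0;
}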
/* xfs_reclaim_igrab() */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return false;
	}

	/* Don't reclaim a sick inode unless the caller asked for it. */
	if (ip->i_sick &&
	    (!icw || !(icw->icw_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) {
		spin_unlock(&ip->i_flags_lock);
		return false;
	}

	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return true;

/*
 * Inode reclaim is non-blocking, so the default action if progress cannot be
 * made is to "requeue" the inode for reclaim by unlocking it and clearing the
 * XFS_IRECLAIM flag.
 *
 * We do no IO here - if callers require inodes to be cleaned they must push
 * the AIL first to trigger writeback of dirty inodes. This enables writeback
 * to be done in the background in a non-blocking manner, and enables memory
 * reclaim to make progress without blocking.
 */
/* xfs_reclaim_inode() */
	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */
	...
	/*
	 * ... then the in-memory log tail movement caused by the abort can be
	 * incorrect ...
	 */
	if (xlog_is_shutdown(ip->i_mount->m_log)) {
	...
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	ip->i_sick = 0;
	ip->i_checked = 0;
	spin_unlock(&ip->i_flags_lock);

	ASSERT(!ip->i_itemp || ip->i_itemp->ili_item.li_buf == NULL);
	...
	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 */
	...
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ino)))
		ASSERT(0);
	...
	spin_unlock(&pag->pag_ici_lock);
	...
	ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);

/* xfs_reclaim_inodes() */
	while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		xfs_ail_push_all_sync(mp->m_ail);
		...
	}

/* xfs_reclaim_inodes_nr() */
	...
	xfs_ail_push_all(mp->m_ail);

/* xfs_reclaim_inodes_count() */
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
/* xfs_icwalk_match_id() */
	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
	    !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
		return false;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
	    !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
		return false;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
	    ip->i_projid != icw->icw_prid)
		return false;

	return true;

/*
 * A union-based inode filtering algorithm. Process the inode if any of the
 * criteria match.
 */

/* xfs_icwalk_match_id_union() */
	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
	    uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
		return true;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
	    gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
		return true;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
	    ip->i_projid == icw->icw_prid)
		return true;

	return false;
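/*
 * Commentary (added): xfs_icwalk_match_id() ANDs together every filter the
 * caller enabled, while the union variant ORs them. Union mode is what lets
 * one scan serve several nearly-full quotas at once; xfs_blockgc_free_dquots()
 * further down builds exactly such a walk, roughly:
 *
 *	struct xfs_icwalk	icw = { .icw_flags = XFS_ICWALK_FLAG_UNION };
 *
 *	icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
 *	icw.icw_flags |= XFS_ICWALK_FLAG_UID;
 */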
/* xfs_icwalk_match() */
	if (icw->icw_flags & XFS_ICWALK_FLAG_UNION)
		match = xfs_icwalk_match_id_union(ip, icw);
	else
		match = xfs_icwalk_match_id(ip, icw);
	if (!match)
		return false;

	/* skip the inode if the file size is too small */
	if ((icw->icw_flags & XFS_ICWALK_FLAG_MINFILESIZE) &&
	    XFS_ISIZE(ip) < icw->icw_min_file_size)
		return false;

	return true;

/* xfs_inode_free_eofblocks() */
	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
	...
	if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;
	...
	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	...
	/* inode could be preallocated or append-only */
	...
/* xfs_blockgc_set_iflag() */
	struct xfs_mount	*mp = ip->i_mount;
	...
	if (ip->i_flags & iflag)
		return;
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= iflag;
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_BLOCKGC_TAG);

	spin_unlock(&pag->pag_ici_lock);
	...

/* xfs_blockgc_clear_iflag() */
	struct xfs_mount	*mp = ip->i_mount;
	...
	spin_lock(&ip->i_flags_lock);
	ip->i_flags &= ~iflag;
	clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
	spin_unlock(&ip->i_flags_lock);
	...
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_BLOCKGC_TAG);

	spin_unlock(&pag->pag_ici_lock);
	...
/* xfs_prep_free_cowblocks() */
	/*
	 * If the mapping is dirty or under writeback we cannot touch the
	 * CoW fork. Leave it alone if we're in the midst of a directio.
	 */
	if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
	    atomic_read(&VFS_I(ip)->i_dio_count))
		return false;
	...
/* xfs_inode_free_cowblocks() */
	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
	...
	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	...
		if (wait)
			return -EAGAIN;
	...

/* Disable post-EOF and CoW block auto-reclamation. */
/* xfs_blockgc_stop() */
	...
	cancel_delayed_work_sync(&pag->pag_blockgc_work);

/* Enable post-EOF and CoW block auto-reclamation. */
/* xfs_blockgc_start() */
	...

/* xfs_blockgc_igrab() */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS)
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (xfs_is_shutdown(ip->i_mount))
		return false;
	...
out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return false;

/* xfs_blockgc_worker() */
	struct xfs_mount	*mp = pag->pag_mount;
	...
	error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL);
	if (error)
		xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
				pag->pag_agno, error);
	...

/* xfs_blockgc_flush_all() */
	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		mod_delayed_work(pag->pag_mount->m_blockgc_wq,
				&pag->pag_blockgc_work, 0);

	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		flush_delayed_work(&pag->pag_blockgc_work);

/* xfs_blockgc_free_dquots() */
	if (udqp && xfs_dquot_lowsp(udqp)) {
		icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
		icw.icw_flags |= XFS_ICWALK_FLAG_UID;
		do_work = true;
	}

	if (gdqp && xfs_dquot_lowsp(gdqp)) {
		icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
		icw.icw_flags |= XFS_ICWALK_FLAG_GID;
		do_work = true;
	}

	if (pdqp && xfs_dquot_lowsp(pdqp)) {
		icw.icw_prid = pdqp->q_id;
		icw.icw_flags |= XFS_ICWALK_FLAG_PRID;
		do_work = true;
	}

/* xfs_blockgc_free_quota() */
	return xfs_blockgc_free_dquots(ip->i_mount,
			xfs_inode_dquot(ip, XFS_DQTYPE_USER),
			xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
			xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags);
/*
 * Process an inode. Each processing function must handle any state changes
 * made by the icwalk igrab function. Return -EAGAIN to skip an inode.
 */

/*
 * For a given per-AG structure @pag and a goal, grab qualifying inodes and
 * process them in a batch.
 */

/* xfs_icwalk_ag() */
	struct xfs_mount	*mp = pag->pag_mount;
	...
	if (goal == XFS_ICWALK_RECLAIM)
		first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
	...
		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
	...
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				batch[i] = NULL;

			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = true;
	...
			error = xfs_icwalk_process_inode(goal, batch[i], pag, icw);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
	...
		/* bail out if the filesystem is corrupted. */
		if (error == -EFSCORRUPTED)
			break;
	...
		if (icw && (icw->icw_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) {
			icw->icw_scan_limit -= XFS_LOOKUP_BATCH;
			if (icw->icw_scan_limit <= 0)
				break;
		}
	...
	if (goal == XFS_ICWALK_RECLAIM)
		WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
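/*
 * Commentary (added): this is the standard batched radix-tree walk: grab up
 * to XFS_LOOKUP_BATCH tagged inodes per RCU pass, drop any that belong to
 * another AG (a lookup can overflow into the next AG's inode range),
 * advance the cursor past the last inode seen, and persist the cursor for
 * reclaim scans so the next scan resumes where this one stopped.
 */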
/* xfs_icwalk() */
	...
		if (error == -EFSCORRUPTED) {
			xfs_perag_put(pag);
			break;
		}
	...

/* xfs_check_delalloc() */
		xfs_warn(ip->i_mount,
	...
			ip->i_ino,
	...

/* xfs_inodegc_set_reclaimable() */
	struct xfs_mount	*mp = ip->i_mount;
	...
	if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) {
		xfs_check_delalloc(ip, XFS_DATA_FORK);
		xfs_check_delalloc(ip, XFS_COW_FORK);
		ASSERT(0);
	}

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	...
	ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING);
	ip->i_flags |= XFS_IRECLAIMABLE;
	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_RECLAIM_TAG);
	...
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
/* xfs_inodegc_worker() */
	struct llist_node	*node = llist_del_all(&gc->list);
	...
	WRITE_ONCE(gc->items, 0);
	...
	ip = llist_entry(node, struct xfs_inode, i_gclist);
	trace_xfs_inodegc_worker(ip->i_mount, READ_ONCE(gc->shrinker_hits));

	WRITE_ONCE(gc->shrinker_hits, 0);
	...

/* xfs_inodegc_flush() */
	...
	flush_workqueue(mp->m_inodegc_wq);

/* xfs_inodegc_stop() */
	...
	drain_workqueue(mp->m_inodegc_wq);
/* xfs_inodegc_want_queue_rt_file() */
	struct xfs_mount	*mp = ip->i_mount;
	...
	if (__percpu_counter_compare(&mp->m_frextents,
				mp->m_low_rtexts[XFS_LOWSP_5_PCNT],
				XFS_FDBLOCKS_BATCH) < 0)
		return true;
	...

/*
 * Schedule the inactivation worker when:
 *
 *  - We've accumulated more than one inode cluster buffer's worth of inodes.
 *  - There is less than 5% free space left.
 *  - Any of the quotas for this inode are near an enforcement limit.
 */

/* xfs_inodegc_want_queue_work() */
	struct xfs_mount	*mp = ip->i_mount;

	if (items > mp->m_ino_geo.inodes_per_cluster)
		return true;

	if (__percpu_counter_compare(&mp->m_fdblocks,
				mp->m_low_space[XFS_LOWSP_5_PCNT],
				XFS_FDBLOCKS_BATCH) < 0)
		return true;
	...

/*
 * Make the frontend wait for inactivation work when:
 *
 *  - Memory shrinkers queued the inactivation worker and it hasn't finished.
 *  - The queue depth exceeds the maximum allowable percpu backlog.
 */

/* xfs_inodegc_want_flush_work() */
	if (current->journal_info)
		return false;
	...
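/*
 * Commentary (added): current->journal_info is non-NULL while a thread has
 * a transaction open, so the test above keeps transactional callers from
 * ever waiting on the inactivation workers; those workers run transactions
 * of their own, and waiting on them from inside a transaction could
 * deadlock.
 */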
/* xfs_inodegc_queue() */
	struct xfs_mount	*mp = ip->i_mount;
	...
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= XFS_NEED_INACTIVE;
	spin_unlock(&ip->i_flags_lock);

	gc = get_cpu_ptr(mp->m_inodegc);
	llist_add(&ip->i_gclist, &gc->list);
	items = READ_ONCE(gc->items);
	WRITE_ONCE(gc->items, items + 1);
	shrinker_hits = READ_ONCE(gc->shrinker_hits);
	...
	mod_delayed_work(mp->m_inodegc_wq, &gc->work, queue_delay);
	...
	if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
		...
		flush_delayed_work(&gc->work);
	}
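/*
 * Illustration (added; all names hypothetical): the lockless per-cpu
 * producer pattern xfs_inodegc_queue() uses, reduced to its core. Producers
 * push onto this CPU's llist and kick a delayed worker; the worker later
 * drains the whole list at once with llist_del_all().
 */
struct example_gc {
	struct llist_head	list;
	struct delayed_work	work;
	unsigned int		items;	/* only an estimate; no locking */
};

static void example_queue(struct example_gc __percpu *pcpu_gc,
			  struct workqueue_struct *wq,
			  struct llist_node *node)
{
	struct example_gc	*gc;

	gc = get_cpu_ptr(pcpu_gc);		/* pin to this CPU's queue */
	llist_add(node, &gc->list);		/* lockless push */
	WRITE_ONCE(gc->items, READ_ONCE(gc->items) + 1);
	mod_delayed_work(wq, &gc->work, 0);	/* make sure the worker runs */
	put_cpu_ptr(pcpu_gc);
}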
/* xfs_inodegc_cpu_dead() */
	dead_gc = per_cpu_ptr(mp->m_inodegc, dead_cpu);
	cancel_delayed_work_sync(&dead_gc->work);
	...
	if (llist_empty(&dead_gc->list))
		return;

	first = dead_gc->list.first;
	last = first;
	while (last->next) {
		last = last->next;
	}
	dead_gc->list.first = NULL;
	dead_gc->items = 0;
	...
	gc = get_cpu_ptr(mp->m_inodegc);
	llist_add_batch(first, last, &gc->list);
	count += READ_ONCE(gc->items);
	WRITE_ONCE(gc->items, count);
	...
	mod_delayed_work(mp->m_inodegc_wq, &gc->work, 0);
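/*
 * Commentary (added): when a CPU goes offline its pending inodes are spliced
 * onto the current CPU's queue with llist_add_batch() and the worker is
 * kicked immediately, so no queued inode is stranded on a dead processor.
 */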
/* xfs_inode_mark_reclaimable() */
	struct xfs_mount	*mp = ip->i_mount;
	...

/* xfs_inodegc_shrinker_count() */
	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list))
			return XFS_INODEGC_SHRINKER_COUNT;
	}

	return 0;

/* xfs_inodegc_shrinker_scan() */
	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list)) {
			unsigned int	h = READ_ONCE(gc->shrinker_hits);

			WRITE_ONCE(gc->shrinker_hits, h + 1);
			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
			...
		}
	}
/* xfs_inodegc_register_shrinker() */
	struct shrinker		*shrink = &mp->m_inodegc_shrinker;

	shrink->count_objects = xfs_inodegc_shrinker_count;
	shrink->scan_objects = xfs_inodegc_shrinker_scan;
	shrink->seeks = 0;
	shrink->flags = SHRINKER_NONSLAB;
	shrink->batch = XFS_INODEGC_SHRINKER_BATCH;

	return register_shrinker(shrink, "xfs-inodegc:%s", mp->m_super->s_id);
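/*
 * Commentary (added): the shrinker is a memory-pressure backstop rather than
 * a direct reclaimer: count_objects reports work whenever any per-cpu queue
 * is non-empty, and scan_objects bumps shrinker_hits and kicks the per-cpu
 * workers so that queued inactivations run (and release memory) sooner
 * under pressure.
 */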