/* Excerpt from fs/xfs/xfs_icache.c: lines matching "deep" and "touch", shown with context; elided code is marked "..." */
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

/* xfs_inode_alloc(): allocate an incore inode, resetting recycled state */
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_cache_free(xfs_inode_zone, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode! */
	VFS_I(ip)->i_mode = 0;

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(ip->i_ino == 0);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	ip->i_cowfp = NULL;
	memset(&ip->i_df, 0, sizeof(ip->i_df));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(ip->i_d));
	ip->i_sick = 0;
	ip->i_checked = 0;
	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
	INIT_LIST_HEAD(&ip->i_ioend_list);
	spin_lock_init(&ip->i_ioend_lock);
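
/*
 * Illustrative sketch, not from xfs_icache.c: objects come back from a
 * recycling allocator with whatever state their last life left behind, so
 * one-time constructs (locks, work items, list heads) are initialised once
 * per object while per-use fields are cleared on every allocation, as above.
 * A minimal user-space analogue with hypothetical names:
 */
#include <pthread.h>

struct node {
	pthread_mutex_t	lock;	/* one-time init, survives recycling */
	unsigned long	ino;	/* per-use state, reset on every alloc */
	unsigned int	flags;	/* per-use state */
};

static struct node pool[8];

/* one-time constructor pass, analogous to a kmem_cache init-once callback */
static void pool_init(void)
{
	for (int i = 0; i < 8; i++)
		pthread_mutex_init(&pool[i].lock, NULL);
}

/* allocation resets only the per-use fields; the lock is reused as-is */
static struct node *node_alloc(int slot, unsigned long ino)
{
	struct node *n = &pool[slot];

	n->ino = ino;
	n->flags = 0;
	return n;
}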

/* xfs_inode_free_callback(): RCU callback that tears down the inode forks */
	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(&ip->i_df);
		break;
	}

	if (ip->i_afp) {
		xfs_idestroy_fork(ip->i_afp);
		kmem_cache_free(xfs_ifork_zone, ip->i_afp);
	}
	if (ip->i_cowfp) {
		xfs_idestroy_fork(ip->i_cowfp);
		kmem_cache_free(xfs_ifork_zone, ip->i_cowfp);
	}
	if (ip->i_itemp) {
		ASSERT(!test_bit(XFS_LI_IN_AIL,
				 &ip->i_itemp->ili_item.li_flags));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

/* __xfs_inode_free() */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
	XFS_STATS_DEC(ip->i_mount, vn_active);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);

/* xfs_inode_free() */
	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);
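
/*
 * Illustrative sketch, not kernel code: the free path above zeroes i_ino
 * under i_flags_lock so that a concurrent lookup, which may still find the
 * stale object via RCU, revalidates the inode number under the same lock and
 * backs off. A user-space analogue with hypothetical names:
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

#define OBJ_RECLAIM	0x1

struct cached_obj {
	pthread_mutex_t	flags_lock;
	unsigned int	flags;		/* OBJ_RECLAIM marks a dying object */
	uint64_t	ino;		/* 0 means invalid/free */
};

/* free path: mark the object dead before its deferred free */
static void obj_mark_free(struct cached_obj *o)
{
	pthread_mutex_lock(&o->flags_lock);
	o->flags = OBJ_RECLAIM;
	o->ino = 0;
	pthread_mutex_unlock(&o->flags_lock);
}

/* lookup path: revalidate the key under the same lock */
static bool obj_grab(struct cached_obj *o, uint64_t ino)
{
	bool ok;

	pthread_mutex_lock(&o->flags_lock);
	ok = o->ino == ino && !(o->flags & OBJ_RECLAIM);
	pthread_mutex_unlock(&o->flags_lock);
	return ok;			/* false: caller retries the lookup */
}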

/* xfs_reclaim_work_queue(): schedule background inode reclaim if needed */
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}

/* xfs_perag_set_reclaim_tag() */
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);
	if (pag->pag_ici_reclaimable++)
		return;

	/* propagate the reclaim tag up into the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
			   XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);

	/* schedule periodic background inode reclaim */
	xfs_reclaim_work_queue(mp);

	trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);

/* xfs_perag_clear_reclaim_tag() */
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);
	if (--pag->pag_ici_reclaimable)
		return;

	/* clear the reclaim tag from the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
			     XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);
	trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
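
/*
 * Sketch of the counter protocol above (hypothetical user-space model): only
 * the 0 -> 1 transition of the per-AG count propagates the tag upward, and
 * only the 1 -> 0 transition clears it, so the mount-wide index is touched
 * once per state change rather than once per inode.
 */
#include <stdbool.h>

struct ag_state {
	unsigned long	reclaimable;	/* reclaimable inodes in this AG */
	bool		mount_tagged;	/* mirrors the mount-tree tag bit */
};

static void ag_reclaim_inc(struct ag_state *ag)
{
	if (ag->reclaimable++)		/* already tagged; nothing to do */
		return;
	ag->mount_tagged = true;	/* first inode: propagate tag up */
}

static void ag_reclaim_dec(struct ag_state *ag)
{
	if (--ag->reclaimable)		/* AG still has reclaimable inodes */
		return;
	ag->mount_tagged = false;	/* last one gone: clear the tag */
}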

/* xfs_inode_set_reclaim_tag() */
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);
	xfs_perag_set_reclaim_tag(pag);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);

	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);

/* xfs_inode_clear_reclaim_tag() */
	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(pag->pag_mount, ino),
			     XFS_ICI_RECLAIM_TAG);
	xfs_perag_clear_reclaim_tag(pag);

/* xfs_inew_wait(): wait for the XFS_INEW bit to clear */
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);
	...

/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure. This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally. Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
/* xfs_reinit_inode() */
	int		error;
	uint32_t	nlink = inode->i_nlink;
	uint32_t	generation = inode->i_generation;
	uint64_t	version = inode_peek_iversion(inode);
	umode_t		mode = inode->i_mode;
	dev_t		dev = inode->i_rdev;
	kuid_t		uid = inode->i_uid;
	kgid_t		gid = inode->i_gid;

	error = inode_init_always(mp->m_super, inode);

	set_nlink(inode, nlink);
	inode->i_generation = generation;
	inode_set_iversion_queried(inode, version);
	inode->i_mode = mode;
	inode->i_rdev = dev;
	inode->i_uid = uid;
	inode->i_gid = gid;
	return error;
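
/*
 * Sketch of the save/rewrite pattern above (hypothetical user-space model):
 * a generic re-initialiser wipes the whole object, so fields that must
 * survive recycling are snapshotted first and written back afterwards, even
 * if the re-initialiser fails.
 */
#include <stdint.h>
#include <string.h>

struct vnode_like {
	uint32_t	nlink;
	uint32_t	generation;
	uint32_t	mode;
	uint64_t	rdev;
};

/* stands in for inode_init_always(): resets everything */
static int generic_reinit(struct vnode_like *v)
{
	memset(v, 0, sizeof(*v));
	return 0;
}

static int reinit_preserving(struct vnode_like *v)
{
	uint32_t nlink = v->nlink;
	uint32_t generation = v->generation;
	uint32_t mode = v->mode;
	uint64_t rdev = v->rdev;
	int error = generic_reinit(v);

	/* rewrite the preserved fields whether or not reinit failed */
	v->nlink = nlink;
	v->generation = generation;
	v->mode = mode;
	v->rdev = rdev;
	return error;
}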

/*
 * xfs_iget_check_free_state(): verify the inode free state matches the
 * lookup context.
 *
 * Returns:
 *	0		if the inode free state matches the lookup context
 *	-ENOENT		if the inode is free and we are not allocating
 *	-EFSCORRUPTED	if there is any state mismatch at all
 */
	if (flags & XFS_IGET_CREATE) {
		/* should be a free inode */
		if (VFS_I(ip)->i_mode != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
				ip->i_ino, VFS_I(ip)->i_mode);
			return -EFSCORRUPTED;
		}

		if (ip->i_d.di_nblocks != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx has blocks allocated!",
				ip->i_ino);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	/* should be an allocated inode */
	if (VFS_I(ip)->i_mode == 0)
		return -ENOENT;
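
/*
 * Sketch of caller-side handling for the return codes above (hypothetical,
 * user-space): -ENOENT is the normal "inode is free" answer for a plain
 * lookup, while -EFSCORRUPTED (EUCLEAN on Linux) means the on-disk state
 * contradicts the lookup context and must be surfaced as corruption.
 */
#include <errno.h>
#include <stdio.h>

#define EFSCORRUPTED	EUCLEAN		/* mirrors the kernel's definition */

static int handle_free_state(int err)
{
	switch (err) {
	case 0:
		return 0;		/* state matches; keep going */
	case -ENOENT:
		return -ENOENT;		/* free inode; not corruption */
	case -EFSCORRUPTED:
		fprintf(stderr, "metadata corruption detected\n");
		return -EFSCORRUPTED;
	default:
		return err;
	}
}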

/* xfs_iget_cache_hit() */
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		error = -EAGAIN;
		goto out_error;
	}

	/* racing with inode instantiation or recycling: back off and retry */
	if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM)) {
		error = -EAGAIN;
		goto out_error;
	}
	...
	/* XFS_IRECLAIMABLE means the VFS inode was torn down: recycle it */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		if (flags & XFS_IGET_INCORE) {
			error = -EAGAIN;
			goto out_error;
		}
		...
		ip->i_flags |= XFS_IRECLAIM;
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		ASSERT(!rwsem_is_locked(&inode->i_rwsem));
		error = xfs_reinit_inode(mp, inode);
		if (error) {
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			rcu_read_lock();
			spin_lock(&ip->i_flags_lock);
			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
			...
			wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
			goto out_error;
		}

		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);

		/*
		 * Clear the per-lifetime state in the inode as we are now
		 * effectively a new inode and need to return to the initial
		 * state before reuse occurs.
		 */
		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
		ip->i_flags |= XFS_INEW;
		xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
		inode->i_state = I_NEW;
		ip->i_sick = 0;
		ip->i_checked = 0;

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			error = -EAGAIN;
			goto out_error;
		}
		spin_unlock(&ip->i_flags_lock);
		...
	}
	...
out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();

/* xfs_iget_cache_miss() */
	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags);
	if (error)
		goto out_destroy;

	/*
	 * For version 5 superblocks, a newly created inode can be initialised
	 * with a random generation number without reading the inode buffer.
	 * For version 4 (and older) superblocks, log recovery is dependent on
	 * the di_flushiter field being initialised from the current on-disk
	 * value, so the inode must be read off disk even when initializing
	 * new inodes.
	 */
	if (xfs_sb_version_has_v3inode(&mp->m_sb) &&
	    (flags & XFS_IGET_CREATE) && !(mp->m_flags & XFS_MOUNT_IKEEP)) {
		VFS_I(ip)->i_generation = prandom_u32();
	} else {
		...
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0);
		...
	}
	...
	if (radix_tree_preload(GFP_NOFS)) {
		error = -EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	...
	/*
	 * These fields must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup can find it.
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that guarantees those stores are visible.
	 */
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	...
	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	...
out_preload_end:
	spin_unlock(&pag->pag_ici_lock);

/* xfs_iget(): look up an inode in the cache, instantiating it on a miss */
	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return -EINVAL;
	...
again:
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
	...
		if (flags & XFS_IGET_INCORE) {
			error = -ENODATA;
			goto out_error_or_again;
		}
	...
	/*
	 * If we have a real type for an on-disk inode, we can setup the inode
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
		xfs_setup_existing_inode(ip);
	...
out_error_or_again:
	if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
		delay(1);
		goto again;
	}
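
/*
 * Sketch of the lookup retry protocol above (hypothetical user-space model):
 * transient states surface as -EAGAIN, the caller sleeps briefly and starts
 * over, and a lost insert race (-EEXIST) is folded into the same retry.
 */
#include <errno.h>
#include <stddef.h>
#include <unistd.h>

#define NSLOTS 64

struct cache { unsigned long keys[NSLOTS]; };	/* 0 marks an empty slot */

static int cache_lookup(struct cache *c, unsigned long key)
{
	for (size_t i = 0; i < NSLOTS; i++)
		if (c->keys[i] == key)
			return 1;
	return 0;
}

static int cache_insert(struct cache *c, unsigned long key)
{
	if (cache_lookup(c, key))
		return -EEXIST;		/* somebody beat us to it */
	for (size_t i = 0; i < NSLOTS; i++) {
		if (!c->keys[i]) {
			c->keys[i] = key;
			return 0;
		}
	}
	return -ENOMEM;
}

static int cache_get(struct cache *c, unsigned long key)
{
	int error;
again:
	if (cache_lookup(c, key))
		return 0;		/* cache hit */
	error = cache_insert(c, key);	/* cache miss: instantiate */
	if (error == -EEXIST)
		error = -EAGAIN;	/* lost a race; just try again */
	if (error == -EAGAIN) {
		usleep(1000);		/* analogous to delay(1) above */
		goto again;
	}
	return error;
}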

/*
 * xfs_icache_inode_is_allocated(): if the inode is in memory but in an
 * intermediate state (new, reclaimable, or being reclaimed), -EAGAIN will be
 * returned; if the inode is not in the cache, -ENOENT will be returned. The
 * caller must handle both cases.
 */
	...
	*inuse = !!(VFS_I(ip)->i_mode);

/* xfs_inode_walk_ag_grab(): grab an inode for the walk if it is valid */
	/* Check for stale RCU freed inode */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;
	...
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return false;
	...
out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return false;

/*
 * For a given per-AG structure @pag, grab, @execute, and rele all incore
 * inodes matching @tag.
 */
/* xfs_inode_walk_ag() */
	struct xfs_mount	*mp = pag->pag_mount;
	...
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
				(void **)batch, first_index,
				XFS_LOOKUP_BATCH);
	...
			/*
			 * Update the index for the next lookup, but only if
			 * the inode really belongs to this AG; catch the
			 * wrap past the last inode of the AG.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = true;
	...
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
	...
		/* bail out if the filesystem is corrupted. */
		if (error == -EFSCORRUPTED)
			break;

/* Fetch the next (possibly tagged) per-AG structure. */
...
/* xfs_inode_walk(): walk all AGs, remembering the last error seen */
		ag = pag->pag_agno + 1;
		...
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED)
				break;
		}
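
/*
 * Runnable sketch of the batched cursor walk above (toy stand-in for the
 * radix tree): fetch a batch of keys at or after the cursor, visit them,
 * advance the cursor to key + 1, and stop if that increment wraps, which is
 * exactly the first_index overflow check in the kernel code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define BATCH 4

static const uint32_t tree[] = { 3, 9, 10, 42, 0xfffffffeu, 0xffffffffu };

/* stands in for radix_tree_gang_lookup(): copy out keys >= cursor */
static size_t gang_lookup(uint32_t cursor, uint32_t *keys, size_t max)
{
	size_t nr = 0;

	for (size_t i = 0; i < sizeof(tree) / sizeof(tree[0]); i++)
		if (tree[i] >= cursor && nr < max)
			keys[nr++] = tree[i];
	return nr;
}

int main(void)
{
	uint32_t cursor = 0, keys[BATCH];
	bool done = false;
	size_t nr;

	do {
		nr = gang_lookup(cursor, keys, BATCH);
		for (size_t i = 0; i < nr; i++) {
			printf("visit %lu\n", (unsigned long)keys[i]);
			cursor = keys[i] + 1;
			if (cursor < keys[i])	/* wrapped: saw the last key */
				done = true;
		}
	} while (nr && !done);
	return 0;
}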

/*
 * Background scanning to trim post-EOF preallocated space. This is queued
 * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
 */
/* xfs_queue_eofblocks() */
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
		queue_delayed_work(mp->m_eofblocks_workqueue,
				   &mp->m_eofblocks_work,
				   msecs_to_jiffies(xfs_eofb_secs * 1000));

/* xfs_eofblocks_worker() */
	if (!sb_start_write_trylock(mp->m_super))
		return;
	xfs_icache_free_eofblocks(mp, NULL);
	sb_end_write(mp->m_super);

	xfs_queue_eofblocks(mp);

/*
 * Background scanning to trim preallocated CoW space, on the same schedule.
 * (We'll just piggyback on the post-EOF prealloc space workqueue.)
 */
/* xfs_queue_cowblocks() */
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_COWBLOCKS_TAG))
		queue_delayed_work(mp->m_eofblocks_workqueue,
				   &mp->m_cowblocks_work,
				   msecs_to_jiffies(xfs_cowb_secs * 1000));

/* xfs_cowblocks_worker() */
	if (!sb_start_write_trylock(mp->m_super))
		return;
	xfs_icache_free_cowblocks(mp, NULL);
	sb_end_write(mp->m_super);

	xfs_queue_cowblocks(mp);

/* xfs_reclaim_inode_grab(): atomically claim an inode for reclaim */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return false;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return true;

/*
 * Inode reclaim is non-blocking, so the default action if progress cannot be
 * made is to "requeue" the inode for reclaim by unlocking it and clearing the
 * XFS_IRECLAIM flag.
 *
 * We do no IO here - if callers require inodes to be cleaned they must push the
 * AIL first to trigger writeback of dirty inodes. This enables writeback to be
 * done in the background in a non-blocking manner, and enables memory reclaim
 * to make progress without blocking.
 */
/* xfs_reclaim_inode() */
	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */
	...
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		...
	}
	...
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);
	...
	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ino)))
		ASSERT(0);
	spin_unlock(&pag->pag_ici_lock);
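
/*
 * Sketch of the "snapshot the key first" detail above (hypothetical
 * user-space model): the object's key is zeroed while it is being torn down
 * so lookups skip it, which means the index removal must use the key saved
 * beforehand, never the now-zeroed field.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct obj { uint64_t key; };		/* key == 0 marks a dead object */

static struct obj *index_slot[16];	/* toy index, keyed by key % 16 */

static bool index_delete(uint64_t key)
{
	if (!key || !index_slot[key % 16])
		return false;
	index_slot[key % 16] = NULL;
	return true;
}

static void reclaim(struct obj *o)
{
	uint64_t key = o->key;		/* snapshot before invalidating */
	bool deleted;

	o->key = 0;			/* lookups now treat it as dead */
	deleted = index_delete(key);	/* must use the snapshot */
	assert(deleted);
	(void)deleted;
}

int main(void)
{
	struct obj o = { .key = 42 };

	index_slot[o.key % 16] = &o;
	reclaim(&o);
	return 0;
}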

/*
 * Walk the AGs and reclaim the inodes in them. Returns non-zero if any AGs
 * or inodes were skipped in the reclaim pass, so callers that want all
 * reclaimable inodes gone can sanely loop.
 */
/* xfs_reclaim_inodes_ag() */
		ag = pag->pag_agno + 1;

		first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
		...
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			...
				/* same cursor-update and wrap logic as the
				 * inode walk above */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			...
			*nr_to_scan -= XFS_LOOKUP_BATCH;
			...
		WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);

/* xfs_reclaim_inodes(): push the AIL and reclaim until nothing is tagged */
	while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		xfs_ail_push_all_sync(mp->m_ail);
		xfs_reclaim_inodes_ag(mp, &nr_to_scan);
	}

/* xfs_reclaim_inodes_nr(): kick background reclaim and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

/* xfs_reclaim_inodes_count(): sum pag_ici_reclaimable over tagged AGs */
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;

/* xfs_inode_match_id(): all requested ids must match (intersection) */
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return false;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return false;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    ip->i_d.di_projid != eofb->eof_prid)
		return false;

	return true;

/*
 * A union-based inode filtering algorithm. Process the inode if any of the
 * criteria match. This is for global/internal scans only.
 */
/* xfs_inode_match_id_union() */
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return true;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return true;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    ip->i_d.di_projid == eofb->eof_prid)
		return true;

	return false;
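
/*
 * Sketch contrasting the two filters (hypothetical user-space model): the
 * default filter requires every requested id to match (intersection), while
 * the union filter fires as soon as any one of them matches.
 */
#include <stdbool.h>
#include <stdint.h>

#define F_UID	0x1
#define F_GID	0x2

struct filter {
	unsigned int	flags;		/* which criteria are requested */
	uint32_t	uid, gid;
};

static bool match_all(const struct filter *f, uint32_t uid, uint32_t gid)
{
	if ((f->flags & F_UID) && uid != f->uid)
		return false;
	if ((f->flags & F_GID) && gid != f->gid)
		return false;
	return true;			/* no requested criterion failed */
}

static bool match_any(const struct filter *f, uint32_t uid, uint32_t gid)
{
	if ((f->flags & F_UID) && uid == f->uid)
		return true;
	if ((f->flags & F_GID) && gid == f->gid)
		return true;
	return false;			/* no requested criterion fired */
}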

/* xfs_inode_matches_eofb() */
	if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
		match = xfs_inode_match_id_union(ip, eofb);
	else
		match = xfs_inode_match_id(ip, eofb);
	...
	/* skip the inode if the file size is too small */
	if ((eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE) &&
	    XFS_ISIZE(ip) < eofb->eof_min_file_size)
		return false;

/* xfs_inode_free_eofblocks() */
	wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC);

	if (!xfs_can_free_eofblocks(ip, false)) {
		/* inode could be preallocated or append-only */
		...
		return 0;
	}

	/* a dirty mapping can block the trim; skip it unless we are waiting */
	if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;
	...
	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
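
/*
 * Sketch of the trylock-or-requeue pattern above (user-space analogue with
 * pthreads): the scanner never blocks on a contended lock; it skips the
 * object, or reports -EAGAIN so a waiting caller knows to run another pass.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

static int scan_one(pthread_rwlock_t *iolock, bool wait)
{
	if (pthread_rwlock_trywrlock(iolock) != 0)	/* EBUSY: contended */
		return wait ? -EAGAIN : 0;	/* requeue vs. silent skip */

	/* ... trim the object's post-EOF blocks here ... */

	pthread_rwlock_unlock(iolock);
	return 0;
}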

/* __xfs_inode_free_quota_eofblocks(): scan when an inode's quotas run low */
	if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
		...
			eofb.eof_uid = VFS_I(ip)->i_uid;
			eofb.eof_flags |= XFS_EOF_FLAGS_UID;
		...
	}

	if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
		...
			eofb.eof_gid = VFS_I(ip)->i_gid;
			eofb.eof_flags |= XFS_EOF_FLAGS_GID;
		...
	}
	...
		execute(ip->i_mount, &eofb);
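
/*
 * Sketch (hypothetical): building a scan filter from the owner ids of the
 * inode that hit a low-quota condition, then handing it to a scan callback,
 * mirroring how the quota path above fills in its eofblocks filter.
 */
#include <stdbool.h>
#include <stdint.h>

struct scan_filter { unsigned int flags; uint32_t uid, gid; };
#define SCAN_UID 0x1
#define SCAN_GID 0x2

typedef void (*scan_fn)(const struct scan_filter *f);

static void scan_low_quota_owner(uint32_t uid, bool uid_low,
				 uint32_t gid, bool gid_low, scan_fn execute)
{
	struct scan_filter f = { 0 };
	bool do_work = false;

	if (uid_low) {			/* user quota near its limit */
		f.uid = uid;
		f.flags |= SCAN_UID;
		do_work = true;
	}
	if (gid_low) {			/* group quota near its limit */
		f.gid = gid;
		f.flags |= SCAN_GID;
		do_work = true;
	}
	if (do_work)
		execute(&f);		/* scan only this owner's inodes */
}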

/* __xfs_inode_set_blocks_tag() */
	struct xfs_mount *mp = ip->i_mount;
	...
	/* avoid the AG lock and radix tree lookups if the tag is already set */
	if (ip->i_flags & xfs_iflag_for_tag(tag))
		return;
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= xfs_iflag_for_tag(tag);
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
	if (!tagged) {
		/* propagate the tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				   tag);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* kick off background trimming */
		execute(ip->i_mount);

		set_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);

/* __xfs_inode_clear_blocks_tag() */
	struct xfs_mount *mp = ip->i_mount;
	...
	spin_lock(&ip->i_flags_lock);
	ip->i_flags &= ~xfs_iflag_for_tag(tag);
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
	if (!radix_tree_tagged(&pag->pag_ici_root, tag)) {
		/* clear the tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				     tag);
		spin_unlock(&ip->i_mount->m_perag_lock);
		clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);

/* xfs_prep_free_cowblocks() */
	/*
	 * If the mapping is dirty or under writeback we cannot touch the
	 * CoW fork. Leave it alone if we're in the midst of a directio.
	 */
	if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
	    atomic_read(&VFS_I(ip)->i_dio_count))
		return false;

/* Disable post-EOF and CoW block auto-reclamation. */
/* xfs_stop_block_reaping() */
	cancel_delayed_work_sync(&mp->m_eofblocks_work);
	cancel_delayed_work_sync(&mp->m_cowblocks_work);

/* Enable post-EOF and CoW block auto-reclamation. */
/* xfs_start_block_reaping() */
	xfs_queue_eofblocks(mp);
	xfs_queue_cowblocks(mp);